repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
dr-leo/pandaSDMX | pandasdmx/util.py | 1 | 9243 | import collections
import logging
import typing
from enum import Enum
from typing import TYPE_CHECKING, Any, List, Type, TypeVar, Union, no_type_check
import pydantic
from pydantic import DictError, Extra, ValidationError, validator # noqa: F401
from pydantic.class_validators import make_generic_validator
# Type variables for the generic DictLike container defined below.
KT = TypeVar("KT")
VT = TypeVar("VT")

try:
    from typing import OrderedDict
except ImportError:
    # Python < 3.7.2 compatibility; see
    # https://github.com/python/cpython/commit/68b56d0
    from typing import _alias  # type: ignore

    OrderedDict = _alias(collections.OrderedDict, (KT, VT))

# Module-level logger used by DictLike.compare().
log = logging.getLogger(__name__)
class Resource(str, Enum):
    """Enumeration of SDMX REST API endpoints.

    ====================== ================================================
    :class:`Enum` member   :mod:`pandasdmx.model` class
    ====================== ================================================
    ``categoryscheme``     :class:`.CategoryScheme`
    ``codelist``           :class:`.Codelist`
    ``conceptscheme``      :class:`.ConceptScheme`
    ``data``               :class:`.DataSet`
    ``dataflow``           :class:`.DataflowDefinition`
    ``datastructure``      :class:`.DataStructureDefinition`
    ``provisionagreement`` :class:`.ProvisionAgreement`
    ====================== ================================================
    """

    # Endpoints currently supported. Other SDMX REST endpoints
    # (agencyscheme, attachementconstraint, categorisation,
    # contentconstraint, dataconsumerscheme, dataproviderscheme,
    # hierarchicalcodelist, metadata, metadataflow, metadatastructure,
    # organisationscheme, organisationunitscheme, process,
    # reportingtaxonomy, schema, structure, structureset) are not yet
    # implemented.
    categoryscheme = "categoryscheme"
    codelist = "codelist"
    conceptscheme = "conceptscheme"
    data = "data"
    dataflow = "dataflow"
    datastructure = "datastructure"
    provisionagreement = "provisionagreement"

    @classmethod
    def from_obj(cls, obj):
        """Return an enumeration value based on the class of *obj*."""
        # DataStructureDefinition is the only model class whose name does
        # not lower-case directly to its endpoint name.
        clsname = type(obj).__name__
        if clsname == "DataStructureDefinition":
            clsname = "datastructure"
        return cls[clsname.lower()]

    @classmethod
    def describe(cls):
        """Return a terse ``{member1 member2 …}`` listing of the members."""
        members = " ".join(member.name for member in cls)
        return "{" + members + "}"
if TYPE_CHECKING:
    # TypeVar bound to BaseModel; only needed by static type checkers for
    # the signature of BaseModel.validate() below.
    Model = TypeVar("Model", bound="BaseModel")
class BaseModel(pydantic.BaseModel):
    """Shim for pydantic.BaseModel.

    This class changes two behaviours in pydantic. The methods are direct
    copies from pydantic's code, with marked changes.

    1. https://github.com/samuelcolvin/pydantic/issues/524

       - "Multiple RecursionErrors with self-referencing models"
       - In e.g. :class:`.Item`, having both .parent and .child references
         leads to infinite recursion during validation.
       - Fix: override BaseModel.__setattr__.
       - New value 'limited' for Config.validate_assignment: no sibling
         field values are passed to Field.validate().
       - New key Config.validate_assignment_exclude: list of field names that
         are not validated per se *and* not passed to Field.validate() when
         validating a sibling field.

    2. https://github.com/samuelcolvin/pydantic/issues/521

       - "Assignment to attribute changes id() but not referenced object,"
         marked as wontfix by pydantic maintainer.
       - When cls.attr is typed as BaseModel (or a subclass), then
         a.attr is b.attr is always False, even when set to the same reference.
       - Fix: override BaseModel.validate() without copy().
    """

    class Config:
        # 'limited' is the custom mode understood by __setattr__ below.
        validate_assignment = "limited"
        # Field names skipped entirely during assignment validation.
        validate_assignment_exclude: List[str] = []

    # Workaround for https://github.com/samuelcolvin/pydantic/issues/521
    @classmethod
    def validate(cls: Type["Model"], value: Any) -> "Model":
        if isinstance(value, dict):
            return cls(**value)
        elif isinstance(value, cls):
            # *** Changed from upstream pydantic: return the same reference
            # instead of a copy, so identity (`is`) is preserved.
            return value  # ***
        elif cls.__config__.orm_mode:
            return cls.from_orm(value)
        else:
            try:
                value_as_dict = dict(value)
            except (TypeError, ValueError) as e:
                raise DictError() from e
            return cls(**value_as_dict)

    # Workaround for https://github.com/samuelcolvin/pydantic/issues/524
    @no_type_check
    def __setattr__(self, name, value):
        if self.__config__.extra is not Extra.allow and name not in self.__fields__:
            raise ValueError(
                f'"{self.__class__.__name__}" object has no field' f' "{name}"'
            )
        elif not self.__config__.allow_mutation:
            raise TypeError(
                f'"{self.__class__.__name__}" is immutable and '
                "does not support item assignment"
            )
        elif (
            self.__config__.validate_assignment
            and name not in self.__config__.validate_assignment_exclude
        ):
            if self.__config__.validate_assignment == "limited":
                # 'limited' mode: pass *no* sibling field values to the
                # validator, avoiding recursion through self-references.
                kw = {"include": {}}
            else:
                kw = {"exclude": {name}}
            known_field = self.__fields__.get(name, None)
            if known_field:
                value, error_ = known_field.validate(value, self.dict(**kw), loc=name)
                if error_:
                    raise ValidationError([error_], type(self))
        self.__dict__[name] = value
        self.__fields_set__.add(name)
class DictLike(collections.OrderedDict, typing.MutableMapping[KT, VT]):
    """Container with features of a dict & list, plus attribute access."""

    def __getitem__(self, key: Union[KT, int]) -> VT:
        # Retrieve by mapping key first; fall back to list-like access.
        try:
            return super().__getitem__(key)
        except KeyError:
            if isinstance(key, int):
                # List-like access: the *key*-th value in insertion order.
                return list(self.values())[key]
            elif isinstance(key, str) and key.startswith("__"):
                # Dunder lookups (e.g. by copy/pickle machinery) must raise
                # AttributeError, not KeyError, to not break those protocols.
                raise AttributeError
            else:
                raise

    def __setitem__(self, key: KT, value: VT) -> None:
        # Run pydantic validators (if attached by validate()) on both the
        # key and the value before storing.
        key = self._apply_validators("key", key)
        value = self._apply_validators("value", value)
        super().__setitem__(key, value)

    # Access items as attributes
    def __getattr__(self, name) -> VT:
        try:
            return self.__getitem__(name)
        except KeyError as e:
            raise AttributeError(*e.args) from None

    def validate(cls, value, field):
        # NOTE(review): used as a pydantic validator via
        # make_generic_validator() (see validate_dictlike); 'cls' is
        # supplied by that wrapper, hence no @classmethod decorator.
        if not isinstance(value, (dict, DictLike)):
            raise ValueError(value)

        # Build a new DictLike carrying the pydantic key/value fields so
        # that _apply_validators() can validate subsequent assignments.
        result = DictLike()
        result.__fields = {"key": field.key_field, "value": field}
        result.update(value)
        return result

    def _apply_validators(self, which, value):
        try:
            field = self.__fields[which]
        except AttributeError:
            # No pydantic fields attached (plain DictLike): accept as-is.
            return value
        result, error = field._apply_validators(
            value, validators=field.validators, values={}, loc=(), cls=None
        )
        if error:
            raise ValidationError([error], self.__class__)
        else:
            return result

    def compare(self, other, strict=True):
        """Return :obj:`True` if `self` is the same as `other`.

        Two DictLike instances are identical if they contain the same set of
        keys, and corresponding values compare equal.

        Parameters
        ----------
        strict : bool, optional
            Passed to :func:`compare` for the values.
        """
        if set(self.keys()) != set(other.keys()):
            log.info(f"Not identical: {sorted(self.keys())} / {sorted(other.keys())}")
            return False

        # Values are compared via their own .compare() methods.
        for key, value in self.items():
            if not value.compare(other[key], strict):
                return False

        return True
def summarize_dictlike(dl, maxwidth=72):
    """Return a one-line string summary of the DictLike contents.

    The summary has the form ``"<ValueClass> (<count>): <key1> <key2> …"``
    and is truncated with an ellipsis if longer than *maxwidth*.
    """
    summary = "{} ({}): {}".format(
        type(dl[0]).__name__, len(dl), " ".join(dl.keys())
    )
    if len(summary) > maxwidth:
        # Truncate the list of keys
        summary = summary[: maxwidth - 3] + "..."
    return summary
def validate_dictlike(*fields):
    """Class decorator: attach DictLike.validate to the named pydantic fields.

    Each name in *fields* must be a declared field of the decorated model;
    its post-validators are replaced with a generic wrapper around
    :meth:`DictLike.validate`.
    """
    def decorator(cls):
        generic = make_generic_validator(DictLike.validate)
        for name in fields:
            cls.__fields__[name].post_validators = [generic]
        return cls

    return decorator
def compare(attr, a, b, strict: bool) -> bool:
    """Return :obj:`True` if ``a.attr`` == ``b.attr``.

    If *strict* is :obj:`False`, a :obj:`None` value on either side is also
    accepted as a match.
    """
    values = (getattr(a, attr), getattr(b, attr))
    # Equal values always match; in non-strict mode a None on either side
    # also counts as a match.
    return values[0] == values[1] or (not strict and None in values)
| apache-2.0 |
andim/scipydirect | doc/sphinxext/inheritance_diagram.py | 98 | 13648 | """
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
    """Import and return the module *name*, resolving dotted paths.

    Taken from the Python documentation: plain ``__import__("a.b")``
    returns the top-level package ``a``, so walk the remaining components
    to reach the right-most module.
    """
    module = __import__(name)
    for part in name.split('.')[1:]:
        module = getattr(module, part)
    return module
class DotException(Exception):
    """Raised when the graphviz ``dot`` executable fails or is unavailable."""
    pass
class InheritanceGraph(object):
    """
    Given a list of classes, determines the set of classes that
    they inherit from all the way to the root "object", and then
    is able to generate a graphviz dot graph from them.

    NOTE(review): this is Python 2-era code (see the '__builtin__' check in
    class_name and the use of ``__builtins__.values()``); behavior on
    Python 3 should be verified before reuse.
    """
    def __init__(self, class_names, show_builtins=False):
        """
        *class_names* is a list of child classes to show bases from.

        If *show_builtins* is True, then Python builtins will be shown
        in the graph.
        """
        self.class_names = class_names
        self.classes = self._import_classes(class_names)
        self.all_classes = self._all_classes(self.classes)
        if len(self.all_classes) == 0:
            raise ValueError("No classes found for inheritance diagram")
        self.show_builtins = show_builtins

    # Matches an optional dotted module path followed by a class name.
    py_sig_re = re.compile(r'''^([\w.]*\.)?  # class names
                           (\w+) \s* $  # optionally arguments
                           ''', re.VERBOSE)

    def _import_class_or_module(self, name):
        """
        Import a class using its fully-qualified *name*.

        Returns a list of classes: a single-element list when *name* is a
        class, or all classes defined in the module when it is a module.
        Raises ValueError for anything that cannot be resolved.
        """
        try:
            path, base = self.py_sig_re.match(name).groups()
        except:
            # NOTE(review): bare except; 'except AttributeError' (match()
            # returned None) would be sufficient and safer.
            raise ValueError(
                "Invalid class or module '%s' specified for inheritance diagram" % name)
        fullname = (path or '') + base
        path = (path and path.rstrip('.'))
        if not path:
            path = base
        try:
            module = __import__(path, None, None, [])
            # We must do an import of the fully qualified name. Otherwise if a
            # subpackage 'a.b' is requested where 'import a' does NOT provide
            # 'a.b' automatically, then 'a.b' will not be found below. This
            # second call will force the equivalent of 'import a.b' to happen
            # after the top-level import above.
            my_import(fullname)
        except ImportError:
            raise ValueError(
                "Could not import class or module '%s' specified for inheritance diagram" % name)

        try:
            # Walk attribute-by-attribute from the top-level package down to
            # the requested object.
            todoc = module
            for comp in fullname.split('.')[1:]:
                todoc = getattr(todoc, comp)
        except AttributeError:
            raise ValueError(
                "Could not find class or module '%s' specified for inheritance diagram" % name)

        # If a class, just return it
        if inspect.isclass(todoc):
            return [todoc]
        elif inspect.ismodule(todoc):
            # For a module, collect every class *defined* in it (not merely
            # imported into it, hence the __module__ check).
            classes = []
            for cls in todoc.__dict__.values():
                if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
                    classes.append(cls)
            return classes
        raise ValueError(
            "'%s' does not resolve to a class or module" % name)

    def _import_classes(self, class_names):
        """
        Import a list of classes.
        """
        classes = []
        for name in class_names:
            classes.extend(self._import_class_or_module(name))
        return classes

    def _all_classes(self, classes):
        """
        Return a list of all classes that are ancestors of *classes*.
        """
        # Dict used as a set of visited classes.
        all_classes = {}

        def recurse(cls):
            all_classes[cls] = None
            for c in cls.__bases__:
                if c not in all_classes:
                    recurse(c)

        for cls in classes:
            recurse(cls)

        # NOTE(review): under Python 3 this returns a dict view, not a list.
        return all_classes.keys()

    def class_name(self, cls, parts=0):
        """
        Given a class object, return a fully-qualified name. This
        works for things I've tested in matplotlib so far, but may not
        be completely general.

        *parts* limits the dotted name to its last *parts* components;
        0 means the full name.
        """
        module = cls.__module__
        if module == '__builtin__':
            # Python 2 name of the builtins module: omit the module prefix.
            fullname = cls.__name__
        else:
            fullname = "%s.%s" % (module, cls.__name__)
        if parts == 0:
            return fullname
        name_parts = fullname.split('.')
        return '.'.join(name_parts[-parts:])

    def get_all_class_names(self):
        """
        Get all of the class names involved in the graph.
        """
        return [self.class_name(x) for x in self.all_classes]

    # These are the default options for graphviz
    default_graph_options = {
        "rankdir": "LR",
        "size": '"8.0, 12.0"'
    }
    default_node_options = {
        "shape": "box",
        "fontsize": 10,
        "height": 0.25,
        "fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
        "style": '"setlinewidth(0.5)"'
    }
    default_edge_options = {
        "arrowsize": 0.5,
        "style": '"setlinewidth(0.5)"'
    }

    def _format_node_options(self, options):
        # "key=value" pairs joined by commas, for node/edge attribute lists.
        return ','.join(["%s=%s" % x for x in options.items()])

    def _format_graph_options(self, options):
        # One "key=value;" statement per line, for graph-level attributes.
        return ''.join(["%s=%s;\n" % x for x in options.items()])

    def generate_dot(self, fd, name, parts=0, urls={},
                     graph_options={}, node_options={},
                     edge_options={}):
        """
        Generate a graphviz dot graph from the classes that
        were passed in to __init__.

        *fd* is a Python file-like object to write to.

        *name* is the name of the graph

        *urls* is a dictionary mapping class names to http urls

        *graph_options*, *node_options*, *edge_options* are
        dictionaries containing key/value pairs to pass on as graphviz
        properties.
        """
        g_options = self.default_graph_options.copy()
        g_options.update(graph_options)
        n_options = self.default_node_options.copy()
        n_options.update(node_options)
        e_options = self.default_edge_options.copy()
        e_options.update(edge_options)

        fd.write('digraph %s {\n' % name)
        fd.write(self._format_graph_options(g_options))

        for cls in self.all_classes:
            # NOTE(review): `__builtins__.values()` assumes __builtins__ is a
            # dict, which is only guaranteed in the __main__ module.
            if not self.show_builtins and cls in __builtins__.values():
                continue

            name = self.class_name(cls, parts)

            # Write the node
            this_node_options = n_options.copy()
            url = urls.get(self.class_name(cls))
            if url is not None:
                this_node_options['URL'] = '"%s"' % url
            fd.write(' "%s" [%s];\n' %
                     (name, self._format_node_options(this_node_options)))

            # Write the edges
            for base in cls.__bases__:
                if not self.show_builtins and base in __builtins__.values():
                    continue

                base_name = self.class_name(base, parts)
                fd.write(' "%s" -> "%s" [%s];\n' %
                         (base_name, name,
                          self._format_node_options(e_options)))
        fd.write('}\n')

    def run_dot(self, args, name, parts=0, urls={},
                graph_options={}, node_options={}, edge_options={}):
        """
        Run graphviz 'dot' over this graph, returning whatever 'dot'
        writes to stdout.

        *args* will be passed along as commandline arguments.

        *name* is the name of the graph

        *urls* is a dictionary mapping class names to http urls

        Raises DotException for any of the many os and
        installation-related errors that may occur.
        """
        try:
            dot = subprocess.Popen(['dot'] + list(args),
                                   stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                   close_fds=True)
        except OSError:
            raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
        except ValueError:
            raise DotException("'dot' called with invalid arguments")
        except:
            # NOTE(review): bare except — also traps KeyboardInterrupt.
            raise DotException("Unexpected error calling 'dot'")
        # Stream the dot source straight into the subprocess' stdin.
        self.generate_dot(dot.stdin, name, parts, urls, graph_options,
                          node_options, edge_options)
        dot.stdin.close()
        result = dot.stdout.read()
        returncode = dot.wait()
        if returncode != 0:
            raise DotException("'dot' returned the errorcode %d" % returncode)
        return result
class inheritance_diagram(Body, Element):
    """
    A docutils node to use as a placeholder for the inheritance
    diagram.
    """
    # The actual output is produced later by the format-specific visitors;
    # this node only carries the graph and its options through the doctree.
    pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
                                  content_offset, block_text, state,
                                  state_machine):
    """
    Run when the inheritance_diagram directive is first encountered.

    Builds the inheritance graph for the listed classes and returns a
    placeholder node carrying it, plus xref children that Sphinx will
    later resolve to URLs.
    """
    node = inheritance_diagram()
    class_names = arguments

    # Create a graph starting with the list of classes
    graph = InheritanceGraph(class_names)

    # Create xref nodes for each target of the graph's image map and
    # add them to the doc tree so that Sphinx can resolve the
    # references to real URLs later. These nodes will eventually be
    # removed from the doctree after we're done with them.
    for class_name in graph.get_all_class_names():
        refnodes, _ = xfileref_role(
            'class', ':class:`%s`' % class_name, class_name, 0, state)
        node.extend(refnodes)

    # Store the graph object so we can use it to generate the
    # dot file later
    node['graph'] = graph

    # Store the original content for use as a hash
    node['parts'] = options.get('parts', 0)
    node['content'] = " ".join(class_names)
    return [node]
def get_graph_hash(node):
    """Return a short, stable hash identifying the diagram *node*.

    The hash is derived from the node's class list (``content``) and the
    ``parts`` option, so identical diagrams reuse the same output file.
    """
    # md5() requires bytes on Python 3; encode the concatenated key
    # (previously a bare str was passed, raising TypeError on Python 3).
    key = (node['content'] + str(node['parts'])).encode('utf-8')
    return md5(key).hexdigest()[-10:]
def html_output_graph(self, node):
    """
    Output the graph for HTML. This will insert a PNG with clickable
    image map.
    """
    graph = node['graph']
    parts = node['parts']

    graph_hash = get_graph_hash(node)
    name = "inheritance%s" % graph_hash

    # Ensure the builder's image output directory exists.
    dest_path = os.path.join(setup.app.builder.outdir, '_images')
    if not os.path.exists(dest_path):
        os.makedirs(dest_path)
    png_path = os.path.join(dest_path, name + ".png")
    path = setup.app.builder.imgpath

    # Map fully-qualified class names to the URLs Sphinx resolved for the
    # xref children of this node.
    urls = {}
    for child in node:
        refuri = child.get('refuri')
        if refuri is not None:
            urls[child['reftitle']] = refuri
        elif child.get('refid') is not None:
            urls[child['reftitle']] = '#' + child.get('refid')

    # These arguments to dot will save a PNG file to disk and write
    # an HTML image map to stdout.
    image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
                              name, parts, urls)
    return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
            (path, name, name, image_map))
def latex_output_graph(self, node):
    """
    Output the graph for LaTeX. This will insert a PDF.
    """
    graph = node['graph']
    parts = node['parts']

    graph_hash = get_graph_hash(node)
    name = "inheritance%s" % graph_hash

    # Render into the builder's _images directory, creating it if needed.
    out_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                           '_images'))
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    pdf_file = os.path.abspath(os.path.join(out_dir, name + ".pdf"))

    graph.run_dot(['-Tpdf', '-o%s' % pdf_file],
                  name, parts, graph_options={'size': '"6.0,6.0"'})
    return '\n\\includegraphics{%s}\n\n' % pdf_file
def visit_inheritance_diagram(inner_func):
    """
    This is just a wrapper around html/latex_output_graph to make it
    easier to handle errors and insert warnings.
    """
    def visitor(self, node):
        try:
            content = inner_func(self, node)
        except DotException as e:
            # Fixed: "except DotException, e" is Python 2-only syntax and a
            # SyntaxError on Python 3.
            # Insert the exception as a warning in the document
            warning = self.document.reporter.warning(str(e), line=node.line)
            warning.parent = node
            node.children = [warning]
        else:
            # (The previously-read, unused 'source' local was removed.)
            self.body.append(content)
            node.children = []
    return visitor
def do_nothing(self, node):
    """No-op visit/depart callback for node types that need no output."""
    return None
def setup(app):
    """Sphinx extension entry point: register the node and directive."""
    # Stash the app (and confdir) on the function object itself so the
    # module-level output helpers, which receive no app argument, can
    # reach the active builder via ``setup.app``.
    setup.app = app
    setup.confdir = app.confdir
    # Register per-format visitors: render for latex/html, ignore on depart.
    app.add_node(
        inheritance_diagram,
        latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
        html=(visit_inheritance_diagram(html_output_graph), do_nothing))
    # Old-style directive registration: (has_content=False,
    # (required_args, optional_args, final_arg_whitespace), options).
    app.add_directive(
        'inheritance-diagram', inheritance_diagram_directive,
        False, (1, 100, 0), parts = directives.nonnegative_int)
| mit |
adykstra/mne-python | tutorials/discussions/plot_background_filtering.py | 1 | 49893 | # -*- coding: utf-8 -*-
r"""
.. _disc-filtering:
===================================
Background information on filtering
===================================
Here we give some background information on filtering in general,
and how it is done in MNE-Python in particular.
Recommended reading for practical applications of digital
filter design can be found in Parks & Burrus (1987) [1]_ and
Ifeachor & Jervis (2002) [2]_, and for filtering in an
M/EEG context we recommend reading Widmann *et al.* (2015) [7]_.
To see how to use the default filters in MNE-Python on actual data, see
the :ref:`tut-filter-resample` tutorial.
.. contents::
:local:
Problem statement
=================
Practical issues with filtering electrophysiological data are covered
in Widmann *et al.* (2012) [6]_, where they conclude with this statement:
Filtering can result in considerable distortions of the time course
(and amplitude) of a signal as demonstrated by VanRullen (2011) [[3]_].
Thus, filtering should not be used lightly. However, if effects of
filtering are cautiously considered and filter artifacts are minimized,
a valid interpretation of the temporal dynamics of filtered
electrophysiological data is possible and signals missed otherwise
can be detected with filtering.
In other words, filtering can increase signal-to-noise ratio (SNR), but if it
is not used carefully, it can distort data. Here we hope to cover some
filtering basics so users can better understand filtering trade-offs and why
MNE-Python has chosen particular defaults.
.. _tut_filtering_basics:
Filtering basics
================
Let's get some of the basic math down. In the frequency domain, digital
filters have a transfer function that is given by:
.. math::
    H(z) &= \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + \ldots + b_M z^{-M}}
                 {1 + a_1 z^{-1} + a_2 z^{-2} + \ldots + a_N z^{-N}} \\
         &= \frac{\sum_{k=0}^M b_k z^{-k}}{1 + \sum_{k=1}^N a_k z^{-k}}
In the time domain, the numerator coefficients :math:`b_k` and denominator
coefficients :math:`a_k` can be used to obtain our output data
:math:`y(n)` in terms of our input data :math:`x(n)` as:
.. math::
:label: summations
y(n) &= b_0 x(n) + b_1 x(n-1) + \ldots + b_M x(n-M)
- a_1 y(n-1) - a_2 y(n - 2) - \ldots - a_N y(n - N)\\
&= \sum_{k=0}^M b_k x(n-k) - \sum_{k=1}^N a_k y(n-k)
In other words, the output at time :math:`n` is determined by a sum over
1. the numerator coefficients :math:`b_k`, which get multiplied by
the previous input values :math:`x(n-k)`, and
2. the denominator coefficients :math:`a_k`, which get multiplied by
the previous output values :math:`y(n-k)`.
Note that these summations correspond to (1) a weighted `moving average`_ and
(2) an autoregression_.
Filters are broken into two classes: FIR_ (finite impulse response) and
IIR_ (infinite impulse response) based on these coefficients.
FIR filters use a finite number of numerator
coefficients :math:`b_k` (:math:`\forall k, a_k=0`), and thus each output
value of :math:`y(n)` depends only on the :math:`M` previous input values.
IIR filters depend on the previous input and output values, and thus can have
effectively infinite impulse responses.
As outlined in Parks & Burrus (1987) [1]_, FIR and IIR have different
trade-offs:
* A causal FIR filter can be linear-phase -- i.e., the same time delay
across all frequencies -- whereas a causal IIR filter cannot. The phase
and group delay characteristics are also usually better for FIR filters.
* IIR filters can generally have a steeper cutoff than an FIR filter of
equivalent order.
* IIR filters are generally less numerically stable, in part due to
accumulating error (due to its recursive calculations).
In MNE-Python we default to using FIR filtering. As noted in Widmann *et al.*
(2015) [7]_:
Despite IIR filters often being considered as computationally more
efficient, they are recommended only when high throughput and sharp
cutoffs are required (Ifeachor and Jervis, 2002 [[2]_], p. 321)...
FIR filters are easier to control, are always stable, have a
well-defined passband, can be corrected to zero-phase without
additional computations, and can be converted to minimum-phase.
We therefore recommend FIR filters for most purposes in
electrophysiological data analysis.
When designing a filter (FIR or IIR), there are always trade-offs that
need to be considered, including but not limited to:
1. Ripple in the pass-band
2. Attenuation of the stop-band
3. Steepness of roll-off
4. Filter order (i.e., length for FIR filters)
5. Time-domain ringing
In general, the sharper something is in frequency, the broader it is in time,
and vice-versa. This is a fundamental time-frequency trade-off, and it will
show up below.
FIR Filters
===========
First, we will focus on FIR filters, which are the default filters used by
MNE-Python.
"""
###############################################################################
# Designing FIR filters
# ---------------------
# Here we'll try to design a low-pass filter and look at trade-offs in terms
# of time- and frequency-domain filter characteristics. Later, in
# :ref:`tut_effect_on_signals`, we'll look at how such filters can affect
# signals when they are used.
#
# First let's import some useful tools for filtering, and set some default
# values for our data that are reasonable for M/EEG.
import numpy as np
from scipy import signal, fftpack
import matplotlib.pyplot as plt
from mne.time_frequency.tfr import morlet
from mne.viz import plot_filter, plot_ideal_filter
import mne
sfreq = 1000.
f_p = 40.
flim = (1., sfreq / 2.) # limits for plotting
###############################################################################
# Take for example an ideal low-pass filter, which would give a magnitude
# response of 1 in the pass-band (up to frequency :math:`f_p`) and a magnitude
# response of 0 in the stop-band (down to frequency :math:`f_s`) such that
# :math:`f_p=f_s=40` Hz here (shown to a lower limit of -60 dB for simplicity):
nyq = sfreq / 2. # the Nyquist frequency is half our sample rate
freq = [0, f_p, f_p, nyq]
gain = [1, 1, 0, 0]
third_height = np.array(plt.rcParams['figure.figsize']) * [1, 1. / 3.]
ax = plt.subplots(1, figsize=third_height)[1]
plot_ideal_filter(freq, gain, ax, title='Ideal %s Hz lowpass' % f_p, flim=flim)
###############################################################################
# This filter hypothetically achieves zero ripple in the frequency domain,
# perfect attenuation, and perfect steepness. However, due to the discontinuity
# in the frequency response, the filter would require infinite ringing in the
# time domain (i.e., infinite order) to be realized. Another way to think of
# this is that a rectangular window in the frequency domain is actually a sinc_
# function in the time domain, which requires an infinite number of samples
# (and thus infinite time) to represent. So although this filter has ideal
# frequency suppression, it has poor time-domain characteristics.
#
# Let's try to naïvely make a brick-wall filter of length 0.1 s, and look
# at the filter itself in the time domain and the frequency domain:
n = int(round(0.1 * sfreq))
n -= n % 2 - 1 # make it odd
t = np.arange(-(n // 2), n // 2 + 1) / sfreq # center our sinc
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (0.1 s)', flim=flim, compensate=True)
###############################################################################
# This is not so good! Making the filter 10 times longer (1 s) gets us a
# slightly better stop-band suppression, but still has a lot of ringing in
# the time domain. Note the x-axis is an order of magnitude longer here,
# and the filter has a correspondingly much longer group delay (again equal
# to half the filter length, or 0.5 seconds):
n = int(round(1. * sfreq))
n -= n % 2 - 1 # make it odd
t = np.arange(-(n // 2), n // 2 + 1) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (1.0 s)', flim=flim, compensate=True)
###############################################################################
# Let's make the stop-band tighter still with a longer filter (10 s),
# with a resulting larger x-axis:
n = int(round(10. * sfreq))
n -= n % 2 - 1 # make it odd
t = np.arange(-(n // 2), n // 2 + 1) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (10.0 s)', flim=flim, compensate=True)
###############################################################################
# Now we have very sharp frequency suppression, but our filter rings for the
# entire 10 seconds. So this naïve method is probably not a good way to build
# our low-pass filter.
#
# Fortunately, there are multiple established methods to design FIR filters
# based on desired response characteristics. These include:
#
# 1. The Remez_ algorithm (:func:`scipy.signal.remez`, `MATLAB firpm`_)
# 2. Windowed FIR design (:func:`scipy.signal.firwin2`,
# :func:`scipy.signal.firwin`, and `MATLAB fir2`_)
# 3. Least squares designs (:func:`scipy.signal.firls`, `MATLAB firls`_)
# 4. Frequency-domain design (construct filter in Fourier
# domain and use an :func:`IFFT <scipy.fftpack.ifft>` to invert it)
#
# .. note:: Remez and least squares designs have advantages when there are
# "do not care" regions in our frequency response. However, we want
# well controlled responses in all frequency regions.
# Frequency-domain construction is good when an arbitrary response
# is desired, but generally less clean (due to sampling issues) than
# a windowed approach for more straightforward filter applications.
# Since our filters (low-pass, high-pass, band-pass, band-stop)
# are fairly simple and we require precise control of all frequency
# regions, we will primarily use and explore windowed FIR design.
#
# If we relax our frequency-domain filter requirements a little bit, we can
# use these functions to construct a lowpass filter that instead has a
# *transition band*, or a region between the pass frequency :math:`f_p`
# and stop frequency :math:`f_s`, e.g.:
trans_bandwidth = 10 # 10 Hz transition band
f_s = f_p + trans_bandwidth # = 50 Hz
freq = [0., f_p, f_s, nyq]
gain = [1., 1., 0., 0.]
ax = plt.subplots(1, figsize=third_height)[1]
title = '%s Hz lowpass with a %s Hz transition' % (f_p, trans_bandwidth)
plot_ideal_filter(freq, gain, ax, title=title, flim=flim)
###############################################################################
# Accepting a shallower roll-off of the filter in the frequency domain makes
# our time-domain response potentially much better. We end up with a more
# gradual slope through the transition region, but a *much* cleaner time
# domain signal. Here again for the 1 s filter:
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (1.0 s)',
flim=flim, compensate=True)
###############################################################################
# Since our lowpass is around 40 Hz with a 10 Hz transition, we can actually
# use a shorter filter (5 cycles at 10 Hz = 0.5 s) and still get acceptable
# stop-band attenuation:
n = int(round(sfreq * 0.5)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (0.5 s)',
flim=flim, compensate=True)
###############################################################################
# But if we shorten the filter too much (2 cycles of 10 Hz = 0.2 s),
# our effective stop frequency gets pushed out past 60 Hz:
n = int(round(sfreq * 0.2)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (0.2 s)',
flim=flim, compensate=True)
###############################################################################
# If we want a filter that is only 0.1 seconds long, we should probably use
# something more like a 25 Hz transition band (0.2 s = 5 cycles @ 25 Hz):
trans_bandwidth = 25
f_s = f_p + trans_bandwidth
freq = [0, f_p, f_s, nyq]
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 50 Hz transition (0.2 s)',
flim=flim, compensate=True)
###############################################################################
# So far, we have only discussed *non-causal* filtering, which means that each
# sample at each time point :math:`t` is filtered using samples that come
# after (:math:`t + \Delta t`) *and* before (:math:`t - \Delta t`) the current
# time point :math:`t`.
# In this sense, each sample is influenced by samples that come both before
# and after it. This is useful in many cases, especially because it does not
# delay the timing of events.
#
# However, sometimes it can be beneficial to use *causal* filtering,
# whereby each sample :math:`t` is filtered only using time points that came
# after it.
#
# Note that the delay is variable (whereas for linear/zero-phase filters it
# is constant) but small in the pass-band. Unlike zero-phase filters, which
# require time-shifting backward the output of a linear-phase filtering stage
# (and thus becoming non-causal), minimum-phase filters do not require any
# compensation to achieve small delays in the pass-band. Note that as an
# artifact of the minimum phase filter construction step, the filter does
# not end up being as steep as the linear/zero-phase version.
#
# We can construct a minimum-phase filter from our existing linear-phase
# filter with the ``minimum_phase`` function in :mod:`scipy.signal`, and note
# that the falloff is not as steep:
h_min = mne.fixes.minimum_phase(h)
plot_filter(h_min, sfreq, freq, gain, 'Minimum-phase', flim=flim)
###############################################################################
# .. _tut_effect_on_signals:
#
# Applying FIR filters
# --------------------
#
# Now lets look at some practical effects of these filters by applying
# them to some data.
#
# Let's construct a Gaussian-windowed sinusoid (i.e., Morlet imaginary part)
# plus noise (random and line). Note that the original clean signal contains
# frequency content in both the pass band and transition bands of our
# low-pass filter.
dur = 10.  # total signal duration (s)
center = 2.  # time of the simulated event (s)
morlet_freq = f_p
tlim = [center - 0.2, center + 0.2]  # zoomed time window for plotting
tticks = [tlim[0], center, tlim[1]]
flim = [20, 70]  # frequency limits for plotting (Hz)
x = np.zeros(int(sfreq * dur) + 1)
# Imaginary part of a 7-cycle Morlet wavelet = Gaussian-windowed sinusoid
blip = morlet(sfreq, [morlet_freq], n_cycles=7)[0].imag / 20.
n_onset = int(center * sfreq) - len(blip) // 2  # center the blip at ``center``
x[n_onset:n_onset + len(blip)] += blip
x_orig = x.copy()  # keep the clean signal for later comparison
rng = np.random.RandomState(0)  # fixed seed for reproducibility
x += rng.randn(len(x)) / 1000.  # broadband random noise
x += np.sin(2. * np.pi * 60. * np.arange(len(x)) / sfreq) / 2000.  # line noise
###############################################################################
# Filter it with a shallow cutoff, linear-phase FIR (which allows us to
# compensate for the constant filter delay):
transition_band = 0.25 * f_p  # 25% of the pass-band edge
f_s = f_p + transition_band  # stop-band edge (Hz)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# MNE's helper designs an equivalent windowed-FIR filter:
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             fir_design='firwin', verbose=True)
x_v16 = np.convolve(h, x)
# this is the linear->zero phase, causal-to-non-causal conversion / shift
x_v16 = x_v16[len(h) // 2:]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.16 default', flim=flim,
            compensate=True)
###############################################################################
# Filter it with a different design method ``fir_design="firwin2"``, and also
# compensate for the constant filter delay. This method does not produce
# quite as sharp a transition compared to ``fir_design="firwin"``, despite
# being twice as long:
transition_band = 0.25 * f_p  # 25% of the pass-band edge
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent:
# filter_dur = 6.6 / transition_band  # sec
# n = int(sfreq * filter_dur)
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             fir_design='firwin2', verbose=True)
x_v14 = np.convolve(h, x)[len(h) // 2:]  # compensate for the filter delay
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.14 default', flim=flim,
            compensate=True)
###############################################################################
# Let's also filter with the MNE-Python 0.13 default, which is a
# long-duration, steep cutoff FIR that gets applied twice:
transition_band = 0.5  # Hz
f_s = f_p + transition_band
filter_dur = 10.  # sec
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent
# n = int(sfreq * filter_dur)
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             h_trans_bandwidth=transition_band,
                             filter_length='%ss' % filter_dur,
                             fir_design='firwin2', verbose=True)
# Apply forward, time-reverse, apply again, undo the reversal (two-pass)
x_v13 = np.convolve(np.convolve(h, x)[::-1], h)[::-1][len(h) - 1:-len(h) - 1]
# the effective h is one that is applied to the time-reversed version of itself
h_eff = np.convolve(h, h[::-1])
plot_filter(h_eff, sfreq, freq, gain, 'MNE-Python 0.13 default', flim=flim,
            compensate=True)
###############################################################################
# Let's also filter it with the MNE-C default, which is a long-duration
# steep-slope FIR filter designed using frequency-domain techniques:
h = mne.filter.design_mne_c_filter(sfreq, l_freq=None, h_freq=f_p + 2.5)
x_mne_c = np.convolve(h, x)[len(h) // 2:]  # compensate for the filter delay
transition_band = 5  # Hz (default in MNE-C)
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'MNE-C default', flim=flim, compensate=True)
###############################################################################
# And now an example of a minimum-phase filter:
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             phase='minimum', fir_design='firwin',
                             verbose=True)
x_min = np.convolve(h, x)  # no delay compensation applied here
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band  # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'Minimum-phase filter', flim=flim)
###############################################################################
# Both the MNE-Python 0.13 and MNE-C filters have excellent frequency
# attenuation, but it comes at a cost of potential
# ringing (long-lasting ripples) in the time domain. Ringing can occur with
# steep filters, especially in signals with frequency content around the
# transition band. Our Morlet wavelet signal has power in our transition band,
# and the time-domain ringing is thus more pronounced for the steep-slope,
# long-duration filter than the shorter, shallower-slope filter:
axes = plt.subplots(1, 2)[1]


def plot_signal(x, offset):
    """Plot ``x`` (vertically shifted by ``offset``) in time and frequency.

    The time-domain trace goes on the left axes and the magnitude spectrum
    (in dB, non-negative frequencies only) on the right axes; both are the
    module-level ``axes`` created just above.
    """
    times = np.arange(len(x)) / sfreq
    axes[0].plot(times, x + offset)
    axes[0].set(xlabel='Time (s)', xlim=times[[0, -1]])
    spectrum = fftpack.fft(x)
    freq_axis = fftpack.fftfreq(len(x), 1. / sfreq)
    keep = freq_axis >= 0  # discard the negative-frequency half
    magnitude = 20 * np.log10(np.maximum(np.abs(spectrum[keep]), 1e-16))
    axes[1].plot(freq_axis[keep], magnitude)
    axes[1].set(xlim=flim)
yscale = 30  # controls vertical spacing between stacked traces
yticklabels = ['Original', 'Noisy', 'FIR-firwin (0.16)', 'FIR-firwin2 (0.14)',
               'FIR-steep (0.13)', 'FIR-steep (MNE-C)', 'Minimum-phase']
yticks = -np.arange(len(yticklabels)) / yscale  # one offset per trace
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_v16, offset=yticks[2])
plot_signal(x_v14, offset=yticks[3])
plot_signal(x_v13, offset=yticks[4])
plot_signal(x_mne_c, offset=yticks[5])
plot_signal(x_min, offset=yticks[6])
axes[0].set(xlim=tlim, title='FIR, Lowpass=%d Hz' % f_p, xticks=tticks,
            ylim=[-len(yticks) / yscale, 1. / yscale],
            yticks=yticks, yticklabels=yticklabels)
for text in axes[0].get_yticklabels():
    text.set(rotation=45, size=8)  # slant labels to avoid overlap
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
            ylabel='Magnitude (dB)')
mne.viz.tight_layout()
plt.show()
###############################################################################
# IIR filters
# ===========
#
# MNE-Python also offers IIR filtering functionality that is based on the
# methods from :mod:`scipy.signal`. Specifically, we use the general-purpose
# functions :func:`scipy.signal.iirfilter` and :func:`scipy.signal.iirdesign`,
# which provide unified interfaces to IIR filter design.
#
# Designing IIR filters
# ---------------------
#
# Let's continue with our design of a 40 Hz low-pass filter and look at
# some trade-offs of different IIR filters.
#
# Often the default IIR filter is a `Butterworth filter`_, which is designed
# to have a *maximally flat pass-band*. Let's look at a few filter orders,
# i.e., a few different number of coefficients used and therefore steepness
# of the filter:
#
# .. note:: Notice that the group delay (which is related to the phase) of
# the IIR filters below are not constant. In the FIR case, we can
# design so-called linear-phase filters that have a constant group
# delay, and thus compensate for the delay (making the filter
# non-causal) if necessary. This cannot be done with IIR filters, as
# they have a non-linear phase (non-constant group delay). As the
# filter order increases, the phase distortion near and in the
# transition band worsens. However, if non-causal (forward-backward)
# filtering can be used, e.g. with :func:`scipy.signal.filtfilt`,
# these phase issues can theoretically be mitigated.
# Second-order Butterworth low-pass in second-order-sections (SOS) form
sos = signal.iirfilter(2, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=2', flim=flim,
            compensate=True)
x_shallow = signal.sosfiltfilt(sos, x)  # zero-phase forward-backward pass
del sos
###############################################################################
# The falloff of this filter is not very steep.
#
# .. note:: Here we have made use of second-order sections (SOS)
# by using :func:`scipy.signal.sosfilt` and, under the
# hood, :func:`scipy.signal.zpk2sos` when passing the
# ``output='sos'`` keyword argument to
# :func:`scipy.signal.iirfilter`. The filter definitions
# given :ref:`above <tut_filtering_basics>` use the polynomial
# numerator/denominator (sometimes called "tf") form ``(b, a)``,
# which are theoretically equivalent to the SOS form used here.
# In practice, however, the SOS form can give much better results
# due to issues with numerical precision (see
# :func:`scipy.signal.sosfilt` for an example), so SOS should be
# used whenever possible.
#
# Let's increase the order, and note that now we have better attenuation,
# with a longer impulse response. Let's also switch to using the MNE filter
# design function, which simplifies a few things and gives us some information
# about the resulting filter:
iir_params = dict(order=8, ftype='butter')
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                                method='iir', iir_params=iir_params,
                                verbose=True)
plot_filter(filt, sfreq, freq, gain, 'Butterworth order=8', flim=flim,
            compensate=True)
x_steep = signal.sosfiltfilt(filt['sos'], x)  # zero-phase application
###############################################################################
# There are other types of IIR filters that we can use. For a complete list,
# check out the documentation for :func:`scipy.signal.iirdesign`. Let's
# try a Chebychev (type I) filter, which trades off ripple in the pass-band
# to get better attenuation in the stop-band:
iir_params.update(ftype='cheby1',
                  rp=1.,  # dB of acceptable pass-band ripple
                  )
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                                method='iir', iir_params=iir_params,
                                verbose=True)
plot_filter(filt, sfreq, freq, gain,
            'Chebychev-1 order=8, ripple=1 dB', flim=flim, compensate=True)
###############################################################################
# If we can live with even more ripple, we can get it slightly steeper,
# but the impulse response begins to ring substantially longer (note the
# different x-axis scale):
iir_params['rp'] = 6.  # accept more pass-band ripple for a steeper cutoff
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                                method='iir', iir_params=iir_params,
                                verbose=True)
plot_filter(filt, sfreq, freq, gain,
            'Chebychev-1 order=8, ripple=6 dB', flim=flim,
            compensate=True)
###############################################################################
# Applying IIR filters
# --------------------
#
# Now let's look at how our shallow and steep Butterworth IIR filters
# perform on our Morlet signal from before:
axes = plt.subplots(1, 2)[1]
yticks = np.arange(4) / -30.  # vertical offsets for the stacked traces
yticklabels = ['Original', 'Noisy', 'Butterworth-2', 'Butterworth-8']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
axes[0].set(xlim=tlim, title='IIR, Lowpass=%d Hz' % f_p, xticks=tticks,
            ylim=[-0.125, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axes[0].get_yticklabels():
    text.set(rotation=45, size=8)  # slant labels to avoid overlap
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
            ylabel='Magnitude (dB)')
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Some pitfalls of filtering
# ==========================
#
# Multiple recent papers have noted potential risks of drawing
# errant inferences due to misapplication of filters.
#
# Low-pass problems
# -----------------
#
# Filters in general, especially those that are non-causal (zero-phase), can
# make activity appear to occur earlier or later than it truly did. As
# mentioned in VanRullen (2011) [3]_, investigations of commonly (at the time)
# used low-pass filters created artifacts when they were applied to simulated
# data. However, such deleterious effects were minimal in many real-world
# examples in Rousselet (2012) [5]_.
#
# Perhaps more revealing, it was noted in Widmann & Schröger (2012) [6]_ that
# the problematic low-pass filters from VanRullen (2011) [3]_:
#
# 1. Used a least-squares design (like :func:`scipy.signal.firls`) that
# included "do-not-care" transition regions, which can lead to
# uncontrolled behavior.
# 2. Had a filter length that was independent of the transition bandwidth,
# which can cause excessive ringing and signal distortion.
#
# .. _tut_filtering_hp_problems:
#
# High-pass problems
# ------------------
#
# When it comes to high-pass filtering, using corner frequencies above 0.1 Hz
# were found in Acunzo *et al.* (2012) [4]_ to:
#
# "... generate a systematic bias easily leading to misinterpretations of
# neural activity.”
#
# In a related paper, Widmann *et al.* (2015) [7]_ also came to suggest a
# 0.1 Hz highpass. More evidence followed in Tanner *et al.* (2015) [8]_ of
# such distortions. Using data from language ERP studies of semantic and
# syntactic processing (i.e., N400 and P600), using a high-pass above 0.3 Hz
# caused significant effects to be introduced implausibly early when compared
# to the unfiltered data. From this, the authors suggested the optimal
# high-pass value for language processing to be 0.1 Hz.
#
# We can recreate a problematic simulation from Tanner *et al.* (2015) [8]_:
#
# "The simulated component is a single-cycle cosine wave with an amplitude
# of 5µV [sic], onset of 500 ms poststimulus, and duration of 800 ms. The
# simulated component was embedded in 20 s of zero values to avoid
# filtering edge effects... Distortions [were] caused by 2 Hz low-pass
# and high-pass filters... No visible distortion to the original
# waveform [occurred] with 30 Hz low-pass and 0.01 Hz high-pass filters...
# Filter frequencies correspond to the half-amplitude (-6 dB) cutoff
# (12 dB/octave roll-off)."
#
# .. note:: This simulated signal contains energy not just within the
# pass-band, but also within the transition and stop-bands -- perhaps
# most easily understood because the signal has a non-zero DC value,
# but also because it is a shifted cosine that has been
# *windowed* (here multiplied by a rectangular window), which
# makes the cosine and DC frequencies spread to other frequencies
# (multiplication in time is convolution in frequency, so multiplying
# by a rectangular window in the time domain means convolving a sinc
# function with the impulses at DC and the cosine frequency in the
# frequency domain).
#
x = np.zeros(int(2 * sfreq))
t = np.arange(0, len(x)) / sfreq - 0.2  # time axis (s), shifted by 0.2 s
onset = np.where(t >= 0.5)[0][0]  # first sample at or after t = 0.5 s
cos_t = np.arange(0, int(sfreq * 0.8)) / sfreq  # 0.8 s of support
sig = 2.5 - 2.5 * np.cos(2 * np.pi * (1. / 0.8) * cos_t)  # one cycle, peak 5
x[onset:onset + len(sig)] = sig
# NOTE(review): ``signal.iirfilter`` normalizes ``Wn`` by the Nyquist
# frequency (sfreq / 2) when ``fs`` is not given, so dividing by ``sfreq``
# here produces half the nominal cutoff -- confirm whether ``/ nyq`` was
# intended.
iir_lp_30 = signal.iirfilter(2, 30. / sfreq, btype='lowpass')
iir_hp_p1 = signal.iirfilter(2, 0.1 / sfreq, btype='highpass')
iir_lp_2 = signal.iirfilter(2, 2. / sfreq, btype='lowpass')
iir_hp_2 = signal.iirfilter(2, 2. / sfreq, btype='highpass')
# filtfilt applies each filter forward then backward (zero-phase);
# padlen=0 disables edge padding (the signal is zero-valued at the edges)
x_lp_30 = signal.filtfilt(iir_lp_30[0], iir_lp_30[1], x, padlen=0)
x_hp_p1 = signal.filtfilt(iir_hp_p1[0], iir_hp_p1[1], x, padlen=0)
x_lp_2 = signal.filtfilt(iir_lp_2[0], iir_lp_2[1], x, padlen=0)
x_hp_2 = signal.filtfilt(iir_hp_2[0], iir_hp_2[1], x, padlen=0)
xlim = t[[0, -1]]
ylim = [-2, 6]
xlabel = 'Time (sec)'
ylabel = r'Amplitude ($\mu$V)'
tticks = [0, 0.5, 1.3, t[-1]]
axes = plt.subplots(2, 2)[1].ravel()
# Overlay each filtered version on the original signal.  Note that
# ``x_hp_p1`` is the 0.1 Hz *high-pass* output (see its construction above),
# so its label is HP, not LP (the original 'LP$_{0.1}$' label was wrong).
for ax, x_f, title in zip(axes, [x_lp_2, x_lp_30, x_hp_2, x_hp_p1],
                          ['LP$_2$', 'LP$_{30}$', 'HP$_2$', 'HP$_{0.1}$']):
    ax.plot(t, x, color='0.5')  # unfiltered signal in gray
    ax.plot(t, x_f, color='k', linestyle='--')  # filtered signal, dashed black
    ax.set(ylim=ylim, xlim=xlim, xticks=tticks,
           title=title, xlabel=xlabel, ylabel=ylabel)
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Similarly, in a P300 paradigm reported by Kappenman & Luck (2010) [12]_,
# they found that applying a 1 Hz high-pass decreased the probability of
# finding a significant difference in the N100 response, likely because
# the P300 response was smeared (and inverted) in time by the high-pass
# filter such that it tended to cancel out the increased N100. However,
# they nonetheless note that some high-passing can still be useful to deal
# with drifts in the data.
#
# Even though these papers generally advise a 0.1 Hz or lower frequency for
# a high-pass, it is important to keep in mind (as most authors note) that
# filtering choices should depend on the frequency content of both the
# signal(s) of interest and the noise to be suppressed. For example, in
# some of the MNE-Python examples involving :ref:`ch_sample_data`,
# high-pass values of around 1 Hz are used when looking at auditory
# or visual N100 responses, because we analyze standard (not deviant) trials
# and thus expect that contamination by later or slower components will
# be limited.
#
# Baseline problems (or solutions?)
# ---------------------------------
#
# In an evolving discussion, Tanner *et al.* (2015) [8]_ suggest using baseline
# correction to remove slow drifts in data. However, Maess *et al.* (2016) [9]_
# suggest that baseline correction, which is a form of high-passing, does
# not offer substantial advantages over standard high-pass filtering.
# Tanner *et al.* (2016) [10]_ rebutted that baseline correction can correct
# for problems with filtering.
#
# To see what they mean, consider again our old simulated signal ``x`` from
# before:
def baseline_plot(x):
    """Plot ``x`` high-passed at several cutoffs, with/without baselining.

    Rows correspond to high-pass cutoffs of 0.1, 0.3 and 0.5 Hz; the left
    column shows the zero-phase filter output and the right column shows the
    same output after subtracting the mean of the pre-stimulus (t < 0)
    period.  Reads the module-level ``t``, ``sfreq``, ``tticks``, ``ylim``,
    ``xlim`` and ``xlabel`` defined above.
    """
    all_axes = plt.subplots(3, 2)[1]
    for ri, (axes, freq) in enumerate(zip(all_axes, [0.1, 0.3, 0.5])):
        for ci, ax in enumerate(axes):
            if ci == 0:
                # Forward-backward (zero-phase) 4th-order Butterworth HP
                iir_hp = signal.iirfilter(4, freq / sfreq, btype='highpass',
                                          output='sos')
                x_hp = signal.sosfiltfilt(iir_hp, x, padlen=0)
            else:
                # Baseline correction: subtract the pre-stimulus mean
                x_hp -= x_hp[t < 0].mean()
            ax.plot(t, x, color='0.5')
            ax.plot(t, x_hp, color='k', linestyle='--')
            if ri == 0:
                ax.set(title=('No ' if ci == 0 else '') +
                       'Baseline Correction')
            ax.set(xticks=tticks, ylim=ylim, xlim=xlim, xlabel=xlabel)
            ax.set_ylabel('%0.1f Hz' % freq, rotation=0,
                          horizontalalignment='right')
    mne.viz.adjust_axes(axes)
    mne.viz.tight_layout()
    # NOTE(review): ``title`` is not defined in this function -- it resolves
    # to the leftover loop variable from the earlier plotting loop, so the
    # suptitle text is likely unintended; confirm and fix if so.
    plt.suptitle(title)
    plt.show()
baseline_plot(x)
###############################################################################
# In response, Maess *et al.* (2016) [11]_ note that these simulations do not
# address cases of pre-stimulus activity that is shared across conditions, as
# applying baseline correction will effectively copy the topology outside the
# baseline period. We can see this if we give our signal ``x`` with some
# consistent pre-stimulus activity, which makes everything look bad.
#
# .. note:: An important thing to keep in mind with these plots is that they
# are for a single simulated sensor. In multi-electrode recordings
# the topology (i.e., spatial pattern) of the pre-stimulus activity
# will leak into the post-stimulus period. This will likely create a
# spatially varying distortion of the time-domain signals, as the
# averaged pre-stimulus spatial pattern gets subtracted from the
# sensor time courses.
#
# Putting some activity in the baseline period:
n_pre = (t < 0).sum()  # number of pre-stimulus samples
# Raised-cosine bump spanning the whole pre-stimulus period
sig_pre = 1 - np.cos(2 * np.pi * np.arange(n_pre) / (0.5 * n_pre))
x[:n_pre] += sig_pre
baseline_plot(x)
###############################################################################
# Both groups seem to acknowledge that the choices of filtering cutoffs, and
# perhaps even the application of baseline correction, depend on the
# characteristics of the data being investigated, especially when it comes to:
#
# 1. The frequency content of the underlying evoked activity relative
# to the filtering parameters.
# 2. The validity of the assumption of no consistent evoked activity
# in the baseline period.
#
# We thus recommend carefully applying baseline correction and/or high-pass
# values based on the characteristics of the data to be analyzed.
#
#
# Filtering defaults
# ==================
#
# .. _tut_filtering_in_python:
#
# Defaults in MNE-Python
# ----------------------
#
# Most often, filtering in MNE-Python is done at the :class:`mne.io.Raw` level,
# and thus :func:`mne.io.Raw.filter` is used. This function under the hood
# (among other things) calls :func:`mne.filter.filter_data` to actually
# filter the data, which by default applies a zero-phase FIR filter designed
# using :func:`scipy.signal.firwin`. In Widmann *et al.* (2015) [7]_, they
# suggest a specific set of parameters to use for high-pass filtering,
# including:
#
# "... providing a transition bandwidth of 25% of the lower passband
# edge but, where possible, not lower than 2 Hz and otherwise the
# distance from the passband edge to the critical frequency.”
#
# In practice, this means that for each high-pass value ``l_freq`` or
# low-pass value ``h_freq`` below, you would get this corresponding
# ``l_trans_bandwidth`` or ``h_trans_bandwidth``, respectively,
# if the sample rate were 100 Hz (i.e., Nyquist frequency of 50 Hz):
#
# +------------------+-------------------+-------------------+
# | l_freq or h_freq | l_trans_bandwidth | h_trans_bandwidth |
# +==================+===================+===================+
# | 0.01 | 0.01 | 2.0 |
# +------------------+-------------------+-------------------+
# | 0.1 | 0.1 | 2.0 |
# +------------------+-------------------+-------------------+
# | 1.0 | 1.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 2.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 4.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 8.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 10.0 | 2.5 | 2.5 |
# +------------------+-------------------+-------------------+
# | 20.0 | 5.0 | 5.0 |
# +------------------+-------------------+-------------------+
# | 40.0 | 10.0 | 10.0 |
# +------------------+-------------------+-------------------+
# | 50.0 | 12.5 | 12.5 |
# +------------------+-------------------+-------------------+
#
# MNE-Python has adopted this definition for its high-pass (and low-pass)
# transition bandwidth choices when using ``l_trans_bandwidth='auto'`` and
# ``h_trans_bandwidth='auto'``.
#
# To choose the filter length automatically with ``filter_length='auto'``,
# the reciprocal of the shortest transition bandwidth is used to ensure
# decent attenuation at the stop frequency. Specifically, the reciprocal
# (in samples) is multiplied by 3.1, 3.3, or 5.0 for the Hann, Hamming,
# or Blackman windows, respectively, as selected by the ``fir_window``
# argument for ``fir_design='firwin'``, and double these for
# ``fir_design='firwin2'`` mode.
#
# .. note:: For ``fir_design='firwin2'``, the multiplicative factors are
# doubled compared to what is given in Ifeachor & Jervis (2002) [2]_
# (p. 357), as :func:`scipy.signal.firwin2` has a smearing effect
# on the frequency response, which we compensate for by
# increasing the filter length. This is why
#           ``fir_design='firwin'`` is preferred to ``fir_design='firwin2'``.
#
# In 0.14, we default to using a Hamming window in filter design, as it
# provides up to 53 dB of stop-band attenuation with small pass-band ripple.
#
# .. note:: In band-pass applications, often a low-pass filter can operate
# effectively with fewer samples than the high-pass filter, so
# it is advisable to apply the high-pass and low-pass separately
# when using ``fir_design='firwin2'``. For design mode
# ``fir_design='firwin'``, there is no need to separate the
# operations, as the lowpass and highpass elements are constructed
# separately to meet the transition band requirements.
#
# For more information on how to use the
# MNE-Python filtering functions with real data, consult the preprocessing
# tutorial on :ref:`tut-filter-resample`.
#
# Defaults in MNE-C
# -----------------
# MNE-C by default uses:
#
# 1. 5 Hz transition band for low-pass filters.
# 2. 3-sample transition band for high-pass filters.
# 3. Filter length of 8197 samples.
#
# The filter is designed in the frequency domain, creating a linear-phase
# filter such that the delay is compensated for as is done with the MNE-Python
# ``phase='zero'`` filtering option.
#
# Squared-cosine ramps are used in the transition regions. Because these
# are used in place of more gradual (e.g., linear) transitions,
# a given transition width will result in more temporal ringing but also more
# rapid attenuation than the same transition width in windowed FIR designs.
#
# The default filter length will generally have excellent attenuation
# but long ringing for the sample rates typically encountered in M/EEG data
# (e.g. 500-2000 Hz).
#
# Defaults in other software
# --------------------------
# A good but possibly outdated comparison of filtering in various software
# packages is available in Widmann *et al.* (2015) [7]_. Briefly:
#
# * EEGLAB
# MNE-Python 0.14 defaults to behavior very similar to that of EEGLAB
# (see the `EEGLAB filtering FAQ`_ for more information).
# * FieldTrip
# By default FieldTrip applies a forward-backward Butterworth IIR filter
# of order 4 (band-pass and band-stop filters) or 2 (for low-pass and
# high-pass filters). Similar filters can be achieved in MNE-Python when
# filtering with :meth:`raw.filter(..., method='iir') <mne.io.Raw.filter>`
# (see also :func:`mne.filter.construct_iir_filter` for options).
# For more information, see e.g. the
# `FieldTrip band-pass documentation <ftbp_>`_.
#
# Reporting Filters
# =================
# On page 45 in Widmann *et al.* (2015) [7]_, there is a convenient list of
# important filter parameters that should be reported with each publication:
#
# 1. Filter type (high-pass, low-pass, band-pass, band-stop, FIR, IIR)
# 2. Cutoff frequency (including definition)
# 3. Filter order (or length)
# 4. Roll-off or transition bandwidth
# 5. Passband ripple and stopband attenuation
# 6. Filter delay (zero-phase, linear-phase, non-linear phase) and causality
# 7. Direction of computation (one-pass forward/reverse, or two-pass forward
# and reverse)
#
# In the following, we will address how to deal with these parameters in MNE:
#
#
# Filter type
# -----------
# Depending on the function or method used, the filter type can be specified.
# To name an example, in :func:`mne.filter.create_filter`, the relevant
# arguments would be `l_freq`, `h_freq`, `method`, and if the method is FIR
# `fir_window` and `fir_design`.
#
#
# Cutoff frequency
# ----------------
# The cutoff of FIR filters in MNE is defined as half-amplitude cutoff in the
# middle of the transition band. That is, if you construct a lowpass FIR filter
# with ``h_freq = 40``, the filter function will provide a transition
# bandwidth that depends on the `h_trans_bandwidth` argument. The desired
# half-amplitude cutoff of the lowpass FIR filter is then at
# ``h_freq + transition_bandwidth/2.``.
#
# Filter length (order) and transition bandwidth (roll-off)
# ---------------------------------------------------------
# In the :ref:`tut_filtering_in_python` section, we have already talked about
# the default filter lengths and transition bandwidths that are used when no
# custom values are specified using the respective filter function's arguments.
#
# If you want to find out about the filter length and transition bandwidth that
# were used through the 'auto' setting, you can use
# :func:`mne.filter.create_filter` to print out the settings once more:
# Use the same settings as when calling e.g., `raw.filter()`
fir_coefs = mne.filter.create_filter(
    data=None,  # data is only used for sanity checking, not strictly needed
    sfreq=1000.,  # sfreq of your data in Hz
    l_freq=None,
    h_freq=40.,  # assuming a lowpass of 40 Hz
    method='fir',
    fir_window='hamming',
    fir_design='firwin',
    verbose=True)
# See the printed log for the transition bandwidth and filter length.
# Alternatively, get the filter length through:
filter_length = fir_coefs.shape[0]  # number of FIR taps
###############################################################################
# .. note:: If you are using an IIR filter, :func:`mne.filter.create_filter`
# will not print a filter length and transition bandwidth to the log.
# Instead, you can specify the roll-off with the `iir_params`
# argument or stay with the default, which is a fourth order
# (Butterworth) filter.
#
# Passband ripple and stopband attenuation
# ----------------------------------------
#
# When using the standard :func:`scipy.signal.firwin` design (as for FIR
# MNE), the passband ripple and stopband attenuation are dependent upon the
# window used in design. For standard windows the values are listed in this
# table (see Ifeachor & Jervis (2002) [2]_, p. 357):
#
# +-------------------------+-----------------+----------------------+
# | Name of window function | Passband ripple | Stopband attenuation |
# +=========================+=================+======================+
# | Hann | 0.0545 dB | 44 dB |
# +-------------------------+-----------------+----------------------+
# | Hamming | 0.0194 dB | 53 dB |
# +-------------------------+-----------------+----------------------+
# | Blackman | 0.0017 dB | 74 dB |
# +-------------------------+-----------------+----------------------+
#
#
# Filter delay and direction of computation
# -----------------------------------------
# For reporting this information, it might be sufficient to read the docstring
# of the filter function or method that you apply. For example in the
# docstring of `mne.filter.create_filter`, for the phase parameter it says:
#
# Phase of the filter, only used if ``method='fir'``.
# By default, a symmetric linear-phase FIR filter is constructed.
# If ``phase='zero'`` (default), the delay of this filter
# is compensated for. If ``phase=='zero-double'``, then this filter
# is applied twice, once forward, and once backward. If 'minimum',
# then a minimum-phase, causal filter will be used.
#
#
# Summary
# =======
#
# When filtering, there are always trade-offs that should be considered.
# One important trade-off is between time-domain characteristics (like ringing)
# and frequency-domain attenuation characteristics (like effective transition
# bandwidth). Filters with sharp frequency cutoffs can produce outputs that
# ring for a long time when they operate on signals with frequency content
# in the transition band. In general, therefore, the wider a transition band
# that can be tolerated, the better behaved the filter will be in the time
# domain.
#
# References
# ==========
#
# .. [1] Parks TW, Burrus CS (1987). Digital Filter Design.
# New York: Wiley-Interscience.
# .. [2] Ifeachor, E. C., & Jervis, B. W. (2002). Digital Signal Processing:
# A Practical Approach. Prentice Hall.
# .. [3] Vanrullen, R. (2011). Four common conceptual fallacies in mapping
# the time course of recognition. Perception Science, 2, 365.
# .. [4] Acunzo, D. J., MacKenzie, G., & van Rossum, M. C. W. (2012).
# Systematic biases in early ERP and ERF components as a result
# of high-pass filtering. Journal of Neuroscience Methods,
# 209(1), 212–218. https://doi.org/10.1016/j.jneumeth.2012.06.011
# .. [5] Rousselet, G. A. (2012). Does filtering preclude us from studying
# ERP time-courses? Frontiers in Psychology, 3(131)
# .. [6] Widmann, A., & Schröger, E. (2012). Filter effects and filter
# artifacts in the analysis of electrophysiological data.
# Perception Science, 233.
# .. [7] Widmann, A., Schröger, E., & Maess, B. (2015). Digital filter
# design for electrophysiological data – a practical approach.
# Journal of Neuroscience Methods, 250, 34–46.
# https://doi.org/10.1016/j.jneumeth.2014.08.002
# .. [8] Tanner, D., Morgan-Short, K., & Luck, S. J. (2015).
# How inappropriate high-pass filters can produce artifactual effects
# and incorrect conclusions in ERP studies of language and cognition.
# Psychophysiology, 52(8), 997–1009. https://doi.org/10.1111/psyp.12437
# .. [9] Maess, B., Schröger, E., & Widmann, A. (2016).
# High-pass filters and baseline correction in M/EEG analysis.
# Commentary on: “How inappropriate high-pass filters can produce
# artifacts and incorrect conclusions in ERP studies of language
# and cognition.” Journal of Neuroscience Methods, 266, 164–165.
# .. [10] Tanner, D., Norton, J. J. S., Morgan-Short, K., & Luck, S. J. (2016).
# On high-pass filter artifacts (they’re real) and baseline correction
# (it’s a good idea) in ERP/ERMF analysis.
# .. [11] Maess, B., Schröger, E., & Widmann, A. (2016).
# High-pass filters and baseline correction in M/EEG analysis-continued
# discussion. Journal of Neuroscience Methods, 266, 171–172.
# Journal of Neuroscience Methods, 266, 166–170.
# .. [12] Kappenman E. & Luck, S. (2010). The effects of impedance on data
# quality and statistical significance in ERP recordings.
# Psychophysiology, 47, 888-904.
#
# .. _FIR: https://en.wikipedia.org/wiki/Finite_impulse_response
# .. _IIR: https://en.wikipedia.org/wiki/Infinite_impulse_response
# .. _sinc: https://en.wikipedia.org/wiki/Sinc_function
# .. _moving average: https://en.wikipedia.org/wiki/Moving_average
# .. _autoregression: https://en.wikipedia.org/wiki/Autoregressive_model
# .. _Remez: https://en.wikipedia.org/wiki/Remez_algorithm
# .. _matlab firpm: https://www.mathworks.com/help/signal/ref/firpm.html
# .. _matlab fir2: https://www.mathworks.com/help/signal/ref/fir2.html
# .. _matlab firls: https://www.mathworks.com/help/signal/ref/firls.html
# .. _Butterworth filter: https://en.wikipedia.org/wiki/Butterworth_filter
# .. _eeglab filtering faq: https://sccn.ucsd.edu/wiki/Firfilt_FAQ
# .. _ftbp: http://www.fieldtriptoolbox.org/reference/ft_preproc_bandpassfilter
| bsd-3-clause |
andrewcmyers/tensorflow | tensorflow/contrib/timeseries/examples/predict_test.py | 80 | 2487 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the prediction example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from tensorflow.contrib.timeseries.examples import predict
from tensorflow.python.platform import test
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/period_trend.csv")
class PeriodTrendExampleTest(test.TestCase):
def test_shapes_and_variance_structural(self):
(times, observed, all_times, mean, upper_limit, lower_limit
) = predict.structural_ensemble_train_and_predict(_DATA_FILE)
# Just check that plotting will probably be OK. We can't actually run the
# plotting code since we don't want to pull in matplotlib as a dependency
# for this test.
self.assertAllEqual([500], times.shape)
self.assertAllEqual([500], observed.shape)
self.assertAllEqual([700], all_times.shape)
self.assertAllEqual([700], mean.shape)
self.assertAllEqual([700], upper_limit.shape)
self.assertAllEqual([700], lower_limit.shape)
# Check that variance hasn't blown up too much. This is a relatively good
# indication that training was successful.
self.assertLess(upper_limit[-1] - lower_limit[-1],
1.5 * (upper_limit[0] - lower_limit[0]))
def test_ar(self):
(times, observed, all_times, mean,
upper_limit, lower_limit) = predict.ar_train_and_predict(_DATA_FILE)
self.assertAllEqual(times.shape, observed.shape)
self.assertAllEqual(all_times.shape, mean.shape)
self.assertAllEqual(all_times.shape, upper_limit.shape)
self.assertAllEqual(all_times.shape, lower_limit.shape)
self.assertLess((upper_limit - lower_limit).mean(), 4.)
if __name__ == "__main__":
test.main()
| apache-2.0 |
shyamalschandra/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
YJango/tensorflow | Py_version/FNNs_Demo/demoLV3.py | 1 | 12562 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/6/19 11:10
# @Author : zzy824
# @File : demoLV3.py
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
tf.set_random_seed(55)
np.random.seed(55)
""" add some branched based on demoLV2,X.npy and Y.npy are files including data for demo"""
class FNN(object):
"""Build a general FeedForward neural network
:param
----------
learning_rate: float
drop_out: float
Layers: list
The number of layers
N_hidden: list
The number of nodes in layers
D_input: int
Input dimension
D_label: int
Label dimension
Task_type: string
'regression' or 'classification'
L2_lambda: float
First_Author : YJango; 2016/11/25
Second_Author: zzy824;2018/6/15
"""
def __init__(self, learning_rate, Layers, N_hidden,
D_input, D_label, Task_type='regression', L2_lambda=0.0):
# the whole sharing attribute
self.learning_rate = learning_rate
self.Layers = Layers
self.N_hidden = N_hidden
self.D_input = D_input
self.D_label = D_label
# loss function controled by Task_type
self.Task_type = Task_type
# L2 regularizition's strength
self.L2_lambda = L2_lambda
# store L2 regularization for each layer
self.l2_penalty = tf.constant(0.0)
# hid_layers for storing output of all hidden layers
self.hid_layers = []
# W for storing weights of all layers
self.W = []
# b for storing biases of all layers
self.b = []
# total_l2 for storing L2 of all layers
self.total_l2 = []
# those parameters will be define in "build" function
self.train_step = None
self.output = None
self.loss = None
self.accuracy = None
self.total_loss = None
# for generating figures of tensorflow
with tf.name_scope('Input'):
self.inputs = tf.placeholder(tf.float32, [None, D_input], name="inputs")
with tf.name_scope('Label'):
self.labels = tf.placeholder(tf.float32, [None, D_label], name='labels')
with tf.name_scope('keep_rate'):
self.drop_keep_rate = tf.placeholder(tf.float32, name='dropout_keep')
# generate when initialize
self.build('F')
@staticmethod
def weight_init(shape):
"""Initialize weight of neural network and initialization could be changed here
Args:
shape: list [in_dim, out_dim]
Returns:
a Varible which is initialized by random_uniform
"""
initial = tf.random_uniform(shape, minval=-np.sqrt(5) * np.sqrt(1.0 / shape[0]),
maxval=np.sqrt(5) * np.sqrt(1.0 / shape[0]))
return tf.Variable(initial)
@staticmethod
def bias_init(shape):
"""Initialize weight of neural network and initialization could be changed here
Args:
shape: list [in_dim, out_dim]
Returns:
a Varible which is initialize by a constant
"""
# can change initialization here
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
@staticmethod
def variable_summaries(var, name):
"""For recording data in training process
Args:
var: numbers for calculating
name: names for name_scope
"""
# generate two figures display sum and mean
with tf.name_scope(name + '_summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean_' + name, mean)
with tf.name_scope(name + '_stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
# record changes in value after each time training
tf.summary.scalar('_stddev_' + name, stddev)
tf.summary.scalar('_max_' + name, tf.reduce_max(var))
tf.summary.scalar('_min_' + name, tf.reduce_min(var))
tf.summary.histogram(name=name, values=var)
def layer(self, in_tensor, in_dim, out_dim, layer_name, act=tf.nn.relu):
""" a Fuction for establishing each neural layer
Args:
:param in_tensor:
:param in_dim:
:param out_dim:
:param layer_name:
:param act:
:return:
"""
with tf.name_scope(layer_name):
with tf.name_scope(layer_name+'_weights'):
# initialize weight with weight_init()
weights = self.weight_init([in_dim, out_dim])
# self.W will state before usage of this function
self.W.append(weights)
# count weight
self.variable_summaries(weights, layer_name + '_weights')
with tf.name_scope(layer_name + 'biases'):
biases = self.bias_init([out_dim])
# self.b will state before usage of this function
self.b.append(biases)
self.variable_summaries(biases, layer_name + '_biases')
with tf.name_scope(layer_name + '_Wx_plus_b'):
# calculate Wx+b
pre_activate = tf.matmul(in_tensor, weights) + biases
# count histogram
tf.summary.histogram(layer_name + '_pre_activations', pre_activate)
# calculate a(Wx+b)
activations = act(pre_activate, name='activation')
tf.summary.histogram(layer_name + '_activations', activations)
# return with output of this layer and L2_loss of weight
return activations, tf.nn.l2_loss(weights)
def drop_layer(self, in_tensor):
""" dropout layer of nerual network
:param in_tensor:
:return:
"""
# tf.scalar_summary('dropout_keep', self.drop_keep_rate)
dropped = tf.nn.dropout(in_tensor, self.drop_keep_rate)
return dropped
def build(self, prefix):
# build network
# incoming represent the position of current tensor
incoming = self.inputs
# if not hidden layer
if self.Layers != 0:
layer_nodes = [self.D_input] + self.N_hidden
else:
layer_nodes = [self.D_input]
# build hidden layers
for l in range(self.Layers):
# build layers through self.layers and refresh the position of incoming
incoming, l2_loss = self.layer(incoming, layer_nodes[l], layer_nodes[l + 1], prefix + '_hid_' + str(l + 1),
act=tf.nn.relu)
# count l2
self.total_l2.append(l2_loss)
# print some messages of what happened in nerual network
print('Add dense layer: relu')
print(' %sD --> %sD' % (layer_nodes[l], layer_nodes[l + 1]))
# store outputs of hidden layer
self.hid_layers.append(incoming)
# add dropout layer
incoming = self.drop_layer(incoming)
# build output layer as activation functions usually change with specific tasks:
# if the task is regression then we will use tf.identity rather than activation function
if self.Task_type == 'regression':
out_act = tf.identity
else:
# if the task is classification then we will use softmax to fitting probability
out_act = tf.nn.softmax
self.output, l2_loss = self.layer(incoming, layer_nodes[-1], self.D_label, layer_name='output', act=out_act)
print('Add output layer: linear')
print(' %sD --> %sD' % (layer_nodes[-1], self.D_label))
# l2 loss's zoom figure
with tf.name_scope('total_l2'):
for l2 in self.total_l2:
self.l2_penalty += l2
tf.summary.scalar('l2_penalty', self.l2_penalty)
# loss of different figures:
# if task's type is regression, the loss function is for judging difference value
# between prediction and actual value
if self.Task_type == 'regression':
with tf.name_scope('SSE'):
self.loss = tf.reduce_mean((self.output - self.labels) ** 2)
tf.summary.scalar('loss', self.loss)
else:
# if task's type is classification, the loss function is cross entrophy
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.output, labels=self.labels)
with tf.name_scope('cross_entropy'):
self.loss = tf.reduce_mean(entropy)
tf.scalar_summary('loss', self.loss)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(self.output, 1), tf.argmax(self.labels, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.scalar_summary('accuracy', self.accuracy)
# aggregate all losses
with tf.name_scope('total_loss'):
self.total_loss = self.loss + self.l2_penalty * self.L2_lambda
tf.summary.scalar('total_loss', self.total_loss)
# operation of training
with tf.name_scope('train'):
self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.total_loss)
# shuffle function
@staticmethod
def shufflelists(lists):
ri = np.random.permutation(len(lists[1]))
out = []
for l in lists:
out.append(l[ri])
return out
def Standardize(seq):
"""
:param seq:
:return:
"""
# subtract mean
centerized = seq-np.mean(seq, axis=0)
# divide standard deviation
normalized = centerized/np.std(centerized, axis=0)
return normalized
def Makewindows(indata, window_size=41):
outdata = []
mid = int(window_size/2)
indata = np.vstack((np.zeros((mid, indata.shape[1])), indata, np.zeros((mid, indata.shape[1]))))
for index in range(indata.shape[0]-window_size+1):
outdata.append(np.hstack(indata[index: index + window_size]))
return np.array(outdata)
# prepare some data for training "XOR"
mfc = np.load('X.npy')
art = np.load('Y.npy')
x = []
y = []
for i in range(len(mfc)):
x.append(Makewindows(Standardize(mfc[i])))
y.append(Standardize(art[i]))
vali_size = 20
totalsamples = len(np.vstack(x))
X_train = np.vstack(x)[int(totalsamples/vali_size):].astype("float32")
Y_train = np.vstack(y)[int(totalsamples/vali_size):].astype("float32")
X_test = np.vstack(x)[:int(totalsamples/vali_size)].astype("float32")
Y_test = np.vstack(y)[:int(totalsamples/vali_size)].astype("float32")
# print the shape of train and test data
print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)
# generate instance of neural network
ff = FNN(learning_rate=7e-5,
Layers=5,
N_hidden=[2048, 1024, 512, 256, 128],
D_input=1599,
D_label=24,
L2_lambda=1e-4)
# loading
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('log3' + '/train', sess.graph)
test_writer = tf.summary.FileWriter('log3' + '/test')
#
def plots(T, P, i, n=21, length=400):
m = 0
plt.figure(figsize=(20, 16))
plt.subplot(411)
plt.plot(T[m:m + length, 7], '--')
plt.plot(P[m:m + length, 7])
plt.subplot(412)
plt.plot(T[m:m + length, 8], '--')
plt.plot(P[m:m + length, 8])
plt.subplot(413)
plt.plot(T[m:m + length, 15], '--')
plt.plot(P[m:m + length, 15])
plt.subplot(414)
plt.plot(T[m:m + length, 16], '--')
plt.plot(P[m:m + length, 16])
plt.legend(['True', 'Predicted'])
plt.savefig('epoch' + str(i) + '.png')
plt.close()
# training and record
k = 0
Batch = 32
for i in range(1):
idx = 0
X0, Y0 = ff.shufflelists([X_train, Y_train])
while idx < X_train.shape[0]:
summary, _ = sess.run([merged, ff.train_step], feed_dict={ff.inputs: X0[idx:idx+Batch], ff.labels: Y0[idx:idx+Batch], ff.drop_keep_rate: 1.0}) # when set "keep rate = 1" means unuse of dropout
idx += Batch
k += 1
train_writer.add_summary(summary, k)
# test
summary, pY, pL = sess.run([merged, ff.output, ff.loss], feed_dict={ff.inputs: X_test, ff.labels: Y_test, ff.drop_keep_rate: 1.0})
plots(Y_test, pY, i)
test_writer.add_summary(summary, k)
print('epoch%s | train_loss:%s |test_loss:%s' % (i, sess.run(ff.loss,feed_dict={ff.inputs: X0, ff.labels: Y0, ff.drop_keep_rate: 1.0}), sess.run(ff.loss, feed_dict={ff.inputs: X_test, ff.labels: Y_test, ff.drop_keep_rate: 1.0}))) | gpl-3.0 |
anntzer/scikit-learn | sklearn/decomposition/_fastica.py | 7 | 21041 | """
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..exceptions import ConvergenceWarning
from ..utils import check_array, as_float_array, check_random_state
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.validation import _deprecate_positional_args
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W.
Parameters
----------
w : ndarray of shape (n,)
Array to be orthogonalized
W : ndarray of shape (p, n)
Null space definition
j : int < p
The no of (from the first) rows of Null space W wrt which w is
orthogonalized.
Notes
-----
Assumes that W is orthogonal
w changed in place
"""
w -= np.linalg.multi_dot([w, W[:j].T, W[:j]])
return w
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
s, u = linalg.eigh(np.dot(W, W.T))
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
return np.linalg.multi_dot([u * (1. / np.sqrt(s)), u.T, W])
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
"""Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
"""
n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=X.dtype)
n_iter = []
# j is the index of the extracted component
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w ** 2).sum())
for i in range(max_iter):
gwtx, g_wtx = g(np.dot(w.T, X), fun_args)
w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
_gs_decorrelation(w1, W, j)
w1 /= np.sqrt((w1 ** 2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
if lim < tol:
break
n_iter.append(i + 1)
W[j, :] = w
return W, max(n_iter)
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
"""Parallel FastICA.
Used internally by FastICA --main loop
"""
W = _sym_decorrelation(w_init)
del w_init
p_ = float(X.shape[1])
for ii in range(max_iter):
gwtx, g_wtx = g(np.dot(W, X), fun_args)
W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_
- g_wtx[:, np.newaxis] * W)
del gwtx, g_wtx
# builtin max, abs are faster than numpy counter parts.
lim = max(abs(abs(np.diag(np.dot(W1, W.T))) - 1))
W = W1
if lim < tol:
break
else:
warnings.warn('FastICA did not converge. Consider increasing '
'tolerance or the maximum number of iterations.',
ConvergenceWarning)
return W, ii + 1
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
def _logcosh(x, fun_args=None):
alpha = fun_args.get('alpha', 1.0) # comment it out?
x *= alpha
gx = np.tanh(x, x) # apply the tanh inplace
g_x = np.empty(x.shape[0])
# XXX compute in chunks to avoid extra allocation
for i, gx_i in enumerate(gx): # please don't vectorize.
g_x[i] = (alpha * (1 - gx_i ** 2)).mean()
return gx, g_x
def _exp(x, fun_args):
exp = np.exp(-(x ** 2) / 2)
gx = x * exp
g_x = (1 - x ** 2) * exp
return gx, g_x.mean(axis=-1)
def _cube(x, fun_args):
return x ** 3, (3 * x ** 2).mean(axis=-1)
@_deprecate_positional_args
def fastica(X, n_components=None, *, algorithm="parallel", whiten=True,
fun="logcosh", fun_args=None, max_iter=200, tol=1e-04, w_init=None,
random_state=None, return_X_mean=False, compute_sources=True,
return_n_iter=False):
"""Perform Fast Independent Component Analysis.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
n_components : int, default=None
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, default='parallel'
Apply a parallel or deflational FASTICA algorithm.
whiten : bool, default=True
If True perform an initial whitening of the data.
If False, the data is assumed to have already been
preprocessed: it should be centered, normed and white.
Otherwise you will get incorrect results.
In this case the parameter n_components will be ignored.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. The derivative should be averaged along its last dimension.
Example:
def my_g(x):
return x ** 3, np.mean(3 * x ** 2, axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, default=200
Maximum number of iterations to perform.
tol : float, default=1e-04
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : ndarray of shape (n_components, n_components), default=None
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used.
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
return_X_mean : bool, default=False
If True, X_mean is returned too.
compute_sources : bool, default=True
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
K : ndarray of shape (n_components, n_features) or None
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : ndarray of shape (n_components, n_components)
The square matrix that unmixes the data after whitening.
The mixing matrix is the pseudo-inverse of matrix ``W K``
if K is not None, else it is the inverse of W.
S : ndarray of shape (n_samples, n_components) or None
Estimated source matrix
X_mean : ndarray of shape (n_features,)
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
While FastICA was proposed to estimate as many sources
as features, it is possible to estimate less by setting
n_components < n_features. It this case K is not a square matrix
and the estimated A is the pseudo-inverse of ``W K``.
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
Implemented using FastICA:
*A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430*
"""
est = FastICA(n_components=n_components, algorithm=algorithm,
whiten=whiten, fun=fun, fun_args=fun_args,
max_iter=max_iter, tol=tol, w_init=w_init,
random_state=random_state)
sources = est._fit(X, compute_sources=compute_sources)
if whiten:
if return_X_mean:
if return_n_iter:
return (est.whitening_, est._unmixing, sources, est.mean_,
est.n_iter_)
else:
return est.whitening_, est._unmixing, sources, est.mean_
else:
if return_n_iter:
return est.whitening_, est._unmixing, sources, est.n_iter_
else:
return est.whitening_, est._unmixing, sources
else:
if return_X_mean:
if return_n_iter:
return None, est._unmixing, sources, None, est.n_iter_
else:
return None, est._unmixing, sources, None
else:
if return_n_iter:
return None, est._unmixing, sources, est.n_iter_
else:
return None, est._unmixing, sources
class FastICA(TransformerMixin, BaseEstimator):
"""FastICA: a fast algorithm for Independent Component Analysis.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
n_components : int, default=None
Number of components to use. If None is passed, all are used.
algorithm : {'parallel', 'deflation'}, default='parallel'
Apply parallel or deflational algorithm for FastICA.
whiten : bool, default=True
If whiten is false, the data is already considered to be
whitened, and no whitening is performed.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example::
def my_g(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}.
max_iter : int, default=200
Maximum number of iterations during fit.
tol : float, default=1e-4
Tolerance on update at each iteration.
w_init : ndarray of shape (n_components, n_components), default=None
The mixing matrix to be used to initialize the algorithm.
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
The linear operator to apply to the data to get the independent
sources. This is equal to the unmixing matrix when ``whiten`` is
False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when
``whiten`` is True.
mixing_ : ndarray of shape (n_features, n_components)
The pseudo-inverse of ``components_``. It is the linear operator
that maps independent sources to the data.
mean_ : ndarray of shape(n_features,)
The mean over features. Only set if `self.whiten` is True.
n_iter_ : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge.
whitening_ : ndarray of shape (n_components, n_features)
Only set if whiten is 'True'. This is the pre-whitening matrix
that projects data onto the first `n_components` principal components.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.decomposition import FastICA
>>> X, _ = load_digits(return_X_y=True)
>>> transformer = FastICA(n_components=7,
... random_state=0)
>>> X_transformed = transformer.fit_transform(X)
>>> X_transformed.shape
(1797, 7)
Notes
-----
Implementation based on
*A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430*
"""
@_deprecate_positional_args
def __init__(self, n_components=None, *, algorithm='parallel', whiten=True,
fun='logcosh', fun_args=None, max_iter=200, tol=1e-4,
w_init=None, random_state=None):
super().__init__()
if max_iter < 1:
raise ValueError("max_iter should be greater than 1, got "
"(max_iter={})".format(max_iter))
self.n_components = n_components
self.algorithm = algorithm
self.whiten = whiten
self.fun = fun
self.fun_args = fun_args
self.max_iter = max_iter
self.tol = tol
self.w_init = w_init
self.random_state = random_state
def _fit(self, X, compute_sources=False):
"""Fit the model
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
compute_sources : bool, default=False
If False, sources are not computes but only the rotation matrix.
This can save memory when working with big data. Defaults to False.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
"""
X = self._validate_data(X, copy=self.whiten, dtype=FLOAT_DTYPES,
ensure_min_samples=2).T
fun_args = {} if self.fun_args is None else self.fun_args
random_state = check_random_state(self.random_state)
alpha = fun_args.get('alpha', 1.0)
if not 1 <= alpha <= 2:
raise ValueError('alpha must be in [1,2]')
if self.fun == 'logcosh':
g = _logcosh
elif self.fun == 'exp':
g = _exp
elif self.fun == 'cube':
g = _cube
elif callable(self.fun):
def g(x, fun_args):
return self.fun(x, **fun_args)
else:
exc = ValueError if isinstance(self.fun, str) else TypeError
raise exc(
"Unknown function %r;"
" should be one of 'logcosh', 'exp', 'cube' or callable"
% self.fun
)
n_samples, n_features = X.shape
n_components = self.n_components
if not self.whiten and n_components is not None:
n_components = None
warnings.warn('Ignoring n_components with whiten=False.')
if n_components is None:
n_components = min(n_samples, n_features)
if (n_components > min(n_samples, n_features)):
n_components = min(n_samples, n_features)
warnings.warn(
'n_components is too large: it will be set to %s'
% n_components
)
if self.whiten:
# Centering the columns (ie the variables)
X_mean = X.mean(axis=-1)
X -= X_mean[:, np.newaxis]
# Whitening and preprocessing by PCA
u, d, _ = linalg.svd(X, full_matrices=False, check_finite=False)
del _
K = (u / d).T[:n_components] # see (6.33) p.140
del u, d
X1 = np.dot(K, X)
# see (13.6) p.267 Here X1 is white and data
# in X has been projected onto a subspace by PCA
X1 *= np.sqrt(n_features)
else:
# X must be casted to floats to avoid typing issues with numpy
# 2.0 and the line below
X1 = as_float_array(X, copy=False) # copy has been taken care of
w_init = self.w_init
if w_init is None:
w_init = np.asarray(random_state.normal(
size=(n_components, n_components)), dtype=X1.dtype)
else:
w_init = np.asarray(w_init)
if w_init.shape != (n_components, n_components):
raise ValueError(
'w_init has invalid shape -- should be %(shape)s'
% {'shape': (n_components, n_components)})
kwargs = {'tol': self.tol,
'g': g,
'fun_args': fun_args,
'max_iter': self.max_iter,
'w_init': w_init}
if self.algorithm == 'parallel':
W, n_iter = _ica_par(X1, **kwargs)
elif self.algorithm == 'deflation':
W, n_iter = _ica_def(X1, **kwargs)
else:
raise ValueError('Invalid algorithm: must be either `parallel` or'
' `deflation`.')
del X1
if compute_sources:
if self.whiten:
S = np.linalg.multi_dot([W, K, X]).T
else:
S = np.dot(W, X).T
else:
S = None
self.n_iter_ = n_iter
if self.whiten:
self.components_ = np.dot(W, K)
self.mean_ = X_mean
self.whitening_ = K
else:
self.components_ = W
self.mixing_ = linalg.pinv(self.components_, check_finite=False)
self._unmixing = W
return S
def fit_transform(self, X, y=None):
"""Fit the model and recover the sources from X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
"""
return self._fit(X, compute_sources=True)
def fit(self, X, y=None):
"""Fit the model to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : Ignored
Returns
-------
self
"""
self._fit(X, compute_sources=False)
return self
def transform(self, X, copy=True):
"""Recover the sources from X (apply the unmixing matrix).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to transform, where n_samples is the number of samples
and n_features is the number of features.
copy : bool, default=True
If False, data passed to fit can be overwritten. Defaults to True.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
"""
check_is_fitted(self)
X = self._validate_data(X, copy=(copy and self.whiten),
dtype=FLOAT_DTYPES, reset=False)
if self.whiten:
X -= self.mean_
return np.dot(X, self.components_.T)
def inverse_transform(self, X, copy=True):
"""Transform the sources back to the mixed data (apply mixing matrix).
Parameters
----------
X : array-like of shape (n_samples, n_components)
Sources, where n_samples is the number of samples
and n_components is the number of components.
copy : bool, default=True
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_new : ndarray of shape (n_samples, n_features)
"""
check_is_fitted(self)
X = check_array(X, copy=(copy and self.whiten), dtype=FLOAT_DTYPES)
X = np.dot(X, self.mixing_.T)
if self.whiten:
X += self.mean_
return X
| bsd-3-clause |
inpefess/kaggle_competitions | cats_vs_dogs/predict.py | 1 | 1193 | import argparse
import pandas as pd
from keras.models import load_model
from cats_vs_dogs.config import data_dir
from cats_vs_dogs.data_generators import DataGenerators
def save_predictions(
        model_file: str,
        submission_filename: str = "submission.csv"
):
    """Run a trained model over the test images and write a Kaggle submission.

    Parameters
    ----------
    model_file : str
        Path to a serialized Keras model loadable with ``load_model``.
    submission_filename : str, default "submission.csv"
        Output CSV path; the file gets an ``id`` and a ``label`` column.

    Raises
    ------
    ValueError
        If the number of test samples is not divisible by the batch size
        (``predict_generator`` would otherwise silently drop the remainder).
    """
    batch_size = 20
    model = load_model(model_file)
    data_generators = DataGenerators(batch_size, data_dir)
    test_data_generator = data_generators.get_data_generator("test")
    # Filenames look like "test/123.jpg"; keep the part between the first
    # '/' and the first '.' as the sample id.
    predictions = pd.DataFrame(
        [filename[filename.index("/") + 1:filename.index(".")]
         for filename in test_data_generator.filenames],
        columns=["id"]
    )
    # Explicit check instead of ``assert``: asserts are stripped when Python
    # runs with -O, and this guard protects against silently truncated output.
    if test_data_generator.samples % batch_size != 0:
        raise ValueError(
            "number of test samples ({}) must be divisible by the batch size "
            "({})".format(test_data_generator.samples, batch_size)
        )
    predictions["label"] = model.predict_generator(
        test_data_generator,
        steps=test_data_generator.samples // batch_size,
        verbose=1
    )
    predictions.to_csv(
        submission_filename,
        header=True,
        index=False
    )
# Command-line entry point:
#   python predict.py --model-file path/to/model.h5
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model-file",
        type=str,
        required=True,  # no sensible default model exists
    )
    args = parser.parse_args()
    save_predictions(args.model_file)
| mit |
IshankGulati/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 53 | 13398 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import scipy.sparse as sp
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
    """Parse the classification fixture: check shape, stored and implicit
    zero values, mutability of the returned matrix, and the target vector."""
    X, y = load_svmlight_file(datafile)
    # test X's shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 21)
    assert_equal(y.shape[0], 6)
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
                      (1, 5, 1.0), (1, 12, -3),
                      (2, 20, 27)):
        assert_equal(X[i, j], val)
    # tests X's zero values
    assert_equal(X[0, 3], 0)
    assert_equal(X[0, 5], 0)
    assert_equal(X[1, 8], 0)
    assert_equal(X[1, 16], 0)
    assert_equal(X[2, 18], 0)
    # test can change X's values
    X[0, 2] *= 2
    assert_equal(X[0, 2], 5)
    # test y
    assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
    # Loading through a raw OS file descriptor must give the same result as
    # loading through a file path.
    expected_X, expected_y = load_svmlight_file(datafile)
    fd = os.open(datafile, os.O_RDONLY)
    try:
        actual_X, actual_y = load_svmlight_file(fd)
        assert_array_equal(expected_X.data, actual_X.data)
        assert_array_equal(expected_y, actual_y)
    finally:
        os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
X_sparse, y_dense = load_svmlight_file(datafile)
X_dense = X_sparse.toarray()
y_sparse = sp.csr_matrix(y_dense)
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
for X in (X_sparse, X_dense, X_sliced):
for y in (y_sparse, y_dense, y_sliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
if (sp.issparse(y) and y.shape[0] == 1):
# make sure y's shape is: (n_samples, n_labels)
# when it is sparse
y = y.T
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
X2_dense = X2.toarray()
if dtype == np.float32:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 4)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 4)
else:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 15)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 15)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
y_sparse = sp.csr_matrix(y_dense)
for y in [y_dense, y_sparse]:
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
def test_load_with_long_qid():
# load svmfile with longint qid attribute
data = b("""
1 qid:0 0:1 1:2 2:3
0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985
0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985
3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985""")
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
true_X = [[1, 2, 3],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985]]
true_y = [1, 0, 0, 3]
trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807]
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=qid, zero_based=True)
f.seek(0)
X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f.seek(0)
X, y = load_svmlight_file(f, query_id=False, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X) | bsd-3-clause |
Karel-van-de-Plassche/QLKNN-develop | qlknn/misc/random_access_benchmark.py | 1 | 3875 | import xarray as xr
from IPython import embed
import numpy as np
from itertools import product
import pandas as pd
#import dask.dataframe as df
import time
#import dask.array as da
import numpy as np
def cartesian(arrays, out=None):
    """
    Generate a cartesian product of input arrays.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray, optional
        Array to place the cartesian product in.  Allocated when omitted.

    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.

    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    arrays = [np.asarray(x) for x in arrays]
    dtype = arrays[0].dtype
    n = np.prod([x.size for x in arrays])
    if out is None:
        out = np.zeros([n, len(arrays)], dtype=dtype)
    # Integer division is required here: plain '/' yields a float under
    # Python 3, which breaks the repeat count and the slice indices below.
    m = n // arrays[0].size
    out[:, 0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        # Fill one stripe recursively, then tile it for the remaining values
        # of the first array.
        cartesian(arrays[1:], out=out[0:m, 1:])
        for j in range(1, arrays[0].size):
            out[j * m:(j + 1) * m, 1:] = out[0:m, 1:]
    return out
def timewrapper(func, *args, **kwargs):
    """Call ``func(*args, **kwargs)``, print the elapsed wall time, and
    return the function's result.

    Parameters
    ----------
    func : callable
        Function to time.
    *args, **kwargs
        Forwarded to ``func`` unchanged.

    Returns
    -------
    object
        Whatever ``func`` returns (previously discarded).
    """
    start = time.time()
    result = func(*args, **kwargs)
    print('Took ' + str(time.time() - start) + 's')
    return result
ds = xr.open_dataset('/global/cscratch1/sd/karel//Zeff_combined.nc')
ds = ds.drop([name for name, value in ds.data_vars.items() if 'kthetarhos' in value.dims])
ds = ds.drop([x for x in ds.coords if x not in ds.dims])
ds = ds.drop(['kthetarhos'])
ds = ds.max('numsols')
dimx = np.prod([x for x in ds.dims.values()])
traindim = 'efe_GB'
#fakeindex = cartesian(*[x for x in ds.dims.values()])
#panda = pd.read_hdf('/global/cscratch1/sd/karel/index.h5')
#random = np.random.permutation(np.arange(len(panda)))
#daarray = da.from_array(nparray, (10000, len(ds.dims)))
def iter_all(numsamp):
    """Benchmark: iterate the cartesian product of dataset coordinates,
    writing each tuple into a pre-allocated array, stopping after roughly
    ``numsamp`` rows.  Returns the elapsed wall time in seconds.
    """
    start = time.time()
    # NOTE(review): ``cart`` is never used below -- presumably left over from
    # an earlier variant of this benchmark; confirm before removing.
    cart = cartesian(ds.coords.values())
    # NOTE(review): hard-coded 9 columns -- assumes the dataset has exactly
    # 9 coordinate dimensions; verify against the loaded file.
    nparray = np.empty((dimx, 9))
    for ii, foo in enumerate(product(*ds.coords.values())):
        # NOTE(review): the second assignment overwrites the first, making
        # the explicit float conversion redundant.
        nparray[ii, :] = list(map(float, foo))
        nparray[ii, :] = foo
        if ii > numsamp:
            break
    return (time.time() - start)
def get_panda_ic_sample(numsamp, epoch=0):
    # Benchmark: random row access via DataFrame.sample.
    # Returns elapsed wall time in seconds; ``epoch`` is unused here but kept
    # for a uniform strategy-function signature.
    start = time.time()
    # NOTE(review): ``set`` shadows the builtin; rename if this is refactored.
    set = panda.sample(numsamp)
    return (time.time() - start)
def get_panda_ic_npindex(numsamp, epoch=0):
    # Benchmark: row access through a precomputed random permutation.
    start = time.time()
    # NOTE(review): slice starts at ``epoch``, not ``epoch * numsamp`` --
    # consecutive epochs overlap heavily; confirm this is intended.
    set = panda.iloc[random[epoch:(epoch + 1) * numsamp]]
    return (time.time() - start)
def get_panda_ic_npreindex(numsamp, epoch=0):
    # Benchmark: row access with freshly drawn random integer indices.
    start = time.time()
    idx = np.random.randint(0, dimx, numsamp)
    set = panda.iloc[idx]
    return (time.time() - start)
def get_xarray(numsamp, epoch=0):
    # Benchmark: pointwise random selection directly on the xarray dataset,
    # drawing an independent random index per coordinate dimension.
    start = time.time()
    ds[traindim].isel_points(**{name: np.random.randint(0, len(value), numsamp) for name, value in ds['efe_GB'].coords.items()})
    return (time.time() - start)
strats = {
# 'panda_ic_sample': get_panda_ic_sample,
'get_xarray': get_xarray,
# 'panda_ic_npindex': get_panda_ic_npindex,
# 'panda_ic_npreindex': get_panda_ic_npindex
}
numsamps = [1e3, 1e5, 1e6, 1e7, 1e8, dimx]
results = pd.DataFrame(columns=strats.keys(), index=[numsamps[0]])
numepochs = 3
embed()
for numsamp in numsamps:
results.loc[numsamp] = None
for name, func in strats.items():
result = []
for epoch in range(numepochs):
result.append(func(numsamp, epoch))
print(name, numsamp, str(epoch) + '/' + str(numepochs))
print(name, numsamp, result[epoch])
results[name].loc[numsamp] = np.mean(result)
print(results)
results.to_csv('benchmark_result.csv')
| mit |
bmcfee/ismir2017_chords | code/train_model.py | 1 | 19274 | #!/usr/bin/env python
'''Model construction and training script'''
import argparse
import os
import sys
from collections import defaultdict
from glob import glob
import six
import pickle
import numpy as np
import pandas as pd
import keras as K
from tqdm import tqdm
from sklearn.model_selection import ShuffleSplit
import jams
import pescador
import librosa
def process_arguments(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--max_samples', dest='max_samples', type=int,
default=128,
help='Maximum number of samples to draw per streamer')
parser.add_argument('--patch-duration', dest='duration', type=float,
default=8.0,
help='Duration (in seconds) of training patches')
parser.add_argument('--seed', dest='seed', type=int,
default='20170412',
help='Seed for the random number generator')
parser.add_argument('--reference-path', dest='refs', type=str,
default=os.path.join(os.environ['HOME'],
'data', 'eric_chords', 'references_v2'),
help='Path to reference annotations')
parser.add_argument('--working', dest='working', type=str,
default=os.path.join(os.environ['HOME'],
'working', 'chords'),
help='Path to working directory')
parser.add_argument('--structured', dest='structured', action='store_true',
help='Enable structured training')
parser.add_argument('--augmentation', dest='augmentation',
action='store_true',
help='Enable data augmentation')
parser.add_argument('--weighted', dest='weighted', action='store_true',
help='Enable weighted sampling for training')
parser.add_argument('--static', dest='temporal', action='store_false',
help='Use static weighting instead of temporal weighting')
parser.add_argument('--train-streamers', dest='train_streamers', type=int,
default=1024,
help='Number of active training streamers')
parser.add_argument('--batch-size', dest='batch_size', type=int,
default=32,
help='Size of training batches')
parser.add_argument('--rate', dest='rate', type=int,
default=8,
help='Rate of pescador stream deactivation')
parser.add_argument('--epochs', dest='epochs', type=int,
default=100,
help='Maximum number of epochs to train for')
parser.add_argument('--epoch-size', dest='epoch_size', type=int,
default=512,
help='Number of batches per epoch')
parser.add_argument('--validation-size', dest='validation_size', type=int,
default=1024,
help='Number of batches per validation')
parser.add_argument('--early-stopping', dest='early_stopping', type=int,
default=20,
help='# epochs without improvement to stop')
parser.add_argument('--reduce-lr', dest='reduce_lr', type=int,
default=10,
help='# epochs without improvement to reduce learning rate')
return parser.parse_args(args)
def make_sampler(max_samples, duration, pump, seed):
    """Build a pump sampler that draws patches of ``duration`` seconds.

    The duration is converted to a frame count using the CQT transformer's
    sample rate and hop length.
    """
    cqt = pump['cqt']
    n_frames = librosa.time_to_frames(duration, sr=cqt.sr,
                                      hop_length=cqt.hop_length)[0]
    return pump.sampler(max_samples, n_frames, random_state=seed)
def data_sampler(fname, sampler):
    '''Generate samples from a specified npz file.

    The archive is eagerly materialized into a plain dict so the underlying
    file handle can be closed before sampling begins.
    '''
    archive = np.load(fname)
    data = dict(archive)
    archive.close()
    for datum in sampler(data):
        yield datum
def data_generator(working, tracks, sampler, k, batch_size=32,
                   augmentation=False, weights=None, **kwargs):
    '''Generate a data stream from a collection of tracks and a sampler.

    One pescador Streamer is created per track (plus one per augmented
    variant when ``augmentation`` is enabled), and the streamers are
    multiplexed with ``k`` simultaneously active.  When ``weights`` is given
    (a Series indexed by track id), streamers are sampled proportionally to
    their track's weight.  Returns the mux itself for ``batch_size == 1``,
    otherwise a BufferedStreamer that emits batches of that size.
    '''
    seeds = []
    pool_weights = []
    for track in tracks:
        fname = os.path.join(working, 'pump',
                             os.path.extsep.join([track, 'npz']))
        seeds.append(pescador.Streamer(data_sampler, fname, sampler))
        if weights is not None:
            pool_weights.append(weights.loc[track])
        if augmentation:
            # Augmented variants live next to the original as <track>.<n>.npz;
            # each gets its own streamer carrying the same track weight.
            for fname in sorted(glob(os.path.join(working, 'pump',
                                                  '{}.*.npz'.format(track)))):
                seeds.append(pescador.Streamer(data_sampler, fname, sampler))
                if weights is not None:
                    pool_weights.append(weights.loc[track])
    # Send it all to a mux
    if not pool_weights:
        pool_weights = None
    mux = pescador.Mux(seeds, k, pool_weights=pool_weights, **kwargs)
    if batch_size == 1:
        return mux
    else:
        return pescador.BufferedStreamer(mux, batch_size)
def keras_tuples(gen, inputs=None, outputs=None):
    """Adapt a stream of sample dicts into ``(inputs, outputs)`` tuples.

    ``inputs`` and ``outputs`` may each be a single field name (the bare
    value is yielded) or an iterable of names (a list of values is yielded).
    """
    single_input = isinstance(inputs, six.string_types)
    single_output = isinstance(outputs, six.string_types)
    for datum in gen:
        if single_input:
            x = datum[inputs]
        else:
            x = [datum[i] for i in inputs]
        if single_output:
            y = datum[outputs]
        else:
            y = [datum[o] for o in outputs]
        yield (x, y)
def estimate_class_annotation(ann, op, quality_only):
    """Accumulate, per chord label, the total duration it is active in ``ann``.

    Labels are passed through ``op.simplify`` and, when ``quality_only`` is
    set, reduced to their quality component.  Returns ``(durations,
    extent)`` where ``extent`` is the maximum interval endpoint observed.
    """
    durations = defaultdict(lambda: 0.0)
    intervals, labels = ann.data.to_interval_values()
    for (start, end), label in zip(intervals, labels):
        label = op.simplify(label)
        if quality_only:
            label = reduce_chord(label)
        durations[label] += end - start
    return durations, np.max(intervals)
def reduce_chord(c):
    """Return the quality part of a chord label, i.e. everything after the
    last ':' (e.g. 'C:maj' -> 'maj'); labels without a ':' pass through
    unchanged (e.g. 'N' -> 'N')."""
    _, sep, quality = c.rpartition(':')
    return quality if sep else c
def estimate_class_weights(refs, tracks, op, pseudo=1e-2, quality_only=True):
    """Estimate the duration-weighted chord class distribution over ``tracks``.

    Every label in ``op``'s vocabulary starts with a small pseudo-count so
    unseen chords keep a non-zero value; durations observed in each track's
    reference annotations are then accumulated and divided by the total
    annotated duration.  Returns a dict mapping chord label to its
    (approximately normalized) frequency.
    """
    seeds = [os.path.join(refs, os.path.extsep.join([track, 'jams']))
             for track in tracks]
    vocab = op.vocabulary()
    if quality_only:
        # Collapse e.g. 'C:maj' and 'D:maj' into the single quality 'maj'.
        vocab = set([reduce_chord(c) for c in vocab])
    weights = {k: pseudo for k in vocab}
    total = 0.0
    for jam_in in tqdm(seeds, desc='Estimating class distribution'):
        jam = jams.load(jam_in, validate=False)
        for ann in jam.annotations['chord']:
            weights_i, duration_i = estimate_class_annotation(ann, op,
                                                              quality_only)
            total += duration_i
            for k in weights_i:
                weights[k] += weights_i[k]
    for k in weights:
        weights[k] /= total
    return weights
def weight_track(refs, track, class_weights, op, quality_only=True,
                 aggregate=np.max, temporal=True):
    """Score a track by how unlikely its chord content is under
    ``class_weights``: rarer chords produce a larger weight.

    For each chord annotation an average log-probability of its labels is
    computed -- duration-weighted when ``temporal`` is True, uniform over
    the distinct labels otherwise -- and the per-annotation values
    ``exp(-avg_logprob)`` are combined with ``aggregate`` (default: max).
    """
    jam_in = os.path.join(refs, os.path.extsep.join([track, 'jams']))
    jam = jams.load(jam_in, validate=False)
    weight = []
    for ann in jam.annotations['chord']:
        weights_i, duration_i = estimate_class_annotation(ann, op,
                                                          quality_only)
        phat = 0.0
        if not temporal:
            # Static weighting ignores durations: each distinct label
            # contributes equally.
            weights_i = set(weights_i.keys())
        for k in weights_i:
            if temporal:
                phat += weights_i[k] / duration_i * np.log(class_weights[k])
            else:
                phat += np.log(class_weights[k]) / len(weights_i)
        weight.append(np.exp(-phat))
    return aggregate(weight)
def weight_tracks(refs, tracks, *args, **kwargs):
    """Compute an importance weight for every track (see ``weight_track``)
    and return them as a pandas Series indexed by track id."""
    weights = {
        track: weight_track(refs, track, *args, **kwargs)
        for track in tqdm(tracks, desc='Estimating track importance weights')
    }
    return pd.Series(data=weights)
def construct_model(pump, structured):
    """Build the chord recognition network.

    Architecture: CQT magnitude input -> batch norm -> single 5x5 conv ->
    full-height conv bank -> bidirectional GRU.  With ``structured=False``
    a single chord-tag softmax head follows; with ``structured=True``
    auxiliary pitch/root/bass heads are added and their outputs are
    concatenated with the RNN state before the tag head.

    Returns ``(model, input_field_name, list_of_output_field_names)``.
    """
    INPUTS = 'cqt/mag'
    # Build the input layer
    x = pump.layers()[INPUTS]
    # Apply batch normalization
    x_bn = K.layers.BatchNormalization()(x)
    # First convolutional filter: a single 5x5
    conv1 = K.layers.Convolution2D(1, (5, 5),
                                   padding='same',
                                   activation='relu',
                                   data_format='channels_last')(x_bn)
    # Second convolutional filter: a bank of full-height filters
    conv2 = K.layers.Convolution2D(36, (1, int(conv1.shape[2])),
                                   padding='valid', activation='relu',
                                   data_format='channels_last')(conv1)
    # Squeeze out the frequency dimension
    squeeze = K.layers.Lambda(lambda z: K.backend.squeeze(z, axis=2))(conv2)
    # BRNN layer
    rnn = K.layers.Bidirectional(K.layers.GRU(256,
                                              return_sequences=True))(squeeze)
    if structured:
        # 1: pitch class predictor (multi-label sigmoid)
        pc = K.layers.Dense(pump.fields['chord_struct/pitch'].shape[1],
                            activation='sigmoid')
        pc_p = K.layers.TimeDistributed(pc, name='chord_pitch')(rnn)
        # 2: root predictor (13-way softmax)
        root = K.layers.Dense(13, activation='softmax')
        root_p = K.layers.TimeDistributed(root, name='chord_root')(rnn)
        # 3: bass predictor (13-way softmax)
        bass = K.layers.Dense(13, activation='softmax')
        bass_p = K.layers.TimeDistributed(bass, name='chord_bass')(rnn)
        # 4: merge layer feeding the structured features back into the tagger
        codec = K.layers.concatenate([rnn, pc_p, root_p, bass_p])
        p0 = K.layers.Dense(len(pump['chord_tag'].vocabulary()),
                            activation='softmax',
                            bias_regularizer=K.regularizers.l2())
        tag = K.layers.TimeDistributed(p0, name='chord_tag')(codec)
        model = K.models.Model(x, [tag, pc_p, root_p, bass_p])
        OUTPUTS = ['chord_tag/chord',
                   'chord_struct/pitch',
                   'chord_struct/root',
                   'chord_struct/bass']
    else:
        p0 = K.layers.Dense(len(pump['chord_tag'].vocabulary()),
                            activation='softmax',
                            bias_regularizer=K.regularizers.l2())
        tag = K.layers.TimeDistributed(p0, name='chord_tag')(rnn)
        model = K.models.Model(x, [tag])
        OUTPUTS = ['chord_tag/chord']
    return model, INPUTS, OUTPUTS
def make_output_path(working, structured, augmentation, weighted, temporal=True):
    """Build (creating it if necessary) the output directory whose name
    encodes the training configuration, e.g. ``model_struct_aug_weighted``.

    The ``_static`` suffix is only applied to weighted, non-temporal runs.
    """
    parts = ['model']
    if structured:
        parts.append('_struct')
    if augmentation:
        parts.append('_aug')
    if weighted:
        parts.append('_weighted')
        if not temporal:
            parts.append('_static')
    outdir = os.path.join(working, ''.join(parts))
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    return outdir
def score_model(pump, model, idx, working, refs, structured):
    """Evaluate ``model`` on every track in ``idx`` with jams chord metrics.

    For each track the precomputed CQT features are loaded from the working
    directory, model output is decoded back into a chord annotation, and
    scored against the reference jams file.  Returns a DataFrame of
    per-track scores, one metric per column.
    """
    results = {}
    for item in tqdm(idx, desc='Evaluating the model'):
        jam = jams.load('{}/{}.jams'.format(refs, item), validate=False)
        datum = np.load('{}/pump/{}.npz'.format(working, item))['cqt/mag']
        output = model.predict(datum)[0]
        if structured:
            # Structured models emit [tag, pitch, root, bass]; only the tag
            # head is scored.
            output = output[0]
        ann = pump['chord_tag'].inverse(output)
        results[item] = jams.eval.chord(jam.annotations['chord', 0], ann)
    return pd.DataFrame.from_dict(results, orient='index')[['root', 'thirds',
                                                            'triads', 'tetrads',
                                                            'mirex', 'majmin',
                                                            'sevenths']]
def run_experiment(working, refs, max_samples, duration, structured,
augmentation, weighted, temporal, rate,
batch_size, epochs, epoch_size, validation_size,
early_stopping, reduce_lr, seed):
'''
Parameters
----------
working : str
directory that contains the experiment data (npz)
refs : str
directory that contains reference annotations (jams)
max_samples : int
Maximum number of samples per streamer
duration : float
Duration of training patches
structured : bool
Whether or not to use structured training
augmentation : bool
Whether to use data augmentation
weighted : bool
Whether to use weighted sampling
temporal : bool
If using weighting, whether it's static or temporal
batch_size : int
Size of batches
rate : int
Poisson rate for pescador
epochs : int
Maximum number of epoch
epoch_size : int
Number of batches per epoch
validation_size : int
Number of validation batches
early_stopping : int
Number of epochs before early stopping
reduce_lr : int
Number of epochs before reducing learning rate
seed : int
Random seed
'''
# Load the pump
with open(os.path.join(working, 'pump.pkl'), 'rb') as fd:
pump = pickle.load(fd)
# Build the sampler
sampler = make_sampler(max_samples, duration, pump, seed)
N_SPLITS = 5
N_SPLITS = 1
for split in range(N_SPLITS):
# Build the model
model, inputs, outputs = construct_model(pump, structured)
# Load the training data
idx_train_ = pd.read_csv(os.path.join(working,
'train{:02d}.csv'.format(split)),
header=None, names=['id'])
# Split the training data into train and validation
splitter_tv = ShuffleSplit(n_splits=1, test_size=0.25,
random_state=seed)
train, val = next(splitter_tv.split(idx_train_))
idx_train = idx_train_.iloc[train]
idx_val = idx_train_.iloc[val]
if weighted:
chord_weights = estimate_class_weights(refs,
idx_train['id'].values,
pump['chord_tag'],
quality_only=True)
train_weights = weight_tracks(refs,
idx_train['id'].values,
chord_weights,
pump['chord_tag'],
quality_only=True,
temporal=temporal)
else:
train_weights = pd.Series(data={k: 1.0
for k in idx_train['id'].values})
gen_train = data_generator(working,
train_weights.index, sampler, epoch_size,
augmentation=augmentation,
lam=rate,
batch_size=batch_size,
revive=True,
weights=train_weights,
random_state=seed)
gen_train = keras_tuples(gen_train(), inputs=inputs, outputs=outputs)
gen_val = data_generator(working,
idx_val['id'].values, sampler, len(idx_val),
batch_size=batch_size,
revive=True,
random_state=seed)
gen_val = keras_tuples(gen_val(), inputs=inputs, outputs=outputs)
loss = {'chord_tag': 'sparse_categorical_crossentropy'}
metrics = {'chord_tag': 'sparse_categorical_accuracy'}
if structured:
loss.update(chord_pitch='binary_crossentropy',
chord_root='sparse_categorical_crossentropy',
chord_bass='sparse_categorical_crossentropy')
monitor = 'val_chord_tag_loss'
else:
monitor = 'val_loss'
model.compile(K.optimizers.Adam(), loss=loss, metrics=metrics)
# Create output path
output_path = make_output_path(working, structured,
augmentation, weighted,
temporal=temporal)
# Store the model
model_spec = K.utils.serialize_keras_object(model)
with open(os.path.join(output_path,
'fold{:02d}_model.pkl'.format(split)),
'wb') as fd:
pickle.dump(model_spec, fd)
# Construct the weight path
weight_path = os.path.join(output_path,
'fold{:02d}_weights.pkl'.format(split))
# Build the callbacks
cb = []
cb.append(K.callbacks.ModelCheckpoint(weight_path,
save_best_only=True,
verbose=1,
monitor=monitor))
cb.append(K.callbacks.ReduceLROnPlateau(patience=reduce_lr,
verbose=1,
monitor=monitor))
cb.append(K.callbacks.EarlyStopping(patience=early_stopping,
verbose=1,
monitor=monitor))
# Fit the model
model.fit_generator(gen_train, epoch_size, epochs,
validation_data=gen_val,
validation_steps=validation_size,
callbacks=cb)
###
# Now test the model
# Load the best weights
model.load_weights(weight_path)
# Load the testing data
idx_test = pd.read_csv(os.path.join(working,
'test{:02d}.csv'.format(split)),
header=None, names=['id'])
test_scores = score_model(pump, model, idx_test['id'], working, refs, structured)
output_scores = os.path.join(output_path,
'fold{:02d}_test.csv'.format(split))
test_scores.to_csv(output_scores)
if __name__ == '__main__':
params = process_arguments(sys.argv[1:])
print(params)
run_experiment(params.working, params.refs,
params.max_samples, params.duration,
params.structured, params.augmentation, params.weighted, params.temporal,
params.rate,
params.batch_size,
params.epochs, params.epoch_size,
params.validation_size,
params.early_stopping,
params.reduce_lr,
params.seed)
| bsd-2-clause |
aliciawyy/CompInvest | portfolio_frontier.py | 1 | 4645 | """
This file will store the function which will determine
the efficient frontier
@author: Alicia Wang
@date: 4 Oct 2014
"""
# QSTK Imports
import QSTK.qstkutil.tsutil as tsu
# Third Party import
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
from load.load_ticker import load_valid_cac40_names
from load.load_data import load_stock_close_price
from portfolio import BasicPortfolio, get_daily_return0
def get_frontier(basic_portfolio, ref_symbol, filename="EquitiesvFrontier.pdf",
target_return=0.015):
"""
@param basic_portfolio
@param ref_symbol reference symbol
"""
assert isinstance(basic_portfolio, BasicPortfolio)
stock_close_price = basic_portfolio.get_stock_close_prices()
stock_normalized_price = stock_close_price.values / stock_close_price.values[0, :]
ref_close_price = load_stock_close_price(basic_portfolio.start_date,
basic_portfolio.end_date, [ref_symbol])
ref_normalized_price = ref_close_price.values / ref_close_price.values[0, :]
daily_return0 = get_daily_return0(stock_normalized_price)
(na_avgrets, na_std, b_error) = tsu.OptPort(daily_return0, None)
# Declaring bounds on the optimized portfolio
na_lower = np.zeros(daily_return0.shape[1])
na_upper = np.ones(daily_return0.shape[1])
# Getting the range of possible returns with these bounds
(f_min, f_max) = tsu.getRetRange(daily_return0, na_lower, na_upper,
na_avgrets, s_type="long")
# Getting the step size and list of returns to optimize for.
f_step = (f_max - f_min) / 100.0
lf_returns = [f_min + x * f_step for x in range(101)]
# Declaring empty lists
lf_std = []
lna_portfolios = []
# Calling the optimization for all returns
for f_target in lf_returns:
(na_weights, f_std, b_error) = \
tsu.OptPort(daily_return0, f_target, na_lower, na_upper, s_type="long")
lf_std.append(f_std)
lna_portfolios.append(na_weights)
f_target = target_return
(na_weights, f_std, b_error) = \
tsu.OptPort(daily_return0, f_target, na_lower, na_upper, s_type="long")
print 'Optimized portfolio for target return', f_target
print 'Volatility is ', f_std
for ticker_name, weight in zip(basic_portfolio.ticker_names, na_weights):
if weight > 0.00001:
print ticker_name, ':', weight
plt.clf()
plt.figure(figsize=(8, 10), dpi=100)
# Plot individual stock risk/return as green +
for i in range(len(basic_portfolio.ticker_names)):
# plt.plot(na_std[i], f_ret, 'g+')
# plt.text(na_std[i], f_ret, ls_names[i], fontsize = 10)
ave = np.average(daily_return0[:, i])
std = np.std(daily_return0[:, i])
plt.plot(std, ave, 'g+')
plt.text(std, ave, basic_portfolio.ticker_names[i], fontsize=5)
ref_daily_return = get_daily_return0(ref_normalized_price)
ave = np.average(ref_daily_return)
std = np.std(ref_daily_return)
plt.plot(std, ave, 'r+')
plt.text(std, ave, 'CAC 40', fontsize=6)
plt.plot(lf_std, lf_returns, 'b')
plt.title('Efficient Frontier For CAC 40')
# plt.legend(['2013 Frontier'], loc = 'lower left')
plt.ylabel('Expected Return')
plt.xlabel('StDev')
if filename is None:
plt.show()
else:
plt.savefig(filename, format='pdf')
return na_weights
def optimize(basic_portfolio, ref_symbol, filename="portfoliovCAC40.pdf", target_return=0.02):
    """Optimize the portfolio allocation and plot it against a reference index.

    @param basic_portfolio: BasicPortfolio holding the equities to allocate
    @param ref_symbol: ticker of the reference index (e.g. '^FCHI')
    @param filename: PDF file for the efficient-frontier plot; None shows it interactively
    @param target_return: target period return used to select the allocation
    @return alloc: allocation weights of the equities
    """
    # Bug fix: the docstring always documented a return value, but the
    # function returned None; the optimized weights are now returned.
    optimized_allocation = get_frontier(basic_portfolio, ref_symbol, filename, target_return)
    basic_portfolio.plot_with_reference(optimized_allocation, ref_symbol)
    return optimized_allocation
def test_small_portfolio():
    """Smoke-test the optimizer on four CAC 40 stocks over the last year."""
    ref_symbol = '^FCHI'
    tickers = ["AIR.PA", "LG.PA", "GLE.PA", "DG.PA"]
    end_date = dt.datetime.today()
    start_date = end_date - dt.timedelta(days=365)
    optimize(BasicPortfolio(tickers, start_date, end_date),
             ref_symbol, filename=None, target_return=0.012)
def test_cac40_portfolio():
    """Run the optimizer on the full CAC 40 universe and save the plot as PDF."""
    ref_symbol = '^FCHI'
    end_date = dt.datetime.today()
    start_date = end_date - dt.timedelta(days=365)
    cac40 = load_valid_cac40_names()
    portfolio = BasicPortfolio(cac40.index, start_date, end_date, cac40.values)
    optimize(portfolio, ref_symbol, filename="EquitiesvFrontier2015.pdf")
# Script entry point: run the small four-stock demo interactively; the
# full CAC 40 run (writes a PDF) is left disabled.
if __name__ == '__main__':
    test_small_portfolio()
    # test_cac40_portfolio()
| mit |
thilbern/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)

import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal

# Dictionary of 512 atoms in a 100-dimensional space; the target signal is
# a sparse combination of 17 of them.
n_components, n_features = 512, 100
n_nonzero_coefs = 17

# generate the data
###################

# y = Xw
# |x|_0 = n_nonzero_coefs

y, X, w = make_sparse_coded_signal(n_samples=1,
                                   n_components=n_components,
                                   n_features=n_features,
                                   n_nonzero_coefs=n_nonzero_coefs,
                                   random_state=0)

# indices of the truly active atoms in the ground-truth coefficient vector
idx, = w.nonzero()

# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))

# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])

# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])

# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])

# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])

plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
             fontsize=16)
plt.show()
| bsd-3-clause |
Anmol-Singh-Jaggi/Recommend | without-DB/python/uu.py | 1 | 3767 | import scipy as sp
import time
import pickle
import numpy as np
import os
import sys
import matplotlib.pyplot as plt
import math
from collections import defaultdict
# Training ratings file: whitespace-separated "user item rating [...]" records.
f = open("/home/goel/rec/data/u1.base",'r')
# user -> {item: rating} training matrix (filled by init_user_train)
user_train = {}
# user -> mean rating (raw sums until init_user_train normalises them)
user_mean = {}
# user -> number of ratings given by that user
user_ratings = {}
# user -> {user: similarity} table (filled by compute_sim)
simy = {}
# user -> {neighbour: similarity} top-n neighbourhood (filled by find_neighbors)
n_all = {}
def init_user_train():
    """Load the training file `f` into user_train/user_mean/user_ratings.

    After the pass over the file, per-user rating sums are divided by the
    rating counts so that user_mean holds true averages.
    """
    for record in f:
        fields = record.split()
        user, item, score = fields[0], fields[1], float(fields[2])
        if user not in user_train:
            # first rating seen for this user: create its entries
            user_train[user] = {}
            user_mean[user] = 0.0
            user_ratings[user] = 0
        user_train[user][item] = score
        user_ratings[user] += 1
        user_mean[user] += score
    # turn the accumulated sums into per-user averages
    for user in user_mean:
        user_mean[user] /= user_ratings[user]
def compute_sim():
    """Fill the global ``simy`` with a similarity for every ordered user pair.

    The measure is a smoothed cosine over mean-centred ratings of the items
    both users rated: (dot + 1) / (||u|| * ||v|| + 1).  The +1 terms avoid a
    zero denominator when two users share no items.  Self-similarity is 1.0.

    Fix: the original computed the similarity twice per pair (once into a
    dead local ``ans`` kept only for commented-out debugging, once into the
    table); the duplicate work and dead variable are removed.
    """
    for u in user_train:
        if u not in simy:
            simy.setdefault(u, {})
        for v in user_train:
            if u == v:
                simy[u].update({v: float(1)})
                continue
            num = 0.0
            norm_u = 0.0
            norm_v = 0.0
            # accumulate over the items rated by both users
            for x in user_train[u]:
                if x in user_train[v]:
                    r_u = user_train[u][x] - user_mean[u]
                    r_v = user_train[v][x] - user_mean[v]
                    num += r_u * r_v
                    norm_u += math.fabs(r_u * r_u)
                    norm_v += math.fabs(r_v * r_v)
            norm_u = math.sqrt(norm_u)
            norm_v = math.sqrt(norm_v)
            simy[u].update({v: (num + 1) / (norm_u * norm_v + 1)})
def find_neighbors(n_size):
    """Store, for every user, the ``n_size`` most similar users in ``n_all``.

    @param n_size: maximum neighbourhood size per user

    Bug fix: the original kept a running minimum while filling the
    neighbourhood, but after evicting the least-similar neighbour it reset
    ``minm`` to the *newcomer's* similarity instead of recomputing the true
    minimum of the remaining set.  Subsequent comparisons therefore used a
    stale threshold and the final neighbourhood could keep worse neighbours
    while dropping better ones.  ``heapq.nlargest`` selects the exact top-n.
    """
    import heapq  # local import keeps the module's original import list intact
    for u in user_train:
        best = heapq.nlargest(n_size, simy[u].items(), key=lambda kv: kv[1])
        n_all[u] = dict((v, float(s)) for v, s in best)
# Maximum neighbourhood size used by find_neighbors()/predict().
n_size = 1000
def predict():
    """Predict a rating for every (user, item) pair in the test file.

    For each test record the mean-centred ratings of the user's neighbourhood
    (``n_all``) are combined, weighted by similarity; the user's mean is added
    back and the result clamped to the valid [1, 5] rating range.  Writes
    tab-separated ``user item actual predicted`` lines to the output file.

    Fix: both files are now opened with ``with`` so they are closed even if a
    lookup raises mid-loop (the original leaked the handles on error).
    """
    with open("/home/goel/rec/data/u1.test", 'r') as f_test, \
         open("/home/goel/rec/uu_pred." + str(n_size), 'w') as f_out:
        for line in f_test:
            token = line.split()
            pred = 0.0
            norm = 0.0
            for v in n_all[token[0]]:
                if token[1] in user_train[v]:
                    # mean-centred neighbour rating, weighted by similarity
                    rating = user_train[v][token[1]] - user_mean[v]
                    sim = simy[token[0]][v]
                    pred += rating * sim
                    norm += abs(sim)
            # +1 smoothing in numerator/denominator avoids division by zero
            ans = user_mean[token[0]] + (pred + 1) / (norm + 1)
            ans = max(1, min(5, ans))  # clamp to the valid rating scale
            f_out.write(token[0] + '\t' + token[1] + '\t' + token[2] + '\t' + str(ans) + '\n')
def print_user_train():
    """Debug helper: dump each user's ratings, pausing between users.

    Fix: the original used the Python-2-only ``print`` *statement*, which is a
    syntax error under Python 3; the parenthesised single-argument form below
    behaves identically on both interpreters.
    """
    for key in user_train:
        print(key + ' : ' + str(user_train[key]))
        time.sleep(3)  # slow the dump down so it can be read in a terminal
def get_user_mean(user):
    """Return the mean rating of *user* (valid after init_user_train())."""
    return user_mean[user]
def get_user_ratings(user):
    """Return how many ratings *user* has in the training set."""
    # fix: dropped the stray C-style trailing semicolon
    return user_ratings[user]
def get_uu_sim(u, v):
    """Return the cached user-user similarity simy[u][v] (after compute_sim())."""
    return simy[u][v]
# --- script driver: build the model, then score the test set ---------------
init_user_train()
# print user_train['4']
# print user_train['4']['11']
# n_size = input("Enter the max neighbourhood size")
compute_sim()
find_neighbors(n_size)
predict()
f.close()  # close the ratings file opened at module import time
| gpl-2.0 |
yyjiang/scikit-learn | sklearn/utils/arpack.py | 265 | 64837 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermetian complex/double complex interface.
# To find eigenvalues of a Hermetian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermetian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK and handle shifted and shift-inverse computations
# for eigenvalues by providing a shift (sigma) and a solver.
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
# NumPy dtype char -> ARPACK routine prefix (s/d real, c/z complex).
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
# Decimal digits used when rounding eigenvalues for sorting, per precision.
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
# ---------------------------------------------------------------------------
# Human-readable messages for the integer status codes returned by the
# ARPACK *aupd (iteration) and *eupd (extraction) Fortran routines, keyed by
# code.  One table per routine; the single-precision tables reuse the
# double-precision text wherever the wording is identical.
# ---------------------------------------------------------------------------
DNAUPD_ERRORS = {
    0: "Normal exit.",
    1: "Maximum number of iterations taken. "
       "All possible eigenvalues of OP has been found. IPARAM(5) "
       "returns the number of wanted converged Ritz values.",
    2: "No longer an informational error. Deprecated starting "
       "with release 2 of ARPACK.",
    3: "No shifts could be applied during a cycle of the "
       "Implicitly restarted Arnoldi iteration. One possibility "
       "is to increase the size of NCV relative to NEV. ",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV-NEV >= 2 and less than or equal to N.",
    -4: "The maximum number of Arnoldi update iterations allowed "
        "must be greater than zero.",
    -5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work array WORKL is not sufficient.",
    -8: "Error return from LAPACK eigenvalue calculation;",
    -9: "Starting vector is zero.",
    -10: "IPARAM(7) must be 1,2,3,4.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "IPARAM(1) must be equal to 0 or 1.",
    -13: "NEV and WHICH = 'BE' are incompatible.",
    -9999: "Could not build an Arnoldi factorization. "
           "IPARAM(5) returns the size of the current Arnoldi "
           "factorization. The user is advised to check that "
           "enough workspace and array storage has been allocated."
}

SNAUPD_ERRORS = DNAUPD_ERRORS

# complex routines accept only modes 1-3
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."

CNAUPD_ERRORS = ZNAUPD_ERRORS

DSAUPD_ERRORS = {
    0: "Normal exit.",
    1: "Maximum number of iterations taken. "
       "All possible eigenvalues of OP has been found.",
    2: "No longer an informational error. Deprecated starting with "
       "release 2 of ARPACK.",
    3: "No shifts could be applied during a cycle of the Implicitly "
       "restarted Arnoldi iteration. One possibility is to increase "
       "the size of NCV relative to NEV. ",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV must be greater than NEV and less than or equal to N.",
    -4: "The maximum number of Arnoldi update iterations allowed "
        "must be greater than zero.",
    -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work array WORKL is not sufficient.",
    -8: "Error return from trid. eigenvalue calculation; "
        "Informational error from LAPACK routine dsteqr .",
    -9: "Starting vector is zero.",
    -10: "IPARAM(7) must be 1,2,3,4,5.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "IPARAM(1) must be equal to 0 or 1.",
    -13: "NEV and WHICH = 'BE' are incompatible. ",
    -9999: "Could not build an Arnoldi factorization. "
           "IPARAM(5) returns the size of the current Arnoldi "
           "factorization. The user is advised to check that "
           "enough workspace and array storage has been allocated.",
}

SSAUPD_ERRORS = DSAUPD_ERRORS

DNEUPD_ERRORS = {
    0: "Normal exit.",
    1: "The Schur form computed by LAPACK routine dlahqr "
       "could not be reordered by LAPACK routine dtrsen. "
       "Re-enter subroutine dneupd with IPARAM(5)NCV and "
       "increase the size of the arrays DR and DI to have "
       "dimension at least dimension NCV and allocate at least NCV "
       "columns for Z. NOTE: Not necessary if Z and V share "
       "the same space. Please notify the authors if this error "
       "occurs.",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV-NEV >= 2 and less than or equal to N.",
    -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work WORKL array is not sufficient.",
    -8: "Error return from calculation of a real Schur form. "
        "Informational error from LAPACK routine dlahqr .",
    -9: "Error return from calculation of eigenvectors. "
        "Informational error from LAPACK routine dtrevc.",
    -10: "IPARAM(7) must be 1,2,3,4.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "HOWMNY = 'S' not yet implemented",
    -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
    -14: "DNAUPD did not find any eigenvalues to sufficient "
         "accuracy.",
    -15: "DNEUPD got a different count of the number of converged "
         "Ritz values than DNAUPD got. This indicates the user "
         "probably made an error in passing data from DNAUPD to "
         "DNEUPD or that the data was modified before entering "
         "DNEUPD",
}

# single precision: same codes, s-prefixed routine names in the text
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
                    "could not be reordered by LAPACK routine strsen . "
                    "Re-enter subroutine dneupd with IPARAM(5)=NCV and "
                    "increase the size of the arrays DR and DI to have "
                    "dimension at least dimension NCV and allocate at least "
                    "NCV columns for Z. NOTE: Not necessary if Z and V share "
                    "the same space. Please notify the authors if this error "
                    "occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
                      "accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
                      "converged Ritz values than SNAUPD got. This indicates "
                      "the user probably made an error in passing data from "
                      "SNAUPD to SNEUPD or that the data was modified before "
                      "entering SNEUPD")

ZNEUPD_ERRORS = {0: "Normal exit.",
                 1: "The Schur form computed by LAPACK routine csheqr "
                    "could not be reordered by LAPACK routine ztrsen. "
                    "Re-enter subroutine zneupd with IPARAM(5)=NCV and "
                    "increase the size of the array D to have "
                    "dimension at least dimension NCV and allocate at least "
                    "NCV columns for Z. NOTE: Not necessary if Z and V share "
                    "the same space. Please notify the authors if this error "
                    "occurs.",
                 -1: "N must be positive.",
                 -2: "NEV must be positive.",
                 -3: "NCV-NEV >= 1 and less than or equal to N.",
                 -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
                 -6: "BMAT must be one of 'I' or 'G'.",
                 -7: "Length of private work WORKL array is not sufficient.",
                 -8: "Error return from LAPACK eigenvalue calculation. "
                     "This should never happened.",
                 -9: "Error return from calculation of eigenvectors. "
                     "Informational error from LAPACK routine ztrevc.",
                 -10: "IPARAM(7) must be 1,2,3",
                 -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
                 -12: "HOWMNY = 'S' not yet implemented",
                 -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
                 -14: "ZNAUPD did not find any eigenvalues to sufficient "
                      "accuracy.",
                 -15: "ZNEUPD got a different count of the number of "
                      "converged Ritz values than ZNAUPD got. This "
                      "indicates the user probably made an error in passing "
                      "data from ZNAUPD to ZNEUPD or that the data was "
                      "modified before entering ZNEUPD"}

CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
                      "accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
                      "converged Ritz values than CNAUPD got. This indicates "
                      "the user probably made an error in passing data from "
                      "CNAUPD to CNEUPD or that the data was modified before "
                      "entering CNEUPD")

DSEUPD_ERRORS = {
    0: "Normal exit.",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV must be greater than NEV and less than or equal to N.",
    -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work WORKL array is not sufficient.",
    -8: ("Error return from trid. eigenvalue calculation; "
         "Information error from LAPACK routine dsteqr."),
    -9: "Starting vector is zero.",
    -10: "IPARAM(7) must be 1,2,3,4,5.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "NEV and WHICH = 'BE' are incompatible.",
    -14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
    -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
    -16: "HOWMNY = 'S' not yet implemented",
    -17: ("DSEUPD got a different count of the number of converged "
          "Ritz values than DSAUPD got. This indicates the user "
          "probably made an error in passing data from DSAUPD to "
          "DSEUPD or that the data was modified before entering "
          "DSEUPD.")
}

SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
                      "to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
                      "converged "
                      "Ritz values than SSAUPD got. This indicates the user "
                      "probably made an error in passing data from SSAUPD to "
                      "SSEUPD or that the data was modified before entering "
                      "SSEUPD.")

# Lookup maps: ARPACK type-prefix letter -> error table for that routine.
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
                 's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
                 's': SNAUPD_ERRORS,
                 'z': ZNAUPD_ERRORS,
                 'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
                 's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
                 's': SNEUPD_ERRORS,
                 'z': ZNEUPD_ERRORS,
                 'c': CNEUPD_ERRORS}

# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']

# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
    """Raised when an ARPACK routine reports a nonzero status code."""

    def __init__(self, info, infodict=_NAUPD_ERRORS):
        # Translate the integer status code into a readable message.
        message = infodict.get(info, "Unknown error")
        RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, message))
class ArpackNoConvergence(ArpackError):
    """Raised when the ARPACK iteration fails to converge.

    Attributes
    ----------
    eigenvalues : ndarray
        Partial result. Converged eigenvalues.
    eigenvectors : ndarray
        Partial result. Converged eigenvectors.
    """

    def __init__(self, msg, eigenvalues, eigenvectors):
        # Keep whatever did converge so callers can salvage partial results.
        self.eigenvalues = eigenvalues
        self.eigenvectors = eigenvectors
        ArpackError.__init__(self, -1, {-1: msg})
class _ArpackParams(object):
    """Shared state for the ARPACK reverse-communication drivers.

    Validates the common parameters, allocates the Ritz-vector storage and
    the IPARAM control array, and tracks the reverse-communication flag
    ``ido`` and convergence status for the subclasses.
    """
    def __init__(self, n, k, tp, mode=1, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        if k <= 0:
            raise ValueError("k must be positive, k=%d" % k)

        if maxiter is None:
            maxiter = n * 10
        if maxiter <= 0:
            raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)

        if tp not in 'fdFD':
            raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")

        if v0 is not None:
            # ARPACK overwrites its initial resid, make a copy
            self.resid = np.array(v0, copy=True)
            info = 1
        else:
            self.resid = np.zeros(n, tp)
            info = 0

        if sigma is None:
            #sigma not used
            self.sigma = 0
        else:
            self.sigma = sigma

        if ncv is None:
            ncv = 2 * k + 1
        ncv = min(ncv, n)

        self.v = np.zeros((n, ncv), tp)  # holds Ritz vectors
        self.iparam = np.zeros(11, "int")

        # set solver mode and parameters
        ishfts = 1  # exact shifts are computed internally by ARPACK
        self.mode = mode
        self.iparam[0] = ishfts
        self.iparam[2] = maxiter
        self.iparam[3] = 1
        self.iparam[6] = mode

        self.n = n
        self.tol = tol
        self.k = k
        self.maxiter = maxiter
        self.ncv = ncv
        self.which = which
        self.tp = tp
        self.info = info

        self.converged = False
        self.ido = 0  # reverse-communication flag; 0 = first call

    def _raise_no_convergence(self):
        # Raise ArpackNoConvergence, attaching whatever eigenpairs did
        # converge so the caller can still inspect partial results.
        msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
        k_ok = self.iparam[4]
        num_iter = self.iparam[2]
        try:
            ev, vec = self.extract(True)
        except ArpackError as err:
            # extraction itself failed: report empty partial results
            msg = "%s [%s]" % (msg, err)
            ev = np.zeros((0,))
            vec = np.zeros((self.n, 0))
            k_ok = 0
        raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
    """Driver state for the real-symmetric solvers ((s,d)saupd/(s,d)seupd).

    Sets up the operator callables for the requested spectral-transformation
    mode, then exposes iterate() (one reverse-communication step) and
    extract() (retrieve converged eigenpairs).
    """
    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
                 Minv_matvec=None, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        # The following modes are supported:
        #   mode = 1:
        #     Solve the standard eigenvalue problem:
        #     A*x = lambda*x :
        #        A - symmetric
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = None [not used]
        #        Minv_matvec = None [not used]
        #
        #   mode = 2:
        #     Solve the general eigenvalue problem:
        #     A*x = lambda*M*x
        #        A - symmetric
        #        M - symmetric positive definite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = left multiplication by M
        #        Minv_matvec = left multiplication by M^-1
        #
        #   mode = 3:
        #     Solve the general eigenvalue problem in shift-invert mode:
        #     A*x = lambda*M*x
        #        A - symmetric
        #        M - symmetric positive semi-definite
        #     Arguments should be
        #        matvec      = None [not used]
        #        M_matvec    = left multiplication by M
        #                      or None, if M is the identity
        #        Minv_matvec = left multiplication by [A-sigma*M]^-1
        #
        #   mode = 4:
        #     Solve the general eigenvalue problem in Buckling mode:
        #     A*x = lambda*AG*x
        #        A  - symmetric positive semi-definite
        #        AG - symmetric indefinite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = None [not used]
        #        Minv_matvec = left multiplication by [A-sigma*AG]^-1
        #
        #   mode = 5:
        #     Solve the general eigenvalue problem in Cayley-transformed mode:
        #     A*x = lambda*M*x
        #        A - symmetric
        #        M - symmetric positive semi-definite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = left multiplication by M
        #                      or None, if M is the identity
        #        Minv_matvec = left multiplication by [A-sigma*M]^-1
        if mode == 1:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=1")
            if M_matvec is not None:
                raise ValueError("M_matvec cannot be specified for mode=1")
            if Minv_matvec is not None:
                raise ValueError("Minv_matvec cannot be specified for mode=1")

            self.OP = matvec
            self.B = lambda x: x
            self.bmat = 'I'
        elif mode == 2:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=2")
            if M_matvec is None:
                raise ValueError("M_matvec must be specified for mode=2")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=2")

            self.OP = lambda x: Minv_matvec(matvec(x))
            self.OPa = Minv_matvec
            self.OPb = matvec
            self.B = M_matvec
            self.bmat = 'G'
        elif mode == 3:
            if matvec is not None:
                raise ValueError("matvec must not be specified for mode=3")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=3")

            if M_matvec is None:
                self.OP = Minv_matvec
                self.OPa = Minv_matvec
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                self.OP = lambda x: Minv_matvec(M_matvec(x))
                self.OPa = Minv_matvec
                self.B = M_matvec
                self.bmat = 'G'
        elif mode == 4:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=4")
            if M_matvec is not None:
                raise ValueError("M_matvec must not be specified for mode=4")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=4")
            self.OPa = Minv_matvec
            self.OP = lambda x: self.OPa(matvec(x))
            self.B = matvec
            self.bmat = 'G'
        elif mode == 5:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=5")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=5")

            self.OPa = Minv_matvec
            self.A_matvec = matvec

            if M_matvec is None:
                self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                self.OP = lambda x: Minv_matvec(matvec(x)
                                                + sigma * M_matvec(x))
                self.B = M_matvec
                self.bmat = 'G'
        else:
            raise ValueError("mode=%i not implemented" % mode)

        if which not in _SEUPD_WHICH:
            raise ValueError("which must be one of %s"
                             % ' '.join(_SEUPD_WHICH))
        if k >= n:
            raise ValueError("k must be less than rank(A), k=%d" % k)

        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
                               ncv, v0, maxiter, which, tol)

        if self.ncv > n or self.ncv <= k:
            raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)

        # ARPACK workspace sizes prescribed by the (s,d)saupd documentation
        self.workd = np.zeros(3 * n, self.tp)
        self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)

        ltr = _type_conv[self.tp]
        if ltr not in ["s", "d"]:
            raise ValueError("Input matrix is not real-valued.")
        self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
        self._arpack_extract = _arpack.__dict__[ltr + 'seupd']

        self.iterate_infodict = _SAUPD_ERRORS[ltr]
        self.extract_infodict = _SEUPD_ERRORS[ltr]

        self.ipntr = np.zeros(11, "int")

    def iterate(self):
        # One reverse-communication step: call *saupd, then perform the
        # operator application it requests via the ``ido`` flag.
        self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
            self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                self.tol, self.resid, self.v, self.iparam,
                                self.ipntr, self.workd, self.workl, self.info)

        # ipntr holds 1-based offsets into workd for the x and y vectors
        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode == 1:
                self.workd[yslice] = self.OP(self.workd[xslice])
            elif self.mode == 2:
                self.workd[xslice] = self.OPb(self.workd[xslice])
                self.workd[yslice] = self.OPa(self.workd[xslice])
            elif self.mode == 5:
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                Ax = self.A_matvec(self.workd[xslice])
                self.workd[yslice] = self.OPa(Ax + (self.sigma *
                                                    self.workd[Bxslice]))
            else:
                # modes 3 and 4: B*x is already available in workd
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
        else:
            # any other ido means the iteration has finished
            self.converged = True

            if self.info == 0:
                pass
            elif self.info == 1:
                self._raise_no_convergence()
            else:
                raise ArpackError(self.info, infodict=self.iterate_infodict)

    def extract(self, return_eigenvectors):
        # Retrieve the converged Ritz values (and optionally vectors)
        # from the completed iteration via *seupd.
        rvec = return_eigenvectors
        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
                                          self.bmat, self.which, self.k,
                                          self.tol, self.resid, self.v,
                                          self.iparam[0:7], self.ipntr,
                                          self.workd[0:2 * self.n],
                                          self.workl, ierr)
        if ierr != 0:
            raise ArpackError(ierr, infodict=self.extract_infodict)
        k_ok = self.iparam[4]  # number of converged Ritz values
        d = d[:k_ok]
        z = z[:, :k_ok]

        if return_eigenvectors:
            return d, z
        else:
            return d
class _UnsymmetricArpackParams(_ArpackParams):
    """Driver state for the general (non-symmetric) solvers (*naupd/*neupd).

    Handles both the real routines, which return complex eigenpairs packed
    into real arrays as (real, imag) column pairs, and the complex routines,
    which return them directly.
    """
    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
                 Minv_matvec=None, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        # The following modes are supported:
        #   mode = 1:
        #     Solve the standard eigenvalue problem:
        #     A*x = lambda*x
        #        A - square matrix
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = None [not used]
        #        Minv_matvec = None [not used]
        #
        #   mode = 2:
        #     Solve the generalized eigenvalue problem:
        #     A*x = lambda*M*x
        #        A - square matrix
        #        M - symmetric, positive semi-definite
        #     Arguments should be
        #        matvec      = left multiplication by A
        #        M_matvec    = left multiplication by M
        #        Minv_matvec = left multiplication by M^-1
        #
        #   mode = 3,4:
        #     Solve the general eigenvalue problem in shift-invert mode:
        #     A*x = lambda*M*x
        #        A - square matrix
        #        M - symmetric, positive semi-definite
        #     Arguments should be
        #        matvec      = None [not used]
        #        M_matvec    = left multiplication by M
        #                      or None, if M is the identity
        #        Minv_matvec = left multiplication by [A-sigma*M]^-1
        #        if A is real and mode==3, use the real part of Minv_matvec
        #        if A is real and mode==4, use the imag part of Minv_matvec
        #        if A is complex and mode==3,
        #           use real and imag parts of Minv_matvec
        if mode == 1:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=1")
            if M_matvec is not None:
                raise ValueError("M_matvec cannot be specified for mode=1")
            if Minv_matvec is not None:
                raise ValueError("Minv_matvec cannot be specified for mode=1")

            self.OP = matvec
            self.B = lambda x: x
            self.bmat = 'I'
        elif mode == 2:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=2")
            if M_matvec is None:
                raise ValueError("M_matvec must be specified for mode=2")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=2")

            self.OP = lambda x: Minv_matvec(matvec(x))
            self.OPa = Minv_matvec
            self.OPb = matvec
            self.B = M_matvec
            self.bmat = 'G'
        elif mode in (3, 4):
            if matvec is None:
                raise ValueError("matvec must be specified "
                                 "for mode in (3,4)")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified "
                                 "for mode in (3,4)")

            self.matvec = matvec
            if tp in 'DF':  # complex type
                if mode == 3:
                    self.OPa = Minv_matvec
                else:
                    raise ValueError("mode=4 invalid for complex A")
            else:  # real type
                if mode == 3:
                    self.OPa = lambda x: np.real(Minv_matvec(x))
                else:
                    self.OPa = lambda x: np.imag(Minv_matvec(x))
            if M_matvec is None:
                self.B = lambda x: x
                self.bmat = 'I'
                self.OP = self.OPa
            else:
                self.B = M_matvec
                self.bmat = 'G'
                self.OP = lambda x: self.OPa(M_matvec(x))
        else:
            raise ValueError("mode=%i not implemented" % mode)

        if which not in _NEUPD_WHICH:
            raise ValueError("Parameter which must be one of %s"
                             % ' '.join(_NEUPD_WHICH))
        if k >= n - 1:
            raise ValueError("k must be less than rank(A)-1, k=%d" % k)

        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
                               ncv, v0, maxiter, which, tol)

        if self.ncv > n or self.ncv <= k + 1:
            raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)

        # ARPACK workspace sizes prescribed by the *naupd documentation
        self.workd = np.zeros(3 * n, self.tp)
        self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)

        ltr = _type_conv[self.tp]
        self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
        self._arpack_extract = _arpack.__dict__[ltr + 'neupd']

        self.iterate_infodict = _NAUPD_ERRORS[ltr]
        self.extract_infodict = _NEUPD_ERRORS[ltr]

        self.ipntr = np.zeros(14, "int")

        if self.tp in 'FD':
            # complex routines need a separate real work array
            self.rwork = np.zeros(self.ncv, self.tp.lower())
        else:
            self.rwork = None

    def iterate(self):
        # One reverse-communication step: the complex routines take the
        # extra rwork argument, the real ones do not.
        if self.tp in 'fd':
            self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
                self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                    self.tol, self.resid, self.v, self.iparam,
                                    self.ipntr, self.workd, self.workl,
                                    self.info)
        else:
            self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
                self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                    self.tol, self.resid, self.v, self.iparam,
                                    self.ipntr, self.workd, self.workl,
                                    self.rwork, self.info)

        # ipntr holds 1-based offsets into workd for the x and y vectors
        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode in (1, 2):
                self.workd[yslice] = self.OP(self.workd[xslice])
            else:
                # shift-invert modes: B*x is already available in workd
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
        else:
            # any other ido means the iteration has finished
            self.converged = True

            if self.info == 0:
                pass
            elif self.info == 1:
                self._raise_no_convergence()
            else:
                raise ArpackError(self.info, infodict=self.iterate_infodict)

    def extract(self, return_eigenvectors):
        # Retrieve converged eigenpairs via *neupd.  The real routines
        # return complex results packed into real arrays; unpack them here.
        k, n = self.k, self.n

        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        sigmar = np.real(self.sigma)
        sigmai = np.imag(self.sigma)
        workev = np.zeros(3 * self.ncv, self.tp)

        if self.tp in 'fd':
            dr = np.zeros(k + 1, self.tp)
            di = np.zeros(k + 1, self.tp)
            zr = np.zeros((n, k + 1), self.tp)
            dr, di, zr, ierr = \
                self._arpack_extract(
                    return_eigenvectors, howmny, sselect, sigmar, sigmai,
                    workev, self.bmat, self.which, k, self.tol, self.resid,
                    self.v, self.iparam, self.ipntr, self.workd, self.workl,
                    self.info)

            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)

            nreturned = self.iparam[4]  # number of good eigenvalues returned

            # Build complex eigenvalues from real and imaginary parts
            d = dr + 1.0j * di

            # Arrange the eigenvectors: complex eigenvectors are stored as
            # real,imaginary in consecutive columns
            z = zr.astype(self.tp.upper())

            # The ARPACK nonsymmetric real and double interface (s,d)naupd
            # return eigenvalues and eigenvectors in real (float,double)
            # arrays.

            # Efficiency: this should check that return_eigenvectors == True
            #  before going through this construction.
            if sigmai == 0:
                i = 0
                while i <= k:
                    # check if complex
                    if abs(d[i].imag) != 0:
                        # this is a complex conjugate pair with eigenvalues
                        # in consecutive columns
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            i += 1
                        else:
                            #last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            #this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1
            else:
                # real matrix, mode 3 or 4, imag(sigma) is nonzero:
                # see remark 3 in <s,d>neupd.f
                # Build complex eigenvalues from real and imaginary parts
                i = 0
                while i <= k:
                    if abs(d[i].imag) == 0:
                        d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
                    else:
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            d[i] = ((np.dot(zr[:, i],
                                            self.matvec(zr[:, i]))
                                     + np.dot(zr[:, i + 1],
                                              self.matvec(zr[:, i + 1])))
                                    + 1j * (np.dot(zr[:, i],
                                                   self.matvec(zr[:, i + 1]))
                                            - np.dot(zr[:, i + 1],
                                                     self.matvec(zr[:, i]))))
                            d[i + 1] = d[i].conj()
                            i += 1
                        else:
                            #last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            #this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1

            # Now we have k+1 possible eigenvalues and eigenvectors
            # Return the ones specified by the keyword "which"
            if nreturned <= k:
                # we got less or equal as many eigenvalues we wanted
                d = d[:nreturned]
                z = z[:, :nreturned]
            else:
                # we got one extra eigenvalue (likely a cc pair, but which?)
                # cut at approx precision for sorting
                rd = np.round(d, decimals=_ndigits[self.tp])
                if self.which in ['LR', 'SR']:
                    ind = np.argsort(rd.real)
                elif self.which in ['LI', 'SI']:
                    # for LI,SI ARPACK returns largest,smallest
                    # abs(imaginary) why?
                    ind = np.argsort(abs(rd.imag))
                else:
                    ind = np.argsort(abs(rd))
                if self.which in ['LR', 'LM', 'LI']:
                    d = d[ind[-k:]]
                    z = z[:, ind[-k:]]
                if self.which in ['SR', 'SM', 'SI']:
                    d = d[ind[:k]]
                    z = z[:, ind[:k]]
        else:
            # complex is so much simpler...
            d, z, ierr =\
                self._arpack_extract(
                    return_eigenvectors, howmny, sselect, self.sigma, workev,
                    self.bmat, self.which, k, self.tol, self.resid, self.v,
                    self.iparam, self.ipntr, self.workd, self.workl,
                    self.rwork, ierr)

            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)

            k_ok = self.iparam[4]
            d = d[:k_ok]
            z = z[:, :k_ok]

        if return_eigenvectors:
            return d, z
        else:
            return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
    """
    SpLuInv:
       helper class to repeatedly solve M*x=b
       using a sparse LU-decomposition of M
    """

    def __init__(self, M):
        # Factor M once up front; every _matvec reuses the factorization.
        self.M_lu = splu(M)
        LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
        # Remember whether M is real so complex right-hand sides can be
        # handled by solving for the real and imaginary parts separately.
        self.isreal = not np.issubdtype(self.dtype, np.complexfloating)

    def _matvec(self, x):
        # careful here: splu.solve will throw away imaginary
        # part of x if M is real
        if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
            return (self.M_lu.solve(np.real(x))
                    + 1j * self.M_lu.solve(np.imag(x)))
        else:
            return self.M_lu.solve(x)
class LuInv(LinearOperator):
    """
    LuInv:
       helper class to repeatedly solve M*x=b
       using an LU-decomposition of M (dense matrices)
    """

    def __init__(self, M):
        # Dense LU factorization (scipy.linalg.lu_factor), computed once.
        self.M_lu = lu_factor(M)
        LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)

    def _matvec(self, x):
        # Back-substitution against the stored factorization.
        return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
    """
    IterInv:
       helper class to repeatedly solve M*x=b
       using an iterative method.
    """

    def __init__(self, M, ifunc=gmres, tol=0):
        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function.  We should match this
            tol = np.finfo(M.dtype).eps
        self.M = M
        self.ifunc = ifunc
        self.tol = tol
        if hasattr(M, 'dtype'):
            dtype = M.dtype
        else:
            # Probe the operator with a zero vector to discover its dtype.
            x = np.zeros(M.shape[1])
            dtype = (M * x).dtype
        LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)

    def _matvec(self, x):
        # Solve M*b = x iteratively; info != 0 signals non-convergence.
        b, info = self.ifunc(self.M, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting M: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return b
class IterOpInv(LinearOperator):
    """
    IterOpInv:
       helper class to repeatedly solve [A-sigma*M]*x = b
       using an iterative method
    """

    def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function.  We should match this
            tol = np.finfo(A.dtype).eps
        self.A = A
        self.M = M
        self.sigma = sigma
        self.ifunc = ifunc
        self.tol = tol

        # Probe the shifted operator with a zero vector to discover the
        # result dtype, then build the operator OP = A - sigma*M that the
        # iterative solver will invert in _matvec.
        x = np.zeros(A.shape[1])
        if M is None:
            dtype = self.mult_func_M_None(x).dtype
            self.OP = LinearOperator(self.A.shape,
                                     self.mult_func_M_None,
                                     dtype=dtype)
        else:
            dtype = self.mult_func(x).dtype
            self.OP = LinearOperator(self.A.shape,
                                     self.mult_func,
                                     dtype=dtype)
        LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)

    def mult_func(self, x):
        # Apply the shifted operator A - sigma*M.
        return self.A.matvec(x) - self.sigma * self.M.matvec(x)

    def mult_func_M_None(self, x):
        # Shifted operator with M taken as the identity: A - sigma*I.
        return self.A.matvec(x) - self.sigma * x

    def _matvec(self, x):
        # Solve [A - sigma*M]*b = x iteratively.
        b, info = self.ifunc(self.OP, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting [A-sigma*M]: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return b
def get_inv_matvec(M, symmetric=False, tol=0):
    """Return a callable computing ``M^-1 * x``, choosing the solver by
    matrix representation: dense LU, sparse LU, or iterative fallback."""
    if isdense(M):
        return LuInv(M).matvec
    if isspmatrix(M):
        # For a symmetric matrix the transpose is equivalent, and
        # transposing CSR yields CSC, the layout splu prefers.
        if symmetric and isspmatrix_csr(M):
            M = M.T
        return SpLuInv(M).matvec
    # General linear operator: fall back to an iterative solver.
    return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
    """Return a callable applying ``[A - sigma*M]^-1`` to a vector,
    picking a direct (LU) or iterative solver based on operand types."""
    if sigma == 0:
        # No shift: this degenerates to a plain inverse of A.
        return get_inv_matvec(A, symmetric=symmetric, tol=tol)

    if M is None:
        # M is the identity matrix.
        if isdense(A):
            keep_real = (np.issubdtype(A.dtype, np.complexfloating)
                         or np.imag(sigma) == 0)
            shifted = np.copy(A) if keep_real else A + 0j
            # Subtract sigma along the main diagonal, in place.
            shifted.flat[::shifted.shape[1] + 1] -= sigma
            return LuInv(shifted).matvec
        if isspmatrix(A):
            shifted = A - sigma * identity(A.shape[0])
            if symmetric and isspmatrix_csr(shifted):
                shifted = shifted.T
            return SpLuInv(shifted.tocsc()).matvec
        return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
                         tol=tol).matvec

    # General M: prefer a direct factorization only when both operands
    # are explicit (dense or sparse) matrices.
    a_explicit = isdense(A) or isspmatrix(A)
    m_explicit = isdense(M) or isspmatrix(M)
    if not (a_explicit and m_explicit):
        return IterOpInv(_aslinearoperator_with_dtype(A),
                         _aslinearoperator_with_dtype(M), sigma,
                         tol=tol).matvec
    if isdense(A) or isdense(M):
        return LuInv(A - sigma * M).matvec
    shifted = A - sigma * M
    if symmetric and isspmatrix_csr(shifted):
        shifted = shifted.T
    return SpLuInv(shifted.tocsc()).matvec
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
          maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
          OPpart=None):
    """
    Find k eigenvalues and eigenvectors of the square matrix A.

    Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
    for w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i]

    Parameters
    ----------
    A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
        the operation A * x, where A is a real or complex square matrix.
    k : int, default 6
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N. It is not possible to compute all
        eigenvectors of a matrix.
    return_eigenvectors : boolean, default True
        Whether to return the eigenvectors along with the eigenvalues.
    M : An N x N matrix, array, sparse matrix, or LinearOperator representing
        the operation M*x for the generalized eigenvalue problem
        ``A * x = w * M * x``
        M must represent a real symmetric matrix. For best results, M should
        be of the same type as A. Additionally:
         * If sigma==None, M is positive definite
         * If sigma is specified, M is positive semi-definite
        If sigma==None, eigs requires an operator to compute the solution
        of the linear equation `M * x = b`. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator. Alternatively,
        the user can supply the matrix or operator Minv, which gives
        x = Minv * b = M^-1 * b
    sigma : real or complex
        Find eigenvalues near sigma using shift-invert mode. This requires
        an operator to compute the solution of the linear system
        `[A - sigma * M] * x = b`, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives x = OPinv * b = [A - sigma * M]^-1 * b.
        For a real matrix A, shift-invert can either be done in imaginary
        mode or real mode, specified by the parameter OPpart ('r' or 'i').
        Note that when sigma is specified, the keyword 'which' (below)
        refers to the shifted eigenvalues w'[i] where:
         * If A is real and OPpart == 'r' (default),
           w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
         * If A is real and OPpart == 'i',
           w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
         * If A is complex,
           w'[i] = 1/(w[i]-sigma)
    v0 : array
        Starting vector for iteration.
    ncv : integer
        The number of Lanczos vectors generated
        `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
    which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
        Which `k` eigenvectors and eigenvalues to find:
         - 'LM' : largest magnitude
         - 'SM' : smallest magnitude
         - 'LR' : largest real part
         - 'SR' : smallest real part
         - 'LI' : largest imaginary part
         - 'SI' : smallest imaginary part
        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
        (see discussion in 'sigma', above).  ARPACK is generally better
        at finding large values than small values.  If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : integer
        Maximum number of Arnoldi update iterations allowed
    tol : float
        Relative accuracy for eigenvalues (stopping criterion)
        The default value of 0 implies machine precision.
    return_eigenvectors : boolean
        Return eigenvectors (True) in addition to eigenvalues
    Minv : N x N matrix, array, sparse matrix, or linear operator
        See notes in M, above.
    OPinv : N x N matrix, array, sparse matrix, or linear operator
        See notes in sigma, above.
    OPpart : 'r' or 'i'.
        See notes in sigma, above

    Returns
    -------
    w : array
        Array of k eigenvalues.
    v : array
        An array of `k` eigenvectors.
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.
        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigsh : eigenvalues and eigenvectors for symmetric matrix A
    svds : singular value decomposition for a matrix A

    Examples
    --------
    Find 6 eigenvectors of the identity matrix:

    >>> from sklearn.utils.arpack import eigs
    >>> id = np.identity(13)
    >>> vals, vecs = eigs(id, k=6)
    >>> vals
    array([ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j])
    >>> vecs.shape
    (13, 6)

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
    ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
    find the eigenvalues and eigenvectors [2]_.

    References
    ----------
    .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
    """
    # --- argument validation ---
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
    if M is not None:
        if M.shape != A.shape:
            raise ValueError('wrong M dimensions %s, should be %s'
                             % (M.shape, A.shape))
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence')
    n = A.shape[0]

    if k <= 0 or k >= n:
        raise ValueError("k must be between 1 and rank(A)-1")

    if sigma is None:
        # --- ordinary (non shift-invert) mode ---
        matvec = _aslinearoperator_with_dtype(A).matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if OPpart is not None:
            raise ValueError("OPpart should not be specified with "
                             "sigma = None or complex A")

        if M is None:
            #standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            #general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        #sigma is not None: shift-invert mode
        if np.issubdtype(A.dtype, np.complexfloating):
            if OPpart is not None:
                # NOTE(review): this message is misleading — sigma is not
                # None in this branch; it fires because A is complex.  Kept
                # as-is since it is runtime-visible text.
                raise ValueError("OPpart should not be specified "
                                 "with sigma=None or complex A")
            mode = 3
        elif OPpart is None or OPpart.lower() == 'r':
            mode = 3
        elif OPpart.lower() == 'i':
            if np.imag(sigma) == 0:
                raise ValueError("OPpart cannot be 'i' if sigma is real")
            mode = 4
        else:
            raise ValueError("OPpart must be one of ('r','i')")

        matvec = _aslinearoperator_with_dtype(A).matvec
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")
        if OPinv is None:
            Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                           symmetric=False, tol=tol)
        else:
            OPinv = _aslinearoperator_with_dtype(OPinv)
            Minv_matvec = OPinv.matvec

        if M is None:
            M_matvec = None
        else:
            M_matvec = _aslinearoperator_with_dtype(M).matvec

    # Drive the ARPACK reverse-communication loop to convergence.
    params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                      M_matvec, Minv_matvec, sigma,
                                      ncv, v0, maxiter, which, tol)

    while not params.converged:
        params.iterate()

    return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
           maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
           OPinv=None, mode='normal'):
    """
    Find k eigenvalues and eigenvectors of the real symmetric square matrix
    or complex hermitian matrix A.

    Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
    w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i]

    Parameters
    ----------
    A : An N x N matrix, array, sparse matrix, or LinearOperator representing
        the operation A * x, where A is a real symmetric matrix
        For buckling mode (see below) A must additionally be positive-definite
    k : integer
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N. It is not possible to compute all
        eigenvectors of a matrix.
    M : An N x N matrix, array, sparse matrix, or linear operator representing
        the operation M * x for the generalized eigenvalue problem
        ``A * x = w * M * x``.
        M must represent a real, symmetric matrix. For best results, M should
        be of the same type as A. Additionally:
         * If sigma == None, M is symmetric positive definite
         * If sigma is specified, M is symmetric positive semi-definite
         * In buckling mode, M is symmetric indefinite.
        If sigma == None, eigsh requires an operator to compute the solution
        of the linear equation `M * x = b`. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator. Alternatively,
        the user can supply the matrix or operator Minv, which gives
        x = Minv * b = M^-1 * b
    sigma : real
        Find eigenvalues near sigma using shift-invert mode. This requires
        an operator to compute the solution of the linear system
        `[A - sigma * M] x = b`, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives x = OPinv * b = [A - sigma * M]^-1 * b.
        Note that when sigma is specified, the keyword 'which' refers to
        the shifted eigenvalues w'[i] where:
         - if mode == 'normal',
           w'[i] = 1 / (w[i] - sigma)
         - if mode == 'cayley',
           w'[i] = (w[i] + sigma) / (w[i] - sigma)
         - if mode == 'buckling',
           w'[i] = w[i] / (w[i] - sigma)
        (see further discussion in 'mode' below)
    v0 : array
        Starting vector for iteration.
    ncv : integer
        The number of Lanczos vectors generated
        ncv must be greater than k and smaller than n;
        it is recommended that ncv > 2*k
    which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
        If A is a complex hermitian matrix, 'BE' is invalid.
        Which `k` eigenvectors and eigenvalues to find
         - 'LM' : Largest (in magnitude) eigenvalues
         - 'SM' : Smallest (in magnitude) eigenvalues
         - 'LA' : Largest (algebraic) eigenvalues
         - 'SA' : Smallest (algebraic) eigenvalues
         - 'BE' : Half (k/2) from each end of the spectrum
                  When k is odd, return one more (k/2+1) from the high end
        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
        (see discussion in 'sigma', above).  ARPACK is generally better
        at finding large values than small values.  If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : integer
        Maximum number of Arnoldi update iterations allowed
    tol : float
        Relative accuracy for eigenvalues (stopping criterion).
        The default value of 0 implies machine precision.
    Minv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in M, above
    OPinv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in sigma, above.
    return_eigenvectors : boolean
        Return eigenvectors (True) in addition to eigenvalues
    mode : string ['normal' | 'buckling' | 'cayley']
        Specify strategy to use for shift-invert mode.  This argument applies
        only for real-valued A and sigma != None.  For shift-invert mode,
        ARPACK internally solves the eigenvalue problem
        ``OP * x'[i] = w'[i] * B * x'[i]``
        and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
        into the desired eigenvectors and eigenvalues of the problem
        ``A * x[i] = w[i] * M * x[i]``.
        The modes are as follows:
         - 'normal' : OP = [A - sigma * M]^-1 * M
                      B = M
                      w'[i] = 1 / (w[i] - sigma)
         - 'buckling' : OP = [A - sigma * M]^-1 * A
                        B = A
                        w'[i] = w[i] / (w[i] - sigma)
         - 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
                      B = M
                      w'[i] = (w[i] + sigma) / (w[i] - sigma)
        The choice of mode will affect which eigenvalues are selected by
        the keyword 'which', and can also impact the stability of
        convergence (see [2] for a discussion)

    Returns
    -------
    w : array
        Array of k eigenvalues
    v : array
        An array of k eigenvectors
        The v[i] is the eigenvector corresponding to the eigenvector w[i]

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.
        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
    svds : singular value decomposition for a matrix A

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
    functions which use the Implicitly Restarted Lanczos Method to
    find the eigenvalues and eigenvectors [2]_.

    Examples
    --------
    >>> from sklearn.utils.arpack import eigsh
    >>> id = np.identity(13)
    >>> vals, vecs = eigsh(id, k=6)
    >>> vals # doctest: +SKIP
    array([ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j])
    >>> print(vecs.shape)
    (13, 6)

    References
    ----------
    .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
    """
    # complex hermitian matrices should be solved with eigs
    if np.issubdtype(A.dtype, np.complexfloating):
        if mode != 'normal':
            raise ValueError("mode=%s cannot be used with "
                             "complex matrix A" % mode)
        if which == 'BE':
            raise ValueError("which='BE' cannot be used with complex matrix A")
        elif which == 'LA':
            which = 'LR'
        elif which == 'SA':
            which = 'SR'
        # Delegate to the general (non-symmetric) solver; the eigenvalues
        # of a hermitian matrix are real, so only the real part is kept.
        ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
                   ncv=ncv, maxiter=maxiter, tol=tol,
                   return_eigenvectors=return_eigenvectors, Minv=Minv,
                   OPinv=OPinv)

        if return_eigenvectors:
            return ret[0].real, ret[1]
        else:
            return ret.real

    # --- argument validation ---
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
    if M is not None:
        if M.shape != A.shape:
            raise ValueError('wrong M dimensions %s, should be %s'
                             % (M.shape, A.shape))
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence')
    n = A.shape[0]

    if k <= 0 or k >= n:
        raise ValueError("k must be between 1 and rank(A)-1")

    if sigma is None:
        A = _aslinearoperator_with_dtype(A)
        matvec = A.matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if M is None:
            #standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            #general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
            else:
                Minv = _aslinearoperator_with_dtype(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = _aslinearoperator_with_dtype(M).matvec
    else:
        # sigma is not None: shift-invert mode
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is")

        # normal mode
        if mode == 'normal':
            mode = 3
            matvec = None
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               symmetric=True, tol=tol)
            else:
                OPinv = _aslinearoperator_with_dtype(OPinv)
                Minv_matvec = OPinv.matvec
            if M is None:
                M_matvec = None
            else:
                M = _aslinearoperator_with_dtype(M)
                M_matvec = M.matvec

        # buckling mode
        elif mode == 'buckling':
            mode = 4
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               symmetric=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            matvec = _aslinearoperator_with_dtype(A).matvec
            M_matvec = None

        # cayley-transform mode
        elif mode == 'cayley':
            mode = 5
            matvec = _aslinearoperator_with_dtype(A).matvec
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               symmetric=True, tol=tol)
            else:
                Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
            if M is None:
                M_matvec = None
            else:
                M_matvec = _aslinearoperator_with_dtype(M).matvec

        # unrecognized mode
        else:
            raise ValueError("unrecognized mode '%s'" % mode)

    # Drive the ARPACK reverse-communication loop to convergence.
    params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                    M_matvec, Minv_matvec, sigma,
                                    ncv, v0, maxiter, which, tol)

    while not params.converged:
        params.iterate()

    return params.extract(return_eigenvectors)
def _svds(A, k=6, ncv=None, tol=0):
    """Compute k singular values/vectors for a sparse matrix using ARPACK.

    Parameters
    ----------
    A : sparse matrix
        Array to compute the SVD on
    k : int, optional
        Number of singular values and vectors to compute.
    ncv : integer
        The number of Lanczos vectors generated
        ncv must be greater than k+1 and smaller than n;
        it is recommended that ncv > 2*k
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.

    Notes
    -----
    This is a naive implementation using an eigensolver on A.H * A or
    A * A.H, depending on which one is more efficient.
    """
    if not (isinstance(A, np.ndarray) or isspmatrix(A)):
        A = np.asarray(A)

    n, m = A.shape

    # Pick the conjugate-transpose helper and the eigensolver that matches
    # the matrix dtype: eigs for complex input, eigsh for real input.
    if np.issubdtype(A.dtype, np.complexfloating):
        def herm(x):
            return x.T.conjugate()
        eigensolver = eigs
    else:
        def herm(x):
            return x.T
        eigensolver = eigsh

    # Work with whichever of A^H A / A A^H is smaller.
    if n > m:
        X, XH = A, herm(A)
    else:
        X, XH = herm(A), A

    if hasattr(XH, 'dot'):
        def matvec_XH_X(x):
            return XH.dot(X.dot(x))
    else:
        def matvec_XH_X(x):
            return np.dot(XH, np.dot(X, x))

    XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
                          shape=(X.shape[1], X.shape[1]))

    # Ignore deprecation warnings here: dot on matrices is deprecated,
    # but this code is a backport anyhow
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', DeprecationWarning)
        eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)

    # Singular values are square roots of the eigenvalues of X^H X.
    s = np.sqrt(eigvals)

    # Recover the remaining singular-vector side from the computed one.
    if n > m:
        v = eigvec
        if hasattr(X, 'dot'):
            u = X.dot(v) / s
        else:
            u = np.dot(X, v) / s
        vh = herm(v)
    else:
        u = eigvec
        if hasattr(X, 'dot'):
            vh = herm(X.dot(u) / s)
        else:
            vh = herm(np.dot(X, u) / s)

    return u, s, vh
# check if backport is actually needed:
# for scipy >= 0.10 the fixed implementations exist upstream, so simply
# re-export them; otherwise fall back to the backported versions above.
if scipy.version.version >= LooseVersion('0.10'):
    from scipy.sparse.linalg import eigs, eigsh, svds
else:
    eigs, eigsh, svds = _eigs, _eigsh, _svds
| bsd-3-clause |
akionakamura/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
    """Smoke-test fetch_rcv1 against known RCV1 dataset statistics."""
    # Skip (rather than fail) when the dataset was never downloaded.
    try:
        data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
    except IOError as e:
        if e.errno == errno.ENOENT:
            raise SkipTest("Download RCV1 dataset to run this test.")

    X1, Y1 = data1.data, data1.target
    cat_list, s1 = data1.target_names.tolist(), data1.sample_id

    # test sparsity
    assert_true(sp.issparse(X1))
    assert_true(sp.issparse(Y1))
    assert_equal(60915113, X1.data.size)
    assert_equal(2606875, Y1.data.size)

    # test shapes
    assert_equal((804414, 47236), X1.shape)
    assert_equal((804414, 103), Y1.shape)
    assert_equal((804414,), s1.shape)
    assert_equal(103, len(cat_list))

    # test ordering of categories
    first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
    assert_array_equal(first_categories, cat_list[:6])

    # test number of sample for some categories
    some_categories = ('GMIL', 'E143', 'CCAT')
    number_non_zero_in_cat = (5, 1206, 381327)
    for num, cat in zip(number_non_zero_in_cat, some_categories):
        j = cat_list.index(cat)
        assert_equal(num, Y1[:, j].data.size)

    # test shuffling and subset
    data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
                       download_if_missing=False)
    X2, Y2 = data2.data, data2.target
    s2 = data2.sample_id

    # The first 23149 samples are the training samples
    assert_array_equal(np.sort(s1[:23149]), np.sort(s2))

    # test some precise values: the same sample_id must map to the same
    # feature and target rows regardless of shuffling.
    some_sample_ids = (2286, 3274, 14042)
    for sample_id in some_sample_ids:
        idx1 = s1.tolist().index(sample_id)
        idx2 = s2.tolist().index(sample_id)

        feature_values_1 = X1[idx1, :].toarray()
        feature_values_2 = X2[idx2, :].toarray()
        assert_almost_equal(feature_values_1, feature_values_2)

        target_values_1 = Y1[idx1, :].toarray()
        target_values_2 = Y2[idx2, :].toarray()
        assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
trashkalmar/omim | search/search_quality/scoring_model.py | 4 | 9667 | #!/usr/bin/env python3
from math import exp, log
from scipy.stats import pearsonr, t
from sklearn import svm
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.utils import resample
import argparse
import collections
import itertools
import numpy as np
import pandas as pd
import random
import sys
# Distances beyond this are clamped before normalization (meters).
MAX_DISTANCE_METERS = 2e6
# Maximum raw rank value; used to scale Rank into [0, 1].
MAX_RANK = 255
# Numeric encoding of assessor relevance labels.
RELEVANCES = {'Irrelevant': 0, 'Relevant': 1, 'Vital': 3}
# Categorical values that get one-hot encoded into feature columns.
NAME_SCORES = ['Zero', 'Substring', 'Prefix', 'Full Match']
SEARCH_TYPES = ['POI', 'Building', 'Street', 'Unclassified', 'Village', 'City', 'State', 'Country']
# Full ordered feature list consumed by the linear model.
FEATURES = ['DistanceToPivot', 'Rank', 'FalseCats', 'ErrorsMade', 'AllTokensUsed'] + NAME_SCORES + SEARCH_TYPES
# Number of resampling rounds for bootstrap confidence intervals.
BOOTSTRAP_ITERATIONS = 10000
def transform_name_score(value, categories_match):
    """Force the name score to 'Zero' for pure category matches.

    When only the category matched (categories_match == 1) the name itself
    did not contribute, so its score is discarded.
    """
    return 'Zero' if categories_match == 1 else value
def normalize_data(data):
    """Normalize raw feature columns of the training frame, in place."""
    def scale_distance(meters):
        return min(meters, MAX_DISTANCE_METERS) / MAX_DISTANCE_METERS

    data['DistanceToPivot'] = data['DistanceToPivot'].apply(scale_distance)
    data['Rank'] = data['Rank'].apply(lambda rank: rank / MAX_RANK)
    data['Relevance'] = data['Relevance'].apply(lambda label: RELEVANCES[label])

    cats = data['PureCats'].combine(data['FalseCats'], max)

    # TODO (@y, @m): do forward/backward/subset selection of features
    # instead of this merging. It would be great to conduct PCA on
    # the features too.
    data['NameScore'] = data['NameScore'].combine(cats, transform_name_score)

    # One-hot encode NameScore.
    for ns in NAME_SCORES:
        data[ns] = data['NameScore'].apply(lambda v: int(ns == v))

    # One-hot encode SearchType, folding Building into POI: there is not
    # enough training data to tell the two apart, so they share a class
    # until the model changes or more data arrives.
    data['SearchType'] = data['SearchType'].apply(
        lambda v: v if v != 'Building' else 'POI')
    for st in SEARCH_TYPES:
        data[st] = data['SearchType'].apply(lambda v: int(st == v))
def compute_ndcg(relevances):
    """
    Computes NDCG (Normalized Discounted Cumulative Gain) for a given
    array of scores.
    """
    def dcg(scores):
        return sum(r / log(i + 2, 2) for i, r in enumerate(scores))

    ideal = dcg(sorted(relevances, reverse=True))
    return dcg(relevances) / ideal if ideal != 0 else 0
def compute_ndcgs_without_ws(data):
    """
    Computes NDCG (Normalized Discounted Cumulative Gain) for a given
    data. Returns an array of ndcg scores in the shape [num groups of
    features].
    """
    grouped = data.groupby(data['SampleId'], sort=False).groups

    ndcgs = []
    for id in grouped:
        indices = grouped[id]
        # BUG FIX: `.ix` was deprecated and removed in pandas 1.0.  The
        # values in `indices` are index labels (from groupby .groups), so
        # the label-based `.loc` is the exact replacement.
        relevances = np.array(data.loc[indices]['Relevance'])
        ndcgs.append(compute_ndcg(relevances))
    return ndcgs
def compute_ndcgs_for_ws(data, ws):
    """
    Computes NDCG (Normalized Discounted Cumulative Gain) for a given
    data and an array of coeffs in a linear model. Returns an array of
    ndcg scores in the shape [num groups of features].
    """
    # BUG FIX: `.ix` was deprecated and removed in pandas 1.0; `.loc` is the
    # label-based equivalent (both `data.index` values and groupby labels
    # are index labels).
    data_scores = np.array([np.dot(data.loc[i][FEATURES], ws) for i in data.index])
    grouped = data.groupby(data['SampleId'], sort=False).groups

    ndcgs = []
    for id in grouped:
        indices = grouped[id]

        relevances = np.array(data.loc[indices]['Relevance'])
        # NOTE(review): positional indexing into data_scores with labels —
        # assumes the frame keeps its default RangeIndex (as when read via
        # pd.read_csv); confirm if the index is ever customized.
        scores = data_scores[indices]

        # Reoders relevances in accordance with decreasing scores.
        relevances = relevances[scores.argsort()[::-1]]
        ndcgs.append(compute_ndcg(relevances))
    return ndcgs
def transform_data(data):
    """
    By a given data computes x and y that can be used as an input to a
    linear SVM (pairwise ranking transformation).

    Returns (xs, ys): feature-difference vectors and their +1/-1 labels.
    """
    grouped = data.groupby(data['SampleId'], sort=False)

    xs, ys = [], []

    # k alternates the sign of emitted samples to create a balanced
    # set of both classes, which helps linear separation.
    k = 1
    for _, group in grouped:
        features, relevances = group[FEATURES], group['Relevance']

        n, total = len(group), 0
        for i, j in itertools.combinations(range(n), 2):
            dr = relevances.iloc[j] - relevances.iloc[i]
            y = np.sign(dr)
            # Pairs with equal relevance carry no ranking signal.
            if y == 0:
                continue

            x = np.array(features.iloc[j]) - np.array(features.iloc[i])

            # Need to multiply x by average drop in NDCG when i-th and
            # j-th are exchanged.
            x *= abs(dr * (1 / log(j + 2, 2) - 1 / log(i + 2, 2)))

            # This is needed to prevent disbalance in classes sizes.
            if y != k:
                x = np.negative(x)
                y = -y

            xs.append(x)
            ys.append(y)
            total += 1
            k = -k

        # Scales this group of features to equalize different search
        # queries.
        # BUG FIX: the original `range(-1, -total, -1)` stopped one short
        # and never scaled the group's first appended sample; the bound
        # must be -total - 1 to cover all `total` new entries.
        for i in range(-1, -total - 1, -1):
            xs[i] = xs[i] / total
    return xs, ys
def show_pearson_statistics(xs, ys, features):
    """
    Shows info about Pearson coefficient between features and
    relevancy.

    For each feature, prints the Pearson correlation with ys and whether
    the correlation is significant at the 5% level, then summarizes the
    correlated / non-correlated feature lists.
    """
    print('***** Correlation table *****')
    # BUG FIX: corrected word order in the hypothesis description
    # (was "feature not is correlated").
    print('H0 - feature is not correlated with relevancy')
    print('H1 - feature is correlated with relevancy')
    print()

    cs, ncs = [], []
    for i, f in enumerate(features):
        # Column i of the sample matrix.
        zs = [x[i] for x in xs]
        (c, p) = pearsonr(zs, ys)
        # Reject H0 at the conventional 5% significance level.
        correlated = p < 0.05
        print('{}: pearson={:.3f}, P(H1)={}'.format(f, c, 1 - p))
        if correlated:
            cs.append(f)
        else:
            ncs.append(f)
    print()
    print('Correlated:', cs)
    print('Non-correlated:', ncs)
def raw_output(features, ws):
    """
    Prints feature-coeff pairs to the standard output.
    """
    header = '{:<20}{}'.format('Feature', 'Value')
    print(header)
    print()
    rows = ('{:<20}{:.5f}'.format(feature, weight)
            for feature, weight in zip(features, ws))
    for row in rows:
        print(row)
def print_const(name, value):
    """Emit a single C++ `double` constant definition for *name*."""
    line = 'double const k{} = {:.7f};'.format(name, value)
    print(line)
def print_array(name, size, values):
    """Emit a C++ `double` array definition from (feature, weight) pairs."""
    print('double const {}[{}] = {{'.format(name, size))
    body = ',\n'.join(' {:.7f} /* {} */'.format(weight, feature)
                      for (feature, weight) in values)
    print(body)
    print('};')
def cpp_output(features, ws):
    """
    Prints feature-coeff pairs in the C++-compatible format.

    Scalar features become individual constants; NameScore and SearchType
    dummies are grouped into C++ arrays.
    """
    name_scores, search_types = [], []
    for feature, weight in zip(features, ws):
        if feature in NAME_SCORES:
            name_scores.append((feature, weight))
        elif feature in SEARCH_TYPES:
            search_types.append((feature, weight))
        else:
            print_const(feature, weight)
    print_array('kNameScore', 'NameScore::NAME_SCORE_COUNT', name_scores)
    print_array('kType', 'Model::TYPE_COUNT', search_types)
def show_bootstrap_statistics(clf, X, y, features):
    """Print bootstrap 95% confidence intervals and t-statistics for each
    normalized feature weight of the linear model."""
    num_features = len(features)

    # coefs[i] accumulates the normalized weight of feature i across all
    # bootstrap resamples.
    coefs = []
    for i in range(num_features):
        coefs.append([])
    for _ in range(BOOTSTRAP_ITERATIONS):
        X_sample, y_sample = resample(X, y)
        clf.fit(X_sample, y_sample)
        for i, c in enumerate(get_normalized_coefs(clf)):
            coefs[i].append(c)

    # Building was merged into POI during training, so it inherits the POI
    # coefficient distribution.
    poi_index = features.index('POI')
    building_index = features.index('Building')
    coefs[building_index] = coefs[poi_index]

    # NOTE(review): `intervals` is never used — candidate for removal.
    intervals = []

    print()
    print('***** Bootstrap statistics *****')
    print('{:<20}{:<20}{:<10}{:<10}'.format('Feature', '95% interval', 't-value', 'Pr(>|t|)'))
    print()
    for i, cs in enumerate(coefs):
        values = np.array(cs)
        lo = np.percentile(values, 2.5)
        hi = np.percentile(values, 97.5)
        interval = '({:.3f}, {:.3f})'.format(lo, hi)
        tv = np.mean(values) / np.std(values)
        pr = (1.0 - t.cdf(x=abs(tv), df=len(values))) * 0.5

        stv = '{:.3f}'.format(tv)
        spr = '{:.3f}'.format(pr)
        print('{:<20}{:<20}{:<10}{:<10}'.format(features[i], interval, stv, spr))
def get_normalized_coefs(clf):
    """Scale the binary classifier's weight vector so max |w| equals 1."""
    weights = clf.coef_[0]
    largest = max(abs(w) for w in weights)
    return np.divide(weights, largest)
def main(args):
    """Train the linear ranking model and report quality metrics.

    Reads the training table from stdin, evaluates the current NDCG,
    grid-searches the SVM regularizer constant, then prints the
    normalized feature weights in the requested format.
    """
    data = pd.read_csv(sys.stdin)
    normalize_data(data)

    # Baseline quality before fitting new weights.
    # (fixed: removed a stray trailing semicolon)
    ndcgs = compute_ndcgs_without_ws(data)
    print('Current NDCG: {:.3f}, std: {:.3f}'.format(np.mean(ndcgs), np.std(ndcgs)))
    print()

    xs, ys = transform_data(data)

    clf = svm.LinearSVC(random_state=args.seed)
    cv = KFold(n_splits=5, shuffle=True, random_state=args.seed)

    # "C" stands for the regularizer constant.
    grid = {'C': np.power(10.0, np.arange(-5, 6))}
    gs = GridSearchCV(clf, grid, scoring='roc_auc', cv=cv)
    gs.fit(xs, ys)

    print('Best params: {}'.format(gs.best_params_))

    ws = get_normalized_coefs(gs.best_estimator_)

    # Following code restores coeffs for merged features.
    ws[FEATURES.index('Building')] = ws[FEATURES.index('POI')]

    ndcgs = compute_ndcgs_for_ws(data, ws)

    print('NDCG mean: {:.3f}, std: {:.3f}'.format(np.mean(ndcgs), np.std(ndcgs)))
    print('ROC AUC: {:.3f}'.format(gs.best_score_))

    if args.pearson:
        print()
        show_pearson_statistics(xs, ys, FEATURES)

    print()
    print('***** Linear model weights *****')
    if args.cpp:
        cpp_output(FEATURES, ws)
    else:
        raw_output(FEATURES, ws)

    if args.bootstrap:
        show_bootstrap_statistics(clf, xs, ys, FEATURES)
if __name__ == "__main__":
    # Command-line entry point: parse the flags and delegate to main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', help='random seed', type=int)
    parser.add_argument('--pearson', help='show pearson statistics', action='store_true')
    parser.add_argument('--cpp', help='generate output in the C++ format', action='store_true')
    parser.add_argument('--bootstrap', help='show bootstrap confidence intervals', action='store_true')
    args = parser.parse_args()
    main(args)
| apache-2.0 |
vigilv/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared Euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
    """The function to predict: x * sin(x)."""
    return np.sin(x) * x
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T

# Observations
y = f(X).ravel()

# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T

# Instantiate a Gaussian Process model (cubic correlation; theta tuned by MLE
# over several random restarts)
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                     random_start=100)

# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)

# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)

# Plot the function, the prediction and the 95% confidence interval based on
# the MSE (1.96 is the normal-quantile half-width)
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                       (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# Now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T

# Observations and noise (heteroscedastic: each point has its own std dy)
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise

# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T

# Instantiate a Gaussian Process model; the nugget encodes the normalized
# per-observation variance (see the module docstring)
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
                     thetaL=1e-3, thetaU=1,
                     nugget=(dy / y) ** 2,
                     random_start=100)

# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)

# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)

# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                       (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')

pl.show()
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/tight_bbox.py | 22 | 2601 | """
This module is to support *bbox_inches* option in savefig command.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
def adjust_bbox(fig, bbox_inches, fixed_dpi=None):
    """
    Temporarily adjust the figure so that only the specified area
    (bbox_inches) is saved.

    It modifies fig.bbox, fig.bbox_inches,
    fig.transFigure._boxout, and fig.patch. While the figure size
    changes, the scale of the original figure is conserved. A
    function which restores the original values are returned.
    """
    # Remember everything we are about to mutate so it can be restored.
    origBbox = fig.bbox
    origBboxInches = fig.bbox_inches
    _boxout = fig.transFigure._boxout

    asp_list = []
    locator_list = []
    for ax in fig.axes:
        pos = ax.get_position(original=False).frozen()
        locator_list.append(ax.get_axes_locator())
        asp_list.append(ax.get_aspect())

        # Freeze each axes at its current position; `pos` is bound as a
        # default argument so every locator closure keeps its own position.
        def _l(a, r, pos=pos):
            return pos
        ax.set_axes_locator(_l)
        # Aspect is forced to "auto" while the figure is reshaped.
        ax.set_aspect("auto")

    def restore_bbox():
        # Undo all the mutations performed below.
        for ax, asp, loc in zip(fig.axes, asp_list, locator_list):
            ax.set_aspect(asp)
            ax.set_axes_locator(loc)

        fig.bbox = origBbox
        fig.bbox_inches = origBboxInches
        fig.transFigure._boxout = _boxout
        fig.transFigure.invalidate()
        fig.patch.set_bounds(0, 0, 1, 1)

    # Choose the transform from inches to output pixels.
    if fixed_dpi is not None:
        tr = Affine2D().scale(fixed_dpi)
        dpi_scale = fixed_dpi / fig.dpi
    else:
        tr = Affine2D().scale(fig.dpi)
        dpi_scale = 1.

    _bbox = TransformedBbox(bbox_inches, tr)

    fig.bbox_inches = Bbox.from_bounds(0, 0,
                                       bbox_inches.width, bbox_inches.height)

    x0, y0 = _bbox.x0, _bbox.y0
    w1, h1 = fig.bbox.width * dpi_scale, fig.bbox.height * dpi_scale
    # Shift the figure transform so the requested bbox lands at the origin.
    fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0, w1, h1)
    fig.transFigure.invalidate()

    fig.bbox = TransformedBbox(fig.bbox_inches, tr)

    fig.patch.set_bounds(x0 / w1, y0 / h1,
                         fig.bbox.width / w1, fig.bbox.height / h1)

    return restore_bbox
def process_figure_for_rasterizing(fig, bbox_inches_restore, fixed_dpi=None):
    """
    Re-run the bbox adjustment after a mid-draw dpi change (e.g. while
    rasterizing): restore the original figure state, then adjust it again
    with the new dpi. Returns the (bbox_inches, restore_function) pair.
    """
    saved_bbox_inches, restore = bbox_inches_restore
    restore()
    new_restore = adjust_bbox(fig, saved_bbox_inches, fixed_dpi)
    return saved_bbox_inches, new_restore
| gpl-3.0 |
atanna/benchpy | benchpy/magic.py | 1 | 5170 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from functools import partial
from matplotlib import pyplot as plt
def magic_benchpy(line='', cell=None):
    """
    Run benchpy.run

    %benchpy [[-i] [-g] [-n <N>] [-m <M>] [-p] [-r <R>] [-t <T>] -s<S>] statement
    where statement is Bench or Group or list with benches

    %%benchpy [[-i] -g<G> -m<M> -n<N> [-p] -s<S>]
    long description of statement

    Options:
    -i: return full information about benchmark results.
    -g: use information from garbage collector (with_gc=True).
    Default: 'False'.
    -n<N>: set maximum of batch size <N> (max_batch=<N>).
    Default: 10.
    -m<M>: set number of batches for fitting regression <M> (n_batches=<M>).
    Default: 10.
    batch_sizes = [1, ..., M-2<M>/<N>, M-<M>/<N>, <M>]
    -p: show plots with regression.
    -r<R>: repeat the loop iteration <R> (n_samples=<R>).
    Default 5.
    -t<T>: choose columns <T> to represent result.
    <T> = [t][c][f][s][m][M][r][g][i]
    where
    t='Time'
    c='CI' - confidence interval
    f='Features_time' - time for each regression parameter
    s='Std' - standard deviation for regression parameter (which means time)
    m='Min' - minimum of the time values
    M='Max' - maximum
    r="R2" - r2 regression score
    g='gc_time' - time for gc collections (useful only with python version >= 3.3)
    i='fit_info' - fitting information
    Default - default in repr.

    Examples
    --------
    ::

        In [1]: import benchpy as bp
        In [2]: %benchpy 10**10000
        (prints a table with Time, CI and feature columns)
        In [3]: %benchpy -t tcsrmM 10**10000
        (prints Time, CI, Std, R2, Min and Max columns)
        In [4]: %benchpy -n 1000 -g cycles(100)
        (adds gc_time and the predicted time without gc)
    """
    from IPython import get_ipython
    from IPython.core.magics import UserMagics

    ip = get_ipython()
    # Parse the magic's flag string; list_all=True means every option value
    # comes back as a list, hence the [0] indexing below.
    opts, arg_str = UserMagics(ip).parse_options(
        line, 'igm:n:pr:t:', list_all=True, posix=False)
    if cell is not None:
        arg_str += '\n' + cell
        # NOTE(review): the line above is immediately overwritten here, so in
        # cell mode the contents of `line` are discarded; this looks like it
        # was meant to transform `arg_str` rather than `cell` — confirm.
        arg_str = ip.input_transformer_manager.transform_cell(cell)
    # Map the parsed flags onto benchpy.run keyword arguments.
    with_gc = 'g' in opts
    n_samples = int(opts.get('r', [5])[0])
    max_batch = int(opts.get('n', [10])[0])
    # the number of batches cannot exceed the maximum batch size
    n_batches = min(int(max_batch), int(opts.get('m', [10])[0]))
    table_keys = None
    table_labels = opts.get('t', [None])[0]
    if table_labels is not None:
        table_keys = table_labels
    # The benchmarked statement is executed inside the user's namespace.
    f = partial(exec, arg_str, ip.user_ns)
    from . import run, bench
    res = run(bench("<magic>", f), with_gc=with_gc,
              n_samples=n_samples,
              n_batches=n_batches,
              max_batch=max_batch)
    if 'i' in opts:
        print(res._repr("Full"))
    else:
        print(res._repr(table_keys, with_empty=False))
    if 'p' in opts:
        # Show the fitted-regression plots interactively.
        res.plot()
        res.plot_features()
        plt.show()
def load_ipython_extension(ip):
    """API for IPython to recognize this module as an IPython extension."""
    # Register magic_benchpy as both a line (%benchpy) and cell (%%benchpy) magic.
    ip.register_magic_function(magic_benchpy, "line_cell", magic_name="benchpy")
| mit |
jakdot/pyactr | tutorials/forbook/code/ch7_lexical_decision_pyactr_no_imaginal.py | 1 | 10801 | """
A model of lexical decision: Bayes+ACT-R, no imaginal buffer
"""
import warnings
import sys
import matplotlib as mpl
mpl.use("pgf")
pgf_with_pdflatex = {"text.usetex": True, "pgf.texsystem": "pdflatex",
"pgf.preamble": [r"\usepackage{mathpazo}",
r"\usepackage[utf8x]{inputenc}",
r"\usepackage[T1]{fontenc}",
r"\usepackage{amsmath}"],
"axes.labelsize": 8,
"font.family": "serif",
"font.serif":["Palatino"],
"font.size": 8,
"legend.fontsize": 8,
"xtick.labelsize": 8,
"ytick.labelsize": 8}
mpl.rcParams.update(pgf_with_pdflatex)
import matplotlib.pyplot as plt
plt.style.use('seaborn')
import seaborn as sns
sns.set_style({"font.family":"serif", "font.serif":["Palatino"]})
import pandas as pd
import pyactr as actr
import math
from simpy.core import EmptySchedule
import numpy as np
import re
import scipy.stats as stats
import scipy
import pymc3 as pm
from pymc3 import Gamma, Normal, HalfNormal, Deterministic, Uniform, find_MAP,\
Slice, sample, summary, Metropolis, traceplot, gelman_rubin
from pymc3.backends.base import merge_traces
from pymc3.backends import SQLite
from pymc3.backends.sqlite import load
import theano
import theano.tensor as tt
from theano.compile.ops import as_op
warnings.filterwarnings("ignore")
# Empirical data per frequency band: word frequency (per million words),
# mean lexical decision latency (ms) and accuracy (converted to proportions).
FREQ = np.array([242, 92.8, 57.7, 40.5, 30.6, 23.4, 19,\
                 16, 13.4, 11.5, 10, 9, 7, 5, 3, 1])
RT = np.array([542, 555, 566, 562, 570, 569, 577, 587,\
               592, 605, 603, 575, 620, 607, 622, 674])
ACCURACY = np.array([97.22, 95.56, 95.56, 96.3, 96.11, 94.26,\
                     95, 92.41, 91.67, 93.52, 91.85, 93.52,\
                     91.48, 90.93, 84.44, 74.63])/100

# Visual environment with the stimulus displayed at screen centre.
environment = actr.Environment(focus_position=(320, 180))
# Subsymbolic ACT-R model; the very low retrieval threshold means retrieval
# failures are effectively governed by timing rather than the threshold.
lex_decision = actr.ACTRModel(environment=environment,\
                              subsymbolic=True,\
                              automatic_visual_search=True,\
                              activation_trace=False,\
                              retrieval_threshold=-80,\
                              motor_prepared=True,
                              eye_mvt_scaling_parameter=0.18,\
                              emma_noise=False)

actr.chunktype("goal", "state")
actr.chunktype("word", "form")

# on average, 15 years of exposure is 112.5 million words
SEC_IN_YEAR = 365*24*3600
SEC_IN_TIME = 15*SEC_IN_YEAR

# Estimated lifetime number of presentations per word
# (frequency per million x 112.5 million words of exposure).
FREQ_DICT = {}
FREQ_DICT['guy'] = 242*112.5
FREQ_DICT['somebody'] = 92*112.5
FREQ_DICT['extend'] = 58*112.5
FREQ_DICT['dance'] = 40.5*112.5
FREQ_DICT['shape'] = 30.6*112.5
FREQ_DICT['besides'] = 23.4*112.5
FREQ_DICT['fit'] = 19*112.5
FREQ_DICT['dedicate'] = 16*112.5
FREQ_DICT['robot'] = 13.4*112.5
FREQ_DICT['tile'] = 11.5*112.5
FREQ_DICT['between'] = 10*112.5
FREQ_DICT['precedent'] = 9*112.5
FREQ_DICT['wrestle'] = 7*112.5
FREQ_DICT['resonate'] = 5*112.5
FREQ_DICT['seated'] = 3*112.5
FREQ_DICT['habitually'] = 1*112.5

# Words sorted from most to least frequent (matches FREQ/RT/ACCURACY order).
ORDERED_FREQ = sorted(list(FREQ_DICT), key=lambda x:FREQ_DICT[x], reverse=True)
def time_freq(freq):
    """Build the simulated presentation-time matrix for each frequency band.

    For every frequency band, evenly spaces the word's estimated lifetime
    presentations (freq * 112.5 per million) over SEC_IN_TIME seconds.
    Returns an array of shape (len(freq), max(freq) * 113); unused cells
    remain zero (113 > 112.5 guarantees room for the largest band).

    Fixed: `np.int` (deprecated in NumPy 1.20, removed in 1.24) replaced
    with the builtin `int` — numerically identical behavior.
    """
    rehearsals = np.zeros((int(np.max(freq) * 113), len(freq)))
    for i in np.arange(len(freq)):
        # One entry per presentation, spaced at a constant interval.
        temp = np.arange(int(freq[i] * 112.5))
        temp = temp * int(SEC_IN_TIME / (freq[i] * 112.5))
        rehearsals[:len(temp), i] = temp
    return rehearsals.T
# Presentation-time matrix shared with theano for the activation computation.
time = theano.shared(time_freq(FREQ), 'time')

# One declarative chunk per word; declarative memory starts with no recorded
# uses (activations are injected later from the Bayesian model).
LEMMA_CHUNKS = [(actr.makechunk("", typename="word", form=word))
                for word in ORDERED_FREQ]
lex_decision.set_decmem({x: np.array([]) for x in LEMMA_CHUNKS})

lex_decision.goals = {}
lex_decision.set_goal("g")
lex_decision.productionstring(name="attend word", string="""
=g>
isa goal
state 'attend'
=visual_location>
isa _visuallocation
?visual>
state free
==>
=g>
isa goal
state 'retrieving'
+visual>
isa _visual
cmd move_attention
screen_pos =visual_location
~visual_location>
""")
lex_decision.productionstring(name="retrieving", string="""
=g>
isa goal
state 'retrieving'
=visual>
isa _visual
value =val
==>
=g>
isa goal
state 'retrieval_done'
+retrieval>
isa word
form =val
""")
lex_decision.productionstring(name="lexeme retrieved", string="""
=g>
isa goal
state 'retrieval_done'
?retrieval>
buffer full
state free
==>
=g>
isa goal
state 'done'
+manual>
isa _manual
cmd press_key
key 'J'
""")
lex_decision.productionstring(name="no lexeme found", string="""
=g>
isa goal
state 'retrieval_done'
?retrieval>
buffer empty
state error
==>
=g>
isa goal
state 'done'
+manual>
isa _manual
cmd press_key
key 'F'
""")
def run_stimulus(word):
    """
    Function running one instance of lexical decision for a word.

    Returns the simulated decision time in seconds, or -1 when the model
    answered "non-word" (key 'F').
    """
    # reset model state to initial state for a new simulation
    # (flush buffers without moving their contents to dec mem)
    try:
        lex_decision.retrieval.pop()
    except KeyError:
        pass
    try:
        lex_decision.goals["g"].pop()
    except KeyError:
        pass

    # reinitialize model: one word displayed at the centre of the screen
    stim = {1: {'text': word, 'position': (320, 180)}}
    lex_decision.goals["g"].add(actr.makechunk(nameofchunk='start',
                                               typename="goal",
                                               state='attend'))
    environment.current_focus = [320, 180]
    lex_decision.model_parameters['motor_prepared'] = True

    # run new simulation step by step until a key press event appears
    lex_dec_sim = lex_decision.simulation(realtime=False, gui=False, trace=False,
                                          environment_process=environment.environment_process,
                                          stimuli=stim, triggers='', times=10)
    while True:
        lex_dec_sim.step()
        if lex_dec_sim.current_event.action == "KEY PRESSED: J":
            # 'J' = word recognized; record the simulated time
            estimated_time = lex_dec_sim.show_time()
            break
        if lex_dec_sim.current_event.action == "KEY PRESSED: F":
            # 'F' = no lexeme retrieved; flag the trial with -1
            estimated_time = -1
            break
    return estimated_time
def run_lex_decision_task():
    """
    Run a full lexical decision task: one trial per word (i.e. per
    frequency band), returning the latencies converted to milliseconds.
    """
    return [1000 * run_stimulus(word) for word in ORDERED_FREQ]
@as_op(itypes=[tt.dscalar, tt.dscalar, tt.dscalar, tt.dvector],
       otypes=[tt.dvector])
def actrmodel_latency(lf, le, decay, activation_from_time):
    """
    Function running the entire lexical decision task for specific
    values of the latency factor, latency exponent and decay parameters.

    The activation computed with the specific value of the decay
    parameter is also inherited as a separate argument to save expensive
    computation time.

    The function is wrapped inside the theano @as_op decorator so that
    pymc3 / theano can use it as part of the RT likelihood function in the
    Bayesian model below.
    """
    # Push the sampled parameter values into the ACT-R model.
    lex_decision.model_parameters["latency_factor"] = lf
    lex_decision.model_parameters["latency_exponent"] = le
    lex_decision.model_parameters["decay"] = decay
    # Overwrite the base-level activations with the precomputed values
    # (one activation per lemma chunk, in ORDERED_FREQ order).
    activation_dict = {x[0]: x[1]
                       for x in zip(LEMMA_CHUNKS, activation_from_time)}
    lex_decision.decmem.activations.update(activation_dict)
    sample = run_lex_decision_task()
    return np.array(sample)
# Bayesian model: priors over the ACT-R parameters, activation derived from
# the presentation-time matrix, and Normal likelihoods over RTs/accuracies.
lex_decision_with_bayes = pm.Model()
with lex_decision_with_bayes:
    # prior for activation
    decay = Uniform('decay', lower=0, upper=1)
    # priors for accuracy
    noise = Uniform('noise', lower=0, upper=5)
    threshold = Normal('threshold', mu=0, sd=10)
    # priors for latency
    lf = HalfNormal('lf', sd=1)
    le = HalfNormal('le', sd=1)
    # compute activation
    scaled_time = time ** (-decay)
    def compute_activation(scaled_time_vector):
        # Drop inf entries (zero-padding raised to a negative power)
        # before taking the log-sum that yields base-level activation.
        compare = tt.isinf(scaled_time_vector)
        subvector = scaled_time_vector[(1-compare).nonzero()]
        activation_from_time = tt.log(subvector.sum())
        return activation_from_time
    activation_from_time, _ = theano.scan(fn=compute_activation,\
                                          sequences=scaled_time)
    # latency likelihood -- this is where pyactr is used
    pyactr_rt = actrmodel_latency(lf, le, decay, activation_from_time)
    mu_rt = Deterministic('mu_rt', pyactr_rt)
    rt_observed = Normal('rt_observed', mu=mu_rt, sd=0.01, observed=RT)
    # accuracy likelihood (logistic in activation, threshold and noise)
    odds_reciprocal = tt.exp(-(activation_from_time - threshold)/noise)
    mu_prob = Deterministic('mu_prob', 1/(1 + odds_reciprocal))
    prob_observed = Normal('prob_observed', mu=mu_prob, sd=0.01,\
                           observed=ACCURACY)
    # we start the sampling (disabled: a pre-computed chain is loaded below)
    #step = Metropolis()
    #db = SQLite('lex_dec_pyactr_chain_no_imaginal.sqlite')
    #trace = sample(draws=60000, trace=db, njobs=1, step=step, init='auto')

# Load the pre-computed chain and drop the burn-in samples.
with lex_decision_with_bayes:
    trace = load('./data/lex_dec_pyactr_chain_no_imaginal.sqlite')
    trace = trace[10500:]

# Posterior means and 95% credible-interval half-widths for the plots.
mu_rt = pd.DataFrame(trace['mu_rt'])
yerr_rt = [(mu_rt.mean()-mu_rt.quantile(0.025)),\
           (mu_rt.quantile(0.975)-mu_rt.mean())]

mu_prob = pd.DataFrame(trace['mu_prob'])
yerr_prob = [(mu_prob.mean()-mu_prob.quantile(0.025)),\
             (mu_prob.quantile(0.975)-mu_prob.mean())]
def generate_lex_dec_pyactr_no_imaginal_figure():
    """Plot observed vs. predicted RTs and accuracies (with 95% credible
    intervals) and save the figure in .pgf and .pdf formats."""
    fig, (ax1, ax2) = plt.subplots(ncols=1, nrows=2)
    fig.set_size_inches(6.0, 8.5)
    # plot 1: RTs (the dotted diagonal marks perfect prediction)
    ax1.errorbar(RT, mu_rt.mean(), yerr=yerr_rt, marker='o', linestyle='')
    ax1.plot(np.linspace(500, 800, 10), np.linspace(500, 800, 10),\
             color='red', linestyle=':')
    ax1.set_title('Lex. dec. model (pyactr, no imaginal): RTs')
    ax1.set_xlabel('Observed RTs (ms)')
    ax1.set_ylabel('Predicted RTs (ms)')
    ax1.grid(b=True, which='minor', color='w', linewidth=1.0)
    # plot 2: probabilities
    ax2.errorbar(ACCURACY, mu_prob.mean(), yerr=yerr_prob, marker='o',\
                 linestyle='')
    ax2.plot(np.linspace(50, 100, 10)/100,\
             np.linspace(50, 100, 10)/100,\
             color='red', linestyle=':')
    ax2.set_title('Lex. dec. model (pyactr, no imaginal): Prob.s')
    ax2.set_xlabel('Observed probabilities')
    ax2.set_ylabel('Predicted probabilities')
    ax2.grid(b=True, which='minor', color='w', linewidth=1.0)
    # clean up and save
    plt.tight_layout(pad=0.5, w_pad=0.2, h_pad=0.7)
    plt.savefig('./figures/lex_dec_model_pyactr_no_imaginal.pgf')
    plt.savefig('./figures/lex_dec_model_pyactr_no_imaginal.pdf')
generate_lex_dec_pyactr_no_imaginal_figure()
| gpl-3.0 |
rpbarnes/nmrglue | doc/_build/html/examples/el/interactive/2d_interactive/2d_interactive.py | 10 | 1209 | #! /usr/bin/env python
# Create contour plots of a 2D NMRPipe spectrum
import nmrglue as ng
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm

# plot parameters
cmap = matplotlib.cm.Blues_r    # contour map (colors to use for contours)
contour_start = 30000           # contour level start value
contour_num = 20                # number of contour levels
contour_factor = 1.20           # scaling factor between contour levels

# calculate contour levels (geometric progression);
# fixed: range() replaces the Python-2-only xrange()
cl = [contour_start * contour_factor ** x for x in range(contour_num)]

# read in the data from a NMRPipe file
dic, data = ng.pipe.read("../../common_data/2d_pipe/test.ft2")

# make ppm scales for both dimensions
uc_13c = ng.pipe.make_uc(dic, data, dim=1)
ppm_13c = uc_13c.ppm_scale()
uc_15n = ng.pipe.make_uc(dic, data, dim=0)
ppm_15n = uc_15n.ppm_scale()

# create the figure
fig = plt.figure()
ax = fig.add_subplot(111)

# plot the contours
etup = (ppm_13c[0], ppm_13c[-1], ppm_15n[0], ppm_15n[-1])
ax.contour(data, cl, cmap=cmap, extent=etup)

# decorate the axes (ppm axes run high-to-low by NMR convention)
ax.set_ylabel("15N (ppm)")
ax.set_xlabel("13C (ppm)")
ax.set_title("Protein 2D NCa Spectrum")
ax.set_xlim(70, 40)
ax.set_ylim(135, 100)

# start interactive session, script ends when window is closed
plt.show()
| bsd-3-clause |
lail3344/sms-tools | lectures/09-Sound-description/plots-code/mfcc.py | 25 | 1103 | import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess

# analysis parameters: window size M, FFT size N, hop size H, sample rate fs
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
mfcc = ess.MFCC(numberCoefficients = 12)
x = ess.MonoLoader(filename = '../../../sounds/speech-male.wav', sampleRate = fs)()

# frame-by-frame analysis: windowed magnitude spectrum -> MFCC coefficients
mfccs = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
    mX = spectrum(window(frame))
    mfcc_bands, mfcc_coeffs = mfcc(mX)
    mfccs.append(mfcc_coeffs)
mfccs = np.array(mfccs)

# plot the waveform (top) and the MFCCs (bottom; coefficient 0 is skipped)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (speech-male.wav)')

plt.subplot(2,1,2)
numFrames = int(mfccs[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.pcolormesh(frmTime, 1+np.arange(12), np.transpose(mfccs[:,1:]))
plt.ylabel('coefficients')
plt.title('MFCCs')
plt.autoscale(tight=True)

plt.tight_layout()
plt.savefig('mfcc.png')
plt.show()
| agpl-3.0 |
liangz0707/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================

The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.

We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
# NOTE(review): sklearn.grid_search was removed in scikit-learn 0.20;
# current releases provide GridSearchCV in sklearn.model_selection.
from sklearn.grid_search import GridSearchCV

# Chain an unsupervised PCA with a logistic regression classifier.
logistic = linear_model.LogisticRegression()

pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])

digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target

###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)

plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')

###############################################################################
# Prediction

n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)

# Parameters of pipelines can be set using '__' separated parameter names:
estimator = GridSearchCV(pipe,
                         dict(pca__n_components=n_components,
                              logistic__C=Cs))
estimator.fit(X_digits, y_digits)

# Mark the PCA dimensionality chosen by the grid search on the spectrum plot.
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
            linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
aberdah/Stockvider | stockvider/stockviderApp/sourceDA/rawData/referenceRawDataDA.py | 1 | 13837 | # -*- coding: utf-8 -*-
'''
Todo:
    - check that when reindexing, the dates added to a DataFrame really
      have NaN values (otherwise the vote is biased).
'''
import pandas as pd
import numpy as np
class ReferenceRawDataDA(object):
'''
Base class handling **raw data aggregation to reference data**.
'''
    def __init__(self):
        # Stateless data-access object: nothing to initialize.
        return
    # -------------------- Public methods ---------------------
    def returnDataFrame(self, refSymbol, startDate, endDate):
        '''
        Returns the merged dataFrame corresponding to the symbol between
        the startDate and endDate (included).

        :param refSymbol: symbol name
        :type refSymbol: str
        :param startDate: oldest date wanted (included)
        :type startDate: date
        :param endDate: newest date wanted (included)
        :type endDate: date
        :returns: the merged dataFrame
        :rtype: dataFrame
        '''
        # One OHLCV frame per source (Google, Yahoo, Wiki), in that order —
        # the aggregation relies on this ordering.
        dataFramesList = []
        # Daily calendar index covering the requested window.
        dateIndex = pd.date_range(startDate, endDate)
        # For each source: if present, slice to the window and reindex on the
        # full calendar (missing days become NaN); if absent, use an all-NaN
        # frame so every source contributes exactly one row per date.
        # Only the Open/High/Low/Close/Volume columns are kept.
        # Google
        if refSymbol.googleSymbol is not None:
            googleDf = refSymbol.googleSymbol.rawData.dataFrame
            googleDf = googleDf.loc[startDate:endDate]
            googleDf = googleDf.reindex(dateIndex)
        else:
            googleDf = pd.DataFrame(index=dateIndex, columns=['Open', 'High',
                                                              'Low', 'Close',
                                                              'Volume'])
        dataFramesList.append(googleDf)
        # Yahoo (drop the extra 'Adjusted Close' column)
        if refSymbol.yahooSymbol is not None:
            yahooDf = refSymbol.yahooSymbol.rawData.dataFrame
            yahooDf = yahooDf.loc[startDate:endDate]
            yahooDf = yahooDf.drop(['Adjusted Close'], axis=1)
            yahooDf = yahooDf.reindex(dateIndex)
        else:
            yahooDf = pd.DataFrame(index=dateIndex, columns=['Open', 'High',
                                                             'Low', 'Close',
                                                             'Volume'])
        dataFramesList.append(yahooDf)
        # Wiki (drop dividend/split and all adjusted columns)
        if refSymbol.wikiSymbol is not None:
            wikiDf = refSymbol.wikiSymbol.rawData.dataFrame
            wikiDf = wikiDf.loc[startDate:endDate]
            wikiDf = wikiDf.drop(['Ex-Dividend', 'Split Ratio', 'Adj. Open',
                                  'Adj. High', 'Adj. Low', 'Adj. Close',
                                  'Adj. Volume'], axis=1)
            wikiDf = wikiDf.reindex(dateIndex)
        else:
            wikiDf = pd.DataFrame(index=dateIndex, columns=['Open', 'High',
                                                            'Low', 'Close',
                                                            'Volume'])
        dataFramesList.append(wikiDf)
        # Merge the three sources into the consolidated reference frame.
        mergedDF = ReferenceRawDataDA._returnMergedDataFrame(dataFramesList)
        return mergedDF
    def returnMetaData(self, refSymbol):
        '''
        Returns the meta data of the symbol through a dict.

        :param refSymbol: symbol name
        :type refSymbol: str
        :returns: dict of meta data
        :rtype: dict

        .. note::

            To access the dict attributes:

            >>> metaData['newest_available_date']
            >>> metaData['oldest_available_date']
            >>> metaData['refreshed_at']
            >>> metaData['newest_consolidated_date']
        '''
        metaData = {}
        # Collect, for each available source, the oldest/newest data dates
        # and the most recent source refresh date.
        oldestDatesList = []
        newestDatesList = []
        updateDatesList = []
        if refSymbol.googleSymbol is not None:
            oldestDatesList.append(refSymbol.googleSymbol.rawData.oldestDateAvailable)
            newestDatesList.append(refSymbol.googleSymbol.rawData.newestDateAvailable)
            updateDatesList.append(refSymbol.googleSymbol.rawData.updateDate)
        if refSymbol.yahooSymbol is not None:
            oldestDatesList.append(refSymbol.yahooSymbol.rawData.oldestDateAvailable)
            newestDatesList.append(refSymbol.yahooSymbol.rawData.newestDateAvailable)
            updateDatesList.append(refSymbol.yahooSymbol.rawData.updateDate)
        if refSymbol.wikiSymbol is not None:
            oldestDatesList.append(refSymbol.wikiSymbol.rawData.oldestDateAvailable)
            newestDatesList.append(refSymbol.wikiSymbol.rawData.newestDateAvailable)
            updateDatesList.append(refSymbol.wikiSymbol.rawData.updateDate)
        metaData['newest_available_date'] = max(newestDatesList)
        metaData['oldest_available_date'] = min(oldestDatesList)
        metaData['refreshed_at'] = max(updateDatesList)
        metaData['newest_consolidated_date'] = min(newestDatesList)
        # Data before 'newest_consolidated_date' will no longer change,
        # whereas data after it may still change because at least one
        # source has not delivered those dates yet.
        return metaData
    # -------------------- Private methods ---------------------
    @classmethod
    def _returnMergedDataFrame(cls, dataFramesList):
        '''
        Merges and returns the dataFrame from the dataFrame list.

        :param dataFramesList: list of dataFrames to merge
        :type dataFramesList: list
        :returns: merged dataFrame
        :rtype: dataFrame

        .. warning::

            The order of dataFrames in the list (1st: Google, 2nd: Yahoo,
            3rd: Wiki) is of primordial importance here. The aggregation
            functions may privilege some source instead of the others depending
            on their data.

        .. note::

            The returned dataFrame contains the column Open, High, Low, Close
            and Volume.
        '''
        # Stack the per-source frames on top of each other (same date index:
        # each date appears once per source).
        mergedDf = pd.concat(dataFramesList)
        # Aggregate per date: prices are merged by tolerance voting, volumes
        # by their own voting rule.
        mergedDf = mergedDf.groupby(mergedDf.index, sort=False).agg({'Open' : ReferenceRawDataDA._returnVotedValuePrice,
                                                                     'High' : ReferenceRawDataDA._returnVotedValuePrice,
                                                                     'Low' : ReferenceRawDataDA._returnVotedValuePrice,
                                                                     'Close' : ReferenceRawDataDA._returnVotedValuePrice,
                                                                     'Volume' : ReferenceRawDataDA._returnVotedValueVolume,
                                                                     })
        # groupby(sort=False) leaves the rows in arbitrary order; resampling
        # daily restores chronological order.
        mergedDf = mergedDf.resample(rule='D').last()
        return mergedDf
@classmethod
def _returnVotedValuePrice(cls, valuesList):
'''
Returns the price value during an aggregation. If no value can be
returned, then returns NaN.
:param valuesList: list of values to aggregate
:type valuesList: list
:returns: value aggregated
:rtype: double or NaN
.. warning::
Assumes that the values are in the following order in the list:
[Google, Yahoo, Wiki]
.. note::
Factor applied:
- Google --> 1
- Yahoo --> 1
- Wiki --> 2
More confidence is given to Wiki.
'''
tol = 0.008 # Tolérance sur les écarts
# Dictionnaire contenant les votes
voteDict = {}
# Retire les Nan de la liste et duplique les valeurs pour appliquer
# les coeffs
correctedValueList = []
for (index, value) in enumerate(valuesList):
# Saute la valeur si elle est à Nan
if np.isnan(value):
continue
# Cas Google et Yahoo
if index == 0 or index == 1:
correctedValueList.append(value)
# Cas Wiki
if index == 2:
correctedValueList.append(value)
correctedValueList.append(value)
if len(correctedValueList) == 0:
return np.nan
elif len(correctedValueList) == 1:
return correctedValueList[0]
else:
# Compte les votes
voteDict[correctedValueList[0]] = 1
for valueToVote in correctedValueList[1:]:
# Set le booléen pour savoir si la valeur a voté
hasVoted = False
for currentValue in voteDict.keys():
if abs(valueToVote - currentValue) / max(currentValue, valueToVote) < tol:
voteDict[currentValue] += 1
hasVoted = True
# Pas de break, on autorise la valeur à voter plusieurs fois
# Vérifie si la valeur a voté sinon ajoute son cas
if not hasVoted:
voteDict[valueToVote] = 1
# Retourne la valeur avec avec le plus de vote uniquement si elle gagne
# par majorité.
# Récupère le nombre de votes max
maxVoteNumber = max(voteDict.values())
# Récupère la liste de valeur qui atteignent ce max
votedValuesList = [x for x in voteDict.keys() if voteDict[x] == maxVoteNumber]
if len(votedValuesList) == 1:
return votedValuesList[0]
else:
return np.nan
return np.nan
@classmethod
def _returnVotedValueVolume(cls, valuesList):
'''
Returns the volume value during an aggregation. If no value can be
returned, then returns NaN.
:param valuesList: list of values to aggregate
:type valuesList: list
:returns: value aggregated
:rtype: double or NaN
.. warning::
Assumes that the values are in the following order in the list:
[Google, Yahoo, Wiki]
.. note::
Factor applied:
- Google --> 2
- Yahoo --> 1
- Wiki --> 3
More confidence is given to Wiki then Google
'''
tol = 0.1 # Tolérance sur les écarts
# Dictionnaire contenant les votes
voteDict = {}
# Retire les Nan de la liste et duplique les valeurs pour appliquer
# les coeffs
correctedValueList = []
for (index, value) in enumerate(valuesList):
# Saute la valeur si elle est à Nan
if np.isnan(value):
continue
# Cas Google
if index == 0:
correctedValueList.append(value)
correctedValueList.append(value)
# Cas Yahoo
if index == 1:
correctedValueList.append(value)
# Cas Wiki
if index == 2:
correctedValueList.append(value)
correctedValueList.append(value)
correctedValueList.append(value)
if len(correctedValueList) == 0:
return np.nan
elif len(correctedValueList) == 1:
return correctedValueList[0]
else:
# Compte les votes
voteDict[correctedValueList[0]] = 1
for valueToVote in correctedValueList[1:]:
# Set le booléen pour savoir si la valeur a voté
hasVoted = False
for currentValue in voteDict.keys():
if abs(valueToVote - currentValue) / max(currentValue, valueToVote) < tol:
voteDict[currentValue] += 1
hasVoted = True
# Pas de break, on autorise la valeur à voter plusieurs fois
# Vérifie si la valeur a voté sinon ajoute son cas
if not hasVoted:
voteDict[valueToVote] = 1
# Retourne la valeur avec avec le plus de vote uniquement si elle gagne
# par majorité.
# Récupère le nombre de votes max
maxVoteNumber = max(voteDict.values())
# Récupère la liste de valeur qui atteignent ce max
votedValuesList = [x for x in voteDict.keys() if voteDict[x] == maxVoteNumber]
if len(votedValuesList) == 1:
return votedValuesList[0]
else:
return np.nan
return np.nan
| mit |
DonBeo/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data: 9 separable points in the plane, 5 in class 1 and 4 in class 2
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
              [1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
# Alternative 3-class labelling that is NOT separable on X
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
               [2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
               [2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
    # QDA classification.
    # Fitting and predicting on the tiny separable dataset must reproduce
    # the labels exactly.
    clf = qda.QDA()
    pred = clf.fit(X, y).predict(X)
    assert_array_equal(pred, y)

    # The classifier must also accept 1-D feature matrices.
    pred_1d = clf.fit(X1, y).predict(X1)
    assert_array_equal(pred_1d, y)

    # Probability estimates must agree with the hard predictions, and the
    # log-probabilities must be consistent with the probabilities.
    proba = clf.predict_proba(X1)
    assert_array_equal((proba[:, 1] > 0.5) + 1, y)
    log_proba = clf.predict_log_proba(X1)
    assert_array_almost_equal(np.exp(log_proba), proba, 8)

    # The y3 labelling is not separable, so QDA must make mistakes.
    pred_y3 = clf.fit(X, y3).predict(X)
    assert_true(np.any(pred_y3 != y3))

    # Every class needs at least 2 samples.
    assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
    # Shrinking the prior of class 1 to (almost) zero must push more
    # predictions into class 2 than with the default (empirical) priors.
    clf = qda.QDA()
    n_pos_default = np.sum(clf.fit(X, y).predict(X) == 2)

    tiny = 1e-10
    clf = qda.QDA(priors=np.array([tiny, 1 - tiny]))
    n_pos_skewed = np.sum(clf.fit(X, y).predict(X) == 2)

    assert_greater(n_pos_skewed, n_pos_default)
def test_qda_store_covariances():
    # covariances_ is only exposed when explicitly requested at fit time.
    clf = qda.QDA().fit(X, y)
    assert_true(not hasattr(clf, 'covariances_'))

    clf = qda.QDA().fit(X, y, store_covariances=True)
    assert_true(hasattr(clf, 'covariances_'))

    # Check the per-class covariance matrices against known values.
    assert_array_almost_equal(
        clf.covariances_[0],
        np.array([[0.7, 0.45], [0.45, 0.7]])
    )
    assert_array_almost_equal(
        clf.covariances_[1],
        np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
    )
def test_qda_regularization():
    # With the default reg_param=0, a zero-variance feature breaks the fit
    # and the predictions are wrong.
    clf = qda.QDA()
    with ignore_warnings():
        pred = clf.fit(X2, y).predict(X2)
    assert_true(np.any(pred != y))

    # A little regularization fixes the problem.
    clf = qda.QDA(reg_param=0.01)
    with ignore_warnings():
        clf.fit(X2, y)
    assert_array_equal(clf.predict(X2), y)

    # Regularization also handles a class with fewer samples than features.
    clf = qda.QDA(reg_param=0.1)
    with ignore_warnings():
        clf.fit(X5, y5)
    assert_array_equal(clf.predict(X5), y5)
| bsd-3-clause |
abimannans/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
    """The ground-truth function to predict: x * sin(x)."""
    return np.sin(x) * x
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)

# Observations: f(X) plus heteroscedastic Gaussian noise
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)

# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)

# Quantile regression: fit the upper (alpha), lower (1 - alpha) and mean
# (least-squares) models with the same booster configuration.
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
                                n_estimators=250, max_depth=3,
                                learning_rate=.1, min_samples_leaf=9,
                                min_samples_split=9)
clf.fit(X, y)

# Make the prediction on the meshed x-axis (upper quantile)
y_upper = clf.predict(xx)

clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)

# Make the prediction on the meshed x-axis (lower quantile)
y_lower = clf.predict(xx)

clf.set_params(loss='ls')
clf.fit(X, y)

# Make the prediction on the meshed x-axis (least-squares mean)
y_pred = clf.predict(xx)

# Plot the function, the prediction and the 90% prediction interval
fig = plt.figure()
# BUG FIX: raw string for the LaTeX label -- the original u'...\,\sin(x)...'
# contained the invalid escape sequences '\,' and '\s', which raise
# SyntaxWarning/DeprecationWarning on modern Python. The bytes are identical.
plt.plot(xx, f(xx), 'g:', label=r'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
         np.concatenate([y_upper, y_lower[::-1]]),
         alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
eg-zhang/scikit-learn | sklearn/tree/export.py | 78 | 15814 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Trevor Stephens <trev.stephens@gmail.com>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
                    feature_names=None, class_names=None, label='all',
                    filled=False, leaves_parallel=False, impurity=True,
                    node_ids=False, proportion=False, rotate=False,
                    rounded=False, special_characters=False):
    """Export a decision tree in DOT format.

    This function generates a GraphViz representation of the decision tree,
    which is then written into `out_file`. Once exported, graphical renderings
    can be generated using, for example::

        $ dot -Tps tree.dot -o tree.ps      (PostScript format)
        $ dot -Tpng tree.dot -o tree.png    (PNG format)

    The sample counts that are shown are weighted with any sample_weights that
    might be present.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    decision_tree : decision tree classifier
        The decision tree to be exported to GraphViz.

    out_file : file object or string, optional (default="tree.dot")
        Handle or name of the output file.

    max_depth : int, optional (default=None)
        The maximum depth of the representation. If None, the tree is fully
        generated.

    feature_names : list of strings, optional (default=None)
        Names of each of the features.

    class_names : list of strings, bool or None, optional (default=None)
        Names of each of the target classes in ascending numerical order.
        Only relevant for classification and not supported for multi-output.
        If ``True``, shows a symbolic representation of the class name.

    label : {'all', 'root', 'none'}, optional (default='all')
        Whether to show informative labels for impurity, etc.
        Options include 'all' to show at every node, 'root' to show only at
        the top root node, or 'none' to not show at any node.

    filled : bool, optional (default=False)
        When set to ``True``, paint nodes to indicate majority class for
        classification, extremity of values for regression, or purity of node
        for multi-output.

    leaves_parallel : bool, optional (default=False)
        When set to ``True``, draw all leaf nodes at the bottom of the tree.

    impurity : bool, optional (default=True)
        When set to ``True``, show the impurity at each node.

    node_ids : bool, optional (default=False)
        When set to ``True``, show the ID number on each node.

    proportion : bool, optional (default=False)
        When set to ``True``, change the display of 'values' and/or 'samples'
        to be proportions and percentages respectively.

    rotate : bool, optional (default=False)
        When set to ``True``, orient tree left to right rather than top-down.

    rounded : bool, optional (default=False)
        When set to ``True``, draw node boxes with rounded corners and use
        Helvetica fonts instead of Times-Roman.

    special_characters : bool, optional (default=False)
        When set to ``False``, ignore special characters for PostScript
        compatibility.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn import tree

    >>> clf = tree.DecisionTreeClassifier()
    >>> iris = load_iris()

    >>> clf = clf.fit(iris.data, iris.target)
    >>> tree.export_graphviz(clf,
    ...     out_file='tree.dot')                # doctest: +SKIP
    """

    def get_color(value):
        # Find the appropriate color & intensity for a node
        if colors['bounds'] is None:
            # Classification tree: intensity = margin between the two
            # largest class proportions.
            color = list(colors['rgb'][np.argmax(value)])
            sorted_values = sorted(value, reverse=True)
            alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
                        (1 - sorted_values[1]))
        else:
            # Regression tree or multi-output: intensity scaled over the
            # precomputed value/impurity bounds.
            color = list(colors['rgb'][0])
            alpha = int(255 * ((value - colors['bounds'][0]) /
                               (colors['bounds'][1] - colors['bounds'][0])))

        # Return html color code in #RRGGBBAA format
        color.append(alpha)
        hex_codes = [str(i) for i in range(10)]
        hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
        color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]

        return '#' + ''.join(color)

    def node_to_str(tree, node_id, criterion):
        # Generate the node content string
        if tree.n_outputs == 1:
            value = tree.value[node_id][0, :]
        else:
            value = tree.value[node_id]

        # Should labels be shown?
        labels = (label == 'root' and node_id == 0) or label == 'all'

        # PostScript compatibility for special characters
        # NOTE(review): upstream scikit-learn uses the HTML entities '&#35;'
        # and '&le;' in the first list; the plain '#'/'≤' literals here look
        # HTML-unescaped in transit -- verify against the intended output.
        if special_characters:
            characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
            node_string = '<'
        else:
            characters = ['#', '[', ']', '<=', '\\n', '"']
            node_string = '"'

        # Write node ID
        if node_ids:
            if labels:
                node_string += 'node '
            node_string += characters[0] + str(node_id) + characters[4]

        # Write decision criteria
        if tree.children_left[node_id] != _tree.TREE_LEAF:
            # Always write node decision criteria, except for leaves
            if feature_names is not None:
                feature = feature_names[tree.feature[node_id]]
            else:
                feature = "X%s%s%s" % (characters[1],
                                       tree.feature[node_id],
                                       characters[2])
            node_string += '%s %s %s%s' % (feature,
                                           characters[3],
                                           round(tree.threshold[node_id], 4),
                                           characters[4])

        # Write impurity
        if impurity:
            if isinstance(criterion, _criterion.FriedmanMSE):
                criterion = "friedman_mse"
            elif not isinstance(criterion, six.string_types):
                criterion = "impurity"
            if labels:
                node_string += '%s = ' % criterion
            node_string += (str(round(tree.impurity[node_id], 4)) +
                            characters[4])

        # Write node sample count
        if labels:
            node_string += 'samples = '
        if proportion:
            percent = (100. * tree.n_node_samples[node_id] /
                       float(tree.n_node_samples[0]))
            node_string += (str(round(percent, 1)) + '%' +
                            characters[4])
        else:
            node_string += (str(tree.n_node_samples[node_id]) +
                            characters[4])

        # Write node class distribution / regression value
        if proportion and tree.n_classes[0] != 1:
            # For classification this will show the proportion of samples
            value = value / tree.weighted_n_node_samples[node_id]
        if labels:
            node_string += 'value = '
        if tree.n_classes[0] == 1:
            # Regression
            value_text = np.around(value, 4)
        elif proportion:
            # Classification
            value_text = np.around(value, 2)
        elif np.all(np.equal(np.mod(value, 1), 0)):
            # Classification without floating-point weights
            value_text = value.astype(int)
        else:
            # Classification with floating-point weights
            value_text = np.around(value, 4)
        # Strip whitespace
        value_text = str(value_text.astype('S32')).replace("b'", "'")
        value_text = value_text.replace("' '", ", ").replace("'", "")
        if tree.n_classes[0] == 1 and tree.n_outputs == 1:
            value_text = value_text.replace("[", "").replace("]", "")
        value_text = value_text.replace("\n ", characters[4])
        node_string += value_text + characters[4]

        # Write node majority class
        if (class_names is not None and
                tree.n_classes[0] != 1 and
                tree.n_outputs == 1):
            # Only done for single-output classification trees
            if labels:
                node_string += 'class = '
            if class_names is not True:
                class_name = class_names[np.argmax(value)]
            else:
                class_name = "y%s%s%s" % (characters[1],
                                          np.argmax(value),
                                          characters[2])
            node_string += class_name

        # Clean up any trailing newlines
        if node_string[-2:] == '\\n':
            node_string = node_string[:-2]
        if node_string[-5:] == '<br/>':
            node_string = node_string[:-5]

        return node_string + characters[5]

    def recurse(tree, node_id, criterion, parent=None, depth=0):
        if node_id == _tree.TREE_LEAF:
            raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)

        left_child = tree.children_left[node_id]
        right_child = tree.children_right[node_id]

        # Add node with description
        if max_depth is None or depth <= max_depth:

            # Collect ranks for 'leaf' option in plot_options
            if left_child == _tree.TREE_LEAF:
                ranks['leaves'].append(str(node_id))
            elif str(depth) not in ranks:
                ranks[str(depth)] = [str(node_id)]
            else:
                ranks[str(depth)].append(str(node_id))

            out_file.write('%d [label=%s'
                           % (node_id,
                              node_to_str(tree, node_id, criterion)))

            if filled:
                # Fetch appropriate color for node
                if 'rgb' not in colors:
                    # Initialize colors and bounds if required
                    colors['rgb'] = _color_brew(tree.n_classes[0])
                    if tree.n_outputs != 1:
                        # Find max and min impurities for multi-output
                        colors['bounds'] = (np.min(-tree.impurity),
                                            np.max(-tree.impurity))
                    elif tree.n_classes[0] == 1:
                        # Find max and min values in leaf nodes for regression
                        colors['bounds'] = (np.min(tree.value),
                                            np.max(tree.value))
                if tree.n_outputs == 1:
                    node_val = (tree.value[node_id][0, :] /
                                tree.weighted_n_node_samples[node_id])
                    if tree.n_classes[0] == 1:
                        # Regression
                        node_val = tree.value[node_id][0, :]
                else:
                    # If multi-output color node by impurity
                    node_val = -tree.impurity[node_id]
                out_file.write(', fillcolor="%s"' % get_color(node_val))
            out_file.write('] ;\n')

            if parent is not None:
                # Add edge to parent
                out_file.write('%d -> %d' % (parent, node_id))
                if parent == 0:
                    # Draw True/False labels if parent is root node
                    angles = np.array([45, -45]) * ((rotate - .5) * -2)
                    out_file.write(' [labeldistance=2.5, labelangle=')
                    if node_id == 1:
                        out_file.write('%d, headlabel="True"]' % angles[0])
                    else:
                        out_file.write('%d, headlabel="False"]' % angles[1])
                out_file.write(' ;\n')

            if left_child != _tree.TREE_LEAF:
                recurse(tree, left_child, criterion=criterion, parent=node_id,
                        depth=depth + 1)
                recurse(tree, right_child, criterion=criterion, parent=node_id,
                        depth=depth + 1)

        else:
            ranks['leaves'].append(str(node_id))

            out_file.write('%d [label="(...)"' % node_id)
            if filled:
                # color cropped nodes grey
                out_file.write(', fillcolor="#C0C0C0"')
            # BUG FIX: the original wrote ``'] ;\n' % node_id`` -- applying the
            # % operator to a string with no conversion specifier raises
            # TypeError ("not all arguments converted") whenever the tree is
            # truncated by ``max_depth``. No formatting is needed here.
            out_file.write('] ;\n')

            if parent is not None:
                # Add edge to parent
                out_file.write('%d -> %d ;\n' % (parent, node_id))

    own_file = False
    try:
        if isinstance(out_file, six.string_types):
            if six.PY3:
                out_file = open(out_file, "w", encoding="utf-8")
            else:
                out_file = open(out_file, "wb")
            own_file = True

        # The depth of each node for plotting with 'leaf' option
        ranks = {'leaves': []}
        # The colors to render each node with
        colors = {'bounds': None}

        out_file.write('digraph Tree {\n')

        # Specify node aesthetics
        out_file.write('node [shape=box')
        rounded_filled = []
        if filled:
            rounded_filled.append('filled')
        if rounded:
            rounded_filled.append('rounded')
        if len(rounded_filled) > 0:
            out_file.write(', style="%s", color="black"'
                           % ", ".join(rounded_filled))
        if rounded:
            out_file.write(', fontname=helvetica')
        out_file.write('] ;\n')

        # Specify graph & edge aesthetics
        if leaves_parallel:
            out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
        if rounded:
            out_file.write('edge [fontname=helvetica] ;\n')
        if rotate:
            out_file.write('rankdir=LR ;\n')

        # Now recurse the tree and add node & edge attributes
        if isinstance(decision_tree, _tree.Tree):
            recurse(decision_tree, 0, criterion="impurity")
        else:
            recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)

        # If required, draw leaf nodes at same depth as each other
        if leaves_parallel:
            for rank in sorted(ranks):
                out_file.write("{rank=same ; " +
                               "; ".join(r for r in ranks[rank]) + "} ;\n")
        out_file.write("}")

    finally:
        if own_file:
            out_file.close()
| bsd-3-clause |
ChanChiChoi/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)

import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt

###############################################################################
# Generate sample data: 40 sorted random abscissae and a noiseless sine target
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()

###############################################################################
# Add noise to every fifth target
y[::5] += 3 * (0.5 - np.random.rand(8))

###############################################################################
# Fit one SVR per kernel and predict on the training abscissae
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)

###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
# BUG FIX: the original called ``plt.hold('on')`` here. ``pyplot.hold`` was
# deprecated and removed in Matplotlib 3.0; hold behaviour is always on, so
# the call is simply dropped -- the rendered figure is unchanged.
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
gilt/incubator-airflow | airflow/contrib/operators/hive_to_dynamodb.py | 15 | 3701 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook
from airflow.hooks.hive_hooks import HiveServer2Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class HiveToDynamoDBTransferOperator(BaseOperator):
    """
    Moves data from Hive to DynamoDB. Note that for now the data is loaded
    into memory before being pushed to DynamoDB, so this operator should
    be used for smallish amounts of data.

    :param sql: SQL query to execute against the hive database
    :type sql: str
    :param table_name: target DynamoDB table
    :type table_name: str
    :param table_keys: partition key and sort key
    :type table_keys: list
    :param pre_process: implement pre-processing of source data
    :type pre_process: function
    :param pre_process_args: list of pre_process function arguments
    :type pre_process_args: list
    :param pre_process_kwargs: dict of pre_process function arguments
    :type pre_process_kwargs: dict
    :param region_name: aws region name (example: us-east-1)
    :type region_name: str
    :param schema: hive database schema
    :type schema: str
    :param hiveserver2_conn_id: source hive connection
    :type hiveserver2_conn_id: str
    :param aws_conn_id: aws connection
    :type aws_conn_id: str
    """

    template_fields = ('sql',)
    template_ext = ('.sql',)
    ui_color = '#a0e08c'

    @apply_defaults
    def __init__(
            self,
            sql,
            table_name,
            table_keys,
            pre_process=None,
            pre_process_args=None,
            pre_process_kwargs=None,
            region_name=None,
            schema='default',
            hiveserver2_conn_id='hiveserver2_default',
            aws_conn_id='aws_default',
            *args, **kwargs):
        super(HiveToDynamoDBTransferOperator, self).__init__(*args, **kwargs)
        # Source query / connection settings.
        self.sql = sql
        self.schema = schema
        self.hiveserver2_conn_id = hiveserver2_conn_id
        # Target table settings.
        self.table_name = table_name
        self.table_keys = table_keys
        self.region_name = region_name
        self.aws_conn_id = aws_conn_id
        # Optional hook to massage the dataframe before writing.
        self.pre_process = pre_process
        self.pre_process_args = pre_process_args
        self.pre_process_kwargs = pre_process_kwargs

    def execute(self, context):
        # Pull the query result into a pandas dataframe (in memory).
        hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
        logging.info('Extracting data from Hive')
        logging.info(self.sql)
        data = hive.get_pandas_df(self.sql, schema=self.schema)

        dynamodb = AwsDynamoDBHook(aws_conn_id=self.aws_conn_id,
                                   table_name=self.table_name,
                                   table_keys=self.table_keys,
                                   region_name=self.region_name)

        logging.info('Inserting rows into dynamodb')
        if self.pre_process is None:
            # Default: serialize the dataframe to a list of record dicts.
            records = json.loads(data.to_json(orient='records'))
        else:
            records = self.pre_process(data=data,
                                       args=self.pre_process_args,
                                       kwargs=self.pre_process_kwargs)
        dynamodb.write_batch_data(records)
        logging.info('Done.')
| apache-2.0 |
h2oai/h2o-3 | h2o-hadoop-3/tests/python/pyunit_s3_import_export.py | 2 | 2041 | #! /usr/env/python
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
from tests import pyunit_utils
from datetime import datetime
import h2o
import uuid
from pandas.util.testing import assert_frame_equal
import boto3
def s3_import_export():
    """Round-trip a frame through S3: export, wait for the object, re-import,
    compare to the local frame, then delete the object."""
    local_frame = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
    for scheme in ["s3a"]:  # s3n is deprecated since HDP3/CDH6
        timestamp = datetime.today().utcnow().strftime("%Y%m%d-%H%M%S.%f")
        unique_suffix = str(uuid.uuid4())
        # FIX: build the object key once. The original re-derived the same
        # key string three times (export path, waiter key, delete key),
        # which had to be kept in sync by hand.
        key = "h2o-hadoop-tests/test-export/" + scheme + "/exported." + \
              timestamp + "." + unique_suffix + ".csv.zip"
        s3_path = scheme + "://test.0xdata.com/" + key
        h2o.export_file(local_frame, s3_path)

        s3 = boto3.resource('s3')
        client = boto3.client('s3')
        # S3 might have a delay in indexing the file (usually milliseconds or
        # hundreds of milliseconds). Wait for the file to be available: poll
        # every 2 seconds, up to 10 times.
        client.get_waiter('object_exists').wait(Bucket='test.0xdata.com',
                                                Key=key,
                                                WaiterConfig={
                                                    'Delay': 2,
                                                    'MaxAttempts': 10
                                                })

        s3_frame = h2o.import_file(s3_path)
        assert_frame_equal(local_frame.as_data_frame(), s3_frame.as_data_frame())
        s3.Object(bucket_name='test.0xdata.com', key=key).delete()
# Entry point: run under the pyunit harness when executed directly; when
# imported by the test runner, execute the test inline.
if __name__ == "__main__":
    pyunit_utils.standalone_test(s3_import_export)
else:
    s3_import_export()
| apache-2.0 |
choldgraf/download | examples/plot_download_providers.py | 1 | 1876 | """
Download from Dropbox, Google Drive, and Github
-----------------------------------------------
It's also possible to download files from Github, Google Drive, and Dropbox.
While you can go through a little extra effort to get a direct download link,
``download`` will try to make things a little bit easier for you.
"""
from download import download
import matplotlib.pyplot as plt
import os.path as op
import shutil as sh

###############################################################################
# Google Drive content works out of the box: give ``download`` the sharing
# link and it will be converted to a direct-download link where necessary.
# (FIX: the original section comments described GitHub here and Google Drive
# below, swapped relative to the URLs actually used.)
#
# .. note:: Make sure your sharing options let any user access the file.
url = "https://drive.google.com/file/d/0B8VZ4vaOYWZ3c3Y1c2ZQX01yREk/view?usp=sharing"
path = download(url, "./downloaded/citation.png", replace=True)

fig, ax = plt.subplots()
im = plt.imread(path)
ax.imshow(im)
ax.set_axis_off()

###############################################################################
# The same works for GitHub content: simply find the link to your file on
# GitHub and give it directly to ``download``.
url = "https://github.com/choldgraf/download/blob/master/examples/data/citation.png"
path2 = download(url, "./downloaded/citation2.png", replace=True)

fig, ax = plt.subplots()
# BUG FIX: read the file that was just downloaded (path2) -- the original
# re-read the first download (path).
im2 = plt.imread(path2)
ax.imshow(im2)
ax.set_axis_off()

###############################################################################
# Dropbox links also work, though in this case ``download`` will use
# the ``requests`` library to download the file. This is because Dropbox
# requires cookies and requests is smart about handling this.
url = "https://www.dropbox.com/s/rlndt99tss65418/citation.png?dl=0"
path3 = download(url, "./downloaded/citation3.png", replace=True)

fig, ax = plt.subplots()
im3 = plt.imread(path3)
# BUG FIX: show the Dropbox image (im3) -- the original displayed ``im``.
ax.imshow(im3)
ax.set_axis_off()

sh.rmtree("./downloaded")
plt.show()
| mit |
tianrui/521dev | svhn/cnn_train.py | 1 | 8823 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import pdb
import numpy as np
from scipy.io import loadmat
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
FLAGS = None
def deepnn(x):
    """Build the graph for a small convolutional image classifier.

    Args:
      x: input tensor, reshaped internally to (N, 32, 32, 3) RGB images.

    Returns:
      A tuple (y_conv, keep_prob): y_conv is an (N, 10) logits tensor and
      keep_prob is the scalar placeholder controlling dropout.
    """
    # NOTE(review): the caller appears to create a (32, 32, 3, None)
    # placeholder, while this reshape assumes batch-major (N, 32, 32, 3)
    # layout -- verify the feed layout matches.
    images = tf.reshape(x, [-1, 32, 32, 3])

    # Conv block 1: 3 channels -> 32 feature maps (5x5 kernels), 2x2 max-pool.
    w1 = weight_variable([5, 5, 3, 32])
    b1 = bias_variable([32])
    conv1 = tf.nn.relu(conv2d(images, w1) + b1)
    pool1 = max_pool_2x2(conv1)

    # Conv block 2: 32 -> 64 feature maps, 2x2 max-pool (8x8 spatial output).
    w2 = weight_variable([5, 5, 32, 64])
    b2 = bias_variable([64])
    conv2 = tf.nn.relu(conv2d(pool1, w2) + b2)
    pool2 = max_pool_2x2(conv2)

    # Fully connected layer 1: flatten 8*8*64 features -> 1024 units.
    w_fc1 = weight_variable([8 * 8 * 64, 1024])
    b_fc1 = bias_variable([1024])
    flat = tf.reshape(pool2, [-1, 8 * 8 * 64])
    fc1 = tf.nn.relu(tf.matmul(flat, w_fc1) + b_fc1)

    # Dropout controls model complexity; keep_prob is fed at run time and
    # shared by both dropout layers.
    keep_prob = tf.placeholder(tf.float32)
    fc1_drop = tf.nn.dropout(fc1, keep_prob)

    # Fully connected layer 2: 1024 -> 1024, also followed by dropout.
    w_fc2 = weight_variable([1024, 1024])
    b_fc2 = bias_variable([1024])
    fc2 = tf.nn.relu(tf.matmul(fc1_drop, w_fc2) + b_fc2)
    fc2_drop = tf.nn.dropout(fc2, keep_prob)

    # Output layer: 1024 features -> 10 class logits.
    w_out = weight_variable([1024, 10])
    b_out = bias_variable([10])
    y_conv = tf.matmul(fc2_drop, w_out) + b_out
    return y_conv, keep_prob
def conv2d(x, W):
  """Apply a 2-D convolution of kernel ``W`` over ``x`` with unit stride.

  'SAME' padding keeps the spatial output size equal to the input size.
  """
  unit_strides = [1, 1, 1, 1]
  return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')
def max_pool_2x2(x):
  """Downsample a feature map by a factor of two using 2x2 max pooling."""
  window = [1, 2, 2, 1]
  return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def weight_variable(shape):
  """Create a weight Variable of ``shape`` drawn from a truncated normal
  distribution (stddev 0.1)."""
  return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
  """Create a bias Variable of ``shape`` initialised to the constant 0.1."""
  return tf.Variable(tf.constant(0.1, shape=shape))
def load_data(data_dir='./Data/SVHN'):
  """Load the SVHN train/test .mat files found under ``data_dir``.

  Returns ``(train_x, train_y, test_x, test_y)``.  Images keep the on-disk
  (H, W, C, N) layout; labels become one-hot rows.  SVHN encodes the digit
  0 as class 10, hence the ``% 10`` remapping before one-hot encoding.
  """
  train_mat = loadmat(data_dir + '/train_32x32.mat')
  test_mat = loadmat(data_dir + '/test_32x32.mat')
  eye = np.eye(10)
  train_y = eye[train_mat['y'][:, 0] % 10]
  test_y = eye[test_mat['y'][:, 0] % 10]
  return train_mat['X'], train_y, test_mat['X'], test_y
def load_batch(x, y, i, batch_size):
  """Return the i-th minibatch ``[x_batch, y_batch]`` from ``(x, y)``.

  ``x`` is laid out (H, W, C, N) and ``y`` (N, classes), so the batch is
  sliced along the last axis of ``x`` and the first axis of ``y``.

  The start index wraps around the dataset (``% N``) so callers may index
  past one epoch.  The end index is clamped rather than wrapped: the
  original wrapped it too, which made the final batch of an exactly
  divisible dataset empty (``[start:0]`` is an empty slice).
  """
  n = x.shape[-1]
  start = (i * batch_size) % n
  stop = min(start + batch_size, n)
  return [x[:, :, :, start:stop], y[start:stop, :]]
def np_accuracy(ytar, ypred):
  """NumPy accuracy: fraction of rows where argmax(ypred) equals argmax(ytar)."""
  hits = np.argmax(ytar, 1) == np.argmax(ypred, 1)
  return np.sum(hits) / ytar.shape[0]
def main(_):
  """Train the SVHN CNN and record per-epoch train/test accuracy and loss.

  Loads the SVHN .mat data, builds the graph via ``deepnn``, trains with
  Adam, evaluates on the test split after every epoch, and saves the
  collected statistics to ``./SVHN/cnn_train_stats``.

  Fixes applied: a stray ``pdb.set_trace()`` that halted every run has been
  removed, and the test accuracy is no longer divided by the batch count
  twice (the duplicate division under-reported test accuracy).
  """
  # Import data
  train_x, train_y, test_x, test_y = load_data(data_dir='/home/rxiao/data/svhn/')
  # Define hyperparameters
  batch_size = 16
  epochs = 100
  train_size = train_x.shape[-1]
  test_size = test_x.shape[-1]
  train_ce = np.zeros(epochs)
  train_acc = np.zeros(epochs)
  test_ce = np.zeros(epochs)
  test_acc = np.zeros(epochs)
  # Create the model; images arrive in the (H, W, C, N) layout of the .mat files
  x = tf.placeholder(tf.float32, [32, 32, 3, None])
  # Define loss and optimizer
  y_ = tf.placeholder(tf.float32, [None, 10])
  # Build the graph for the deep net
  y_conv, keep_prob = deepnn(x)
  cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
  train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
  correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  # Config session for memory
  config = tf.ConfigProto()
  config.gpu_options.allow_growth = True
  #config.gpu_options.per_process_gpu_memory_fraction = 0.5
  config.log_device_placement = True
  with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    start = time.time()
    for j in range(epochs):
      for i in range(int(train_size / batch_size)):
        batch = load_batch(train_x, train_y, i, batch_size)
        if i % 100 == 0:
          # Periodic progress report at keep_prob=1.0 (no dropout).
          train_accuracy, train_centropy, preds = sess.run(
              [accuracy, cross_entropy, y_conv],
              feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
          print('step %d, training accuracy %g, cross-entropy %g, numpy accuracy %g' % (i, train_accuracy, train_centropy, np_accuracy(batch[1], preds)))
        train_ce[j] += cross_entropy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        train_acc[j] += accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.9})
      train_ce[j] /= int(train_size / batch_size)
      train_acc[j] /= int(train_size / batch_size)
      print('training accuracy %g' % train_acc[j])
      print('training cross-entropy %g at epoch %d' % (train_ce[j], j))
      # Evaluate on the held-out test set after each epoch.
      acc = 0
      ce = 0
      for i in range(int(test_size / batch_size)):
        batch = load_batch(test_x, test_y, i, batch_size)
        acc += accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        ce += cross_entropy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
      acc /= int(test_size / batch_size)
      ce /= int(test_size / batch_size)
      print('test accuracy %g' % acc)
      print('test cross-entropy %g' % ce)
      test_ce[j] = ce
      test_acc[j] = acc
    end = time.time()
    print('Time elapsed: %f' % (end - start))
    np.save('./SVHN/cnn_train_stats', {'train_ce': train_ce, 'train_acc': train_acc,
                                       'test_ce': test_ce, 'test_acc': test_acc})
if __name__ == '__main__':
  # CLI entry point: parse known flags and hand anything unrecognised on
  # to tf.app.run, which invokes main() with the remaining argv.
  parser = argparse.ArgumentParser()
  # NOTE(review): the default still points at the MNIST tutorial location
  # even though this script trains on SVHN, and main() ignores FLAGS
  # entirely (it hard-codes its own data path) — likely stale; confirm.
  parser.add_argument('--data_dir', type=str,
                      default='/tmp/tensorflow/mnist/input_data',
                      help='Directory for storing input data')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| gpl-3.0 |
kaichogami/scikit-learn | sklearn/__init__.py | 29 | 3071 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
                        module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.18.dev0'
try:
    # This variable is injected in the __builtins__ by the build
    # process. It used to enable importing subpackages of sklearn when
    # the binaries are not built
    __SKLEARN_SETUP__
except NameError:
    # Normal import (not running from setup.py): full package is available.
    __SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
    sys.stderr.write('Partial import of sklearn during the build process.\n')
    # We are not importing the rest of the scikit during the build
    # process, as it may not be compiled yet
else:
    from . import __check_build
    from .base import clone
    __check_build  # avoid flakes unused variable error
# Submodule names re-exported by ``from sklearn import *`` plus the
# ``clone`` helper.
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
           'cross_validation', 'datasets', 'decomposition', 'dummy',
           'ensemble', 'exceptions', 'externals', 'feature_extraction',
           'feature_selection', 'gaussian_process', 'grid_search',
           'isotonic', 'kernel_approximation', 'kernel_ridge',
           'lda', 'learning_curve', 'linear_model', 'manifold', 'metrics',
           'mixture', 'model_selection', 'multiclass',
           'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
           'preprocessing', 'qda', 'random_projection', 'semi_supervised',
           'svm', 'tree', 'discriminant_analysis',
           # Non-modules:
           'clone']
def setup_module(module):
    """Fixture for the tests to assure globally controllable seeding of RNGs"""
    import os
    import random
    import numpy as np

    # Honour an externally supplied seed; otherwise draw one at random.
    seed = os.environ.get('SKLEARN_SEED')
    if seed is None:
        seed = np.random.uniform() * (2 ** 31 - 1)
    seed = int(seed)
    print("I: Seeding RNGs with %r" % seed)
    np.random.seed(seed)
    random.seed(seed)
| bsd-3-clause |
neuro-lyon/multiglom-model | src/plotting.py | 1 | 6438 | # -*- coding:utf-8 -*-
from matplotlib import pyplot as plt, cm as cmap
from numpy import where
from brian.stdunits import *
from brian.units import *
from matplotlib.mlab import psd
from pylab import detrend_mean
def raster_plot(spikes_i, spikes_t, connection_matrix):
    """Raster plot with focus on interconnection neurons.

    Parameters
    ----------
    spikes_i: array
        neuron number associated with each spike (the original docstring
        had this and ``spikes_t`` swapped)
    spikes_t: array
        spike times, in seconds
    connection_matrix: array
        connection matrix of size (M mitrales, G granules)

    Fixes applied: the histogram no longer reads ``spikes_t[-1]`` on an
    empty spike train (IndexError) and passes an int to ``bins``.
    """
    # Raster plot
    plt.figure()
    rasterp = plt.subplot2grid((4, 1), (0, 0), rowspan=3)
    bin_connection_matrix = (connection_matrix > 0)
    n_mitral, n_subpop = connection_matrix.shape
    # Floor division: identical to ``/`` under Python 2 ints, and safe
    # should the module ever run under Python 3.
    n_mitral_per_subpop = n_mitral // n_subpop
    # Make a mapping, neuron: {spike times}
    spike_map = {}
    for neur, time in zip(spikes_i, spikes_t):
        spike_map.setdefault(int(neur), []).append(time)
    # Plotting
    colors = get_colorlist(n_subpop)
    for ind_subpop in range(n_subpop):
        subpop_start = ind_subpop * n_mitral_per_subpop
        subpop_stop = subpop_start + n_mitral_per_subpop
        subpop_color = colors[ind_subpop]
        downline = subpop_start
        upline = subpop_stop - 1
        for ind_neuron in range(subpop_start, subpop_stop):
            neur_connections = bin_connection_matrix[ind_neuron]
            # Getting the neuron spike times, if it spiked
            spikes = spike_map.get(ind_neuron, [])
            # Neurons connected to more than one granule are drawn darker
            # and stacked from the top of the subpopulation band; the rest
            # stack from the bottom.
            if neur_connections.sum() > 1:
                dark_color = [i / 1.5 for i in subpop_color[:-1]]
                rasterp.plot(spikes, [upline] * len(spikes), ' .',
                             color=dark_color, mew=0)
                upline -= 1
            else:
                rasterp.plot(spikes, [downline] * len(spikes), ' .',
                             color=subpop_color, mew=0)
                downline += 1
    # Some plotting enhancement
    margin = 0.01
    if len(spikes_t) > 0:
        spikes_t_last = spikes_t[-1]
    else:
        spikes_t_last = 0.
    x_overplot = margin * spikes_t_last
    y_overplot = margin * n_mitral
    rasterp.set_xlim((-x_overplot), (spikes_t_last + x_overplot))
    rasterp.set_ylim(-y_overplot, n_mitral + y_overplot)
    rasterp.set_ylabel("Neuron number")
    # Raster histogram: bins of 5 ms (at least one bin, even with no spikes)
    rasterhisto = plt.subplot2grid((4, 1), (3, 0), sharex=rasterp)
    nbins = max(1, int(spikes_t_last // 5e-3))
    rasterhisto.hist(spikes_t, bins=nbins)
    rasterhisto.set_xlabel("Time (s)")
    rasterhisto.set_ylabel("Number of spikes")
    plt.suptitle("Raster plot")
    # Connection matrix plot
    plt.figure()
    plt.imshow(connection_matrix, interpolation="nearest", extent=(0, 1, 0, 1),
               vmin=0, vmax=1)
    plt.colorbar()
def get_colorlist(n_colors, cmap_name="Paired"):
    """Get a list of ``n_colors`` evenly spaced colors from a matplotlib
    colormap.

    Raises AssertionError when ``cmap_name`` does not name a colormap.
    """
    colormap = cmap.get_cmap(cmap_name)
    # ``is not None``: identity check is the correct idiom (the original
    # used ``!= None``); old matplotlib returned None for unknown names.
    assert colormap is not None, cmap_name + " is not a valid colormap name."
    # Sample the map at evenly spaced points in [0, 1).
    return [colormap(1. * i / n_colors) for i in range(n_colors)]
def memb_plot_figure(monit_mt, monit_gr, rec_neurons, n_granule):
    """Membrane potentials of mitral and granule cells.

    Top subplot: mitral membrane potential ``monit_mt['V']`` for each
    neuron in ``rec_neurons``; bottom subplot (shared x-axis): granule
    dendritic potential ``monit_gr['V_D']`` for ``n_granule`` granules.
    The monitors expose ``.times`` and are divided by Brian units
    (msecond, mvolt) — presumably Brian state monitors; confirm with
    callers.
    """
    plt.figure()
    sub_v_mt = plt.subplot(2, 1, 1)
    for neur in rec_neurons:
        sub_v_mt.plot(monit_mt['V'].times/msecond,
                      monit_mt['V'][neur]/mvolt)
    sub_v_mt.set_xlabel('Time (ms)')
    sub_v_mt.set_ylabel('Membrane potential of mitral : V (mvolt)')
    # Granule traces share the time axis with the mitral subplot.
    sub_vd_gr = plt.subplot(2, 1, 2, sharex=sub_v_mt)
    for gran in xrange(n_granule):
        sub_vd_gr.plot(monit_gr['V_D'].times/msecond,
                       monit_gr['V_D'][gran]/mvolt, label="granule #" + str(gran))
    sub_vd_gr.legend()
    sub_vd_gr.set_xlabel('Time (ms)')
    sub_vd_gr.set_ylabel('Membrane potential of granule : V (mvolt)')
def granule_figure(monit_gr, pscommon):
    """Wrapper around granule_pop_figure: unpacks the granule monitor
    (``s`` and ``s_syn_self`` traces plus their shared time base) and the
    common simulation parameters (``resample_dt``, ``burnin``)."""
    granule_pop_figure(monit_gr['s'].values, monit_gr['s_syn_self'].values, monit_gr['s'].times, pscommon['resample_dt'], pscommon['burnin'])
def granule_pop_figure(gr_s, gr_s_syn_self, times, dt, burnin):
    """Plot a figure describing the granule activity, useful to see
    population synchrony.

    Layout: ``s`` traces (top-left), ``s_syn_self`` traces (bottom-left,
    shared x-axis) and a per-granule power spectrum of ``s`` (right
    column, up to ``fft_max_freq`` Hz).  ``dt`` is the sample period of
    ``gr_s`` and ``burnin`` the initial transient to skip before the PSD.
    """
    plt.figure()
    n_granule = len(gr_s)
    # Granule s
    sub_s = plt.subplot2grid((4, 4), (0, 0), rowspan=2, colspan=3)
    for num_granule in xrange(n_granule):
        sub_s.plot(times/msecond, gr_s[num_granule],
                   label="s granule #" + str(num_granule))
    sub_s.legend()
    sub_s.set_xlabel('times (ms)')
    sub_s.set_ylabel('s granule')
    # Granule s_syn_self
    sub_s_syn_self = plt.subplot2grid((4, 4), (2, 0), rowspan=2, colspan=3,
                                      sharex=sub_s)
    for num_granule in xrange(n_granule):
        sub_s_syn_self.plot(times/msecond, gr_s_syn_self[num_granule],
                            label="s_syn_self granule #" + str(num_granule))
    sub_s_syn_self.legend()
    sub_s_syn_self.set_xlabel('times (ms)')
    sub_s_syn_self.set_ylabel('s_syn_self granule')
    # FFT max granules
    sub_fft = plt.subplot2grid((4, 4), (0, 3), rowspan=4, colspan=1)
    fft_max_freq = 200
    # First sample index after the burn-in transient.
    sig_start = where(times > burnin)[0][0]
    for num_granule in xrange(n_granule):
        # Welch-style PSD: 0.5 s windows, 50% overlap, mean detrended.
        power, freqs = psd(gr_s[num_granule][sig_start:], Fs=int(1/dt),
                           NFFT=int(0.5/dt), noverlap=int(0.25/dt),
                           detrend=detrend_mean)
        ind_max_freq = where(freqs <= fft_max_freq)[0][-1]
        sub_fft.plot(freqs[:ind_max_freq], power[:ind_max_freq],
                     label="FFT on granule #" + str(num_granule) + " s")
    sub_fft.legend()
    sub_fft.set_xlabel("granule s frequency (Hz)")
    sub_fft.set_ylabel('Power')
def plot_single_simulation(spikes_i, spikes_t, connection_matrix,
                           s_granule, s_syn_self, times, dt, burnin):
    """Plot all figures for a single simulation and show them.

    Draws the raster/connection-matrix figures and the granule population
    figure, then blocks on ``plt.show()``.
    """
    # Raster plot
    raster_plot(spikes_i, spikes_t, connection_matrix)
    # Granule figure
    granule_pop_figure(s_granule, s_syn_self, times, dt, burnin)
    plt.show()
| mit |
pv/scikit-learn | examples/svm/plot_rbf_parameters.py | 57 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores results from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the value number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
    """Colormap normalizer that maps ``midpoint`` to the middle of the map.

    Piecewise-linear: [vmin, midpoint] -> [0, 0.5] and [midpoint, vmax] ->
    [0.5, 1], so values clustered near ``midpoint`` keep visual contrast.
    """
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)
    def __call__(self, value, clip=None):
        # Interpolate between the three anchor points (vmin, midpoint, vmax).
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
# NOTE(review): StratifiedShuffleSplit(y, n_iter=...) and grid.grid_scores_
# below are the pre-0.18 cross_validation/grid_search APIs.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
      % (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
    for gamma in gamma_2d_range:
        clf = SVC(C=C, gamma=gamma)
        clf.fit(X_2d, y_2d)
        classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
    # evaluate decision function in a grid
    Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # visualize decision function for these parameters
    plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
    plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
              size='medium')
    # visualize parameter's effect on decision function
    plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
    plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
           norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
lifei96/Medium-crawler-with-data-analyzer | User_Crawler/medium_users_data_reader.py | 2 | 1748 | # -*- coding: utf-8 -*-
import pandas as pd
import json
import datetime
import os
def read_users():
    """Collect per-user statistics from ``./data/Users/<name>.json`` files.

    Reads the space-separated usernames in ``./username_list.txt``, skips
    names without a downloaded profile, extracts registration date, last
    post date and post/following/follower counts, and returns one row per
    user as a pandas DataFrame.

    Fixes applied: files are opened with ``with`` (the original leaked
    handles) and the bare ``except:`` that silently swallowed every error
    now catches only the failures a malformed profile can produce.
    """
    users = []
    with open('./username_list.txt', 'r') as file_in:
        username_list = str(file_in.read()).split(' ')
    num = 0
    for username in username_list:
        if not username:
            continue
        path = './data/Users/%s.json' % username
        if not os.path.exists(path):
            continue
        try:
            with open(path, 'r') as file_in:
                raw_data = json.loads(str(file_in.read()))
            profile = raw_data['profile']
            profile_user = profile['user']
            user = dict()
            user['username'] = username
            user['reg_date'] = datetime.date.fromtimestamp(profile_user['createdAt'] / 1000.0).isoformat()
            # Users who never posted carry a null lastPostCreatedAt; fall
            # back to the registration timestamp, as the original did.
            if not profile_user['lastPostCreatedAt']:
                profile_user['lastPostCreatedAt'] = profile_user['createdAt']
            user['last_post_date'] = datetime.date.fromtimestamp(profile_user['lastPostCreatedAt'] / 1000.0).isoformat()
            user['posts_count'] = profile['numberOfPostsPublished']
            user['following_count'] = profile_user['socialStats']['usersFollowedCount']
            user['followers_count'] = profile_user['socialStats']['usersFollowedByCount']
        except (KeyError, TypeError, ValueError, OSError, IOError):
            # Skip malformed or partially downloaded profiles.
            continue
        users.append(user)
        num += 1
        print(username)
        print(num)
    return pd.read_json(json.dumps(users))
if __name__ == '__main__':
    # Ensure the output directory exists, then dump the aggregated user
    # statistics as a tab-separated file.
    if not os.path.exists('./result'):
        os.mkdir('./result')
    users_data = read_users()
    users_data.to_csv('./result/users_raw_data.csv', sep='\t', encoding='utf-8')
| mit |
mjirik/larVolumeToObj | larVolumeToObj/computation/old/step_calcchains_tobinary.py | 2 | 8706 | # -*- coding: utf-8 -*-
from lar import *
from scipy import *
import json
import scipy
import numpy as np
import time as tm
import gc
from pngstack2array3d import *
import struct
import getopt, sys
import traceback
#
import matplotlib.pyplot as plt
# ------------------------------------------------------------
# Logging & Timer
# ------------------------------------------------------------
# Verbosity threshold for log(); messages with level > logging_level are
# dropped, and nothing is printed unless the module runs as a script.
logging_level = 0;
# 0 = no_logging
# 1 = few details
# 2 = many details
# 3 = many many details
def log(n, l):
    # Print every string in ``l`` when running as a script and the
    # requested level ``n`` does not exceed ``logging_level``.
    # (Python 2 print statement.)
    if __name__=="__main__" and n <= logging_level:
        for s in l:
            print "Log:", s;
# Wall-clock timer helpers; timer == 1 enables them.  timer_last holds the
# time of the most recent timer_start() call.
timer = 1;
timer_last = tm.time()
def timer_start(s):
    # Log the label ``s`` (at level 3) and reset the reference time.
    global timer_last;
    if __name__=="__main__" and timer == 1:
        log(3, ["Timer start:" + s]);
        timer_last = tm.time();
def timer_stop():
    # Log (at level 3) the seconds elapsed since the last timer_start().
    global timer_last;
    if __name__=="__main__" and timer == 1:
        log(3, ["Timer stop :" + str(tm.time() - timer_last)]);
# ------------------------------------------------------------
# Configuration parameters
# ------------------------------------------------------------
# File extensions for the input image stack and the binary chain output.
PNG_EXTENSION = ".png"
BIN_EXTENSION = ".bin"
# ------------------------------------------------------------
# Utility toolbox
# ------------------------------------------------------------
def countFilesInADir(directory):
    # Number of regular files directly inside ``directory``: os.walk's
    # first yielded tuple is (dirpath, dirnames, filenames).
    # Python 2 only: uses the generator's .next() method.
    return len(os.walk(directory).next()[2])
def isArrayEmpty(arr):
    """Return True when every element of ``arr`` equals zero (or ``arr``
    is empty)."""
    for element in arr:
        if element != 0:
            return False
    return True
# ------------------------------------------------------------
def writeOffsetToFile(file, offsetCurr):
    """Write the first three components of ``offsetCurr`` to ``file`` as
    big-endian unsigned 32-bit integers (z, x, y block offset header)."""
    for component in offsetCurr[:3]:
        file.write(struct.pack('>I', component))
# ------------------------------------------------------------
def computeChains(imageHeight,imageWidth,imageDepth, imageDx,imageDy,imageDz, Nx,Ny,Nz, calculateout,bordo3, colors,pixelCalc,centroidsCalc, INPUT_DIR,DIR_O):
    """Quantize the PNG stack into per-color chains, block by block, and
    stream them to one binary file per color under ``DIR_O``.

    When ``calculateout`` is True the boundary chain of each block (via
    ``larBoundaryChain`` and the precomputed ``bordo3`` matrix) is written
    to ``output-<color>.bin``; otherwise the raw 0/1 selector chain goes to
    ``selettori-<color>.bin``.  Each record is a (z, x, y) offset header
    followed by the chain bytes.

    Fixes applied: ``hasSomeOne.update(key, value)`` raised TypeError
    (``dict.update`` takes a single mapping) — plain item assignment was
    intended; the save loop referenced an undefined ``colorLenStr``
    (NameError) where ``str(currCol)`` was intended; dead locals
    (LISTA_VETTORI, LISTA_VETTORI2, LISTA_OFFSET, MAX_CHAINS) removed.
    """
    beginImageStack = 0
    endImage = beginImageStack
    count = 0
    fileName = "selettori-"
    if (calculateout == True):
        fileName = "output-"
    saveTheColors = centroidsCalc
    saveTheColors = sorted(saveTheColors.reshape(1,colors)[0])
    # One output file per quantized color value.
    OUTFILES = {}
    for currCol in saveTheColors:
        OUTFILES[str(currCol)] = open(DIR_O+'/'+fileName+str(currCol)+BIN_EXTENSION, "wb")
    # Python 2 integer division: walk the stack imageDz slices at a time.
    for zBlock in range(imageDepth/imageDz):
        startImage = endImage
        endImage = startImage + imageDz
        xEnd, yEnd = 0,0
        theImage,colors,theColors = pngstack2array3d(INPUT_DIR, startImage, endImage, colors, pixelCalc, centroidsCalc)
        theColors = theColors.reshape(1,colors)
        # The quantized palette must stay stable across the whole stack.
        if (sorted(theColors[0]) != saveTheColors):
            log(1, [ "Error: colors have changed"] )
            sys.exit(2)
        for xBlock in range(imageHeight/imageDx):
            for yBlock in range(imageWidth/imageDy):
                xStart, yStart = xBlock * imageDx, yBlock * imageDy
                xEnd, yEnd = xStart+imageDx, yStart+imageDy
                image = theImage[:, xStart:xEnd, yStart:yEnd]
                nz,nx,ny = image.shape
                count += 1
                # Compute a quotient complex of chains with constant field
                # ------------------------------------------------------------
                chains3D_old = {}
                chains3D = {}
                for currCol in saveTheColors:
                    chains3D_old[str(currCol)] = []
                    if (calculateout != True):
                        chains3D[str(currCol)] = np.zeros(nx*ny*nz, dtype=int32)
                zStart = startImage - beginImageStack
                # Linearize (x, y, z) voxel coordinates into the chain index.
                def addr(x,y,z): return x + (nx) * (y + (ny) * (z))
                hasSomeOne = {}
                for currCol in saveTheColors:
                    hasSomeOne[str(currCol)] = False
                if (calculateout == True):
                    # Collect the voxel addresses of every color in the block.
                    for x in range(nx):
                        for y in range(ny):
                            for z in range(nz):
                                for currCol in saveTheColors:
                                    if (image[z,x,y] == currCol):
                                        chains3D_old[str(currCol)].append(addr(x,y,z))
                else:
                    # Mark the selector vector and remember non-empty colors.
                    for x in range(nx):
                        for y in range(ny):
                            for z in range(nz):
                                for currCol in saveTheColors:
                                    if (image[z,x,y] == currCol):
                                        chains3D[str(currCol)][addr(x,y,z)] = 1
                                        hasSomeOne[str(currCol)] = True
                # Compute the boundary complex of the quotient cell
                # ------------------------------------------------------------
                objectBoundaryChain = {}
                if (calculateout == True):
                    for currCol in saveTheColors:
                        if (len(chains3D_old[str(currCol)]) > 0):
                            objectBoundaryChain[str(currCol)] = larBoundaryChain(bordo3, chains3D_old[str(currCol)])
                        else:
                            objectBoundaryChain[str(currCol)] = None
                # Save: offset header followed by the chain payload.
                for currCol in saveTheColors:
                    key = str(currCol)
                    if (calculateout == True):
                        if (objectBoundaryChain[key] != None):
                            writeOffsetToFile( OUTFILES[key], np.array([zStart,xStart,yStart], dtype=int32) )
                            OUTFILES[key].write( bytearray( np.array(objectBoundaryChain[key].toarray().astype('b').flatten()) ) )
                    else:
                        if (hasSomeOne[key] != False):
                            writeOffsetToFile( OUTFILES[key], np.array([zStart,xStart,yStart], dtype=int32) )
                            OUTFILES[key].write( bytearray( np.array(chains3D[key], dtype=np.dtype('b')) ) )
    for currCol in saveTheColors:
        OUTFILES[str(currCol)].flush()
        OUTFILES[str(currCol)].close()
def runComputation(imageDx,imageDy,imageDz, colors,calculateout, V,FV, INPUT_DIR,BEST_IMAGE,BORDER_FILE,DIR_O):
    # Orchestrate the chain computation: optionally load the precomputed
    # boundary-3 operator, probe the image stack dimensions, quantize the
    # reference image into ``colors`` clusters and run computeChains().
    bordo3 = None
    if (calculateout == True):
        # The boundary operator is stored as CSR components in JSON.
        with open(BORDER_FILE, "r") as file:
            bordo3_json = json.load(file)
            ROWCOUNT = bordo3_json['ROWCOUNT']
            COLCOUNT = bordo3_json['COLCOUNT']
            ROW = np.asarray(bordo3_json['ROW'], dtype=np.int32)
            COL = np.asarray(bordo3_json['COL'], dtype=np.int32)
            DATA = np.asarray(bordo3_json['DATA'], dtype=np.int8)
            bordo3 = csr_matrix((DATA,COL,ROW),shape=(ROWCOUNT,COLCOUNT));
    imageHeight,imageWidth = getImageData(INPUT_DIR+str(BEST_IMAGE)+PNG_EXTENSION)
    imageDepth = countFilesInADir(INPUT_DIR)
    # NOTE(review): Ny divides imageWidth by imageDx, not imageDy — this
    # looks like a typo; confirm before relying on Ny.  (Python 2 integer
    # division throughout.)
    Nx,Ny,Nz = imageHeight/imageDx, imageWidth/imageDx, imageDepth/imageDz
    try:
        pixelCalc, centroidsCalc = centroidcalc(INPUT_DIR, BEST_IMAGE, colors)
        computeChains(imageHeight,imageWidth,imageDepth, imageDx,imageDy,imageDz, Nx,Ny,Nz, calculateout,bordo3, colors,pixelCalc,centroidsCalc, INPUT_DIR,DIR_O)
    except:
        # Log the full traceback at level 1, then exit with an error code.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log(1, [ "Error: " + ''.join('!! ' + line for line in lines) ]) # Log it or whatever here
        sys.exit(2)
def main(argv):
    # Parse command-line options, build the cuboidal cell complex (V, FV)
    # for one block of size nx*ny*nz, and launch the computation.
    ARGS_STRING = 'Args: -r -b <borderfile> -x <borderX> -y <borderY> -z <borderZ> -i <inputdirectory> -c <colors> -o <outputdir> -q <bestimage>'
    try:
        opts, args = getopt.getopt(argv,"rb:x:y:z:i:c:o:q:")
    except getopt.GetoptError:
        print ARGS_STRING
        sys.exit(2)
    # Defaults: cubic 64-voxel blocks, 2 quantization colors.
    nx = ny = nz = imageDx = imageDy = imageDz = 64
    colors = 2
    # ``mandatory`` counts down the required options (-x, -i, -b, -o, -c).
    mandatory = 5
    calculateout = False
    #Files
    BORDER_FILE = 'bordo3.json'
    BEST_IMAGE = ''
    DIR_IN = ''
    DIR_O = ''
    for opt, arg in opts:
        if opt == '-x':
            # -x sets all three dimensions; -y/-z can then narrow y and z.
            nx = ny = nz = imageDx = imageDy = imageDz = int(arg)
            mandatory = mandatory - 1
        elif opt == '-y':
            ny = nz = imageDy = imageDz = int(arg)
        elif opt == '-z':
            nz = imageDz = int(arg)
        elif opt == '-r':
            calculateout = True
        elif opt == '-i':
            DIR_IN = arg + '/'
            mandatory = mandatory - 1
        elif opt == '-b':
            BORDER_FILE = arg
            mandatory = mandatory - 1
        elif opt == '-o':
            mandatory = mandatory - 1
            DIR_O = arg
        elif opt == '-c':
            mandatory = mandatory - 1
            colors = int(arg)
        elif opt == '-q':
            BEST_IMAGE = int(arg)
    if mandatory != 0:
        print 'Not all arguments where given'
        print ARGS_STRING
        sys.exit(2)
    # Linearize grid vertex coordinates on the (nx+1)*(ny+1)*(nz+1) lattice.
    def ind(x,y,z): return x + (nx+1) * (y + (ny+1) * (z))
    def invertIndex(nx,ny,nz):
        # Inverse of ind(): recover (x, y, z) from a linear vertex offset.
        # (Python 2 integer division.)
        nx,ny,nz = nx+1,ny+1,nz+1
        def invertIndex0(offset):
            a0, b0 = offset / nx, offset % nx
            a1, b1 = a0 / ny, a0 % ny
            a2, b2 = a1 / nz, a1 % nz
            return b0,b1,b2
        return invertIndex0
    chunksize = nx * ny + nx * nz + ny * nz + 3 * nx * ny * nz
    # Vertex list of the block lattice, z-major.
    V = [[x,y,z] for z in range(nz+1) for y in range(ny+1) for x in range(nx+1) ]
    v2coords = invertIndex(nx,ny,nz)
    # FV: the three axis-aligned unit faces incident to each lattice vertex.
    FV = []
    for h in range(len(V)):
        x,y,z = v2coords(h)
        if (x < nx) and (y < ny): FV.append([h,ind(x+1,y,z),ind(x,y+1,z),ind(x+1,y+1,z)])
        if (x < nx) and (z < nz): FV.append([h,ind(x+1,y,z),ind(x,y,z+1),ind(x+1,y,z+1)])
        if (y < ny) and (z < nz): FV.append([h,ind(x,y+1,z),ind(x,y,z+1),ind(x,y+1,z+1)])
    runComputation(imageDx, imageDy, imageDz, colors, calculateout, V, FV, DIR_IN, BEST_IMAGE, BORDER_FILE, DIR_O)
if __name__ == "__main__":
    main(sys.argv[1:])
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/dask/dataframe/io/demo.py | 4 | 8227 | from __future__ import absolute_import, division, print_function
import pandas as pd
import numpy as np
from ..core import tokenize, DataFrame
from .io import from_delayed
from ...delayed import delayed
from ...utils import random_state_data
__all__ = ['make_timeseries']
def make_float(n, rstate):
    """Draw ``n`` floats uniformly from [-1, 1) using ``rstate``."""
    return 2.0 * rstate.rand(n) - 1.0
def make_int(n, rstate):
    """Draw ``n`` Poisson(lam=1000) integers using ``rstate``."""
    return rstate.poisson(lam=1000, size=n)
# Name pool sampled by make_string / make_categorical for the random
# ``str``/``object``/'category' columns.
names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank', 'George',
         'Hannah', 'Ingrid', 'Jerry', 'Kevin', 'Laura', 'Michael', 'Norbert',
         'Oliver', 'Patricia', 'Quinn', 'Ray', 'Sarah', 'Tim', 'Ursula',
         'Victor', 'Wendy', 'Xavier', 'Yvonne', 'Zelda']
def make_string(n, rstate):
    """Draw ``n`` names, with replacement, from the module-level ``names``
    pool using ``rstate``."""
    return rstate.choice(names, n)
def make_categorical(n, rstate):
    """Draw ``n`` categorical values over the module-level ``names`` pool
    using ``rstate`` (codes drawn uniformly over the pool)."""
    codes = rstate.randint(0, len(names), size=n)
    return pd.Categorical.from_codes(codes, names)
# Dispatch table: requested column dtype -> generator function.  ``str``
# and ``object`` both map to random names; 'category' yields pandas
# Categoricals over the same name pool.
make = {float: make_float,
        int: make_int,
        str: make_string,
        object: make_string,
        'category': make_categorical}
def make_timeseries_part(start, end, dtypes, freq, state_data):
index = pd.DatetimeIndex(start=start, end=end, freq=freq, name='timestamp')
state = np.random.RandomState(state_data)
columns = dict((k, make[dt](len(index), state)) for k, dt in dtypes.items())
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def make_timeseries(start='2000-01-01',
                    end='2000-12-31',
                    dtypes={'name': str, 'id': int, 'x': float, 'y': float},
                    freq='10s',
                    partition_freq='1M',
                    seed=None):
    """ Create timeseries dataframe with random data

    Parameters
    ----------
    start: datetime (or datetime-like string)
        Start of time series
    end: datetime (or datetime-like string)
        End of time series
    dtypes: dict
        Mapping of column names to types.
        Valid types include {float, int, str, 'category'}
    freq: string
        String like '2s' or '1H' or '12W' for the time series frequency
    partition_freq: string
        String like '1M' or '2Y' to divide the dataframe into partitions
    seed: int (optional)
        Randomstate seed

    >>> import dask.dataframe as dd
    >>> df = dd.demo.make_timeseries('2000', '2010',
    ...                              {'value': float, 'name': str, 'id': int},
    ...                              freq='2H', partition_freq='1D', seed=1)
    >>> df.head()  # doctest: +SKIP
                           id      name     value
    2000-01-01 00:00:00   969     Jerry -0.309014
    2000-01-01 02:00:00  1010       Ray -0.760675
    2000-01-01 04:00:00  1016  Patricia -0.063261
    2000-01-01 06:00:00   960   Charlie  0.788245
    2000-01-01 08:00:00  1031     Kevin  0.466002
    """
    # pd.date_range replaces the DatetimeIndex(start=..., end=...) constructor,
    # which was deprecated and removed in pandas 1.0.
    divisions = list(pd.date_range(start=start, end=end, freq=partition_freq))
    state_data = random_state_data(len(divisions) - 1, seed)
    name = 'make-timeseries-' + tokenize(start, end, dtypes, freq,
                                         partition_freq, state_data)
    # One task per adjacent pair of division timestamps, each with its own
    # independent random state so results are reproducible per-partition.
    dsk = {(name, i): (make_timeseries_part, divisions[i], divisions[i + 1],
                       dtypes, freq, state_data[i])
           for i in range(len(divisions) - 1)}
    # Empty sample frame ("meta"): supplies column names/dtypes only.
    head = make_timeseries_part('2000', '2000', dtypes, '1H', state_data[0])
    return DataFrame(dsk, name, head, divisions)
def generate_day(date, open, high, low, close, volume,
                 freq=pd.Timedelta(seconds=60), random_state=None):
    """Synthesize one trading day (9:00-16:00) of OHLC bars.

    A centered random walk is rescaled so that it starts at ``open``, ends at
    ``close``, and spans exactly ``low``..``high``; it is then resampled into
    bars of width ``freq``.  ``volume`` is currently unused (see TODO).
    """
    # Normalize loosely-typed arguments.
    if not isinstance(random_state, np.random.RandomState):
        random_state = np.random.RandomState(random_state)
    if not isinstance(date, pd.Timestamp):
        date = pd.Timestamp(date)
    if not isinstance(freq, pd.Timedelta):
        freq = pd.Timedelta(freq)

    # Five raw ticks per output bar, spanning the trading session.
    ticks = pd.date_range(date + pd.Timedelta(hours=9),
                          date + pd.Timedelta(hours=12 + 4),
                          freq=freq / 5, name='timestamp')
    npts = len(ticks)
    while True:
        # Random walk rescaled to the requested high-low span.
        walk = (random_state.random_sample(npts) - 0.5).cumsum()
        walk *= (high - low) / (walk.max() - walk.min())
        # Linear tilt so the endpoints land on open/close exactly.
        walk += np.linspace(open - walk[0], close - walk[-1], len(walk))
        assert np.allclose(open, walk[0])
        assert np.allclose(close, walk[-1])

        upper = max(close, open)
        lower = min(close, open)
        # Compress overshoots so the extremes become exactly high/low.
        mask = walk > upper
        walk[mask] = (walk[mask] - upper) * (high - upper) / (walk.max() - upper) + upper
        mask = walk < lower
        walk[mask] = (walk[mask] - lower) * (low - lower) / (walk.min() - lower) + lower

        # The process fails if min/max coincide with open/close.  This is
        # rare; just draw a fresh walk when it happens.
        if (np.allclose(walk.max(), high) and np.allclose(walk.min(), low)):
            break

    prices = pd.Series(walk.round(3), index=ticks)
    bars = prices.resample(freq)
    # TODO: add in volume
    return pd.DataFrame({'open': bars.first(),
                         'close': bars.last(),
                         'high': bars.max(),
                         'low': bars.min()})
def daily_stock(symbol, start, stop, freq=pd.Timedelta(seconds=1),
                data_source='yahoo', random_state=None):
    """Create artificial intraday stock data.

    Daily open/high/low/close rows are fetched through ``pandas_datareader``
    and each trading day is filled in with a random walk sampled every
    ``freq`` (see ``generate_day``).  The result is a lazy dask DataFrame
    with one partition per trading day, useful for education and
    benchmarking without downloading large volumes of data.

    Parameters
    ----------
    symbol: string
        A stock symbol like "GOOG" or "F"
    start, stop: date, str, or pd.Timestamp
        Bounds of the date range; normalized through ``pd.Timestamp``.
    freq: timedelta, str, or pd.Timedelta
        The intraday sampling frequency
    data_source: str, optional
        Passed to ``pandas_datareader.data.DataReader``; defaults to 'yahoo'.
    random_state: int, np.random.RandomState object
        random seed, defaults to randomly chosen

    Examples
    --------
    >>> import dask.dataframe as dd  # doctest: +SKIP
    >>> df = dd.demo.daily_stock('GOOG', '2010', '2011', freq='1s')  # doctest: +SKIP
    """
    from pandas_datareader import data
    quotes = data.DataReader(symbol, data_source, start, stop)
    seeds = random_state_data(len(quotes), random_state=random_state)
    parts = []
    divisions = []
    for i, seed in enumerate(seeds):
        row = quotes.iloc[i]
        if row.isnull().any():
            continue  # skip incomplete / non-trading rows
        part = delayed(generate_day)(row.name, row.loc['Open'],
                                     row.loc['High'], row.loc['Low'],
                                     row.loc['Close'], row.loc['Volume'],
                                     freq=freq, random_state=seed)
        parts.append(part)
        # Each partition starts at that day's 9:00 session open.
        divisions.append(row.name + pd.Timedelta(hours=9))

    # Close the final partition at the last day's 16:00 session end.
    divisions.append(row.name + pd.Timedelta(hours=12 + 4))
    # Empty-ish sample frame giving column names/dtypes for the collection.
    meta = generate_day('2000-01-01', 1, 2, 0, 1, 100)
    return from_delayed(parts, meta=meta, divisions=divisions)
| gpl-3.0 |
Jiangshangmin/mpld3 | examples/heart_path.py | 19 | 3958 | """
Patches and Paths
=================
This is a demo adapted from a `matplotlib gallery example
<http://matplotlib.org/examples/shapes_and_collections/path_patch_demo.html>`_
This example adds a custom D3 plugin allowing the user to drag the path
control-points and see the effect on the path.
Use the toolbar buttons at the bottom-right of the plot to enable zooming
and panning, and to reset the view.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import mpld3
from mpld3 import plugins, utils
class LinkedDragPlugin(plugins.PluginBase):
    """mpld3 plugin that makes path control points draggable.

    Dragging a control point updates both the connecting line and the patch
    outline in the browser; all of that logic runs client-side in the
    JAVASCRIPT payload below.
    """

    # Client-side D3 code; registered under the plugin name "drag".
    JAVASCRIPT = r"""
    mpld3.register_plugin("drag", DragPlugin);
    DragPlugin.prototype = Object.create(mpld3.Plugin.prototype);
    DragPlugin.prototype.constructor = DragPlugin;
    DragPlugin.prototype.requiredProps = ["idpts", "idline", "idpatch"];
    DragPlugin.prototype.defaultProps = {}
    function DragPlugin(fig, props){
        mpld3.Plugin.call(this, fig, props);
    };

    DragPlugin.prototype.draw = function(){
        var patchobj = mpld3.get_element(this.props.idpatch, this.fig);
        var ptsobj = mpld3.get_element(this.props.idpts, this.fig);
        var lineobj = mpld3.get_element(this.props.idline, this.fig);

        var drag = d3.behavior.drag()
            .origin(function(d) { return {x:ptsobj.ax.x(d[0]),
                                          y:ptsobj.ax.y(d[1])}; })
            .on("dragstart", dragstarted)
            .on("drag", dragged)
            .on("dragend", dragended);

        lineobj.path.attr("d", lineobj.datafunc(ptsobj.offsets));
        patchobj.path.attr("d", patchobj.datafunc(ptsobj.offsets,
                                                  patchobj.pathcodes));
        lineobj.data = ptsobj.offsets;
        patchobj.data = ptsobj.offsets;

        ptsobj.elements()
            .data(ptsobj.offsets)
            .style("cursor", "default")
            .call(drag);

        function dragstarted(d) {
            d3.event.sourceEvent.stopPropagation();
            d3.select(this).classed("dragging", true);
        }

        function dragged(d, i) {
            d[0] = ptsobj.ax.x.invert(d3.event.x);
            d[1] = ptsobj.ax.y.invert(d3.event.y);
            d3.select(this)
                .attr("transform", "translate(" + [d3.event.x,d3.event.y] + ")");
            lineobj.path.attr("d", lineobj.datafunc(ptsobj.offsets));
            patchobj.path.attr("d", patchobj.datafunc(ptsobj.offsets,
                                                      patchobj.pathcodes));
        }

        function dragended(d, i) {
            d3.select(this).classed("dragging", false);
        }
    }

    mpld3.register_plugin("drag", DragPlugin);
    """

    def __init__(self, points, line, patch):
        # Line2D point collections get a "pts" id suffix; other artists are
        # looked up by their bare id.  NOTE(review): this mirrors how
        # ``utils.get_id`` names mpld3 elements — verify against mpld3 docs.
        if isinstance(points, mpl.lines.Line2D):
            suffix = "pts"
        else:
            suffix = None

        # Serialized to the browser; keys must match requiredProps above.
        self.dict_ = {"type": "drag",
                      "idpts": utils.get_id(points, suffix),
                      "idline": utils.get_id(line),
                      "idpatch": utils.get_id(patch)}
# --- Demo script: heart-shaped Bezier path with draggable control points ---
fig, ax = plt.subplots()
Path = mpath.Path

# (path command, (x, y)) pairs tracing the heart outline.
path_data = [
    (Path.MOVETO, (1.58, -2.57)),
    (Path.CURVE4, (0.35, -1.1)),
    (Path.CURVE4, (-1.75, 2.0)),
    (Path.CURVE4, (0.375, 2.0)),
    (Path.LINETO, (0.85, 1.15)),
    (Path.CURVE4, (2.2, 3.2)),
    (Path.CURVE4, (3, 0.05)),
    (Path.CURVE4, (2.0, -0.5)),
    (Path.CLOSEPOLY, (1.58, -2.57)),
]
codes, verts = zip(*path_data)
path = mpath.Path(verts, codes)
patch = mpatches.PathPatch(path, facecolor='r', alpha=0.5)
ax.add_patch(patch)

# plot control points and connecting lines (the CLOSEPOLY vertex is dropped:
# it duplicates the MOVETO start point)
x, y = zip(*path.vertices[:-1])
points = ax.plot(x, y, 'go', ms=10)
line = ax.plot(x, y, '-k')

ax.grid(True, color='gray', alpha=0.5)
ax.axis('equal')
ax.set_title("Drag Points to Change Path", fontsize=18)

# Wire the first Line2D from each plot call plus the patch into the
# client-side drag plugin, then serve the interactive figure.
plugins.connect(fig, LinkedDragPlugin(points[0], line[0], patch))

mpld3.show()
| bsd-3-clause |
thientu/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
# Hint appended when the package is imported from an un-built source checkout.
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""

# Generic hint appended to the build-error message for installed copies.
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
    """Re-raise *e* as a comprehensible ImportError about a broken build.

    The message embeds a listing of this package's directory so users can
    paste it into a bug report / mailing-list post for debugging.
    """
    local_dir = os.path.split(__file__)[0]
    msg = STANDARD_MSG
    if local_dir == "sklearn/__check_build":
        # Picking up the local install: this will work only if the
        # install is an 'inplace build'
        msg = INPLACE_MSG
    # Lay the directory listing out three entries per line.
    listing = []
    for position, entry in enumerate(os.listdir(local_dir), start=1):
        if position % 3:
            listing.append(entry.ljust(26))
        else:
            listing.append(entry + '\n')
    raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(listing).strip(), msg))
# Import the compiled sanity-check extension; if it is missing the package
# was never built, so surface a detailed, actionable ImportError instead of
# the bare one.
try:
    from ._check_build import check_build
except ImportError as e:
    raise_build_error(e)
| bsd-3-clause |
MartinSavc/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 83 | 34544 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
# Smallest double-precision increment; scaled to form the default ``nugget``
# regularization in GaussianProcess.__init__.
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
    """
    Computes the nonzero componentwise L1 cross-distances between the vectors
    in X.

    Parameters
    ----------
    X: array_like
        An array with shape (n_samples, n_features)

    Returns
    -------
    D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
        The array of componentwise L1 cross-distances.

    ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
        The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
    """
    X = check_array(X)
    n_samples, n_features = X.shape
    # Number of unordered sample pairs (upper triangle of the distance matrix).
    n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
    # ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # ``int`` maps to the same default integer dtype.
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)
    D = np.zeros((n_nonzero_cross_dist, n_features))
    ll_1 = 0
    for k in range(n_samples - 1):
        # Rows [ll_0, ll_1) hold all pairs (k, k+1..n_samples-1).
        ll_0 = ll_1
        ll_1 = ll_0 + n_samples - k - 1
        ij[ll_0:ll_1, 0] = k
        ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
        D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])

    return D, ij
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
    def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
                 storage_mode='full', verbose=False, theta0=1e-1,
                 thetaL=None, thetaU=None, optimizer='fmin_cobyla',
                 random_start=1, normalize=True,
                 nugget=10. * MACHINE_EPSILON, random_state=None):
        # Per scikit-learn convention, __init__ only stores the parameters
        # verbatim; validation/coercion happens in _check_params, which is
        # invoked from fit() and predict().
        self.regr = regr
        self.corr = corr
        self.beta0 = beta0
        self.storage_mode = storage_mode
        self.verbose = verbose
        self.theta0 = theta0
        self.thetaL = thetaL
        self.thetaU = thetaU
        self.normalize = normalize
        self.nugget = nugget
        self.optimizer = optimizer
        self.random_start = random_start
        self.random_state = random_state
    def fit(self, X, y):
        """
        The Gaussian Process model fitting method.

        Parameters
        ----------
        X : double array_like
            An array with shape (n_samples, n_features) with the input at which
            observations were made.

        y : double array_like
            An array with shape (n_samples, ) or shape (n_samples, n_targets)
            with the observations of the output to be predicted.

        Returns
        -------
        gp : self
            A fitted Gaussian Process model object awaiting data to perform
            predictions.
        """
        # Run input checks
        self._check_params()

        self.random_state = check_random_state(self.random_state)

        # Force data to 2D numpy.array
        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
        self.y_ndim_ = y.ndim  # remembered so predict() can restore 1-D shape
        if y.ndim == 1:
            y = y[:, np.newaxis]

        # Check shapes of DOE & observations
        n_samples, n_features = X.shape
        _, n_targets = y.shape

        # Run input checks again, now that n_samples is known
        self._check_params(n_samples)

        # Normalize data or don't
        if self.normalize:
            X_mean = np.mean(X, axis=0)
            X_std = np.std(X, axis=0)
            y_mean = np.mean(y, axis=0)
            y_std = np.std(y, axis=0)
            # Zero-variance columns get std 1 to avoid division by zero.
            X_std[X_std == 0.] = 1.
            y_std[y_std == 0.] = 1.
            # center and scale X if necessary
            X = (X - X_mean) / X_std
            y = (y - y_mean) / y_std
        else:
            # Identity normalization (broadcasts against any shape).
            X_mean = np.zeros(1)
            X_std = np.ones(1)
            y_mean = np.zeros(1)
            y_std = np.ones(1)

        # Calculate matrix of distances D between samples
        D, ij = l1_cross_distances(X)
        if (np.min(np.sum(D, axis=1)) == 0.
                and self.corr != correlation.pure_nugget):
            raise Exception("Multiple input features cannot have the same"
                            " target value.")

        # Regression matrix and parameters
        F = self.regr(X)
        n_samples_F = F.shape[0]
        if F.ndim > 1:
            p = F.shape[1]
        else:
            p = 1
        if n_samples_F != n_samples:
            raise Exception("Number of rows in F and X do not match. Most "
                            "likely something is going wrong with the "
                            "regression model.")
        if p > n_samples_F:
            raise Exception(("Ordinary least squares problem is undetermined "
                             "n_samples=%d must be greater than the "
                             "regression model size p=%d.") % (n_samples, p))
        if self.beta0 is not None:
            if self.beta0.shape[0] != p:
                raise Exception("Shapes of beta0 and F do not match.")

        # Set attributes used later by predict()/reduced_likelihood_function()
        self.X = X
        self.y = y
        self.D = D
        self.ij = ij
        self.F = F
        self.X_mean, self.X_std = X_mean, X_std
        self.y_mean, self.y_std = y_mean, y_std

        # Determine Gaussian Process model parameters
        if self.thetaL is not None and self.thetaU is not None:
            # Maximum Likelihood Estimation of the parameters
            if self.verbose:
                print("Performing Maximum Likelihood Estimation of the "
                      "autocorrelation parameters...")
            self.theta_, self.reduced_likelihood_function_value_, par = \
                self._arg_max_reduced_likelihood_function()
            if np.isinf(self.reduced_likelihood_function_value_):
                raise Exception("Bad parameter region. "
                                "Try increasing upper bound")

        else:
            # Given parameters (no bounds supplied, so no MLE)
            if self.verbose:
                print("Given autocorrelation parameters. "
                      "Computing Gaussian Process model parameters...")
            self.theta_ = self.theta0
            self.reduced_likelihood_function_value_, par = \
                self.reduced_likelihood_function()
            if np.isinf(self.reduced_likelihood_function_value_):
                raise Exception("Bad point. Try increasing theta0.")

        self.beta = par['beta']
        self.gamma = par['gamma']
        self.sigma2 = par['sigma2']
        self.C = par['C']
        self.Ft = par['Ft']
        self.G = par['G']

        if self.storage_mode == 'light':
            # Delete heavy data (it will be computed again if required)
            # (it is required only when MSE is wanted in self.predict)
            if self.verbose:
                print("Light storage mode specified. "
                      "Flushing autocorrelation matrix...")
            self.D = None
            self.ij = None
            self.F = None
            self.C = None
            self.Ft = None
            self.G = None

        return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
    def _arg_max_reduced_likelihood_function(self):
        """
        This function estimates the autocorrelation parameters theta as the
        maximizer of the reduced likelihood function.
        (Minimization of the opposite reduced likelihood function is used for
        convenience.)

        Parameters
        ----------
        self : All parameters are stored in the Gaussian Process model object.

        Returns
        -------
        optimal_theta : array_like
            The best set of autocorrelation parameters (the sought maximizer of
            the reduced likelihood function).

        optimal_reduced_likelihood_function_value : double
            The optimal reduced likelihood function value.

        optimal_par : dict
            The BLUP parameters associated to thetaOpt.
        """
        # Initialize output
        best_optimal_theta = []
        best_optimal_rlf_value = []
        best_optimal_par = []

        if self.verbose:
            print("The chosen optimizer is: " + str(self.optimizer))
            if self.random_start > 1:
                print(str(self.random_start) + " random starts are required.")

        percent_completed = 0.

        # Force optimizer to fmin_cobyla if the model is meant to be isotropic
        if self.optimizer == 'Welch' and self.theta0.size == 1:
            self.optimizer = 'fmin_cobyla'

        if self.optimizer == 'fmin_cobyla':

            # Objective: COBYLA minimizes, so negate; optimize in log10 space.
            def minus_reduced_likelihood_function(log10t):
                return - self.reduced_likelihood_function(
                    theta=10. ** log10t)[0]

            # Box constraints thetaL <= theta <= thetaU, expressed as
            # non-negative functions in log10 space (i=i binds each index at
            # definition time, avoiding the late-binding closure pitfall).
            constraints = []
            for i in range(self.theta0.size):
                constraints.append(lambda log10t, i=i:
                                   log10t[i] - np.log10(self.thetaL[0, i]))
                constraints.append(lambda log10t, i=i:
                                   np.log10(self.thetaU[0, i]) - log10t[i])

            for k in range(self.random_start):

                if k == 0:
                    # Use specified starting point as first guess
                    theta0 = self.theta0
                else:
                    # Generate a random starting point log10-uniformly
                    # distributed between bounds
                    log10theta0 = (np.log10(self.thetaL)
                                   + self.random_state.rand(*self.theta0.shape)
                                   * np.log10(self.thetaU / self.thetaL))
                    theta0 = 10. ** log10theta0

                # Run Cobyla
                try:
                    log10_optimal_theta = \
                        optimize.fmin_cobyla(minus_reduced_likelihood_function,
                                             np.log10(theta0).ravel(), constraints,
                                             iprint=0)
                except ValueError as ve:
                    print("Optimization failed. Try increasing the ``nugget``")
                    raise ve

                optimal_theta = 10. ** log10_optimal_theta
                optimal_rlf_value, optimal_par = \
                    self.reduced_likelihood_function(theta=optimal_theta)

                # Compare the new optimizer to the best previous one
                if k > 0:
                    if optimal_rlf_value > best_optimal_rlf_value:
                        best_optimal_rlf_value = optimal_rlf_value
                        best_optimal_par = optimal_par
                        best_optimal_theta = optimal_theta
                else:
                    best_optimal_rlf_value = optimal_rlf_value
                    best_optimal_par = optimal_par
                    best_optimal_theta = optimal_theta
                if self.verbose and self.random_start > 1:
                    if (20 * k) / self.random_start > percent_completed:
                        percent_completed = (20 * k) / self.random_start
                        print("%s completed" % (5 * percent_completed))

            optimal_rlf_value = best_optimal_rlf_value
            optimal_par = best_optimal_par
            optimal_theta = best_optimal_theta

        elif self.optimizer == 'Welch':

            # Backup of the given attributes (temporarily overwritten below)
            theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
            corr = self.corr
            verbose = self.verbose

            # This will iterate over fmin_cobyla optimizer
            self.optimizer = 'fmin_cobyla'
            self.verbose = False

            # Initialize under isotropy assumption
            if verbose:
                print("Initialize under isotropy assumption...")
            self.theta0 = check_array(self.theta0.min())
            self.thetaL = check_array(self.thetaL.min())
            self.thetaU = check_array(self.thetaU.max())
            theta_iso, optimal_rlf_value_iso, par_iso = \
                self._arg_max_reduced_likelihood_function()
            optimal_theta = theta_iso + np.zeros(theta0.shape)

            # Iterate over all dimensions of theta allowing for anisotropy
            if verbose:
                print("Now improving allowing for anisotropy...")
            for i in self.random_state.permutation(theta0.size):
                if verbose:
                    print("Proceeding along dimension %d..." % (i + 1))
                self.theta0 = check_array(theta_iso)
                self.thetaL = check_array(thetaL[0, i])
                self.thetaU = check_array(thetaU[0, i])

                # 1-D slice of the correlation model: only component i of
                # theta varies; the others stay at their current optimum.
                def corr_cut(t, d):
                    return corr(check_array(np.hstack([optimal_theta[0][0:i],
                                                       t[0],
                                                       optimal_theta[0][(i +
                                                                         1)::]])),
                                d)

                self.corr = corr_cut
                optimal_theta[0, i], optimal_rlf_value, optimal_par = \
                    self._arg_max_reduced_likelihood_function()

            # Restore the given attributes
            self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
            self.corr = corr
            self.optimizer = 'Welch'
            self.verbose = verbose

        else:

            raise NotImplementedError("This optimizer ('%s') is not "
                                      "implemented yet. Please contribute!"
                                      % self.optimizer)

        return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
    """Validate and canonicalize the estimator's parameters in place.

    Resolves string names for ``regr``/``corr`` to callables, coerces
    array-like parameters via ``check_array``, and raises ``ValueError``
    for inconsistent settings.

    Parameters
    ----------
    n_samples : int, optional
        Number of training samples; when given, the nugget must be a
        scalar or an array of this length.
    """
    # Check regression model
    if not callable(self.regr):
        if self.regr in self._regression_types:
            self.regr = self._regression_types[self.regr]
        else:
            raise ValueError("regr should be one of %s or callable, "
                             "%s was given."
                             % (self._regression_types.keys(), self.regr))

    # Check regression weights if given (Ordinary Kriging)
    if self.beta0 is not None:
        self.beta0 = check_array(self.beta0)
        if self.beta0.shape[1] != 1:
            # Force to column vector
            self.beta0 = self.beta0.T

    # Check correlation model
    if not callable(self.corr):
        if self.corr in self._correlation_types:
            self.corr = self._correlation_types[self.corr]
        else:
            raise ValueError("corr should be one of %s or callable, "
                             "%s was given."
                             % (self._correlation_types.keys(), self.corr))

    # Check storage mode
    if self.storage_mode != 'full' and self.storage_mode != 'light':
        raise ValueError("Storage mode should either be 'full' or "
                         "'light', %s was given." % self.storage_mode)

    # Check correlation parameters
    self.theta0 = check_array(self.theta0)
    lth = self.theta0.size

    if self.thetaL is not None and self.thetaU is not None:
        self.thetaL = check_array(self.thetaL)
        self.thetaU = check_array(self.thetaU)
        if self.thetaL.size != lth or self.thetaU.size != lth:
            raise ValueError("theta0, thetaL and thetaU must have the "
                             "same length.")
        if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
            # BUG FIX (message only): the bound is the digit zero, not
            # the letter O.
            raise ValueError("The bounds must satisfy 0 < thetaL <= "
                             "thetaU.")

    elif self.thetaL is None and self.thetaU is None:
        if np.any(self.theta0 <= 0):
            raise ValueError("theta0 must be strictly positive.")

    elif self.thetaL is None or self.thetaU is None:
        raise ValueError("thetaL and thetaU should either be both or "
                         "neither specified.")

    # Force verbose type to bool
    self.verbose = bool(self.verbose)

    # Force normalize type to bool
    self.normalize = bool(self.normalize)

    # Check nugget value
    self.nugget = np.asarray(self.nugget)
    # BUG FIX: the original tested ``np.any(self.nugget) < 0.``, which
    # compares a boolean to 0 and can never be True, so negative nuggets
    # slipped through.  The comparison must be elementwise inside any().
    if np.any(self.nugget < 0.):
        raise ValueError("nugget must be positive or zero.")
    if (n_samples is not None
            and self.nugget.shape not in [(), (n_samples,)]):
        raise ValueError("nugget must be either a scalar "
                         "or array of length n_samples.")

    # Check optimizer
    if self.optimizer not in self._optimizer_types:
        raise ValueError("optimizer should be one of %s"
                         % self._optimizer_types)

    # Force random_start type to int
    self.random_start = int(self.random_start)
| bsd-3-clause |
waterponey/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 13 | 5539 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
    # Check binomial deviance loss.
    # Check against alternative definitions in ESLII.
    bd = BinomialDeviance(2)

    # pred has the same BD for y in {0, 1}
    assert_equal(bd(np.array([0.0]), np.array([0.0])),
                 bd(np.array([1.0]), np.array([0.0])))

    assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
                           np.array([100.0, 100.0, 100.0])),
                        0.0)
    assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
                           np.array([100.0, -100.0, -100.0])), 0)

    # check if same results as alternative definition of deviance (from ESLII)
    # IDIOM FIX: named functions instead of lambda assignments (PEP 8 / E731).
    def alt_dev(y, pred):
        # ESLII-style mean binomial deviance with y recoded to {-1, 1}
        return np.mean(np.logaddexp(0.0, -2.0 * (2.0 * y - 1) * pred))

    test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
                 (np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
                 (np.array([0.0, 0.0, 0.0]),
                  np.array([-100.0, -100.0, -100.0])),
                 (np.array([1.0, 1.0, 1.0]),
                  np.array([-100.0, -100.0, -100.0]))]

    for datum in test_data:
        assert_almost_equal(bd(*datum), alt_dev(*datum))

    # check the negative gradient against the alternative formula
    def alt_ng(y, pred):
        return (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))

    for datum in test_data:
        assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
    # Smoke test for the log-odds prior estimator.
    est = LogOddsEstimator()

    # A single-class target cannot be fit.
    assert_raises(ValueError, est.fit, None, np.array([1]))

    # Perfectly balanced classes give a prior log odds of zero...
    est.fit(None, np.array([1.0, 0.0]))
    assert_equal(est.prior, 0.0)

    # ...and therefore a constant zero prediction.
    expected = np.array([[0.0], [0.0]])
    assert_array_equal(est.predict(np.array([[1.0], [1.0]])), expected)
def test_sample_weight_smoke():
    # Uniform unit weights must reproduce the unweighted loss value.
    rng = check_random_state(13)
    y_true = rng.rand(100)
    y_pred = rng.rand(100)

    # least squares
    loss = LeastSquaresError(1)
    unweighted = loss(y_true, y_pred)
    uniform_weights = np.ones(y_pred.shape[0], dtype=np.float32)
    weighted = loss(y_true, y_pred, uniform_weights)
    assert_almost_equal(unweighted, weighted)
def test_sample_weight_init_estimators():
    # Smoke test for init estimators with sample weights.
    rng = check_random_state(13)
    X = rng.rand(100, 2)
    sample_weight = np.ones(100)
    reg_y = rng.rand(100)

    clf_y = rng.randint(0, 2, size=100)

    for Loss in LOSS_FUNCTIONS.values():
        if Loss is None:
            continue
        if issubclass(Loss, RegressionLossFunction):
            k = 1
            y = reg_y
        else:
            k = 2
            y = clf_y
        if Loss.is_multi_class:
            # skip multiclass
            continue
        loss = Loss(k)
        init_est = loss.init_estimator()
        init_est.fit(X, y)
        out = init_est.predict(X)
        assert_equal(out.shape, (y.shape[0], 1))

        sw_init_est = loss.init_estimator()
        sw_init_est.fit(X, y, sample_weight=sample_weight)
        # BUG FIX: predict with the *weighted* estimator.  The original
        # called init_est.predict(X) again, so the comparison below only
        # ever compared the unweighted estimator with itself.
        sw_out = sw_init_est.predict(X)
        assert_equal(sw_out.shape, (y.shape[0], 1))

        # check if predictions match (uniform weights == no weights)
        assert_array_equal(out, sw_out)
def test_weighted_percentile():
    # 50 zeros, a single 1 in the middle, then 2s, and a huge outlier
    # at the very end that carries zero weight.
    y = np.zeros(102, dtype=np.float64)
    y[-51:] = 2
    y[50] = 1
    y[-1] = 100000
    sw = np.ones(102, dtype=np.float64)
    sw[-1] = 0.0
    # The weighted median must ignore the zero-weight outlier.
    assert _weighted_percentile(y, sw, 50) == 1
def test_weighted_percentile_equal():
    # When every target value is identical the percentile is that value.
    y = np.zeros(102, dtype=np.float64)
    sw = np.ones(102, dtype=np.float64)
    sw[-1] = 0.0
    assert _weighted_percentile(y, sw, 50) == 0
def test_weighted_percentile_zero_weight():
    # Degenerate case: all sample weights are zero.
    y = np.full(102, 1.0, dtype=np.float64)
    sw = np.zeros(102, dtype=np.float64)
    assert _weighted_percentile(y, sw, 50) == 1.0
def test_sample_weight_deviance():
    # Every deviance loss must accept sample weights, and uniform unit
    # weights must reproduce the unweighted value exactly.
    rng = check_random_state(13)
    X = rng.rand(100, 2)  # unused, kept to preserve the RNG draw order
    sample_weight = np.ones(100)
    reg_y = rng.rand(100)
    clf_y = rng.randint(0, 2, size=100)
    mclf_y = rng.randint(0, 3, size=100)

    for Loss in LOSS_FUNCTIONS.values():
        if Loss is None:
            continue
        if issubclass(Loss, RegressionLossFunction):
            k, y, p = 1, reg_y, reg_y
        else:
            k, y, p = 2, clf_y, clf_y
            if Loss.is_multi_class:
                k, y = 3, mclf_y
                # one-hot encode the multiclass predictions
                p = np.zeros((y.shape[0], k), dtype=np.float64)
                for label in range(k):
                    p[:, label] = (y == label)
        loss = Loss(k)
        weighted = loss(y, p, sample_weight)
        unweighted = loss(y, p)
        assert unweighted == weighted
| bsd-3-clause |
DuCorey/bokeh | examples/models/file/anscombe.py | 12 | 3015 | from __future__ import print_function
import numpy as np
import pandas as pd
from bokeh.util.browser import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.layouts import gridplot
from bokeh.models.glyphs import Circle, Line
from bokeh.models import ColumnDataSource, Grid, LinearAxis, Plot, Range1d
from bokeh.resources import INLINE
# Anscombe's quartet: four x/y datasets with nearly identical summary
# statistics but very different distributions.  Each row holds
# (Ix, Iy, IIx, IIy, IIIx, IIIy, IVx, IVy).
raw_columns=[
[10.0,   8.04,   10.0,   9.14,   10.0,   7.46,   8.0,    6.58],
[8.0,    6.95,   8.0,    8.14,   8.0,    6.77,   8.0,    5.76],
[13.0,   7.58,   13.0,   8.74,   13.0,   12.74,  8.0,    7.71],
[9.0,    8.81,   9.0,    8.77,   9.0,    7.11,   8.0,    8.84],
[11.0,   8.33,   11.0,   9.26,   11.0,   7.81,   8.0,    8.47],
[14.0,   9.96,   14.0,   8.10,   14.0,   8.84,   8.0,    7.04],
[6.0,    7.24,   6.0,    6.13,   6.0,    6.08,   8.0,    5.25],
[4.0,    4.26,   4.0,    3.10,   4.0,    5.39,   19.0,   12.5],
[12.0,   10.84,  12.0,   9.13,   12.0,   8.15,   8.0,    5.56],
[7.0,    4.82,   7.0,    7.26,   7.0,    6.42,   8.0,    7.91],
[5.0,    5.68,   5.0,    4.74,   5.0,    5.73,   8.0,    6.89]]

quartet = pd.DataFrame(data=raw_columns, columns=
                       ['Ix','Iy','IIx','IIy','IIIx','IIIy','IVx','IVy'])

# One shared data source holding all four scatter datasets.
circles_source = ColumnDataSource(
    data = dict(
        xi   = quartet['Ix'],
        yi   = quartet['Iy'],
        xii  = quartet['IIx'],
        yii  = quartet['IIy'],
        xiii = quartet['IIIx'],
        yiii = quartet['IIIy'],
        xiv  = quartet['IVx'],
        yiv  = quartet['IVy'],
    )
)

# The regression line y = 3 + 0.5x is (famously) the same for all four sets.
x = np.linspace(-0.5, 20.5, 10)
y = 3 + 0.5 * x
lines_source = ColumnDataSource(data=dict(x=x, y=y))

# Shared axis ranges so the four panels are directly comparable.
xdr = Range1d(start=-0.5, end=20.5)
ydr = Range1d(start=-0.5, end=20.5)
def make_plot(title, xname, yname):
    """Build one 400x400 Anscombe panel: the data columns (xname, yname)
    as circles over the shared regression line, on the shared ranges."""
    plot = Plot(x_range=xdr, y_range=ydr, plot_width=400, plot_height=400,
                border_fill_color='white', background_fill_color='#e9e0db')
    plot.title.text = title

    # One axis below, one to the left; grids keyed to each axis's ticker.
    axes = []
    for placement in ('below', 'left'):
        axis = LinearAxis(axis_line_color=None)
        plot.add_layout(axis, placement)
        axes.append(axis)
    for dimension, axis in enumerate(axes):
        plot.add_layout(Grid(dimension=dimension, ticker=axis.ticker))

    plot.add_glyph(lines_source,
                   Line(x='x', y='y', line_color="#666699", line_width=2))

    scatter = Circle(
        x=xname, y=yname, size=12,
        fill_color="#cc6633", line_color="#cc6633", fill_alpha=0.5
    )
    plot.add_glyph(circles_source, scatter)

    return plot
# Build the four panels and arrange them in a 2x2 grid.
I = make_plot('I', 'xi', 'yi')
II = make_plot('II', 'xii', 'yii')
III = make_plot('III', 'xiii', 'yiii')
IV = make_plot('IV', 'xiv', 'yiv')

grid = gridplot([[I, II], [III, IV]], toolbar_location=None)

doc = Document()
doc.add_root(grid)

if __name__ == "__main__":
    doc.validate()
    filename = "anscombe.html"
    # Write a standalone HTML file with all resources inlined, then open it.
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Anscombe's Quartet"))
    print("Wrote %s" % filename)
    view(filename)
| bsd-3-clause |
energyPATHWAYS/energyPATHWAYS | energyPATHWAYS/shape.py | 1 | 25285 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 05 14:45:48 2015
@author: ryan
"""
import config as cfg
import datamapfunctions as dmf
import util
import pandas as pd
import pytz
import datetime as DT
# PyCharm complains about dateutil not being listed in the project requirements, but my understanding is that
# it is bundled with matplotlib, so it is listed implicitly.
from dateutil.relativedelta import relativedelta
import time
import numpy as np
import cPickle as pickle
import os
import logging
import helper_multiprocess
import pdb
import numpy as np
#http://stackoverflow.com/questions/27491988/canonical-offset-from-utc-using-pytz
def is_leap_year(year):
    """Return True if *year* is a leap year in the Gregorian calendar.

    BUG FIX: the original logic effectively reduced to ``year % 4 == 0``
    (any year not divisible by 4 is also not divisible by 100, so the
    inner branch always returned False, and the outer else always
    returned True), which wrongly classified century years such as 1900
    and 2100 as leap years.  Rule per https://support.microsoft.com/en-us/kb/214019:
    divisible by 4, except century years not divisible by 400.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def num_active_years(active_dates_index):
    """Fraction of a year (capped at 1.0) covered by an hourly datetime index."""
    # Count how many hourly timestamps fall in each calendar year.
    hours_per_year = {}
    for yr in active_dates_index.year:
        hours_per_year[yr] = hours_per_year.get(yr, 0) + 1
    # Convert hour counts to year fractions, honouring leap years.
    total = 0.
    for yr in sorted(hours_per_year):
        total += hours_per_year[yr] / (8784. if is_leap_year(yr) else 8760.)
    return min(1., total)  # we normalize up to one year
class Shapes(object):
    """Registry of all load/generation shapes defined in the database.

    Loads every Shape, determines the common active date window, and
    drives per-shape processing (optionally in parallel).
    """

    def __init__(self):
        self.data = {}  # shape id -> Shape instance
        self.sql_id_table = 'Shapes'
        self.active_shape_ids = []
        # Intersection of the date coverage of all 'weather date' shapes.
        self.start_date = None
        self.end_date = None
        # Fingerprints stored after processing; compared by init_shapes()
        # to decide whether a pickled Shapes object can be reused.
        self._geography_check = None
        self._timespan_check = None
        self._version = version  # module-level shapes format version

    def create_empty_shapes(self):
        """ This should be called first as it creates a record of all of the shapes that are in the database."""
        for id in util.sql_read_table(self.sql_id_table, column_names='id', return_unique=True, return_iterable=True):
            self.data[id] = Shape(id)
            self.active_shape_ids.append(id)

    def initiate_active_shapes(self):
        """Read raw timeseries data for every shape and derive the
        common active date window from the 'weather date' shapes."""
        logging.info(' reading data for:')
        if cfg.cfgfile.get('case', 'parallel_process').lower() == 'true':
            shapes = helper_multiprocess.safe_pool(helper_multiprocess.shapes_populate, self.data.values())
            self.data = dict(zip(self.data.keys(), shapes))
        else:
            for id in self.active_shape_ids:
                shape = self.data[id]
                logging.info(' shape: ' + shape.name)
                # If data is already loaded we assume all shapes are loaded.
                if hasattr(shape, 'raw_values'):
                    return
                shape.read_timeseries_data()
        for id in self.active_shape_ids:
            shape = self.data[id]
            if shape.shape_type=='weather date':
                shape.convert_index_to_datetime('raw_values', 'weather_datetime')
                date_position = util.position_in_index(shape.raw_values, 'weather_datetime')
                shape.start_date, shape.end_date = min(shape.raw_values.index.levels[date_position]), max(shape.raw_values.index.levels[date_position])
                # Keep the latest start and earliest end so every shape covers the window.
                self.start_date = shape.start_date if self.start_date is None else max(shape.start_date, self.start_date)
                self.end_date = shape.end_date if self.end_date is None else min(shape.end_date, self.end_date)
        self.set_active_dates()

    def set_active_dates(self):
        """Finalize start/end dates (honoring any config override) and
        build the hourly active_dates_index shared by all shapes."""
        requested_shape_start_date = cfg.shape_start_date
        if requested_shape_start_date:
            # self.start_date and self.end_date could be None here if we encountered no 'weather date' shapes
            # in initiate_active_shapes().
            if (self.start_date is None or requested_shape_start_date >= self.start_date) and\
                    (self.end_date is None or requested_shape_start_date <= self.end_date):
                self.start_date = requested_shape_start_date
                shape_years = cfg.shape_years or 1
                # Need to subtract an hour because all timestamps are hour-beginning.
                requested_shape_end_date = self.start_date + relativedelta(years=shape_years, hours=-1)
                # We only need to check the "right hand" boundary here, because: A) we already confirmed that the
                # start_date was within the allowable range, and B) config.py requires shape_years to be positive.
                # In other words, there's no way at this point that requested_shape_end_date could be < self.start_date
                if self.end_date is None or requested_shape_end_date <= self.end_date:
                    self.end_date = requested_shape_end_date
                else:
                    raise ValueError("The requested shape_start_date from your config plus the requested shape_years "
                                     "give an end date of {}, which is after the end date of at least one of "
                                     "your shapes.".format(requested_shape_end_date))
            else:
                raise ValueError("The requested shape_start_date from your config ({}) is outside the range of dates "
                                 "available in your shapes.".format(requested_shape_start_date))
        # This is a last resort; it's unlikely that we could get here without either:
        # A) encountering at least one weather_datetime shape in initiate_active_shapes(), or
        # B) having a shape_start_date requested in the config
        # but if both of those have happened, we set the date range to the case's current year
        if self.start_date is self.end_date is None:
            self.start_date = DT.datetime(int(cfg.cfgfile.get('case', 'current_year')), 1, 1)
            self.end_date = DT.datetime(int(cfg.cfgfile.get('case', 'current_year')), 12, 31, 23)
        logging.debug("shape_start_date: {}, shape_years: {}, start_date: {}, end_date: {}".format(
            cfg.shape_start_date, cfg.shape_years, self.start_date, self.end_date))
        self.active_dates_index = pd.date_range(self.start_date, self.end_date, freq='H')
        self.time_slice_elements = self.create_time_slice_elements(self.active_dates_index)
        # Push the shared window and time-slice helpers down to each shape.
        for id in self.active_shape_ids:
            self.data[id].active_dates_index = self.active_dates_index
            self.data[id].time_slice_elements = self.time_slice_elements

    def process_active_shapes(self):
        """Map every shape to the primary geography/timezone and record the
        fingerprints used for pickle-cache invalidation."""
        #run the weather date shapes first because they inform the daterange for dispatch
        logging.info(' mapping data for:')
        if cfg.cfgfile.get('case','parallel_process').lower() == 'true':
            shapes = helper_multiprocess.safe_pool(helper_multiprocess.process_shapes, self.data.values())
            self.data = dict(zip(self.data.keys(), shapes))
        else:
            for id in self.active_shape_ids:
                self.data[id].process_shape()
        dispatch_outputs_timezone_id = int(cfg.cfgfile.get('case', 'dispatch_outputs_timezone_id'))
        self.dispatch_outputs_timezone = pytz.timezone(cfg.geo.timezone_names[dispatch_outputs_timezone_id])
        # Re-express the active dates in the dispatch output timezone.
        self.active_dates_index = pd.date_range(self.active_dates_index[0], periods=len(self.active_dates_index), freq='H', tz=self.dispatch_outputs_timezone)
        self.num_active_years = num_active_years(self.active_dates_index)
        self._geography_check = (cfg.primary_geography_id, tuple(sorted(cfg.primary_subset_id)), tuple(cfg.breakout_geography_id))
        self._timespan_check = (cfg.shape_start_date, cfg.shape_years)

    @staticmethod
    def create_time_slice_elements(active_dates_index):
        """Precompute per-hour time-slice attributes (day type, month, hour,
        etc.) for the active dates, keyed by cfg.time_slice_col."""
        business_days = pd.bdate_range(active_dates_index[0].date(), active_dates_index[-1].date())
        biz_map = {v: k for k, v in util.sql_read_table('DayType', column_names='*', return_iterable=False)}
        time_slice_elements = {}
        for ti in cfg.time_slice_col:
            if ti=='day_type':
                time_slice_elements['day_type'] = np.array([biz_map['workday'] if s.date() in business_days else biz_map['non-workday'] for s in active_dates_index], dtype=int)
            else:
                time_slice_elements[ti] = getattr(active_dates_index, ti)
        # hour is stored 0-23 on the index; shapes data may use 1-24
        time_slice_elements['hour24'] = time_slice_elements['hour'] + 1
        return time_slice_elements

    def make_flat_load_shape(self, index, column='value'):
        """Return a constant (flat) shape over *index*, normalized within each
        non-datetime group and scaled by the number of active years."""
        assert 'weather_datetime' in index.names
        flat_shape = util.empty_df(fill_value=1., index=index, columns=[column])
        group_to_normalize = [n for n in flat_shape.index.names if n!='weather_datetime']
        flat_shape = flat_shape.groupby(level=group_to_normalize).transform(lambda x: x / x.sum())*self.num_active_years
        return flat_shape
class Shape(dmf.DataMapFunctions):
    """One load/generation shape read from the Shapes/ShapesData tables.

    A shape is either 'weather date' (explicit hourly timeseries) or
    'time slice' (values keyed by month/hour/day-type).  process_shape()
    turns the raw data into an hourly profile on the primary geography,
    normalized so each group sums to num_active_years.
    """

    def __init__(self, id):
        self.id = id
        self.sql_id_table = 'Shapes'
        self.sql_data_table = 'ShapesData'
        # Copy all columns of this shape's Shapes row onto the instance
        # (e.g. name, shape_type, shape_unit_type, geography, time_zone_id).
        for col, att in util.object_att_from_table(self.sql_id_table, id):
            setattr(self, col, att)
        dmf.DataMapFunctions.__init__(self, data_id_key='parent_id')
        # needed for parallel process
        self.workingdir = cfg.workingdir
        self.cfgfile_name = cfg.cfgfile_name
        self.log_name = cfg.log_name

    def create_empty_shape_data(self):
        """Build an empty hourly frame spanning the active dates, with helper
        columns for each time-slice key so raw 'time slice' rows can be
        broadcast onto hours."""
        self._active_time_keys = [ind for ind in self.raw_values.index.names if ind in cfg.time_slice_col]
        self._active_time_dict = dict([(ind, loc) for loc, ind in enumerate(self.raw_values.index.names) if ind in cfg.time_slice_col])

        self._non_time_keys = [ind for ind in self.raw_values.index.names if ind not in self._active_time_keys]
        self._non_time_dict = dict([(ind, loc) for loc, ind in enumerate(self.raw_values.index.names) if ind in self._non_time_keys])

        data = pd.DataFrame(index=pd.Index(self.active_dates_index, name='weather_datetime'), columns=['value'])

        for ti in self._active_time_keys:
            #hour is given as 1-24 not 0-23
            if ti=='hour' and min(self.raw_values.index.levels[self._active_time_dict['hour']])==1 and max(self.raw_values.index.levels[self._active_time_dict['hour']])==24:
                # the minimum value is 1 and max value is 24
                data[ti] = self.time_slice_elements['hour24']
            else:
                data[ti] = self.time_slice_elements[ti]

        non_time_levels = [list(l) for l, n in zip(self.raw_values.index.levels, self.raw_values.index.names) if n in self._non_time_keys]
        # this next step could be done outside of a for loop, but I'm not able to get the Pandas syntax to take
        for name, level in zip(self._non_time_keys, non_time_levels):
            data = pd.concat([data]*len(level), keys=level, names=[name])

        try:
            data.reset_index(inplace=True)
        except:
            pdb.set_trace()
        data.set_index(self._non_time_keys+self._active_time_keys+['weather_datetime'], inplace=True)
        data.sort(inplace=True)
        return data

    def process_shape(self):
        """Convert raw values to a normalized hourly profile on the primary
        geography (reindex/broadcast, timezone-localize, geomap, normalize)."""
        logging.info(' shape: ' + self.name)
        self.num_active_years = num_active_years(self.active_dates_index)

        if self.shape_type=='weather date':
            self.values = util.reindex_df_level_with_new_elements(self.raw_values, 'weather_datetime', self.active_dates_index)
            self.values = self.values.replace(np.nan,0)# this step is slow, consider replacing
            if self.values.isnull().values.any():
                raise ValueError('Weather data for shape {} did not give full coverage of the active dates'.format(self.name))
        elif self.shape_type=='time slice':
            self.values = self.create_empty_shape_data()

            non_time_elements_in_levels = [list(util.get_elements_from_level(self.values, e)) for e in self._non_time_keys]
            time_elements_in_levels = [list(util.get_elements_from_level(self.values, e)) for e in self._active_time_keys]

            # Broadcast each raw (slice -> value) row onto all matching hours.
            for ind, value in self.raw_values.iterrows():
                non_time_portion = [ind[self._non_time_dict[e]] for e in self._non_time_keys]
                time_portion = [ind[self._active_time_dict[e]] for e in self._active_time_keys]
                if not np.all([s in l for s, l in zip(non_time_portion+time_portion, non_time_elements_in_levels+time_elements_in_levels)]):
                    continue

                indexer = tuple(non_time_portion + time_portion + [slice(None)])

                if self.shape_unit_type=='energy':
                    # energy is spread evenly over the hours in the slice
                    len_slice = len(self.values.loc[indexer])
                    self.values.loc[indexer] = value[0]/float(len_slice)*self.num_active_years
                elif self.shape_unit_type=='power':
                    self.values.loc[indexer] = value[0]

            if self.values.isnull().values.any():
                raise ValueError('Shape time slice data did not give full coverage of the active dates')
            # reindex to remove the helper columns
            self.values.index = self.values.index.droplevel(self._active_time_keys)

        self.values = cfg.geo.filter_extra_geos_from_df(self.values.swaplevel('weather_datetime', -1).sort())
        self.geomap_to_time_zone()
        self.localize_shapes()
        self.standardize_time_across_timezones()
        self.geomap_to_primary_geography()
        self.sum_over_time_zone()
        self.normalize()
        self.add_timeshift_type()
        # raw values can be very large, so we delete it in this one case
        del self.raw_values

    def add_timeshift_type(self):
        """Later these shapes will need a level called timeshift type, and it is faster to add it now if it doesn't already have it"""
        if 'timeshift_type' not in self.values.index.names:
            self.values['timeshift_type'] = 2 # index two is the native demand shape
            self.values = self.values.set_index('timeshift_type', append=True).swaplevel('timeshift_type', 'weather_datetime').sort_index()

    def normalize(self):
        """Normalize values so each non-datetime group sums to num_active_years
        (dispatch-constraint p_min/p_max rows are handled specially)."""
        group_to_normalize = [n for n in self.values.index.names if n!='weather_datetime']
        # here is a special case where I have p_min and p_max in my dispatch constraints and these should not be normalized
        if 'dispatch_constraint' in group_to_normalize:
            # this first normalization does what we need for hydro pmin and pmax, which is a special case of normalization
            combined_map_df = util.DfOper.mult((self.map_df_tz, self.map_df_primary))
            normalization_factors = combined_map_df.groupby(level=cfg.primary_geography).sum()
            self.values = util.DfOper.divi((self.values, normalization_factors))

            temp = self.values.groupby(level=group_to_normalize).transform(lambda x: x / x.sum())*self.num_active_years

            # TODO: 2, and 3 should not be hard coded here, they represent p_min and p_max
            indexer = util.level_specific_indexer(temp, 'dispatch_constraint', [[2,3]])
            temp.loc[indexer, :] = self.values.loc[indexer, :]

            self.values = temp
        else:
            self.values = self.values.groupby(level=group_to_normalize).transform(lambda x: x / x.sum())*self.num_active_years

    def geomap_to_time_zone(self, attr='values', inplace=True):
        """ maps a dataframe to another geography using relational GeographyMapdatabase table
        """
        geography_map_key = cfg.cfgfile.get('case', 'default_geography_map_key') if not hasattr(self, 'geography_map_key') else self.geography_map_key

        # create dataframe with map from one geography to another
        # we always want to normalize as a total here because we will re-sum over time zone later
        self.map_df_tz = cfg.geo.map_df(self.geography, 'time zone', normalize_as='total', map_key=geography_map_key)

        mapped_data = util.DfOper.mult([getattr(self, attr), self.map_df_tz])
        mapped_data = mapped_data.swaplevel('weather_datetime', -1)

        if inplace:
            setattr(self, attr, mapped_data.sort())
        else:
            return mapped_data.sort()

    def geomap_to_primary_geography(self, attr='values', inplace=True):
        """ maps the dataframe to primary geography
        """
        geography_map_key = cfg.cfgfile.get('case', 'default_geography_map_key') if not hasattr(self, 'geography_map_key') else self.geography_map_key

        self.map_df_primary = cfg.geo.map_df(self.geography, cfg.primary_geography, normalize_as=self.input_type, map_key=geography_map_key)
        mapped_data = util.DfOper.mult((getattr(self, attr), self.map_df_primary), fill_value=None)

        if self.geography!=cfg.primary_geography and self.geography!='time zone':
            mapped_data = util.remove_df_levels(mapped_data, self.geography)

        mapped_data = mapped_data.swaplevel('weather_datetime', -1)

        if inplace:
            setattr(self, attr, mapped_data.sort())
        else:
            return mapped_data.sort()

    def sum_over_time_zone(self, attr='values', inplace=True):
        """Collapse the 'time zone' index level by summing (no-op when the
        primary geography *is* time zone)."""
        converted_geography = cfg.primary_geography

        if converted_geography=='time zone':
            if inplace:
                return
            else:
                return getattr(self, attr)

        levels = [ind for ind in getattr(self, attr).index.names if ind!='time zone']
        df = getattr(self, attr).groupby(level=levels).sum()
        df.sort(inplace=True)

        if inplace:
            setattr(self, attr, df)
        else:
            return df

    def standardize_time_across_timezones(self, attr='values', inplace=True):
        """Reindex every group onto a single tz-aware hourly index and fill the
        edge gaps created by timezone offsets."""
        self.final_dates_index = pd.date_range(self.active_dates_index[0], periods=len(self.active_dates_index), freq='H', tz=self.dispatch_outputs_timezone)
        df = util.reindex_df_level_with_new_elements(getattr(self, attr).copy(), 'weather_datetime', self.final_dates_index)

        levels = [n for n in self.values.index.names if n!='weather_datetime']
        df = df.groupby(level=levels).fillna(method='bfill').fillna(method='ffill')

        if inplace:
            setattr(self, attr, df)
        else:
            return df

    def localize_shapes(self, attr='values', inplace=True):
        """ Step through time zone and put each profile maped to time zone in that time zone
        """
        dispatch_outputs_timezone_id = int(cfg.cfgfile.get('case', 'dispatch_outputs_timezone_id'))
        self.dispatch_outputs_timezone = pytz.timezone(cfg.geo.timezone_names[dispatch_outputs_timezone_id])

        new_df = []
        for tz_id, group in getattr(self, attr).groupby(level='time zone'):
            # get the time zone name and figure out the offset from UTC
            tz_id = tz_id if self.time_zone_id is None else self.time_zone_id
            tz = pytz.timezone(cfg.geo.timezone_names[tz_id])
            _dt = DT.datetime(2015, 1, 1)
            # NOTE(review): a fixed reference date means DST shapes get a
            # constant offset for the whole year — presumably intentional.
            offset = (tz.utcoffset(_dt) + tz.dst(_dt)).total_seconds()/60.
            # localize and then convert to dispatch_outputs_timezone
            df = group.tz_localize(pytz.FixedOffset(offset), level='weather_datetime')
            new_df.append(df)

        if inplace:
            setattr(self, attr, pd.concat(new_df).tz_convert(self.dispatch_outputs_timezone, level='weather_datetime').sort_index())
        else:
            return pd.concat(new_df).tz_convert(self.dispatch_outputs_timezone, level='weather_datetime').sort_index()

    def convert_index_to_datetime(self, dataframe_name, index_name='weather_datetime'):
        """Replace the raw weather_datetime index level with true datetimes
        via the shared cfg.date_lookup cache."""
        df = getattr(self, dataframe_name)
        names = df.index.names
        df.reset_index(inplace=True)
        df['weather_datetime'] = cfg.date_lookup.lookup(self.raw_values['weather_datetime'])
        df['weather_datetime'].freq = 'H'
        df.set_index(names, inplace=True)
        df.sort_index(inplace=True)

    @staticmethod
    def ensure_feasible_flexible_load(df):
        """Adjust the delayed (1) / native (2) / advanced (3) columns so their
        cumulative sums satisfy cum(1) <= cum(2) <= cum(3); traps into pdb if
        the constraints remain infeasible after adjustment."""
        names = [n for n in df.index.names if n != 'weather_datetime']
        cum_df = df.groupby(level=names).cumsum()

        add_to_1 = min(0, (cum_df[2] - cum_df[1]).min())*1.01
        subtract_from_3 = min(0, (cum_df[3] - cum_df[2]).min())*1.01

        if add_to_1 < 0:
            df.iloc[0,0] += add_to_1
            cum_df = df[1].groupby(level=names).cumsum()

            make_zero = np.nonzero(cum_df.values<0)[0]
            if len(make_zero):
                replace = make_zero[-1] + 1
                df.iloc[make_zero, 0] = 0
                df.iloc[replace, 0] = cum_df.iloc[replace]
            df.iloc[-1, 0] += (df[2].sum() - df[1].sum())

        if subtract_from_3 < 0:
            df.iloc[0,2] -= subtract_from_3
            cum_df = df[3].groupby(level=names).cumsum()
            cum_diff = df[2].sum() - cum_df

            make_zero = np.nonzero(cum_diff.values<0)[0][1:]
            if len(make_zero):
                replace = make_zero[0] - 1
                df.iloc[make_zero, 2] = 0
                df.iloc[replace, 2] += cum_diff.iloc[replace]
            else:
                df.iloc[-1, 2] += cum_diff.iloc[-1]

        cum_df = df.groupby(level=names).cumsum()

        if ((cum_df[1] - cum_df[2]) > 1E-12).any():
            logging.error('Infeasible flexible load constraints were created where the delayed load shape is greater than the native load shape')
            logging.error(cum_df[cum_df[1] > cum_df[2]])
            pdb.set_trace()

        if ((cum_df[2] - cum_df[3]) > 1E-12).any():
            logging.error('Infeasible flexible load constraints were created where the advanced load shape is less than the native load shape')
            logging.error(cum_df[cum_df[2] > cum_df[3]])
            pdb.set_trace()

        return df

    @staticmethod
    def produce_flexible_load(shape_df, percent_flexible=None, hr_delay=None, hr_advance=None):
        """Blend the native shape with delayed/advanced variants according to
        percent_flexible, producing timeshift_type levels 1 (delay),
        2 (native) and 3 (advance)."""
        hr_delay = 0 if hr_delay is None else hr_delay
        hr_advance = 0 if hr_advance is None else hr_advance

        native_slice = shape_df.xs(2, level='timeshift_type')
        native_slice_stacked = pd.concat([native_slice]*3, keys=[1,2,3], names=['timeshift_type'])

        pflex_stacked = pd.concat([percent_flexible]*3, keys=[1,2,3], names=['timeshift_type'])

        timeshift_levels = sorted(list(util.get_elements_from_level(shape_df, 'timeshift_type')))
        if timeshift_levels==[1, 2, 3]:
            # here, we have flexible load profiles already specified by the user
            names = shape_df.index.names
            full_load = shape_df.squeeze().unstack('timeshift_type')
            group_by_names = [n for n in full_load.index.names if n != 'weather_datetime']
            full_load = full_load.groupby(level=group_by_names).apply(Shape.ensure_feasible_flexible_load)
            full_load = full_load.stack('timeshift_type').reorder_levels(names).sort_index().to_frame()
            full_load.columns = ['value']
        elif timeshift_levels==[2]:
            non_weather = [n for n in native_slice.index.names if n!='weather_datetime']

            # positive hours is a shift forward, negative hours a shift back
            shift = lambda df, hr: df.shift(hr).ffill().fillna(value=0)
            delay_load = native_slice.groupby(level=non_weather).apply(shift, hr=hr_delay)

            def advance_load_function(df, hr):
                # shift backward, folding the clipped-off head into hour 0
                df_adv = df.shift(-hr).ffill().fillna(value=0)
                df_adv.iloc[0] += df.iloc[:hr].sum().sum()
                return df_adv
            advance_load = native_slice.groupby(level=non_weather).apply(advance_load_function, hr=hr_advance)

            full_load = pd.concat([delay_load, native_slice, advance_load], keys=[1,2,3], names=['timeshift_type'])
        else:
            raise ValueError("elements in the level timeshift_type are not recognized")

        return util.DfOper.add((util.DfOper.mult((full_load, pflex_stacked), collapsible=False),
                                util.DfOper.mult((native_slice_stacked, 1-pflex_stacked), collapsible=False)))
# electricity shapes
force_rerun_shapes = False  # set True to ignore any pickled shapes on disk
version = 4 #change this when you need to force users to rerun shapes
shapes = Shapes()  # module-level singleton, (re)populated by init_shapes()
def init_shapes(pickle_shapes=True):
    """Populate the module-level ``shapes`` singleton.

    Loads a previously pickled Shapes object for the current primary
    geography when one exists; rebuilds (and optionally re-pickles) it
    whenever the format version, geography settings or timespan settings
    have changed, or force_rerun_shapes is set.
    """
    global shapes
    if os.path.isfile(os.path.join(cfg.workingdir, '{}_shapes.p'.format(cfg.primary_geography))):
        logging.info('Loading shapes')
        with open(os.path.join(cfg.workingdir, '{}_shapes.p'.format(cfg.primary_geography)), 'rb') as infile:
            shapes = pickle.load(infile)
    # Fingerprints of the current run's configuration, compared against
    # those stored on the (possibly unpickled) shapes object.
    geography_check = (cfg.primary_geography_id, tuple(sorted(cfg.primary_subset_id)), tuple(cfg.breakout_geography_id))
    timespan_check = (cfg.shape_start_date, cfg.shape_years)
    if (shapes._version != version) or (shapes._geography_check != geography_check) or (shapes._timespan_check != timespan_check) or force_rerun_shapes:
        logging.info('Processing shapes')
        shapes.__init__()
        shapes.create_empty_shapes()
        shapes.initiate_active_shapes()
        shapes.process_active_shapes()
        if pickle_shapes:
            logging.info('Pickling shapes')
            with open(os.path.join(cfg.workingdir, '{}_shapes.p'.format(cfg.primary_geography)), 'wb') as outfile:
                pickle.dump(shapes, outfile, pickle.HIGHEST_PROTOCOL)
| mit |
cerebis/meta-sweeper | bin/readthru_parser.py | 1 | 16212 | #!/usr/bin/env python
"""
meta-sweeper - for performing parametric sweeps of simulated
metagenomic sequencing experiments.
Copyright (C) 2016 "Matthew Z DeMaere"
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import pysam
import tqdm
from Bio import SeqIO
from Bio.Restriction import Restriction
from Bio.Restriction.Restriction_Dictionary import rest_dict, typedict
from intervaltree import IntervalTree
from itertools import product
def get_enzyme_instance_ipython(enz_name):
    """
    An alternative method to fetch an instance of a given restriction enzyme by its
    name using a work-around which avoids exception with getattr() in iPython.
    Ackn: StackOverflow: user xbello.
    See: http://stackoverflow.com/questions/20381912/type-object-restrictiontype-has-no-attribute-size
    :param enz_name: the case-sensitive name of the enzyme
    :return: RestrictionType the enzyme instance
    :raises IndexError: if no enzyme of that name exists in the type dictionary
    """
    # dict.items() works on both Python 2 and 3; iteritems() is Python 2 only.
    r_type_names = [rt for tid, (rt, rn) in typedict.items() if enz_name in rn][0]
    r_clz = tuple(getattr(Restriction, rt) for rt in r_type_names)
    return Restriction.AbstractCut(enz_name, r_clz, rest_dict[enz_name])
def get_enzyme_instance(enz_name):
    """Look up a restriction enzyme by its (case-sensitive) name.

    :param enz_name: name of the enzyme, e.g. 'HindIII'
    :return: the RestrictionType instance from Bio.Restriction
    """
    enzyme = getattr(Restriction, enz_name)
    return enzyme
def site_intervaltree(seq, enzyme):
    """Build an interval tree of an enzyme's cutsites across a sequence.

    Whether a position involves a cutsite can then be queried with
    ``tree[x]`` or ``tree[x1:x2]``.

    :param seq: the sequence to digest
    :param enzyme: the restriction enzyme used in digestion
    :return: an IntervalTree of cutsite intervals
    """
    sites = IntervalTree()
    rec_size = enzyme.size
    cut_offset = enzyme.fst3
    for hit in enzyme.search(seq):
        # convert the search hit to a 0-based interval of the recognition site
        begin = hit + cut_offset - 1
        sites.addi(begin, begin + rec_size)
    return sites
# CIGAR operation characters mapped to their numeric BAM op codes (M=0 ... X=8).
CODE2CIGAR = {op_char: op_code for op_code, op_char in enumerate("MIDNSHP=X")}
# Matches a single CIGAR unit, e.g. "10M" or "9H".
CIGAR_ANY = re.compile(r"(\d+)([MIDNSHP=X])")
# Matches only the aligned (M) segments of a CIGAR string.
CIGAR_MATCHES = re.compile(r'([0-9]+)M')
# Cigar op codes (D, N, P, X) that are not permitted.
NOT_ALLOWED = {2, 3, 6, 8}
def cigar_to_tuple(cigar):
    """Convert a CIGAR string into a list of (op_code, length) tuples.

    :param cigar: a SAM CIGAR string, e.g. "10M2S"
    :return: list of (numeric op code, segment length) tuples
    """
    return [(CODE2CIGAR[op], int(length)) for length, op in CIGAR_ANY.findall(cigar)]
def count_matches(cigar):
    """Sum the lengths of the aligned (M) segments of a CIGAR string.

    :param cigar: SAMtools CIGAR string
    :return: total number of aligned bases
    """
    return sum(int(seg_len) for seg_len in CIGAR_MATCHES.findall(cigar))
def next_pair(it):
    """
    Return a pair (R1/R2) of reads from a name sorted bam file. Raises StopIteration
    when the end of file is reached. Order of reads is not guaranteed, i.e. R1,R2 or R2,R1.
    The method will continue to scan the file until it finds a pair or eof.
    :param it: a Pysam sam/bam file iterator
    :return: tuple R1/R2 or R2/R1
    """
    # BUGFIX(compat): it.next() is Python 2 only; the builtin next() works on both.
    r1 = next(it)
    while True:
        # read records until we get a pair
        r2 = next(it)
        if r1.query_name == r2.query_name:
            return r1, r2
        # not a pair: slide the window forward by one read
        r1 = r2
def aln_coverage(aln_list):
    """
    Calculate the coverage across the reported alignments for a given read. This will most
    often involve only a single alignment, but also considers non-overlapping alignments
    reported by BWA MEM scavenged from the XP tag. Reports the number of bases covered
    (<=read_len) and the overlap between them (normally 0).
    :param aln_list: the list of alignment dicts for a read
    :return: dict {'coverage': bases covered, 'overlap': overlapping bases,
        'has_multi': True if more than one alignment was reported}
    """
    # using an intervaltree for this
    tr = IntervalTree()
    tot = 0
    for ti in aln_list:
        if ti['is_reverse']:
            # reversed reads must be tallied from the opposite end
            n = ti['total']
            for op, nb in ti['cigartuple']:
                # op 0 is an aligned (M) segment
                if op == 0:
                    tr.addi(n - nb, n)
                    tot += nb
                n -= nb
        else:
            # forward mapped reads tally from start position
            n = 0
            for op, nb in ti['cigartuple']:
                if op == 0:
                    tr.addi(n, n + nb)
                    tot += nb
                n += nb
    # lazy means of merging intervals: union of all aligned segments
    tr.merge_overlaps()
    cov = sum([i.end - i.begin for i in tr])
    # overlap = total aligned bases minus union => bases counted more than once
    return {'coverage': cov, 'overlap': tot - cov, 'has_multi': len(aln_list) > 1}
def infer_inslen(r1, r2, ref_len):
    """Infer the insert length spanned by the read pair R1/R2.

    :param r1: read 1
    :param r2: read 2
    :param ref_len: the total length of the reference sequence
    :return: insert length in bp
    """
    # Arrange the pair so r1 is always the forward-strand read.
    if r1.is_reverse:
        r1, r2 = r2, r1
    # Account for soft-clipping (op 4) when the forward read starts at the
    # very beginning of the reference.
    soft_clip = 0
    if r1.pos == 0 and r1.cigar[0][0] == 4:
        soft_clip = r1.cigar[0][1]
    insert_len = soft_clip + r2.pos + r2.alen - r1.pos + 1
    if insert_len < 0:
        # negative length: the pair presumably wraps a circular reference
        insert_len += ref_len
    return insert_len
def infer_from_alignments(r1_alns, r2_alns, ref_len):
    """
    Infer the length of an insert from all alignments of R1 and R2. This is done
    in a brute force manner over all pairs of opposite-strand alignments.
    :param r1_alns: R1 related alignment dicts
    :param r2_alns: R2 related alignment dicts
    :param ref_len: the length of the reference
    :return: the longest possible insert length
    :raises ValueError: if no opposite-strand alignment combination exists
    """
    midpoint = ref_len / 2
    dist = []
    for r1, r2 in product(r1_alns, r2_alns):
        # only consider alignments on opposite strands
        if r1['is_reverse'] != r2['is_reverse']:
            # BUGFIX: previously compared r1['pos'] with itself (always False),
            # leaving this swap as dead code. Ensure r1 is leftmost by position.
            if r1['pos'] > r2['pos']:
                r2, r1 = r1, r2
            if r1['is_reverse']:
                # make r1 the forward-strand alignment
                r2, r1 = r1, r2
            sc = 0
            if r1['pos'] == 0:
                if r1['cigartuple'][0][0] == 4:
                    # account for soft-clip (op 4) at the start of the reference
                    sc += r1['cigartuple'][0][1]
            il = sc + r2['pos'] + r2['alen'] - r1['pos'] + 1
            if il < 0:
                # negative implies the pair wraps the reference origin
                il += ref_len
            if il > midpoint:
                # take the shorter way around (assumes circular reference)
                il = ref_len - il
            dist.append(il)
    return max(dist)
def append_xp_alignments(aln_list, xp_record):
    """Append alignments found in BWA MEM's XP record to *aln_list* in place.

    The XP string is doubly delimited: alignments are separated by
    semi-colons, and the fields of each alignment by commas
    (ref, signed 1-based position, cigar, NM, MAPQ).

    :param aln_list: the list of alignment dicts to extend
    :param xp_record: the raw XP tag data for this read
    :return: None; *aln_list* is modified in place
    """
    for record in xp_record.split(';'):
        # a trailing delimiter produces an empty record; skip it
        if not record:
            continue
        fields = record.split(',')
        raw_pos = int(fields[1])
        # the sign of the position encodes strand orientation
        is_rev = raw_pos < 0
        # convert to a 0-based, unsigned coordinate
        pos = (-raw_pos if is_rev else raw_pos) - 1
        # parse the cigar and derive aligned/total lengths from it
        cigtup = cigar_to_tuple(fields[2])
        aligned = sum(num for op, num in cigtup if op == 0)
        total = sum(num for _, num in cigtup)
        aln_list.append({'ref': fields[0],
                         'pos': pos,
                         'is_reverse': is_rev,
                         'cigar': fields[2],
                         'cigartuple': cigtup,
                         'nm': int(fields[3]),
                         'mapq': int(fields[4]),
                         'alen': aligned,
                         'total': total})
def parse_all_alignments(read):
    """Collect all alignments for *read*: the primary plus any XP-tag extras.

    BWA MEM (since v0.7.3) reports "other non-overlapping" alignments in the
    XP tag. These matter when reads are split -- as for HiC/Meta3C
    experiments where reads cross the ligation junction.

    :param read: the pysam read record to parse
    :return: a list of dicts containing alignment information
    """
    primary = {'ref': read.reference_name,
               'pos': read.pos,
               'alen': read.alen,
               'total': sum(num for op, num in read.cigartuples),
               'is_reverse': read.is_reverse,
               'cigar': read.cigarstring,
               'cigartuple': read.cigartuples,
               'nm': None,
               'mapq': read.mapq}
    alignments = [primary]
    # scavenge any additional alignments from the XP tag
    if read.has_tag('XP'):
        append_xp_alignments(alignments, read.get_tag('XP'))
    return alignments
def alignment_cutsite_status(aln, ref_sites, gap=1):
    """
    Categorise the cutsites contained in an alignment as either 3' or 5' terminated
    or internal.
    :param aln: the alignment dict to inspect
    :param ref_sites: the intervaltree of cutsites for the reference
    :param gap: a tolerance in bp, permitting "close enough" situations at the termini
    :return: a dict of booleans {'5p': t/f, '3p': t/f, 'internal': t/f}
    """
    # first and last aligned reference positions (inclusive)
    x1 = aln['pos']
    x2 = x1 + aln['alen'] - 1
    # a cutsite within `gap` bp of either end counts as terminating that end
    is_left_term = len(ref_sites[x1:x1 + gap + 1]) > 0
    is_right_term = len(ref_sites[x2 - gap:x2 + 1]) > 0
    # internal sites are only reported if wholly contained within the trimmed range
    has_internal = len(ref_sites.search(x1 + gap, x2 - gap, True)) > 0
    if aln['is_reverse']:
        # on the reverse strand, left/right swap roles for 5'/3'
        return {'5p': is_right_term, '3p': is_left_term, 'internal': has_internal}
    else:
        return {'5p': is_left_term, '3p': is_right_term, 'internal': has_internal}
def read_cutsite_status(aln_terms):
    """Collapse per-alignment cutsite statuses into a read-wide status.

    A read is considered 5'/3' terminated or internally cut if ANY of its
    alignments reports that condition.

    :param aln_terms: list of per-alignment status dicts
    :return: dict with boolean keys '5p', '3p' and 'internal'
    """
    combined = {}
    for key in ('5p', '3p', 'internal'):
        combined[key] = any(status[key] for status in aln_terms)
    return combined
def parse_bam(bam, ref_seq, enzyme):
    """Scan a name-sorted BAM, classifying read pairs by their relationship
    to the enzyme's cutsites and collecting insert-length distributions.

    NOTE: this function uses Python 2 print statements, as does the rest of
    the file.

    :param bam: an open pysam.AlignmentFile, sorted by read name
    :param ref_seq: the reference sequence that was digested
    :param enzyme: the restriction enzyme instance used in digestion
    :return: tuple (ins_len, ins_len_cs) -- insert lengths for proper pairs
        and for cutsite-associated pairs respectively
    """
    print 'Counting reads...',
    total_reads = bam.count(until_eof=True)
    print ' found {0} reads'.format(total_reads)
    print 'Creating cutsite interval tree...',
    site_tree = site_intervaltree(ref_seq, enzyme)
    print ' found {0} sites'.format(len(site_tree))
    # tallies for each classification of read pair
    counts = {
        'all_pairs': 0,
        'incomplete': 0,
        '3p_term': 0,
        '3p_trunc': 0,
        'readthru_conf': 0,
        'readthru_multi': 0,
        'readthru': 0,
        'nosite': 0,
        'hassite': 0,
        'proper': 0, }
    ins_len = []
    ins_len_cs = []
    # NOTE(review): 'tab.csv' is opened but never written to or closed -- confirm intent.
    outh = open('tab.csv', 'w')
    print 'Beginning parsing...'
    with tqdm.tqdm(total=total_reads) as pbar:
        # counting above consumed the iterator; rewind before fetching pairs
        bam.reset()
        bam_iter = bam.fetch(until_eof=True)
        while True:
            try:
                pair = next_pair(bam_iter)
                pbar.update(2)
                counts['all_pairs'] += 1
            except StopIteration:
                break
            r1, r2 = pair
            # get the inferred full length of each read for later
            r1_len = r1.infer_query_length()
            r2_len = r2.infer_query_length()
            if r1.is_unmapped or r2.is_unmapped:
                # ignore incompletely mapped pairs
                counts['incomplete'] += 1
                continue
            # get alignments for R1 and see if any involve a cutsite
            r1_algns = parse_all_alignments(r1)
            r1_count = len([inv for aln in r1_algns for inv in site_tree[aln['pos']:aln['pos'] + aln['alen']]])
            # get alignments for R2 and see if any involve a cutsite
            r2_algns = parse_all_alignments(r2)
            r2_count = len([inv for aln in r2_algns for inv in site_tree[aln['pos']:aln['pos'] + aln['alen']]])
            # if either read involves a cutsite, look a bit deeper
            if r1_count > 0 or r2_count > 0:
                counts['hassite'] += 1
                r1_aln_info = aln_coverage(r1_algns)
                r1_aln_status = [alignment_cutsite_status(aln_i, site_tree) for aln_i in r1_algns]
                r1_status = read_cutsite_status(r1_aln_status)
                r2_aln_info = aln_coverage(r2_algns)
                r2_aln_status = [alignment_cutsite_status(aln_i, site_tree) for aln_i in r2_algns]
                r2_status = read_cutsite_status(r2_aln_status)
                # was there a 3p termination of r1 or r2
                if r1_status['3p'] or r2_status['3p']:
                    counts['3p_term'] += 1
                    # was a read also incompletely aligned
                    if r1.alen < r1_len or r2.alen < r2_len:
                        counts['3p_trunc'] += 1
                        ins_len_cs.append(infer_from_alignments(r1_algns, r2_algns, bam.lengths[0]))
                # was there both 5p and 3p termination, multiple and short alignments
                if (r1_status['5p'] and r1_status['3p']) or (r2_status['5p'] and r2_status['3p']):
                    counts['readthru'] += 1
                    if (r1_aln_info['has_multi'] and r1.alen < r1_len) or \
                            (r2_aln_info['has_multi'] and r2.alen < r2_len):
                        counts['readthru_multi'] += 1
                        # do R1 and R2 possess an alignment in the same direction
                        # NOTE(review): all((a, b)) is True only when BOTH alignments
                        # are reversed; forward/forward combinations are not counted
                        # by this test -- confirm intent.
                        r1_rev = [aln['is_reverse'] for aln in r1_algns]
                        r2_rev = [aln['is_reverse'] for aln in r2_algns]
                        if any(map(all, [x for x in product(r1_rev, r2_rev)])):
                            counts['readthru_conf'] += 1
                            ins_len_cs.append(infer_from_alignments(r1_algns, r2_algns, bam.lengths[0]))
            else:
                counts['nosite'] += 1
                if r1.is_proper_pair:
                    counts['proper'] += 1
                    ins_len.append(infer_inslen(r1, r2, bam.lengths[0]))
    print counts
    return ins_len, ins_len_cs
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Parse name-sorted BAM for ligation junctions')
    parser.add_argument('--plot', default=False, action='store_true', help='Plot insert lengths')
    parser.add_argument('enzyme', help='Enzyme name')
    parser.add_argument('seq', help='Reference fasta')
    parser.add_argument('bam', help='BAM file sorted by name')
    args = parser.parse_args()
    try:
        # single-record fasta expected; SeqIO.read raises ValueError otherwise
        ref_seq = SeqIO.read(args.seq, 'fasta').seq
    except ValueError as er:
        # NOTE(review): execution continues after this handler with ref_seq
        # left unbound, which will raise NameError below -- probably should
        # exit here; confirm intent.
        print er.message
    with pysam.AlignmentFile(args.bam, 'rb') as bam:
        enzyme = get_enzyme_instance(args.enzyme)
        len_data = parse_bam(bam, ref_seq, enzyme)
    if args.plot:
        import matplotlib.pyplot as plt
        import numpy as np
        # proper-pair insert lengths: 25 bp bins up to 1000 bp, then open-ended
        print np.histogram(len_data[0])
        plt.hist(len_data[0], bins=(np.arange(1, 41) * 25).tolist() + [np.inf])
        plt.xlim(0, 1000)
        plt.show()
        # cutsite-associated insert lengths, same binning
        print np.histogram(len_data[1])
        plt.hist(len_data[1], bins=(np.arange(1, 41) * 25).tolist() + [np.inf])
        plt.xlim(0, 1000)
        plt.show()
| gpl-3.0 |
ilo10/scikit-learn | sklearn/datasets/lfw.py | 50 | 19048 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
# Location of the official LFW archives and metadata files.
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
# Pair-metadata files required for the Face Verification task.
TARGET_FILENAMES = [
    'pairsDevTrain.txt',
    'pairsDevTest.txt',
    'pairs.txt',
]
def scale_face(face):
    """Scale back to 0-1 range in case of normalization for plotting.

    Parameters
    ----------
    face : ndarray
        Image array with an arbitrary value range.

    Returns
    -------
    scaled : ndarray
        A copy of ``face`` shifted and scaled so its values span [0, 1].
        A constant-valued input is returned as all zeros instead of
        producing NaNs from a division by zero.
    """
    scaled = face - face.min()
    peak = scaled.max()
    # guard against a zero dynamic range (constant image)
    if peak > 0:
        scaled /= peak
    return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
    """Helper function to download any missing LFW data.

    Parameters
    ----------
    data_home : str, optional
        Cache folder for the datasets; resolved by ``get_data_home``.
    funneled : bool, optional
        Use the funneled (aligned) variant of the dataset.
    download_if_missing : bool, optional
        If False, raise IOError instead of downloading missing files.

    Returns
    -------
    (lfw_home, data_folder_path) : tuple of str
        The LFW cache root and the folder of extracted jpeg images.
    """
    data_home = get_data_home(data_home=data_home)
    lfw_home = join(data_home, "lfw_home")
    if funneled:
        archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw_funneled")
        archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
    else:
        archive_path = join(lfw_home, ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw")
        archive_url = BASE_URL + ARCHIVE_NAME
    if not exists(lfw_home):
        makedirs(lfw_home)
    # fetch the small pair-metadata text files first
    for target_filename in TARGET_FILENAMES:
        target_filepath = join(lfw_home, target_filename)
        if not exists(target_filepath):
            if download_if_missing:
                url = BASE_URL + target_filename
                logger.warning("Downloading LFW metadata: %s", url)
                urllib.urlretrieve(url, target_filepath)
            else:
                raise IOError("%s is missing" % target_filepath)
    if not exists(data_folder_path):
        if not exists(archive_path):
            if download_if_missing:
                logger.warning("Downloading LFW data (~200MB): %s", archive_url)
                urllib.urlretrieve(archive_url, archive_path)
            else:
                # BUGFIX: previously reported the (unrelated) last metadata
                # file path instead of the missing archive path.
                raise IOError("%s is missing" % archive_path)
        import tarfile
        logger.info("Decompressing the data archive to %s", data_folder_path)
        tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
        # free the disk space taken by the archive once extracted
        remove(archive_path)
    return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
    """Internally used to load images.

    Decodes each jpeg in *file_paths* into a float32 array scaled to
    [0.0, 1.0], optionally cropped with *slice_*, resized by the *resize*
    ratio, and averaged to gray levels when *color* is False.
    """
    # Try to import imread and imresize from PIL. We do this here to prevent
    # the whole sklearn.datasets module from depending on PIL.
    # NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.2;
    # this code targets older SciPy releases -- confirm the pinned version.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
        from scipy.misc import imresize
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL)"
                          " is required to load data from jpeg files")
    # compute the portion of the images to load to respect the slice_ parameter
    # given by the caller
    default_slice = (slice(0, 250), slice(0, 250))
    if slice_ is None:
        slice_ = default_slice
    else:
        # fill any missing (None) slice component from the default
        slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)
    # allocate some contiguous memory to host the decoded image slices
    n_faces = len(file_paths)
    if not color:
        faces = np.zeros((n_faces, h, w), dtype=np.float32)
    else:
        faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
    # iterate over the collected file path to load the jpeg files as numpy
    # arrays
    for i, file_path in enumerate(file_paths):
        if i % 1000 == 0:
            logger.info("Loading face #%05d / %05d", i + 1, n_faces)
        face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
        face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats
        if resize is not None:
            face = imresize(face, resize)
        if not color:
            # average the color channels to compute a gray levels
            # representation
            face = face.mean(axis=2)
        faces[i, ...] = face
    return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
                      min_faces_per_person=0):
    """Perform the actual data loading for the lfw people dataset

    This operation is meant to be cached by a joblib wrapper.
    Returns (faces, target, target_names).
    """
    # scan the data folder content to retain people with more than
    # `min_faces_per_person` face pictures
    person_names, file_paths = [], []
    for person_name in sorted(listdir(data_folder_path)):
        folder_path = join(data_folder_path, person_name)
        if not isdir(folder_path):
            continue
        paths = [join(folder_path, f) for f in listdir(folder_path)]
        n_pictures = len(paths)
        if n_pictures >= min_faces_per_person:
            # folder names use underscores in place of spaces
            person_name = person_name.replace('_', ' ')
            person_names.extend([person_name] * n_pictures)
            file_paths.extend(paths)
    n_faces = len(file_paths)
    if n_faces == 0:
        raise ValueError("min_faces_per_person=%d is too restrictive" %
                         min_faces_per_person)
    # integer class labels: index of each name in the sorted unique names
    target_names = np.unique(person_names)
    target = np.searchsorted(target_names, person_names)
    faces = _load_imgs(file_paths, slice_, color, resize)
    # shuffle the faces with a deterministic RNG scheme to avoid having
    # all faces of the same person in a row, as it would break some
    # cross validation and learning algorithms such as SGD and online
    # k-means that make an IID assumption
    indices = np.arange(n_faces)
    np.random.RandomState(42).shuffle(indices)
    faces, target = faces[indices], target[indices]
    return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
                     min_faces_per_person=0, color=False,
                     slice_=(slice(70, 195), slice(78, 172)),
                     download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) people dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:
    http://vis-www.cs.umass.edu/lfw/
    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.
    The task is called Face Recognition (or Identification): given the
    picture of a face, find the name of the person given a training set
    (gallery).
    The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 74.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.
    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.
    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.
    min_faces_per_person : int, optional, default None
        The extracted dataset will only retain pictures of people that have at
        least `min_faces_per_person` different pictures.
    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background
    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    dataset : dict-like object with the following attributes:
    dataset.data : numpy array of shape (13233, 2914)
        Each row corresponds to a ravelled face image of original size 62 x 47
        pixels. Changing the ``slice_`` or resize parameters will change the shape
        of the output.
    dataset.images : numpy array of shape (13233, 62, 47)
        Each row is a face image corresponding to one of the 5749 people in
        the dataset. Changing the ``slice_`` or resize parameters will change the shape
        of the output.
    dataset.target : numpy array of shape (13233,)
        Labels associated to each face image. Those labels range from 0-5748
        and correspond to the person IDs.
    dataset.DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    # ensure the archive and metadata are present locally (downloading if allowed)
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading LFW people faces from %s', lfw_home)
    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_people)
    # load and memoize the faces as np arrays
    faces, target, target_names = load_func(
        data_folder_path, resize=resize,
        min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
    # pack the results as a Bunch instance
    return Bunch(data=faces.reshape(len(faces), -1), images=faces,
                 target=target, target_names=target_names,
                 DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
                     color=False, resize=None):
    """Perform the actual data loading for the LFW pairs dataset

    This operation is meant to be cached by a joblib wrapper.

    Parameters
    ----------
    index_file_path : str
        Path to the tab-separated pairs metadata file: 3 fields describe a
        same-person pair, 4 fields a different-person pair.
    data_folder_path : str
        Root folder containing one sub-folder of jpeg files per person.
    slice_, color, resize
        Forwarded to ``_load_imgs``.

    Returns
    -------
    (pairs, target, target_names)
    """
    # parse the index file to find the number of pairs to be able to allocate
    # the right amount of memory before starting to decode the jpeg files
    with open(index_file_path, 'rb') as index_file:
        split_lines = [ln.strip().split(b('\t')) for ln in index_file]
    pair_specs = [sl for sl in split_lines if len(sl) > 2]
    n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory
    # BUGFIX: np.int was removed in NumPy 1.24; the builtin int is equivalent.
    target = np.zeros(n_pairs, dtype=int)
    file_paths = list()
    for i, components in enumerate(pair_specs):
        if len(components) == 3:
            # 3 fields: two pictures of the same person
            target[i] = 1
            pair = (
                (components[0], int(components[1]) - 1),
                (components[0], int(components[2]) - 1),
            )
        elif len(components) == 4:
            # 4 fields: pictures of two different persons
            target[i] = 0
            pair = (
                (components[0], int(components[1]) - 1),
                (components[2], int(components[3]) - 1),
            )
        else:
            raise ValueError("invalid line %d: %r" % (i + 1, components))
        for j, (name, idx) in enumerate(pair):
            try:
                person_folder = join(data_folder_path, name)
            except TypeError:
                # Python 3: the name is bytes and must be decoded first
                person_folder = join(data_folder_path, str(name, 'UTF-8'))
            filenames = list(sorted(listdir(person_folder)))
            file_path = join(person_folder, filenames[idx])
            file_paths.append(file_path)
    pairs = _load_imgs(file_paths, slice_, color, resize)
    # reshape the flat image stack into (n_pairs, 2, ...) pairs
    shape = list(pairs.shape)
    n_faces = shape.pop(0)
    shape.insert(0, 2)
    shape.insert(0, n_faces // 2)
    pairs.shape = shape
    return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
            "removed in 0.19. "  # BUGFIX: trailing space; message previously read "0.19.Use"
            "Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_people(download_if_missing=False)

    Check fetch_lfw_people.__doc__ for the documentation and parameter list.
    """
    return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
                    color=False, slice_=(slice(70, 195), slice(78, 172)),
                    download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) pairs dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:
    http://vis-www.cs.umass.edu/lfw/
    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.
    The task is called Face Verification: given a pair of two pictures,
    a binary classifier must predict whether the two images are from
    the same person.
    In the official `README.txt`_ this task is described as the
    "Restricted" task. As I am not sure as to implement the
    "Unrestricted" variant correctly, I left it as unsupported for now.
    .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
    The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 74.
    Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.

    Parameters
    ----------
    subset : optional, default: 'train'
        Select the dataset to load: 'train' for the development training
        set, 'test' for the development test set, and '10_folds' for the
        official evaluation set that is meant to be used with a 10-folds
        cross validation.
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By
        default all scikit learn data is stored in '~/scikit_learn_data'
        subfolders.
    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.
    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.
    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background
    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    The data is returned as a Bunch object with the following attributes:
    data : numpy array of shape (2200, 5828)
        Each row corresponds to 2 ravel'd face images of original size 62 x 47
        pixels. Changing the ``slice_`` or resize parameters will change the shape
        of the output.
    pairs : numpy array of shape (2200, 2, 62, 47)
        Each row has 2 face images corresponding to same or different person
        from the dataset containing 5749 people. Changing the ``slice_`` or resize
        parameters will change the shape of the output.
    target : numpy array of shape (13233,)
        Labels associated to each pair of images. The two label values being
        different persons or the same person.
    DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    # ensure the archive and metadata are present locally (downloading if allowed)
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_pairs)
    # select the right metadata file according to the requested subset
    label_filenames = {
        'train': 'pairsDevTrain.txt',
        'test': 'pairsDevTest.txt',
        '10_folds': 'pairs.txt',
    }
    if subset not in label_filenames:
        raise ValueError("subset='%s' is invalid: should be one of %r" % (
            subset, list(sorted(label_filenames.keys()))))
    index_file_path = join(lfw_home, label_filenames[subset])
    # load and memoize the pairs as np arrays
    pairs, target, target_names = load_func(
        index_file_path, data_folder_path, resize=resize, color=color,
        slice_=slice_)
    # pack the results as a Bunch instance
    return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
                 target=target, target_names=target_names,
                 DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
            "removed in 0.19. "  # BUGFIX: trailing space; message previously read "0.19.Use"
            "Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_pairs(download_if_missing=False)

    Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
    """
    return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
meerkat-code/meerkat_api | meerkat_api/resources/indicators.py | 1 | 6821 | import pandas as pd
import numpy as np
from dateutil.relativedelta import relativedelta
from flask_restful import Resource
from flask import request
from sqlalchemy import or_, Float
from meerkat_api.extensions import db, api
from meerkat_api.util import series_to_json_dict
from meerkat_analysis.indicators import count_over_count, count, grouped_indicator
from meerkat_abacus.model import Data
from meerkat_api.authentication import authenticate
import meerkat_abacus.util.epi_week as ew
import time
import datetime
import logging
def prepare_indicator_output(analysis_output, mult_factor):
    """
    Takes the output from the analysis and constructs the correct output.

    :param analysis_output: tuple of (cumulative value, timeline pandas Series)
    :param mult_factor: multiplier applied to the cumulative value and timeline
    :return: dict with keys "cummulative", "timeline", "current", "previous"
        and "name"
    """
    indicator_data = dict()
    cummulative = analysis_output[0]
    if isinstance(cummulative, (np.generic, np.ndarray)):
        # BUGFIX: np.asscalar() was removed in NumPy 1.23; .item() is the
        # documented replacement and also handles size-1 arrays.
        cummulative = cummulative.item()
    if np.isnan(cummulative):
        cummulative = 0
    timeline = analysis_output[1] * mult_factor
    indicator_data["cummulative"] = cummulative * mult_factor
    indicator_data["timeline"] = series_to_json_dict(timeline)
    # last and second-to-last timeline points, as plain floats
    indicator_data["current"] = float(timeline.iloc[-1])
    indicator_data["previous"] = float(timeline.iloc[-2])
    indicator_data["name"] = "Name is not passed to the API!"
    return indicator_data
class Indicators(Resource):
    """
    Return a value and a timeline of an indicator specified by a list of
    variables and flags.
    Args: \n
       flags: A list containing char flags defining operations on variables.
           `d` - denominator of an indicator, `n` - numerator, `v` -
           additional variable to restrict query. `r` - restrict `
           count_over_count` query if set to `1`\n
       variables: A list of variables id to which flags correspond\n
       location: location id
    Returns:\n
        indicator_data:
           {cummulative: cummulative, timeline: timeline, current: current}\n
    """
    decorators = [authenticate]
    def get(self, flags, variables, location, start_date=None,
            end_date=None, current_year=None):
        # NOTE(review): the `current_year` path parameter is immediately
        # shadowed by the query-string value below, so the URL segment is
        # effectively unused.
        current_year = request.args.get('current_year')
        group_by_level = request.args.get('group_by_level')
        # Default start date: start of the current epi year, or one year ago.
        if not start_date:
            if current_year == "1":
                this_year = datetime.datetime.now().year
                start_date = ew.epi_year_start_date_by_year(this_year).isoformat()
            else:
                one_year_ago = datetime.datetime.now().date() - relativedelta(years=1)
                start_date = one_year_ago.isoformat()
        # NOTE(review): `s` is a leftover timing probe that is never read.
        s = time.time()
        mult_factor = 1
        count_over = False
        restricted_var = []
        # Pair each flag character with its variable id, positionally.
        variablesList = variables.split(',')
        flagsList = flags.split(',')
        operations = list(zip(flagsList, variablesList))
        for op in operations:
            if op[0] == "d":
                count_over = True
                denominator = op[1]
            if op[0] == "n":
                numerator = op[1]
            if op[0] == "v":
                restricted_var.append(op[1])
            if op[0] == "m":
                mult_factor = int(op[1])
        # Limit to location and numerator variable
        # (match the id against every level of the location hierarchy).
        conditions = [or_(
            loc == location
            for loc in (Data.country, Data.zone,
                        Data.region, Data.district,
                        Data.clinic))
        ]
        conditions += [Data.date >= start_date]
        # Limit to given restrict variables
        for res_var in restricted_var:
            conditions.append(Data.variables.has_key(res_var))
        # Add denominator
        try:
            # NOTE(review): if the flags omit "n" (or "d" with count_over),
            # `numerator`/`denominator` are never bound and the NameError is
            # deliberately caught by the except clause below.
            if count_over:
                if denominator is None or numerator is None:
                    return "Need both denominator and numerator"
                conditions.append(Data.variables.has_key(denominator))
                # Database query
                data = pd.read_sql(
                    db.session.query(Data.region, Data.district, Data.clinic,
                                     Data.date,
                                     Data.variables[numerator].astext.cast(Float).label(numerator),
                                     Data.variables[denominator].astext.cast(Float).label(denominator)
                                     ).filter(
                        *conditions).statement, db.engine)
            else:
                conditions.append(Data.variables.has_key(numerator))
                data = pd.read_sql(
                    db.session.query(
                        Data.region, Data.district, Data.clinic,
                        Data.date, Data.variables[numerator].label(numerator)
                    ).filter(*conditions).statement, db.session.bind)
            data = data.fillna(0)
            if data.empty:
                logging.warning("Indicators: No records!!!")
                return {
                    "timeline": {},
                    "cummulative": 0,
                    "current": 0,
                    "previous": 0
                }
            # Call meerkat_analysis
            if group_by_level:
                # Grouped variant returns a dict keyed by group; convert each
                # group's result separately.
                if count_over:
                    analysis_output = grouped_indicator(
                        data, count_over_count, group_by_level,
                        numerator, denominator, start_date, end_date
                    )
                else:
                    analysis_output = grouped_indicator(
                        data, count, group_by_level, numerator,
                        start_date, end_date
                    )
                indicator_data = {}
                for key in analysis_output:
                    indicator_data[str(key)] = prepare_indicator_output(
                        analysis_output[key], mult_factor
                    )
                return indicator_data
            else:
                if count_over:
                    analysis_output = count_over_count(
                        data, numerator, denominator, start_date, end_date
                    )
                else:
                    analysis_output = count(
                        data, numerator, start_date, end_date
                    )
                return prepare_indicator_output(analysis_output, mult_factor)
        except (RuntimeError, TypeError, NameError, IndexError) as err:
            logging.error(err)
            logging.error("Not enough data avaliable to show the indicator")
            return {
                "timeline": [],
                "cummulative": 0,
                "current": 0,
                "previous": 0
            }
# Expose the indicator endpoint both with and without an explicit date range.
api.add_resource(
    Indicators, "/indicators/<flags>/<variables>/<location>",
    "/indicators/<flags>/<variables>/<location>/<start_date>/<end_date>"
)
| mit |
monkeypants/MAVProxy | setup.py | 1 | 3872 | from setuptools import setup
import os, platform
version = "1.8.2"
def package_files(directory):
    """Return every file under *directory* as a path prefixed with '..'.

    The '..'-relative form is what setuptools' package_data expects when
    the data lives outside the package directory proper.
    """
    collected = []
    for root, _dirs, files in os.walk(directory):
        collected.extend(os.path.join('..', root, name) for name in files)
    return collected
# Data files shipped inside the MAVProxy packages (map tiles/icons, the mmap
# web app, joystick definitions, magical calibration assets, graph layouts).
package_data = ['modules/mavproxy_map/data/*.jpg',
                'modules/mavproxy_map/data/*.png',
                'modules/mavproxy_mmap/mmap_app/*',
                'modules/mavproxy_joystick/joysticks/*.yml',
                'modules/mavproxy_magical/data/*.mtl',
                'modules/mavproxy_magical/data/*.obj',
                'tools/graphs/*.xml',
                ]
# The cesium web app is a whole directory tree, so collect it recursively.
package_data.extend(package_files('MAVProxy/modules/mavproxy_cesium/app'))
# note that we do not include all the real dependencies here (like matplotlib etc)
# as that breaks the pip install. It seems that pip is not smart enough to
# use the system versions of these dependencies, so it tries to download and install
# large numbers of modules like numpy etc which may be already installed
requirements=['pymavlink>=2.3.1',
              'pyserial>=3.0']
if platform.system() == "Darwin":
    # on MacOS we can have a more complete requirements list
    requirements.extend(['billiard>=3.5.0',
                         'gnureadline',
                         'matplotlib',
                         'numpy',
                         'opencv-python',
                         'lxml',
                         'future',
                         'wxPython'])
setup(name='MAVProxy',
      version=version,
      zip_safe=True,
      description='MAVProxy MAVLink ground station',
      long_description='''A MAVLink protocol proxy and ground station. MAVProxy
is oriented towards command line operation, and is suitable for embedding in
small autonomous vehicles or for using on ground control stations. It also
features a number of graphical tools such as a slipmap for satellite mapping
view of the vehicles location, and status console and several useful vehicle
control modules. MAVProxy is extensible via a modules system - see the modules
subdirectory for some example modules. MAVProxy was developed by CanberraUAV
for use in the 2012 Outback Challenge, and includes a module for the
CanberraUAV search and rescue system. See
http://ardupilot.github.io/MAVProxy/ for more information
on how to use MAVProxy.''',
      url='https://github.com/ArduPilot/MAVProxy',
      author='Andrew Tridgell',
      author_email='andrew@tridgell.net',
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
          'Operating System :: OS Independent',
          'Programming Language :: Python :: 2.7',
          'Topic :: Scientific/Engineering'],
      license='GPLv3',
      packages=['MAVProxy',
                'MAVProxy.modules',
                'MAVProxy.modules.mavproxy_map',
                'MAVProxy.modules.mavproxy_mmap',
                'MAVProxy.modules.mavproxy_misseditor',
                'MAVProxy.modules.mavproxy_smartcamera',
                'MAVProxy.modules.mavproxy_cesium',
                'MAVProxy.modules.mavproxy_joystick',
                'MAVProxy.modules.mavproxy_magical',
                'MAVProxy.modules.lib',
                'MAVProxy.modules.lib.ANUGA',
                'MAVProxy.modules.lib.MacOS',
                'MAVProxy.modules.lib.optparse_gui'],
      install_requires=requirements,
      # Entry points installed as console scripts.
      scripts=['MAVProxy/mavproxy.py',
               'MAVProxy/tools/mavflightview.py',
               'MAVProxy/tools/MAVExplorer.py',
               'MAVProxy/modules/mavproxy_map/mp_slipmap.py',
               'MAVProxy/modules/mavproxy_map/mp_tile.py'],
      package_data={'MAVProxy':
                    package_data}
      )
| gpl-3.0 |
seckcoder/lang-learn | python/sklearn/sklearn/decomposition/__init__.py | 2 | 1166 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA, ProbabilisticPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .fastica_ import FastICA, fastica
from .dict_learning import dict_learning, dict_learning_online, sparse_encode,\
DictionaryLearning, MiniBatchDictionaryLearning,\
SparseCoder
from .factor_analysis import FactorAnalysis
# Public API of sklearn.decomposition: estimator classes plus the
# function-style helpers (dict_learning*, fastica, sparse_encode).
__all__ = ['DictionaryLearning',
           'FastICA',
           'KernelPCA',
           'MiniBatchDictionaryLearning',
           'MiniBatchSparsePCA',
           'NMF',
           'PCA',
           'ProbabilisticPCA',
           'ProjectedGradientNMF',
           'RandomizedPCA',
           'SparseCoder',
           'SparsePCA',
           'dict_learning',
           'dict_learning_online',
           'fastica',
           'sparse_encode',
           'FactorAnalysis']
| unlicense |
kyleabeauchamp/FitEnsemble | fitensemble/nmr_tools/chemical_shift_readers.py | 1 | 2854 | import pandas as pd
import string
import numpy as np
""" TO DO: implement shiftx2 parser."""
def read_sparta_tab(filename, skiprows):
    """Parse one SPARTA+ chemical-shift table.

    Args:
        filename: path to a whitespace-delimited SPARTA+ .tab file.
        skiprows: number of header lines to skip.

    Returns:
        DataFrame of predicted shifts indexed by the MultiIndex
        (experiment="CS", resid, name), with a single "shift" column.
    """
    # The Python-2-only `string.split(...)` / `string.lower(...)` module
    # functions were removed in Python 3; use the str methods instead.
    names = ("RESID RESNAME ATOMNAME SS_SHIFT SHIFT RC_SHIFT "
             "HM_SHIFT EF_SHIFT SIGMA").split()
    # sep=r"\s+" (raw string) replaces the zero-width, non-raw "\s*" that
    # only old pandas tolerated; pd.read_csv replaces the private
    # pd.io.parsers path.
    x = pd.read_csv(filename, skiprows=skiprows, header=None, names=names,
                    sep=r"\s+")
    x.rename(columns=str.lower, inplace=True)  # lowercase column names
    x.rename(columns={"atomname": "name"}, inplace=True)
    x["experiment"] = "CS"
    # SPARTA calls the amide proton "HN"; the rest of the pipeline uses "H".
    x["name"] = x["name"].map(lambda s: s.replace("HN", "H"))
    # pivot_table's `rows=` keyword was removed long ago; `index=` is the
    # modern spelling.
    x = x.pivot_table(index=["experiment", "resid", "name"], values=["shift"])
    return x
def read_all_sparta(filenames, skiprows):
    """Stack SPARTA+ shift tables from many frames into one DataFrame.

    Rows are frames (one per input file); columns follow the MultiIndex of
    the first file's parsed table.
    """
    # Parse the first file once just to learn the measurement layout.
    template = read_sparta_tab(filenames[0], skiprows)
    frame = pd.DataFrame(np.zeros((len(filenames), template.shape[0])),
                         columns=template.index)
    for row, name in enumerate(filenames):
        frame.iloc[row] = read_sparta_tab(name, skiprows)["shift"]
    return frame
def read_ppm_data(filename):
    """Read PPM chemical-shift predictions into a frames-by-shifts DataFrame.

    Args:
        filename: whitespace-delimited PPM output; columns are
            resid, resname, atom name, a skipped column, then one shift
            value per trajectory frame.

    Returns:
        DataFrame with one row per frame and MultiIndex columns
        (experiment="CS", resid, name).
    """
    # sep=r"\s+" (raw string) replaces the zero-width, non-raw "\s*" that
    # only old pandas tolerated; pd.read_csv replaces the private
    # pd.io.parsers path.
    x = pd.read_csv(filename, header=None, sep=r"\s+")
    res_id = x.iloc[:, 0]
    atom_name = x.iloc[:, 2]
    # Column 3 is skipped; per-frame predicted values start at column 4.
    values = x.iloc[:, 4:].values
    expt = ["CS"] * len(res_id)
    indices = pd.MultiIndex.from_arrays((expt, res_id, atom_name),
                                        names=("experiment", "resid", "name"))
    # Transpose so rows become frames and columns the labelled shifts.
    return pd.DataFrame(values.T, columns=indices)
def read_shiftx2_intermediate(directory):
    """Assemble a chemical-shift DataFrame from intermediate ShiftX2 files.

    Expects `shifts_atoms.txt` (atom names), `shifts_resid.dat` (residue
    ids) and `shifts.npz` (frames-by-measurements array) inside *directory*.
    Returns a DataFrame whose columns carry the MultiIndex
    (experiment="CS", resid, name).
    """
    atoms = np.loadtxt(directory + "/shifts_atoms.txt", dtype=str)
    resids = np.loadtxt(directory + "/shifts_resid.dat", dtype=int)
    values = np.load(directory + "/shifts.npz")["arr_0"]
    columns = pd.MultiIndex.from_arrays(
        (["CS"] * len(resids), resids, atoms),
        names=("experiment", "resid", "name"))
    return pd.DataFrame(values, columns=columns)
def read_shiftx2(filename):
    """Parse a ShiftX2 CSV prediction file (columns NUM,RES,ATOMNAME,SHIFT).

    Returns:
        DataFrame of shifts indexed by the MultiIndex
        (experiment="CS", resid, name), with a single "shift" column.
    """
    x = pd.read_csv(filename)  # NUM,RES,ATOMNAME,SHIFT
    # The Python-2-only `string.lower(...)` module function was removed in
    # Python 3; `str.lower` is the direct replacement.
    x.rename(columns=str.lower, inplace=True)
    x.rename(columns={"num": "resid", "atomname": "name"}, inplace=True)
    x["experiment"] = "CS"
    # pivot_table's `rows=` keyword was removed long ago; `index=` is the
    # modern spelling.  Passing values="shift" keeps the non-numeric "res"
    # column out of the aggregation (it would break under recent pandas).
    x = x.pivot_table(index=["experiment", "resid", "name"], values="shift")
    return x
def read_all_shiftx2(filenames):
    """Stack ShiftX2 predictions from many frames into one DataFrame.

    Rows are frames (one per input file); columns follow the MultiIndex of
    the first file's parsed table.
    """
    # Parse the first file once just to learn the measurement layout.
    template = read_shiftx2(filenames[0])
    frame = pd.DataFrame(np.zeros((len(filenames), template.shape[0])),
                         columns=template.index)
    for row, name in enumerate(filenames):
        frame.iloc[row] = read_shiftx2(name)["shift"]
    return frame
| gpl-3.0 |
ch3ll0v3k/scikit-learn | sklearn/metrics/ranking.py | 75 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
    """Area under a curve given sample points, via the trapezoidal rule.

    This is the generic helper; for the area under a ROC curve computed
    from scores, see :func:`roc_auc_score`.

    Parameters
    ----------
    x : array, shape = [n]
        x coordinates.
    y : array, shape = [n]
        y coordinates.
    reorder : boolean, optional (default=False)
        If True, sort the points by x (ties broken by y), as appropriate
        for an ROC curve.  A non-ascending curve then gives a wrong result.

    Returns
    -------
    auc : float

    See also
    --------
    roc_auc_score, precision_recall_curve
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)
    if x.shape[0] < 2:
        raise ValueError('At least 2 points are needed to compute'
                         ' area under curve, but x.shape = %s' % x.shape)
    if reorder:
        # Sort by x, breaking ties with y, so the curve is ascending.
        order = np.lexsort((y, x))
        x, y = x[order], y[order]
        direction = 1
    else:
        dx = np.diff(x)
        if not np.any(dx < 0):
            direction = 1
        elif np.all(dx <= 0):
            # Uniformly non-increasing x: integrate backwards and flip sign.
            direction = -1
        else:
            raise ValueError("Reordering is not turned on, and "
                             "the x array is not increasing: %s" % x)
    return direction * np.trapz(y, x)
def average_precision_score(y_true, y_score, average="macro",
                            sample_weight=None):
    """Average precision (AP): area under the precision-recall curve.

    Restricted to binary or multilabel-indicator classification targets.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        Averaging strategy across labels ('micro': global, 'macro':
        unweighted mean, 'weighted': support-weighted mean, 'samples':
        per-instance mean); ``None`` returns the per-class scores.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    average_precision : float

    See also
    --------
    roc_auc_score : Area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds
    """
    def _single_label_ap(y_true, y_score, sample_weight=None):
        # AP for one binary problem: trapezoidal area under the P-R curve.
        precision, recall, _ = precision_recall_curve(
            y_true, y_score, sample_weight=sample_weight)
        return auc(recall, precision)
    return _average_binary_score(_single_label_ap, y_true, y_score,
                                 average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
    """Area Under the ROC Curve (AUC) from prediction scores.

    Restricted to binary or multilabel-indicator classification targets.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        Averaging strategy across labels ('micro': global, 'macro':
        unweighted mean, 'weighted': support-weighted mean, 'samples':
        per-instance mean); ``None`` returns the per-class scores.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    auc : float

    See also
    --------
    average_precision_score : Area under the precision-recall curve
    roc_curve : Compute Receiver operating characteristic (ROC)
    """
    def _single_label_auc(y_true, y_score, sample_weight=None):
        # AUC is undefined when only one class is present.
        if len(np.unique(y_true)) != 2:
            raise ValueError("Only one class present in y_true. ROC AUC score "
                             "is not defined in that case.")
        fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight)
        return auc(fpr, tpr, reorder=True)
    return _average_binary_score(
        _single_label_auc, y_true, y_score, average,
        sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """True/false positive counts per binary classification threshold.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification.
    y_score : array, shape = [n_samples]
        Estimated probabilities or decision function.
    pos_label : int, optional (default=None)
        The label of the positive class.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fps : array, shape = [n_thresholds]
        False positives with score >= thresholds[i]; fps[-1] is the total
        count of negatives (so true negatives are fps[-1] - fps).
    tps : array, shape = [n_thresholds := len(np.unique(y_score))]
        True positives with score >= thresholds[i]; tps[-1] is the total
        count of positives (so false negatives are tps[-1] - tps).
    thresholds : array, shape = [n_thresholds]
        Distinct score values, in decreasing order.
    """
    check_consistent_length(y_true, y_score)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
    # Without an explicit pos_label the labels must already be binary,
    # encoded as {0, 1} or {-1, 1} (possibly with one class absent).
    classes = np.unique(y_true)
    if (pos_label is None and
        not (np.all(classes == [0, 1]) or
             np.all(classes == [-1, 1]) or
             np.all(classes == [0]) or
             np.all(classes == [-1]) or
             np.all(classes == [1]))):
        raise ValueError("Data is not binary and pos_label is not specified")
    elif pos_label is None:
        pos_label = 1.
    y_true = (y_true == pos_label)
    # Walk the samples by decreasing score; the stable mergesort keeps
    # tied scores grouped together.
    order = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[order]
    y_true = y_true[order]
    weight = sample_weight[order] if sample_weight is not None else 1.
    # Indices of the last occurrence of each distinct score, plus the end
    # of the array; isclose guards against float-roundoff near-duplicates
    # producing spurious repeated thresholds.
    distinct_value_indices = np.where(np.logical_not(isclose(
        np.diff(y_score), 0)))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
    # Accumulate true positives with decreasing threshold.
    tps = (y_true * weight).cumsum()[threshold_idxs]
    if sample_weight is not None:
        fps = weight.cumsum()[threshold_idxs] - tps
    else:
        fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
                           sample_weight=None):
    """Precision-recall pairs over all decision thresholds (binary task).

    Precision is ``tp / (tp + fp)`` and recall is ``tp / (tp + fn)``.  The
    final returned pair is precision 1. and recall 0., with no matching
    threshold, so the curve always reaches the x axis.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification in range {-1, 1} or {0, 1}.
    probas_pred : array, shape = [n_samples]
        Estimated probabilities or decision function.
    pos_label : int, optional (default=None)
        The label of the positive class.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        Precision of predictions with score >= thresholds[i]; last value 1.
    recall : array, shape = [n_thresholds + 1]
        Decreasing recall of predictions with score >= thresholds[i];
        last value 0.
    thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
        Increasing decision-function thresholds.
    """
    fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
                                             pos_label=pos_label,
                                             sample_weight=sample_weight)
    precision = tps / (tps + fps)
    recall = tps / tps[-1]
    # Truncate once full recall is first attained, then reverse so recall
    # decreases; append the conventional (precision=1, recall=0) endpoint.
    last_ind = tps.searchsorted(tps[-1])
    rev = slice(last_ind, None, -1)
    return np.r_[precision[rev], 1], np.r_[recall[rev], 0], thresholds[rev]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Receiver operating characteristic (ROC) curve for a binary task.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels in range {0, 1} or {-1, 1}.  If labels are not
        binary, pos_label should be explicitly given.
    y_score : array, shape = [n_samples]
        Target scores: probability estimates of the positive class or
        confidence values.
    pos_label : int
        Label considered as positive; all others are negative.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fpr : array, shape = [>2]
        Increasing false positive rates of predictions with
        score >= thresholds[i].
    tpr : array, shape = [>2]
        Increasing true positive rates of predictions with
        score >= thresholds[i].
    thresholds : array, shape = [n_thresholds]
        Decreasing thresholds; ``thresholds[0]`` is ``max(y_score) + 1``,
        a sentinel at which no instance is predicted positive.

    See also
    --------
    roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
    # Prepend the "no instances predicted" point when it is missing so the
    # curve always starts at (0, 0).
    if tps.size == 0 or fps[0] != 0:
        tps = np.r_[0, tps]
        fps = np.r_[0, fps]
        thresholds = np.r_[thresholds[0] + 1, thresholds]
    if fps[-1] <= 0:
        # No negatives at all: FPR is undefined everywhere.
        warnings.warn("No negative samples in y_true, "
                      "false positive value should be meaningless",
                      UndefinedMetricWarning)
        fpr = np.repeat(np.nan, fps.shape)
    else:
        fpr = fps / fps[-1]
    if tps[-1] <= 0:
        # No positives at all: TPR is undefined everywhere.
        warnings.warn("No positive samples in y_true, "
                      "true positive value should be meaningless",
                      UndefinedMetricWarning)
        tpr = np.repeat(np.nan, tps.shape)
    else:
        tpr = tps / tps[-1]
    return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
    """Ranking-based average precision (LRAP) for multilabel data.

    For each true label of each sample, compute the fraction of labels
    ranked at or above it that are themselves true, and average.  Scores
    are strictly greater than 0; the best value is 1.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.

    Returns
    -------
    score : float
    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    # Reject badly formatted arrays; a 2-d binary matrix is acceptable
    # even when it degenerates to a single label.
    y_type = type_of_target(y_true)
    if (y_type != "multilabel-indicator" and
            not (y_type == "binary" and y_true.ndim == 2)):
        raise ValueError("{0} format is not supported".format(y_type))
    y_true = csr_matrix(y_true)
    y_score = -y_score  # negate so that high scores get low (good) ranks
    n_samples, n_labels = y_true.shape
    out = 0.
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        relevant = y_true.indices[start:stop]
        if relevant.size == 0 or relevant.size == n_labels:
            # All labels relevant or none: ranking is meaningless and the
            # score is defined to be 1.
            out += 1.
            continue
        scores_i = y_score[i]
        rank = rankdata(scores_i, 'max')[relevant]
        L = rankdata(scores_i[relevant], 'max')
        out += (L / rank).mean()
    return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
    """Coverage error: ranking depth needed to cover all true labels.

    Ties in ``y_score`` are broken pessimistically, by giving the maximal
    rank that would have been assigned to all tied values.  The best value
    equals the average number of labels per sample in ``y_true``.

    Parameters
    ----------
    y_true : array, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    coverage_error : float
    """
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)
    y_type = type_of_target(y_true)
    if y_type != "multilabel-indicator":
        raise ValueError("{0} format is not supported".format(y_type))
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    # Lowest score among the true labels of each sample; masked for
    # samples that have no true labels at all.
    y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
    y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
    coverage = (y_score >= y_min_relevant).sum(axis=1)
    coverage = coverage.filled(0)  # label-free samples cover nothing
    return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
    """Ranking loss: weighted count of incorrectly ordered label pairs.

    For each sample, count label pairs where an irrelevant label outranks
    a relevant one, normalised by the product of relevant and irrelevant
    label counts.  Zero is the best achievable value.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float
    """
    y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)
    y_type = type_of_target(y_true)
    if y_type != "multilabel-indicator":
        raise ValueError("{0} format is not supported".format(y_type))
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    n_samples, n_labels = y_true.shape
    y_true = csr_matrix(y_true)
    loss = np.zeros(n_samples)
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        # Bin the sample's scores; count true / false labels per score bin.
        unique_scores, unique_inverse = np.unique(y_score[i],
                                                  return_inverse=True)
        true_at_reversed_rank = bincount(
            unique_inverse[y_true.indices[start:stop]],
            minlength=len(unique_scores))
        all_at_reversed_rank = bincount(unique_inverse,
                                        minlength=len(unique_scores))
        false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
        # With scores in sorted bins, the cumulative count of true labels
        # at lower scores gives, for each false label, the number of
        # incorrectly ordered pairs — all in linear time.
        loss[i] = np.dot(true_at_reversed_rank.cumsum(),
                         false_at_reversed_rank)
    n_positives = count_nonzero(y_true, axis=1)
    with np.errstate(divide="ignore", invalid="ignore"):
        loss /= ((n_labels - n_positives) * n_positives)
    # Samples with no positive or no negative labels are defined to be
    # correctly ranked, i.e. contribute zero loss.
    loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
    return np.average(loss, weights=sample_weight)
| bsd-3-clause |
code-sauce/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py | 18 | 12209 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [
j % self._max for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [
j % self._max for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [
j % self._max for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
if self._epoch == self._num_epochs:
# trim this batch, so as not to overshoot the last epoch.
batch_end_inclusive = integer_indexes.index(self._epoch_end)
integer_indexes = integer_indexes[:(batch_end_inclusive + 1)]
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
def enqueue_data(data,
                 capacity,
                 shuffle=False,
                 min_after_dequeue=None,
                 num_threads=1,
                 seed=None,
                 name="enqueue_input",
                 enqueue_size=1,
                 num_epochs=None):
  """Creates a queue filled from a numpy array or pandas `DataFrame`.
  Returns a queue filled with the rows of the given (`OrderedDict` of) array
  or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
  `Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
  numpy arrays, the first enqueued `Tensor` contains the row number.
  Args:
    data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or pandas
      `DataFrame` that will be read into the queue.
    capacity: the capacity of the queue.
    shuffle: whether or not to shuffle the rows of the array.
    min_after_dequeue: minimum number of elements that can remain in the queue
      after a dequeue operation. Only used when `shuffle` is true. If not set,
      defaults to `capacity` / 4.
    num_threads: number of threads used for reading and enqueueing.
    seed: used to seed shuffling and reader starting points.
    name: a scope name identifying the data.
    enqueue_size: the number of rows to enqueue per step.
    num_epochs: limit enqueuing to a specified number of epochs, if provided.
  Returns:
    A queue filled with the rows of the given (`OrderedDict` of) array or
    `DataFrame`.
  Raises:
    TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
      arrays or a numpy `ndarray`.
  """
  with ops.name_scope(name):
    # Select queue dtypes/shapes and the matching feed-fn class based on the
    # container type. Column 0 is always the row index (or DataFrame index).
    if isinstance(data, np.ndarray):
      types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
      queue_shapes = [(), data.shape[1:]]
      get_feed_fn = _ArrayFeedFn
    elif isinstance(data, collections.OrderedDict):
      types = [dtypes.int64] + [
          dtypes.as_dtype(col.dtype) for col in data.values()
      ]
      queue_shapes = [()] + [col.shape[1:] for col in data.values()]
      get_feed_fn = _OrderedDictNumpyFeedFn
    elif HAS_PANDAS and isinstance(data, pd.DataFrame):
      types = [
          dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
      ]
      # DataFrame columns are enqueued as scalars per row.
      queue_shapes = [() for _ in types]
      get_feed_fn = _PandasFeedFn
    else:
      raise TypeError(
          "data must be either a numpy array or pandas DataFrame if pandas is "
          "installed; got {}".format(type(data).__name__))
    # TODO(jamieas): TensorBoard warnings for all warnings below once available.
    if num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with num_epochs and num_threads > 1. "
          "num_epochs is applied per thread, so this will produce more "
          "epochs than you probably intend. "
          "If you want to limit epochs, use one thread.")
    if shuffle and num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with shuffle=True, num_threads > 1, and "
          "num_epochs. This will create multiple threads, all reading the "
          "array/dataframe in order adding to the same shuffling queue; the "
          "results will likely not be sufficiently shuffled.")
    if not shuffle and num_threads > 1:
      logging.warning(
          "enqueue_data was called with shuffle=False and num_threads > 1. "
          "This will create multiple threads, all reading the "
          "array/dataframe in order. If you want examples read in order, use"
          " one thread; if you want multiple threads, enable shuffling.")
    if shuffle:
      min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
                              min_after_dequeue)
      queue = data_flow_ops.RandomShuffleQueue(
          capacity,
          min_after_dequeue,
          dtypes=types,
          shapes=queue_shapes,
          seed=seed)
    else:
      min_after_dequeue = 0  # just for the summary text
      queue = data_flow_ops.FIFOQueue(
          capacity, dtypes=types, shapes=queue_shapes)
    enqueue_ops = []
    feed_fns = []
    for i in range(num_threads):
      # Note the placeholders have no shapes, so they will accept any
      # enqueue_size. enqueue_many below will break them up.
      placeholders = [array_ops.placeholder(t) for t in types]
      enqueue_ops.append(queue.enqueue_many(placeholders))
      # Give each thread a distinct, deterministic seed derived from `seed`.
      seed_i = None if seed is None else (i + 1) * seed
      feed_fns.append(
          get_feed_fn(
              placeholders,
              data,
              enqueue_size,
              random_start=shuffle,
              seed=seed_i,
              num_epochs=num_epochs))
    # Register a feeding queue runner so the enqueue threads are started by
    # tf.train.start_queue_runners alongside ordinary queue runners.
    runner = fqr.FeedingQueueRunner(
        queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
    queue_runner.add_queue_runner(runner)
    # Report queue fullness as the fraction of the usable capacity
    # (i.e. above min_after_dequeue) currently occupied.
    full = (math_ops.cast(
        math_ops.maximum(0, queue.size() - min_after_dequeue),
        dtypes.float32) * (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
                    (queue.name, min_after_dequeue,
                     capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    return queue
| apache-2.0 |
etkirsch/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
    """Target function for the interpolation demo: f(x) = x * sin(x)."""
    return np.multiply(x, np.sin(x))
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
# Seeded RNG so the shuffled subset (and thus the plot) is reproducible.
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays (n_samples, 1) as expected by sklearn
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
# Fit one pipeline per polynomial degree: expand features, then ridge-fit.
for degree in [3, 4, 5]:
    model = make_pipeline(PolynomialFeatures(degree), Ridge())
    model.fit(X, y)
    y_plot = model.predict(X_plot)
    plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
ABcDexter/python-weka-wrapper | setup.py | 2 | 3655 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# setup.py
# Copyright (C) 2014-2015 Fracpete (pythonwekawrapper at gmail dot com)
import os
from setuptools import setup
from urllib2 import urlopen, URLError, HTTPError
def download_file(url, outfile):
    """
    Downloads the file associated with the URL and saves it to the specified output file.
    Taken from here: http://stackoverflow.com/a/4028894

    :param url: the URL to download
    :type url: str
    :param outfile: the name of the output file
    :type outfile: str
    :returns: whether the download was successful
    :rtype: bool
    """
    try:
        # Open the url
        f = urlopen(url)
        try:
            print("Downloading '" + url + "' to '" + outfile + "'")
            # Open our local file for writing
            with open(outfile, "wb") as local_file:
                local_file.write(f.read())
        finally:
            # Close the URL handle explicitly instead of relying on GC.
            f.close()
    # handle errors; "except X as e" works on Python 2.6+ and 3,
    # unlike the legacy "except X, e" form.
    except HTTPError as e:
        print("HTTP Error: " + str(e.code) + " " + url)
        return False
    except URLError as e:
        print("URL Error: " + str(e.reason) + " " + url)
        return False
    return True
def download_weka():
    """
    Downloads the monolithic Weka jar from sourceforge.net if it is not
    already present locally.
    """
    jar_url = ("http://sourceforge.net/projects/weka/files/weka-3-7/3.7.12/"
               "weka-3-7-12-monolithic.jar/download")
    target = os.path.join(os.path.dirname(__file__), "python", "weka", "lib", "weka.jar")
    # Nothing to do when the jar is already in place.
    if os.path.exists(target):
        return
    if download_file(jar_url, target):
        print("Download of Weka jar successful!")
    else:
        print("Failed to download Weka jar '" + jar_url + "' to '" + target + "'!")
def ext_modules():
    """
    Initiates Weka jar download.
    """
    # NOTE: deliberately returns None (setup() accepts ext_modules=None);
    # this function exists only for the download side effect at setup time.
    download_weka()
def _read(f):
"""
Reads in the content of the file.
:param f: the file to read
:type f: str
:return: the content
:rtype: str
"""
return open(f, 'rb').read()
# Package metadata and build configuration for python-weka-wrapper.
setup(
    name="python-weka-wrapper",
    description="Python wrapper for the Weka Machine Learning Workbench",
    # Both .rst files are read as bytes and decoded once at the end.
    long_description=(
        _read('DESCRIPTION.rst') + b'\n' +
        _read('CHANGES.rst')).decode('utf-8'),
    url="https://github.com/fracpete/python-weka-wrapper",
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Programming Language :: Python',
    ],
    license='GNU General Public License version 3.0 (GPLv3)',
    # The importable packages live under the "python" directory.
    package_dir={
        '': 'python'
    },
    packages=[
        "weka",
        "weka.core",
        "weka.plot"
    ],
    # Ship the downloaded Weka jar(s) with the package.
    package_data={
        "weka": ["lib/*.jar"],
    },
    include_package_data=True,
    version="0.3.2",
    author='Peter "fracpete" Reutemann',
    author_email='pythonwekawrapper at gmail dot com',
    install_requires=[
        "javabridge>=1.0.11",
        "numpy"
    ],
    extras_require={
        'plots': ["matplotlib"],
        'graphs': ["pygraphviz", "PIL"],
    },
    # ext_modules() triggers the jar download as a side effect and
    # returns None, so no C extension modules are actually registered.
    ext_modules=ext_modules(),
)
| gpl-3.0 |
rethore/FUSED-Wake | setup.py | 1 | 2873 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# try:
# from setuptools import setup
# except ImportError:
# from distutils.core import setup
#from setuptools import setup
#from setuptools import Extension
from numpy.distutils.core import setup
from numpy.distutils.extension import Extension
import os
import glob
# Long-description sources; both files must exist at build time.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
# Runtime (and docs) dependencies.
requirements = [
    'sphinx',
    'sphinx-fortran',
    'numpy',
    'scipy',
    'pandas',
    'matplotlib',
    'PyYAML',
    'utm'
]
# Dependencies used only by the test suite.
test_requirements = [
    'tox',
    'pytest',
    'coverall',
]
# numpy.distutils' setup/Extension are used (instead of setuptools') because
# the wake models below compile Fortran sources via f2py.
setup(
    name='fusedwake',
    version='0.1.0',
    description="A collection of wind farm flow models for FUSED-Wind",
    long_description=readme + '\n\n' + history,
    author="Pierre-Elouan Rethore",
    author_email='pire@dtu.dk',
    url='https://github.com/DTUWindEnergy/FUSED-Wake',
    packages=[
        'fusedwake',
        'fusedwake.gcl',
        'fusedwake.gcl.python',
        'fusedwake.noj',
        # 'fusedwake.noj.python',
        'fusedwake.gau',
        # 'fusedwake.gau.python',
        'fusedwake.ainslie',
        # 'fusedwake.ainslie.python',
        'fusedwake.sdwm',
    ],
    package_dir={'fusedwake':
                 'fusedwake'},
    include_package_data=True,
    install_requires=requirements,
    license="GNU Affero v3",
    zip_safe=False,
    keywords='fusedwake',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU Affero v3',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    # Fortran extensions are importable as fusedwake.<model>.fortran.
    ext_package='fusedwake',
    ext_modules=[Extension('gcl.fortran',
                           glob.glob(os.path.join('fusedwake', 'gcl', 'fortran',
                                                  'GCL.f'))),
                 Extension('noj.fortran',
                           glob.glob(os.path.join('fusedwake', 'noj', 'fortran',
                                                  'NOJ.f'))),
                 Extension('noj.fortran_mod',
                           glob.glob(os.path.join('fusedwake', 'noj', 'fortran',
                                                  'Mod_NOJ.f'))),
                 Extension('gau.fortran',
                           glob.glob(os.path.join('fusedwake', 'gau', 'fortran',
                                                  'GAU.f')))],
)
| agpl-3.0 |
ThomasMiconi/nupic.research | htmresearch/frameworks/layers/l2456_model.py | 2 | 22876 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This class allows to easily create experiments using a L2456 network for
inference over objects. It uses the network API and multiple regions (raw
sensors for sensor and external input, column pooler region, extended temporal
memory region).
Here is a sample use of this class, to learn objects and infer one of them. The
object creation details are TBD.
exp = L2456Model(
name="sample",
numCorticalColumns=2,
)
# Set up objects (TBD)
objects = createObjectMachine()
# Do the learning phase
exp.learnObjects(objects, reset=True)
exp.printProfile()
# Do the inference phase for one object
exp.infer(objects[0], reset=True)
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
plotDir="plots",
)
"""
import os
import random
import collections
import inspect
import cPickle
import matplotlib.pyplot as plt
from tabulate import tabulate
from htmresearch.support.logging_decorator import LoggingDecorator
from htmresearch.support.register_regions import registerAllResearchRegions
from htmresearch.frameworks.layers.laminar_network import createNetwork
def rerunExperimentFromLogfile(logFilename):
  """
  Recreate an experiment by replaying the sequence of operations recorded in
  logFilename, returning the resulting experiment instance.
  """
  callLog = LoggingDecorator.load(logFilename)
  # The first logged entry is always the constructor call.
  firstCall = callLog[0]
  experiment = L2456Model(*firstCall[1]["args"], **firstCall[1]["kwargs"])
  # Replay every subsequent method call with its recorded arguments.
  for loggedCall in callLog[1:]:
    boundMethod = getattr(experiment, loggedCall[0])
    boundMethod(*loggedCall[1]["args"], **loggedCall[1]["kwargs"])
  return experiment
class L2456Model(object):
"""
L2456 experiment.
This experiment uses the network API to test out various properties of
inference and learning using a sensors and an L4-L2 network. For now,
we directly use the locations on the object.
"""
@LoggingDecorator()
def __init__(self,
name,
numCorticalColumns=1,
L2Overrides={},
L4Overrides={},
L5Overrides={},
L6Overrides={},
numLearningPoints=3,
seed=42,
logCalls = False
):
"""
Creates the network.
Parameters:
----------------------------
@param name (str)
Experiment name
@param numCorticalColumns (int)
Number of cortical columns in the network
@param L2Overrides (dict)
Parameters to override in the L2 region
@param L4Overrides (dict)
Parameters to override in the L4 region
@param L5Overrides (dict)
Parameters to override in the L5 region
@param L6Overrides (dict)
Parameters to override in the L6 region
@param numLearningPoints (int)
Number of times each pair should be seen to be learnt
@param logCalls (bool)
If true, calls to main functions will be logged internally. The
log can then be saved with saveLogs(). This allows us to recreate
the complete network behavior using rerunExperimentFromLogfile
which is very useful for debugging.
"""
# Handle logging - this has to be done first
self.logCalls = logCalls
registerAllResearchRegions()
self.name = name
self.numLearningPoints = numLearningPoints
self.numColumns = numCorticalColumns
self.sensorInputSize = 2048
self.numInputBits = 40
# seed
self.seed = seed
random.seed(seed)
# Get network parameters and update with overrides
self.config = {
"networkType": "L2456Columns",
"numCorticalColumns": numCorticalColumns,
"randomSeedBase": self.seed,
}
self.config.update(self.getDefaultParams())
self.config["L2Params"].update(L2Overrides)
self.config["L4Params"].update(L4Overrides)
self.config["L5Params"].update(L5Overrides)
self.config["L6Params"].update(L6Overrides)
# create network and retrieve regions
self.network = createNetwork(self.config)
self._retrieveRegions()
# will be populated during training
self.objectRepresentationsL2 = {}
self.objectRepresentationsL5 = {}
self.statistics = []
  @LoggingDecorator()
  def learnObjects(self, objects, reset=True):
    """
    Learns all provided objects, and optionally resets the network.

    The provided objects must have the canonical learning format, which is the
    following.

    objects should be a dict objectName: sensationList, where the sensationList
    is a list of sensations, and each sensation is a mapping from cortical
    column to a tuple of three SDR's respectively corresponding to the
    locationInput, the coarseSensorInput, and the sensorInput.

    The model presents each sensation for numLearningPoints iterations before
    moving on to the next sensation. Once the network has been trained on an
    object, the L2 and L5 representations for it are stored. A reset signal is
    sent whenever there is a new object if reset=True.

    An example input is as follows, assuming we are learning a simple object
    with a sequence of two sensations (with very few active bits for
    simplicity):

    objects = {
      "simple": [
        {
          # location, coarse feature, fine feature for CC0, sensation 1
          0: ( [1, 5, 10], [9, 32, 75], [6, 12, 52] ),
          # location, coarse feature, fine feature for CC1, sensation 1
          1: ( [6, 2, 15], [11, 42, 92], [7, 11, 50] ),
        },
        {
          # location, coarse feature, fine feature for CC0, sensation 2
          0: ( [2, 9, 10], [10, 35, 78], [6, 12, 52] ),
          # location, coarse feature, fine feature for CC1, sensation 2
          1: ( [1, 4, 12], [10, 32, 52], [6, 10, 52] ),
        },
      ]
    }

    In many uses cases, this object can be created by implementations of
    ObjectMachines (cf htm.research.object_machine_factory), through their
    method providedObjectsToLearn.

    Parameters:
    ----------------------------
    @param   objects (dict)
             Objects to learn, in the canonical format specified above

    @param   reset (bool)
             If set to True (which is the default value), the network will
             be reset after learning.
    """
    self._setLearningMode()

    for objectName, sensationList in objects.iteritems():

      # ignore empty sensation lists
      if len(sensationList) == 0:
        continue

      # keep track of numbers of iterations to run
      iterations = 0

      for sensations in sensationList:
        # learn each pattern multiple times
        for _ in xrange(self.numLearningPoints):
          # Enqueue the (location, coarse, fine) SDRs for every column; one
          # network step per repetition is counted via `iterations`.
          for col in xrange(self.numColumns):
            location, coarseFeature, fineFeature = sensations[col]
            self.locationInputs[col].addDataToQueue(list(location), 0, 0)
            self.coarseSensors[col].addDataToQueue(list(coarseFeature), 0, 0)
            self.sensors[col].addDataToQueue(list(fineFeature), 0, 0)
          iterations += 1

      # actually learn the objects (one run consumes all queued sensations)
      if iterations > 0:
        self.network.run(iterations)

      # update L2 and L5 representations for this object
      self.objectRepresentationsL2[objectName] = self.getL2Representations()
      self.objectRepresentationsL5[objectName] = self.getL5Representations()

      if reset:
        # send reset signal
        self._sendReset()
  @LoggingDecorator()
  def infer(self, sensationList, reset=True, objectName=None):
    """
    Infer on a given set of sensations for a single object.

    The provided sensationList is a list of sensations, and each sensation is a
    mapping from cortical column to a tuple of three SDR's respectively
    corresponding to the locationInput, the coarseSensorInput, and the
    sensorInput.

    For example, the input can look as follows, if we are inferring a simple
    object with two sensations (with very few active bits for simplicity):

    sensationList = [
      {
        # location, coarse feature, fine feature for CC0, sensation 1
        0: ( [1, 5, 10], [9, 32, 75], [6, 12, 52] ),
        # location, coarse feature, fine feature for CC1, sensation 1
        1: ( [6, 2, 15], [11, 42, 92], [7, 11, 50] ),
      },
      {
        # location, coarse feature, fine feature for CC0, sensation 2
        0: ( [2, 9, 10], [10, 35, 78], [6, 12, 52] ),
        # location, coarse feature, fine feature for CC1, sensation 2
        1: ( [1, 4, 12], [10, 32, 52], [6, 10, 52] ),
      },
    ]

    If the object is known by the caller, an object name can be specified
    as an optional argument, and must match the objects given while learning.
    This is used later when evaluating inference statistics.

    Parameters:
    ----------------------------
    @param   sensationList (list)
             Sensations to infer on, in the canonical format specified above

    @param   reset (bool)
             If set to True (which is the default value), the network will
             be reset after inference.

    @param   objectName (str)
             Name of the objects (must match the names given during learning).
    """
    self._unsetLearningMode()
    statistics = collections.defaultdict(list)

    # An unknown objectName would make the later statistics meaningless.
    if objectName is not None:
      if objectName not in self.objectRepresentationsL2:
        raise ValueError("The provided objectName was not given during"
                         " learning")

    for sensations in sensationList:

      # feed all columns with sensations, then run one network step
      for col in xrange(self.numColumns):
        location, coarseFeature, fineFeature = sensations[col]
        self.locationInputs[col].addDataToQueue(list(location), 0, 0)
        self.coarseSensors[col].addDataToQueue(list(coarseFeature), 0, 0)
        self.sensors[col].addDataToQueue(list(fineFeature), 0, 0)
      self.network.run(1)
      self._updateInferenceStats(statistics, objectName)

    if reset:
      # send reset signal
      self._sendReset()

    # save statistics
    statistics["numSteps"] = len(sensationList)
    statistics["object"] = objectName if objectName is not None else "Unknown"

    self.statistics.append(statistics)
  @LoggingDecorator()
  def sendReset(self, *args, **kwargs):
    """
    Public interface to send a reset signal to the network. This is logged.
    """
    # Delegates to the private helper, which is not wrapped by the decorator.
    self._sendReset(*args, **kwargs)
  def _sendReset(self, sequenceId=0):
    """
    Sends a reset signal to the network.

    @param sequenceId (int) identifier passed along with the reset to each
           sensor queue.
    """
    # Handle logging - this has to be done first. This hand-rolls what
    # @LoggingDecorator does, but only records the call when it originates
    # from a module other than this file (checked via the caller's filename).
    if self.logCalls:
      frame = inspect.currentframe()
      args, _, _, values = inspect.getargvalues(frame)
      values.pop('frame')
      values.pop('self')
      (_, filename,
       _, _, _, _) = inspect.getouterframes(inspect.currentframe())[1]
      if os.path.splitext(os.path.basename(__file__))[0] != \
         os.path.splitext(os.path.basename(filename))[0]:
        self.callLog.append([inspect.getframeinfo(frame)[2], values])

    # Queue a reset on every sensor of every column, then run one step so
    # the resets are actually consumed by the network.
    for col in xrange(self.numColumns):
      self.locationInputs[col].addResetToQueue(sequenceId)
      self.coarseSensors[col].addResetToQueue(sequenceId)
      self.sensors[col].addResetToQueue(sequenceId)
    self.network.run(1)
def plotInferenceStats(self,
fields,
plotDir="plots",
experimentID=0,
onePlot=True):
"""
Plots and saves the desired inference statistics.
Parameters:
----------------------------
@param fields (list(str))
List of fields to include in the plots
@param experimentID (int)
ID of the experiment (usually 0 if only one was conducted)
@param onePlot (bool)
If true, all cortical columns will be merged in one plot.
"""
# TODO: implement it once learning and inference are working
raise RuntimeError("Unimplemented method")
def getInferenceStats(self, experimentID=None):
"""
Returns the statistics for the desired experiment. If experimentID is None
return all statistics.
Parameters:
----------------------------
@param experimentID (int)
Each time you call infer() you get a new set of inference
statistics. experimentID refers to which call you want stats for
(usually 0 if only one was conducted).
"""
if experimentID is None:
return self.statistics
else:
return self.statistics[experimentID]
  def printProfile(self, reset=False):
    """
    Prints profiling information.

    Parameters:
    ----------------------------
    @param   reset (bool)
             If set to True, the profiling will be reset.
    """
    print "Profiling information for {}".format(type(self).__name__)

    # Tiny epsilon seed avoids division by zero when no region has run yet.
    totalTime = 0.000001
    for region in self.network.regions.values():
      timer = region.getComputeTimer()
      totalTime += timer.getElapsed()

    # Sort the region names
    regionNames = list(self.network.regions.keys())
    regionNames.sort()

    count = 1
    profileInfo = []
    L2Time = 0.0
    L4Time = 0.0
    for regionName in regionNames:
      region = self.network.regions[regionName]
      timer = region.getComputeTimer()
      count = max(timer.getStartCount(), count)
      profileInfo.append([region.name,
                          timer.getStartCount(),
                          timer.getElapsed(),
                          100.0 * timer.getElapsed() / totalTime,
                          timer.getElapsed() / max(timer.getStartCount(), 1)])
      # Accumulate per-layer totals for the L2/L4 summary printed below.
      if "L2Column" in regionName:
        L2Time += timer.getElapsed()
      elif "L4Column" in regionName:
        L4Time += timer.getElapsed()

    profileInfo.append(
      ["Total time", "", totalTime, "100.0", totalTime / count])
    print tabulate(profileInfo, headers=["Region", "Count",
                                         "Elapsed", "Pct of total",
                                         "Secs/iteration"],
                   tablefmt="grid", floatfmt="6.3f")
    print
    print "Total time in L2 =", L2Time
    print "Total time in L4 =", L4Time

    if reset:
      self.resetProfile()
  def resetProfile(self):
    """
    Resets the network profiling (clears all region compute timers).
    """
    self.network.resetProfiling()
  def getL4Representations(self):
    """
    Returns the active representation in L4: one set of active cell indices
    per cortical column.
    """
    return [set(column._tm.getActiveCells()) for column in self.L4Columns]
  def getL4PredictiveCells(self):
    """
    Returns the predictive cells in L4: one set of cell indices per cortical
    column.
    """
    return [set(column._tm.getPredictiveCells()) for column in self.L4Columns]
  def getL2Representations(self):
    """
    Returns a list of active cells in L2 for each column.
    """
    return [set(column._pooler.getActiveCells()) for column in self.L2Columns]
  def getL5Representations(self):
    """
    Returns a list of active cells in L5 for each column.
    """
    return [set(column._pooler.getActiveCells()) for column in self.L5Columns]
  def getL6Representations(self):
    """
    Returns the active representation in L6 (docstring previously said L4 by
    copy-paste): one set of active cell indices per cortical column.
    """
    return [set(column._tm.getActiveCells()) for column in self.L6Columns]
  def getL6PredictiveCells(self):
    """
    Returns the predictive cells in L6 (docstring previously said L4 by
    copy-paste): one set of cell indices per cortical column.
    """
    return [set(column._tm.getPredictiveCells()) for column in self.L6Columns]
def getDefaultParams(self):
    """
    Returns a good default set of parameters to use in L2456 regions.

    All widths derive from ``self.sensorInputSize``.  L4/L6 entries
    parameterize temporal-memory layers (8 cells per column); L2/L5
    entries parameterize pooler layers (4096 cells, 40-cell SDRs).
    """
    return {
        # Raw sensor encoders: output width tracks the configured input size.
        "sensorParams": {
            "outputWidth": self.sensorInputSize,
        },
        "coarseSensorParams": {
            "outputWidth": self.sensorInputSize,
        },
        "locationParams": {
            "activeBits": 41,
            "outputWidth": self.sensorInputSize,
            "radius": 2,
            "verbosity": 0,
        },
        # L4: temporal memory driven by the fine sensor.
        "L4Params": {
            "columnCount": self.sensorInputSize,
            "cellsPerColumn": 8,
            "formInternalBasalConnections": False,
            "learn": True,
            "learnOnOneCell": False,
            "initialPermanence": 0.51,
            "connectedPermanence": 0.6,
            "permanenceIncrement": 0.1,
            "permanenceDecrement": 0.02,
            "minThreshold": 10,
            "predictedSegmentDecrement": 0.002,
            "activationThreshold": 13,
            "maxNewSynapseCount": 20,
            "implementation": "etm",
        },
        # L2: pooler fed by L4 output (width = columns * cellsPerColumn).
        "L2Params": {
            "inputWidth": self.sensorInputSize * 8,
            "cellCount": 4096,
            "sdrSize": 40,
            "synPermProximalInc": 0.1,
            "synPermProximalDec": 0.001,
            "initialProximalPermanence": 0.6,
            "minThresholdProximal": 10,
            "sampleSizeProximal": 20,
            "connectedPermanenceProximal": 0.5,
            "synPermDistalInc": 0.1,
            "synPermDistalDec": 0.001,
            "initialDistalPermanence": 0.41,
            "activationThresholdDistal": 13,
            "sampleSizeDistal": 20,
            "connectedPermanenceDistal": 0.5,
            "distalSegmentInhibitionFactor": 1.5,
            "learningMode": True,
        },
        # L6: temporal memory; note the larger predictedSegmentDecrement
        # (0.004) compared to L4 (0.002).
        "L6Params": {
            "columnCount": self.sensorInputSize,
            "cellsPerColumn": 8,
            "formInternalBasalConnections": False,
            "learn": True,
            "learnOnOneCell": False,
            "initialPermanence": 0.51,
            "connectedPermanence": 0.6,
            "permanenceIncrement": 0.1,
            "permanenceDecrement": 0.02,
            "minThreshold": 10,
            "predictedSegmentDecrement": 0.004,
            "activationThreshold": 13,
            "maxNewSynapseCount": 20,
        },
        # L5: pooler with the same settings as L2.
        "L5Params": {
            "inputWidth": self.sensorInputSize * 8,
            "cellCount": 4096,
            "sdrSize": 40,
            "synPermProximalInc": 0.1,
            "synPermProximalDec": 0.001,
            "initialProximalPermanence": 0.6,
            "minThresholdProximal": 10,
            "sampleSizeProximal": 20,
            "connectedPermanenceProximal": 0.5,
            "synPermDistalInc": 0.1,
            "synPermDistalDec": 0.001,
            "initialDistalPermanence": 0.41,
            "activationThresholdDistal": 13,
            "sampleSizeDistal": 20,
            "connectedPermanenceDistal": 0.5,
            "distalSegmentInhibitionFactor": 1.5,
            "learningMode": True,
        },
    }
def _retrieveRegions(self):
    """
    Retrieve and store Python region instances for each column.

    Populates self.sensors, self.coarseSensors, self.locationInputs and
    self.L4/L2/L5/L6Columns with the ``getSelf()`` handles of the network
    regions.  Region names follow the "<kind>_<columnIndex>" convention
    presumably established when the network was built — confirm in the
    network-construction code.
    """
    self.sensors = []
    self.coarseSensors = []
    self.locationInputs = []
    self.L4Columns = []
    self.L2Columns = []
    self.L5Columns = []
    self.L6Columns = []
    # NOTE: xrange — this file is Python 2.
    for i in xrange(self.numColumns):
        self.sensors.append(
            self.network.regions["sensorInput_" + str(i)].getSelf()
        )
        self.coarseSensors.append(
            self.network.regions["coarseSensorInput_" + str(i)].getSelf()
        )
        self.locationInputs.append(
            self.network.regions["locationInput_" + str(i)].getSelf()
        )
        self.L4Columns.append(
            self.network.regions["L4Column_" + str(i)].getSelf()
        )
        self.L2Columns.append(
            self.network.regions["L2Column_" + str(i)].getSelf()
        )
        self.L5Columns.append(
            self.network.regions["L5Column_" + str(i)].getSelf()
        )
        self.L6Columns.append(
            self.network.regions["L6Column_" + str(i)].getSelf()
        )
def _unsetLearningMode(self):
"""
Unsets the learning mode, to start inference.
"""
for column in self.L4Columns:
column.setParameter("learn", 0, False)
for column in self.L6Columns:
column.setParameter("learn", 0, False)
for column in self.L2Columns:
column.setParameter("learningMode", 0, False)
for column in self.L5Columns:
column.setParameter("learningMode", 0, False)
def _setLearningMode(self):
"""
Sets the learning mode.
"""
for column in self.L4Columns:
column.setParameter("learn", 0, True)
for column in self.L6Columns:
column.setParameter("learn", 0, True)
for column in self.L2Columns:
column.setParameter("learningMode", 0, True)
for column in self.L5Columns:
column.setParameter("learningMode", 0, True)
def _updateInferenceStats(self, statistics, objectName=None):
    """
    Updates the inference statistics.

    Parameters:
    ----------------------------
    @param statistics (dict)
           Dictionary in which to write the statistics.  Each key is
           assumed to already map to a list (e.g. a defaultdict(list)) —
           confirm in the caller.

    @param objectName (str)
           Name of the inferred object, if known. Otherwise, set to None.
    """
    # Snapshot activity from every layer once, before the per-column loop.
    L4Representations = self.getL4Representations()
    L4PredictiveCells = self.getL4PredictiveCells()
    L2Representations = self.getL2Representations()
    L5Representations = self.getL5Representations()
    L6Representations = self.getL6Representations()
    L6PredictiveCells = self.getL6PredictiveCells()
    # NOTE: xrange — this file is Python 2.
    for i in xrange(self.numColumns):
        statistics["L4 Representation C" + str(i)].append(
            len(L4Representations[i])
        )
        statistics["L4 Predictive C" + str(i)].append(
            len(L4PredictiveCells[i])
        )
        statistics["L2 Representation C" + str(i)].append(
            len(L2Representations[i])
        )
        statistics["L6 Representation C" + str(i)].append(
            len(L6Representations[i])
        )
        statistics["L6 Predictive C" + str(i)].append(
            len(L6PredictiveCells[i])
        )
        statistics["L5 Representation C" + str(i)].append(
            len(L5Representations[i])
        )
        # add true overlap if objectName was provided
        if objectName is not None:
            # Overlap = size of intersection between the stored object
            # representation and the current active cells.
            objectRepresentationL2 = self.objectRepresentationsL2[objectName]
            statistics["Overlap L2 with object C" + str(i)].append(
                len(objectRepresentationL2[i] & L2Representations[i])
            )
            objectRepresentationL5 = self.objectRepresentationsL5[objectName]
            statistics["Overlap L5 with object C" + str(i)].append(
                len(objectRepresentationL5[i] & L5Representations[i])
            )
| agpl-3.0 |
rosinality/knotter | knotter/mapper/lense.py | 1 | 1316 | import numpy as np
import scipy as sp
import scipy.linalg as la
import scipy.spatial.distance as dist
from sklearn import manifold
def pca(X, n_components=2):
    """
    SVD-based principal component analysis.

    Returns the first ``n_components`` columns of U, the corresponding
    singular values and right singular vectors, plus the full vector of
    squared singular values.
    """
    mean_removed = X - X.mean(axis=0)
    U_full, sv, Vt_full = la.svd(mean_removed, full_matrices=False)
    sv_squared = sv ** 2
    return (U_full[:, :n_components],
            sv[:n_components],
            Vt_full[:n_components, :],
            sv_squared)
def t_SNE(X, n_components=2):
    """Embed X into ``n_components`` dimensions with t-SNE (PCA init)."""
    tsne = manifold.TSNE(n_components=n_components, init='pca')
    return tsne.fit_transform(X)
def spectral_embedding(X, n_components=2, n_neighbors=10):
    """Embed X with Laplacian-eigenmap spectral embedding."""
    embedder = manifold.SpectralEmbedding(
        n_components=n_components, n_neighbors=n_neighbors)
    return embedder.fit_transform(X)
def Linfty_centering(X, options, metric='euclidean'):
    """
    For each point, the distance to the farthest other point
    (L-infinity centrality).  ``options`` is accepted for interface
    uniformity with the other lens functions and is unused.
    """
    condensed = dist.pdist(X, metric=metric)
    return dist.squareform(condensed).max(axis=0)
def pca_projection(X, n_axis=2):
    """
    Project X onto its first ``n_axis`` principal axes.

    Returns the PCA score matrix (U * diag(s)) together with the
    explained-variance ratio of each retained axis.
    """
    n_samples = X.shape[0]
    U, s, Vt, s2 = pca(X, n_axis)
    variances = s2 / n_samples
    scores = U.dot(np.diag(s))
    return scores, variances / variances.sum()
def simple_axis_projection(X, axis=0):
    """Return the single column of X selected by ``axis``."""
    return X[:, axis]
def gaussian_density(X, options, metric='euclidean'):
    """
    Gaussian-kernel density estimate at each data point, with bandwidth
    taken from ``options['epsilon']``.
    """
    epsilon = float(options['epsilon'])
    distances = dist.squareform(dist.pdist(X, metric=metric))
    kernel = np.exp(-(distances ** 2) / epsilon)
    return kernel.sum(axis=0)
| mit |
wangyum/spark | python/pyspark/ml/clustering.py | 5 | 62508 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.param.shared import HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, \
HasAggregationDepth, HasWeightCol, HasTol, HasProbabilityCol, HasDistanceMeasure, \
HasCheckpointInterval, Param, Params, TypeConverters
from pyspark.ml.util import JavaMLWritable, JavaMLReadable, GeneralJavaMLWritable, \
HasTrainingSummary, SparkContext
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.common import inherit_doc, _java2py
from pyspark.ml.stat import MultivariateGaussian
from pyspark.sql import DataFrame
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel', 'KMeansSummary',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
    """
    Clustering results for a given model.

    .. versionadded:: 2.1.0
    """

    # Every property below delegates to the wrapped JVM summary object
    # through JavaWrapper._call_java.

    @property
    @since("2.1.0")
    def predictionCol(self):
        """
        Name for column of predicted clusters in `predictions`.
        """
        return self._call_java("predictionCol")

    @property
    @since("2.1.0")
    def predictions(self):
        """
        DataFrame produced by the model's `transform` method.
        """
        return self._call_java("predictions")

    @property
    @since("2.1.0")
    def featuresCol(self):
        """
        Name for column of features in `predictions`.
        """
        return self._call_java("featuresCol")

    @property
    @since("2.1.0")
    def k(self):
        """
        The number of clusters the model was trained with.
        """
        return self._call_java("k")

    @property
    @since("2.1.0")
    def cluster(self):
        """
        DataFrame of predicted cluster centers for each training data point.
        """
        return self._call_java("cluster")

    @property
    @since("2.1.0")
    def clusterSizes(self):
        """
        Size of (number of data points in) each cluster.
        """
        return self._call_java("clusterSizes")

    @property
    @since("2.4.0")
    def numIter(self):
        """
        Number of iterations.
        """
        return self._call_java("numIter")
@inherit_doc
class _GaussianMixtureParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
                             HasProbabilityCol, HasTol, HasAggregationDepth, HasWeightCol):
    """
    Params for :py:class:`GaussianMixture` and :py:class:`GaussianMixtureModel`.

    .. versionadded:: 3.0.0
    """

    k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
              "Must be > 1.", typeConverter=TypeConverters.toInt)

    def __init__(self, *args):
        super(_GaussianMixtureParams, self).__init__(*args)
        # Defaults mirror the Scala-side GaussianMixture defaults.
        self._setDefault(k=2, tol=0.01, maxIter=100, aggregationDepth=2)

    @since("2.0.0")
    def getK(self):
        """
        Gets the value of `k`
        """
        return self.getOrDefault(self.k)
class GaussianMixtureModel(JavaModel, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable,
                           HasTrainingSummary):
    """
    Model fitted by GaussianMixture.

    .. versionadded:: 2.0.0
    """

    @since("3.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)

    @since("3.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    @since("3.0.0")
    def setProbabilityCol(self, value):
        """
        Sets the value of :py:attr:`probabilityCol`.
        """
        return self._set(probabilityCol=value)

    @property
    @since("2.0.0")
    def weights(self):
        """
        Weight for each Gaussian distribution in the mixture.
        This is a multinomial probability distribution over the k Gaussians,
        where weights[i] is the weight for Gaussian i, and weights sum to 1.
        """
        return self._call_java("weights")

    @property
    @since("3.0.0")
    def gaussians(self):
        """
        Array of :py:class:`MultivariateGaussian` where gaussians[i] represents
        the Multivariate Gaussian (Normal) Distribution for Gaussian i
        """
        # Convert each JVM-side gaussian's mean vector and covariance
        # matrix into Python objects via _java2py.
        sc = SparkContext._active_spark_context
        jgaussians = self._java_obj.gaussians()
        return [
            MultivariateGaussian(_java2py(sc, jgaussian.mean()), _java2py(sc, jgaussian.cov()))
            for jgaussian in jgaussians]

    @property
    @since("2.0.0")
    def gaussiansDF(self):
        """
        Retrieve Gaussian distributions as a DataFrame.
        Each row represents a Gaussian Distribution.
        The DataFrame has two columns: mean (Vector) and cov (Matrix).
        """
        return self._call_java("gaussiansDF")

    @property
    @since("2.1.0")
    def summary(self):
        """
        Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
        training set. An exception is thrown if no summary exists.
        """
        if self.hasSummary:
            return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    @since("3.0.0")
    def predict(self, value):
        """
        Predict label for the given features.
        """
        return self._call_java("predict", value)

    @since("3.0.0")
    def predictProbability(self, value):
        """
        Predict probability for the given features.
        """
        return self._call_java("predictProbability", value)
@inherit_doc
class GaussianMixture(JavaEstimator, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable):
    """
    GaussianMixture clustering.
    This class performs expectation maximization for multivariate Gaussian
    Mixture Models (GMMs). A GMM represents a composite distribution of
    independent Gaussian distributions with associated "mixing" weights
    specifying each's contribution to the composite.

    Given a set of sample points, this class will maximize the log-likelihood
    for a mixture of k Gaussians, iterating until the log-likelihood changes by
    less than convergenceTol, or until it has reached the max number of iterations.
    While this process is generally guaranteed to converge, it is not guaranteed
    to find a global optimum.

    .. versionadded:: 2.0.0

    Notes
    -----
    For high-dimensional data (with many features), this algorithm may perform poorly.
    This is due to high-dimensional data (a) making it difficult to cluster at all
    (based on statistical/theoretical arguments) and (b) numerical issues with
    Gaussian distributions.

    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors

    >>> data = [(Vectors.dense([-0.1, -0.05 ]),),
    ...         (Vectors.dense([-0.01, -0.1]),),
    ...         (Vectors.dense([0.9, 0.8]),),
    ...         (Vectors.dense([0.75, 0.935]),),
    ...         (Vectors.dense([-0.83, -0.68]),),
    ...         (Vectors.dense([-0.91, -0.76]),)]
    >>> df = spark.createDataFrame(data, ["features"])
    >>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)
    >>> gm.getMaxIter()
    100
    >>> gm.setMaxIter(30)
    GaussianMixture...
    >>> gm.getMaxIter()
    30
    >>> model = gm.fit(df)
    >>> model.getAggregationDepth()
    2
    >>> model.getFeaturesCol()
    'features'
    >>> model.setPredictionCol("newPrediction")
    GaussianMixtureModel...
    >>> model.predict(df.head().features)
    2
    >>> model.predictProbability(df.head().features)
    DenseVector([0.0, 0.0, 1.0])
    >>> model.hasSummary
    True
    >>> summary = model.summary
    >>> summary.k
    3
    >>> summary.clusterSizes
    [2, 2, 2]
    >>> summary.logLikelihood
    65.02945...
    >>> weights = model.weights
    >>> len(weights)
    3
    >>> gaussians = model.gaussians
    >>> len(gaussians)
    3
    >>> gaussians[0].mean
    DenseVector([0.825, 0.8675])
    >>> gaussians[0].cov
    DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], 0)
    >>> gaussians[1].mean
    DenseVector([-0.87, -0.72])
    >>> gaussians[1].cov
    DenseMatrix(2, 2, [0.0016, 0.0016, 0.0016, 0.0016], 0)
    >>> gaussians[2].mean
    DenseVector([-0.055, -0.075])
    >>> gaussians[2].cov
    DenseMatrix(2, 2, [0.002, -0.0011, -0.0011, 0.0006], 0)
    >>> model.gaussiansDF.select("mean").head()
    Row(mean=DenseVector([0.825, 0.8675]))
    >>> model.gaussiansDF.select("cov").head()
    Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
    >>> transformed = model.transform(df).select("features", "newPrediction")
    >>> rows = transformed.collect()
    >>> rows[4].newPrediction == rows[5].newPrediction
    True
    >>> rows[2].newPrediction == rows[3].newPrediction
    True
    >>> gmm_path = temp_path + "/gmm"
    >>> gm.save(gmm_path)
    >>> gm2 = GaussianMixture.load(gmm_path)
    >>> gm2.getK()
    3
    >>> model_path = temp_path + "/gmm_model"
    >>> model.save(model_path)
    >>> model2 = GaussianMixtureModel.load(model_path)
    >>> model2.hasSummary
    False
    >>> model2.weights == model.weights
    True
    >>> model2.gaussians[0].mean == model.gaussians[0].mean
    True
    >>> model2.gaussians[0].cov == model.gaussians[0].cov
    True
    >>> model2.gaussians[1].mean == model.gaussians[1].mean
    True
    >>> model2.gaussians[1].cov == model.gaussians[1].cov
    True
    >>> model2.gaussians[2].mean == model.gaussians[2].mean
    True
    >>> model2.gaussians[2].cov == model.gaussians[2].cov
    True
    >>> model2.gaussiansDF.select("mean").head()
    Row(mean=DenseVector([0.825, 0.8675]))
    >>> model2.gaussiansDF.select("cov").head()
    Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
    >>> model.transform(df).take(1) == model2.transform(df).take(1)
    True
    >>> gm2.setWeightCol("weight")
    GaussianMixture...
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", predictionCol="prediction", k=2,
                 probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
                 aggregationDepth=2, weightCol=None):
        """
        __init__(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
                 probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
                 aggregationDepth=2, weightCol=None)
        """
        super(GaussianMixture, self).__init__()
        # Wraps the Scala estimator of the same name via Py4J.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
                                            self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    def _create_model(self, java_model):
        # Called by JavaEstimator.fit to wrap the fitted JVM model.
        return GaussianMixtureModel(java_model)

    @keyword_only
    @since("2.0.0")
    def setParams(self, *, featuresCol="features", predictionCol="prediction", k=2,
                  probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
                  aggregationDepth=2, weightCol=None):
        """
        setParams(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
                  probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
                  aggregationDepth=2, weightCol=None)
        Sets params for GaussianMixture.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("2.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("2.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("2.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)

    @since("2.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    @since("2.0.0")
    def setProbabilityCol(self, value):
        """
        Sets the value of :py:attr:`probabilityCol`.
        """
        return self._set(probabilityCol=value)

    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    @since("2.0.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    @since("2.0.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    @since("3.0.0")
    def setAggregationDepth(self, value):
        """
        Sets the value of :py:attr:`aggregationDepth`.
        """
        return self._set(aggregationDepth=value)
class GaussianMixtureSummary(ClusteringSummary):
    """
    Gaussian mixture clustering results for a given model.

    .. versionadded:: 2.1.0
    """

    @property
    @since("2.1.0")
    def probabilityCol(self):
        """
        Name for column of predicted probability of each cluster in `predictions`.
        """
        return self._call_java("probabilityCol")

    @property
    @since("2.1.0")
    def probability(self):
        """
        DataFrame of probabilities of each cluster for each training data point.
        """
        return self._call_java("probability")

    @property
    @since("2.2.0")
    def logLikelihood(self):
        """
        Total log-likelihood for this model on the given data.
        """
        return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
    """
    Summary of KMeans.

    .. versionadded:: 2.1.0
    """

    @property
    @since("2.4.0")
    def trainingCost(self):
        """
        K-means cost (sum of squared distances to the nearest centroid for all points in the
        training dataset). This is equivalent to sklearn's inertia.
        """
        return self._call_java("trainingCost")
@inherit_doc
class _KMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, HasTol,
                    HasDistanceMeasure, HasWeightCol):
    """
    Params for :py:class:`KMeans` and :py:class:`KMeansModel`.

    .. versionadded:: 3.0.0
    """

    k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    initMode = Param(Params._dummy(), "initMode",
                     "The initialization algorithm. This can be either \"random\" to " +
                     "choose random points as initial cluster centers, or \"k-means||\" " +
                     "to use a parallel variant of k-means++",
                     typeConverter=TypeConverters.toString)
    initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
                      "initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)

    def __init__(self, *args):
        super(_KMeansParams, self).__init__(*args)
        # Defaults mirror the Scala-side KMeans defaults.
        self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
                         distanceMeasure="euclidean")

    @since("1.5.0")
    def getK(self):
        """
        Gets the value of `k`
        """
        return self.getOrDefault(self.k)

    @since("1.5.0")
    def getInitMode(self):
        """
        Gets the value of `initMode`
        """
        return self.getOrDefault(self.initMode)

    @since("1.5.0")
    def getInitSteps(self):
        """
        Gets the value of `initSteps`
        """
        return self.getOrDefault(self.initSteps)
class KMeansModel(JavaModel, _KMeansParams, GeneralJavaMLWritable, JavaMLReadable,
                  HasTrainingSummary):
    """
    Model fitted by KMeans.

    .. versionadded:: 1.5.0
    """

    @since("3.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)

    @since("3.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    @since("1.5.0")
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy arrays."""
        return [c.toArray() for c in self._call_java("clusterCenters")]

    @property
    @since("2.1.0")
    def summary(self):
        """
        Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
        training set. An exception is thrown if no summary exists.
        """
        if self.hasSummary:
            return KMeansSummary(super(KMeansModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    @since("3.0.0")
    def predict(self, value):
        """
        Predict label for the given features.
        """
        return self._call_java("predict", value)
@inherit_doc
class KMeans(JavaEstimator, _KMeansParams, JavaMLWritable, JavaMLReadable):
    """
    K-means clustering with a k-means++ like initialization mode
    (the k-means|| algorithm by Bahmani et al).

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> data = [(Vectors.dense([0.0, 0.0]), 2.0), (Vectors.dense([1.0, 1.0]), 2.0),
    ...         (Vectors.dense([9.0, 8.0]), 2.0), (Vectors.dense([8.0, 9.0]), 2.0)]
    >>> df = spark.createDataFrame(data, ["features", "weighCol"])
    >>> kmeans = KMeans(k=2)
    >>> kmeans.setSeed(1)
    KMeans...
    >>> kmeans.setWeightCol("weighCol")
    KMeans...
    >>> kmeans.setMaxIter(10)
    KMeans...
    >>> kmeans.getMaxIter()
    10
    >>> kmeans.clear(kmeans.maxIter)
    >>> model = kmeans.fit(df)
    >>> model.getDistanceMeasure()
    'euclidean'
    >>> model.setPredictionCol("newPrediction")
    KMeansModel...
    >>> model.predict(df.head().features)
    0
    >>> centers = model.clusterCenters()
    >>> len(centers)
    2
    >>> transformed = model.transform(df).select("features", "newPrediction")
    >>> rows = transformed.collect()
    >>> rows[0].newPrediction == rows[1].newPrediction
    True
    >>> rows[2].newPrediction == rows[3].newPrediction
    True
    >>> model.hasSummary
    True
    >>> summary = model.summary
    >>> summary.k
    2
    >>> summary.clusterSizes
    [2, 2]
    >>> summary.trainingCost
    4.0
    >>> kmeans_path = temp_path + "/kmeans"
    >>> kmeans.save(kmeans_path)
    >>> kmeans2 = KMeans.load(kmeans_path)
    >>> kmeans2.getK()
    2
    >>> model_path = temp_path + "/kmeans_model"
    >>> model.save(model_path)
    >>> model2 = KMeansModel.load(model_path)
    >>> model2.hasSummary
    False
    >>> model.clusterCenters()[0] == model2.clusterCenters()[0]
    array([ True,  True], dtype=bool)
    >>> model.clusterCenters()[1] == model2.clusterCenters()[1]
    array([ True,  True], dtype=bool)
    >>> model.transform(df).take(1) == model2.transform(df).take(1)
    True
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", predictionCol="prediction", k=2,
                 initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
                 distanceMeasure="euclidean", weightCol=None):
        """
        __init__(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
                 initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
                 distanceMeasure="euclidean", weightCol=None)
        """
        super(KMeans, self).__init__()
        # Wraps the Scala estimator of the same name via Py4J.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    def _create_model(self, java_model):
        # Called by JavaEstimator.fit to wrap the fitted JVM model.
        return KMeansModel(java_model)

    @keyword_only
    @since("1.5.0")
    def setParams(self, *, featuresCol="features", predictionCol="prediction", k=2,
                  initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
                  distanceMeasure="euclidean", weightCol=None):
        """
        setParams(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
                  initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
                  distanceMeasure="euclidean", weightCol=None)
        Sets params for KMeans.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("1.5.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("1.5.0")
    def setInitMode(self, value):
        """
        Sets the value of :py:attr:`initMode`.
        """
        return self._set(initMode=value)

    @since("1.5.0")
    def setInitSteps(self, value):
        """
        Sets the value of :py:attr:`initSteps`.
        """
        return self._set(initSteps=value)

    @since("2.4.0")
    def setDistanceMeasure(self, value):
        """
        Sets the value of :py:attr:`distanceMeasure`.
        """
        return self._set(distanceMeasure=value)

    @since("1.5.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("1.5.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)

    @since("1.5.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    @since("1.5.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    @since("1.5.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
@inherit_doc
class _BisectingKMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
                             HasDistanceMeasure, HasWeightCol):
    """
    Params for :py:class:`BisectingKMeans` and :py:class:`BisectingKMeansModel`.

    .. versionadded:: 3.0.0
    """

    k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
                                    "The minimum number of points (if >= 1.0) or the minimum " +
                                    "proportion of points (if < 1.0) of a divisible cluster.",
                                    typeConverter=TypeConverters.toFloat)

    def __init__(self, *args):
        super(_BisectingKMeansParams, self).__init__(*args)
        # Defaults mirror the Scala-side BisectingKMeans defaults.
        self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)

    @since("2.0.0")
    def getK(self):
        """
        Gets the value of `k` or its default value.
        """
        return self.getOrDefault(self.k)

    @since("2.0.0")
    def getMinDivisibleClusterSize(self):
        """
        Gets the value of `minDivisibleClusterSize` or its default value.
        """
        return self.getOrDefault(self.minDivisibleClusterSize)
class BisectingKMeansModel(JavaModel, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable,
                           HasTrainingSummary):
    """
    Model fitted by BisectingKMeans.

    .. versionadded:: 2.0.0
    """

    @since("3.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)

    @since("3.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    @since("2.0.0")
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy arrays."""
        return [c.toArray() for c in self._call_java("clusterCenters")]

    @since("2.0.0")
    def computeCost(self, dataset):
        """
        Computes the sum of squared distances between the input points
        and their corresponding cluster centers.

        .. deprecated:: 3.0.0
            It will be removed in future versions. Use :py:class:`ClusteringEvaluator` instead.
            You can also get the cost on the training dataset in the summary.
        """
        warnings.warn("Deprecated in 3.0.0. It will be removed in future versions. Use "
                      "ClusteringEvaluator instead. You can also get the cost on the training "
                      "dataset in the summary.", FutureWarning)
        return self._call_java("computeCost", dataset)

    @property
    @since("2.1.0")
    def summary(self):
        """
        Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
        training set. An exception is thrown if no summary exists.
        """
        if self.hasSummary:
            return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    @since("3.0.0")
    def predict(self, value):
        """
        Predict label for the given features.
        """
        return self._call_java("predict", value)
@inherit_doc
class BisectingKMeans(JavaEstimator, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
If bisecting all divisible clusters on the bottom level would result more than `k` leaf
clusters, larger clusters get higher priority.
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]), 2.0), (Vectors.dense([1.0, 1.0]), 2.0),
... (Vectors.dense([9.0, 8.0]), 2.0), (Vectors.dense([8.0, 9.0]), 2.0)]
>>> df = spark.createDataFrame(data, ["features", "weighCol"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> bkm.setMaxIter(10)
BisectingKMeans...
>>> bkm.getMaxIter()
10
>>> bkm.clear(bkm.maxIter)
>>> bkm.setSeed(1)
BisectingKMeans...
>>> bkm.setWeightCol("weighCol")
BisectingKMeans...
>>> bkm.getSeed()
1
>>> bkm.clear(bkm.seed)
>>> model = bkm.fit(df)
>>> model.getMaxIter()
20
>>> model.setPredictionCol("newPrediction")
BisectingKMeansModel...
>>> model.predict(df.head().features)
0
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.0
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
4.000...
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[0].newPrediction == rows[1].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> bkm2.getDistanceMeasure()
'euclidean'
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean",
weightCol=None):
"""
__init__(self, \\*, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean", \
weightCol=None)
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean",
weightCol=None):
"""
setParams(self, \\*, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean", \
weightCol=None)
Sets params for BisectingKMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
    @since("2.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        # Delegates to the inherited Params._set helper.
        return self._set(k=value)
    @since("2.0.0")
    def setMinDivisibleClusterSize(self, value):
        """
        Sets the value of :py:attr:`minDivisibleClusterSize`.
        """
        # Delegates to the inherited Params._set helper.
        return self._set(minDivisibleClusterSize=value)
    @since("2.4.0")
    def setDistanceMeasure(self, value):
        """
        Sets the value of :py:attr:`distanceMeasure`.
        """
        # Delegates to the inherited Params._set helper.
        return self._set(distanceMeasure=value)
    @since("2.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        # Delegates to the inherited Params._set helper.
        return self._set(maxIter=value)
    @since("2.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        # Delegates to the inherited Params._set helper.
        return self._set(featuresCol=value)
    @since("2.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        # Delegates to the inherited Params._set helper.
        return self._set(predictionCol=value)
    @since("2.0.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        # Delegates to the inherited Params._set helper.
        return self._set(seed=value)
    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        # Delegates to the inherited Params._set helper.
        return self._set(weightCol=value)
    def _create_model(self, java_model):
        # Hook called by the estimator fit path to wrap the fitted JVM model
        # in its Python-side counterpart.
        return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
    """
    Bisecting KMeans clustering results for a given model.
    .. versionadded:: 2.1.0
    """
    @property
    @since("3.0.0")
    def trainingCost(self):
        """
        Sum of squared distances to the nearest centroid for all points in the training dataset.
        This is equivalent to sklearn's inertia.
        """
        # Read-only scalar fetched from the JVM summary object via Py4J.
        return self._call_java("trainingCost")
@inherit_doc
class _LDAParams(HasMaxIter, HasFeaturesCol, HasSeed, HasCheckpointInterval):
    """
    Params for :py:class:`LDA` and :py:class:`LDAModel`.
    .. versionadded:: 3.0.0
    """
    # Param declarations shared by LDA (estimator) and LDAModel. Each uses
    # Params._dummy() as a placeholder parent, the SDMX-standard pattern for
    # class-level Param definitions in pyspark.ml.
    k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    optimizer = Param(Params._dummy(), "optimizer",
                      "Optimizer or inference algorithm used to estimate the LDA model. "
                      "Supported: online, em", typeConverter=TypeConverters.toString)
    learningOffset = Param(Params._dummy(), "learningOffset",
                           "A (positive) learning parameter that downweights early iterations."
                           " Larger values make early iterations count less",
                           typeConverter=TypeConverters.toFloat)
    learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an"
                          "exponential decay rate. This should be between (0.5, 1.0] to "
                          "guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
    subsamplingRate = Param(Params._dummy(), "subsamplingRate",
                            "Fraction of the corpus to be sampled and used in each iteration "
                            "of mini-batch gradient descent, in range (0, 1].",
                            typeConverter=TypeConverters.toFloat)
    optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
                                     "Indicates whether the docConcentration (Dirichlet parameter "
                                     "for document-topic distribution) will be optimized during "
                                     "training.", typeConverter=TypeConverters.toBoolean)
    docConcentration = Param(Params._dummy(), "docConcentration",
                             "Concentration parameter (commonly named \"alpha\") for the "
                             "prior placed on documents' distributions over topics (\"theta\").",
                             typeConverter=TypeConverters.toListFloat)
    topicConcentration = Param(Params._dummy(), "topicConcentration",
                               "Concentration parameter (commonly named \"beta\" or \"eta\") for "
                               "the prior placed on topic' distributions over terms.",
                               typeConverter=TypeConverters.toFloat)
    topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
                                 "Output column with estimates of the topic mixture distribution "
                                 "for each document (often called \"theta\" in the literature). "
                                 "Returns a vector of zeros for an empty document.",
                                 typeConverter=TypeConverters.toString)
    # Fix: pass typeConverter by keyword like every other Param declaration
    # above. It was previously passed positionally, which only works by
    # coincidence of argument order and breaks if Param gains a positional
    # parameter before typeConverter. Behavior is unchanged.
    keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
                               "(For EM optimizer) If using checkpointing, this indicates whether"
                               " to keep the last checkpoint. If false, then the checkpoint will be"
                               " deleted. Deleting the checkpoint can cause failures if a data"
                               " partition is lost, so set this bit with care.",
                               typeConverter=TypeConverters.toBoolean)
    def __init__(self, *args):
        super(_LDAParams, self).__init__(*args)
        # Note: docConcentration and topicConcentration deliberately have no
        # defaults here; unset means "let the backend choose".
        self._setDefault(maxIter=20, checkpointInterval=10,
                         k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                         subsamplingRate=0.05, optimizeDocConcentration=True,
                         topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
    @since("2.0.0")
    def getK(self):
        """
        Gets the value of :py:attr:`k` or its default value.
        """
        return self.getOrDefault(self.k)
    @since("2.0.0")
    def getOptimizer(self):
        """
        Gets the value of :py:attr:`optimizer` or its default value.
        """
        return self.getOrDefault(self.optimizer)
    @since("2.0.0")
    def getLearningOffset(self):
        """
        Gets the value of :py:attr:`learningOffset` or its default value.
        """
        return self.getOrDefault(self.learningOffset)
    @since("2.0.0")
    def getLearningDecay(self):
        """
        Gets the value of :py:attr:`learningDecay` or its default value.
        """
        return self.getOrDefault(self.learningDecay)
    @since("2.0.0")
    def getSubsamplingRate(self):
        """
        Gets the value of :py:attr:`subsamplingRate` or its default value.
        """
        return self.getOrDefault(self.subsamplingRate)
    @since("2.0.0")
    def getOptimizeDocConcentration(self):
        """
        Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
        """
        return self.getOrDefault(self.optimizeDocConcentration)
    @since("2.0.0")
    def getDocConcentration(self):
        """
        Gets the value of :py:attr:`docConcentration` or its default value.
        """
        return self.getOrDefault(self.docConcentration)
    @since("2.0.0")
    def getTopicConcentration(self):
        """
        Gets the value of :py:attr:`topicConcentration` or its default value.
        """
        return self.getOrDefault(self.topicConcentration)
    @since("2.0.0")
    def getTopicDistributionCol(self):
        """
        Gets the value of :py:attr:`topicDistributionCol` or its default value.
        """
        return self.getOrDefault(self.topicDistributionCol)
    @since("2.0.0")
    def getKeepLastCheckpoint(self):
        """
        Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
        """
        return self.getOrDefault(self.keepLastCheckpoint)
@inherit_doc
class LDAModel(JavaModel, _LDAParams):
    """
    Latent Dirichlet Allocation (LDA) model.
    This abstraction permits for different underlying representations,
    including local and distributed data structures.
    .. versionadded:: 2.0.0
    """
    # All computation below is delegated to the wrapped JVM model via
    # _call_java; the Python side only holds params and the Py4J handle.
    @since("3.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)
    @since("3.0.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)
    @since("3.0.0")
    def setTopicDistributionCol(self, value):
        """
        Sets the value of :py:attr:`topicDistributionCol`.
        """
        return self._set(topicDistributionCol=value)
    @since("2.0.0")
    def isDistributed(self):
        """
        Indicates whether this instance is of type DistributedLDAModel
        """
        return self._call_java("isDistributed")
    @since("2.0.0")
    def vocabSize(self):
        """Vocabulary size (number of terms or words in the vocabulary)"""
        return self._call_java("vocabSize")
    @since("2.0.0")
    def topicsMatrix(self):
        """
        Inferred topics, where each topic is represented by a distribution over terms.
        This is a matrix of size vocabSize x k, where each column is a topic.
        No guarantees are given about the ordering of the topics.
        .. warning:: If this model is actually a :py:class:`DistributedLDAModel`
            instance produced by the Expectation-Maximization ("em") `optimizer`,
            then this method could involve collecting a large amount of data
            to the driver (on the order of vocabSize x k).
        """
        return self._call_java("topicsMatrix")
    @since("2.0.0")
    def logLikelihood(self, dataset):
        """
        Calculates a lower bound on the log likelihood of the entire corpus.
        See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
        .. warning:: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
            :py:attr:`optimizer` is set to "em"), this involves collecting a large
            :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
        """
        return self._call_java("logLikelihood", dataset)
    @since("2.0.0")
    def logPerplexity(self, dataset):
        """
        Calculate an upper bound on perplexity. (Lower is better.)
        See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
        .. warning:: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
            :py:attr:`optimizer` is set to "em"), this involves collecting a large
            :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
        """
        return self._call_java("logPerplexity", dataset)
    @since("2.0.0")
    def describeTopics(self, maxTermsPerTopic=10):
        """
        Return the topics described by their top-weighted terms.
        """
        return self._call_java("describeTopics", maxTermsPerTopic)
    @since("2.0.0")
    def estimatedDocConcentration(self):
        """
        Value for :py:attr:`LDA.docConcentration` estimated from data.
        If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
        then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
        """
        return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
    """
    Distributed model fitted by :py:class:`LDA`.
    This type of model is currently only produced by Expectation-Maximization (EM).
    This model stores the inferred topics, the full training dataset, and the topic distribution
    for each training document.
    .. versionadded:: 2.0.0
    """
    @since("2.0.0")
    def toLocal(self):
        """
        Convert this distributed model to a local representation. This discards info about the
        training dataset.
        .. warning:: This involves collecting a large :py:func:`topicsMatrix` to the driver.
        """
        # Wrap the JVM-side local model, then copy param metadata/values from
        # Java so the Python wrapper is fully initialized.
        model = LocalLDAModel(self._call_java("toLocal"))
        # SPARK-10931: Temporary fix to be removed once LDAModel defines Params
        model._create_params_from_java()
        model._transfer_params_from_java()
        return model
    @since("2.0.0")
    def trainingLogLikelihood(self):
        """
        Log likelihood of the observed tokens in the training set,
        given the current parameter estimates:
        log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
        Notes
        -----
        - This excludes the prior; for that, use :py:func:`logPrior`.
        - Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
          the hyperparameters.
        - This is computed from the topic distributions computed during training. If you call
          :py:func:`logLikelihood` on the same training dataset, the topic distributions
          will be computed again, possibly giving different results.
        """
        return self._call_java("trainingLogLikelihood")
    @since("2.0.0")
    def logPrior(self):
        """
        Log probability of the current parameter estimate:
        log P(topics, topic distributions for docs | alpha, eta)
        """
        return self._call_java("logPrior")
    def getCheckpointFiles(self):
        """
        If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
        be saved checkpoint files. This method is provided so that users can manage those files.
        .. versionadded:: 2.0.0
        Returns
        -------
        list
            List of checkpoint files from training
        Notes
        -----
        Removing the checkpoints can cause failures if a partition is lost and is needed
        by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
        the checkpoints when this model and derivative data go out of scope.
        """
        return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
    """
    Local (non-distributed) model fitted by :py:class:`LDA`.
    This model stores the inferred topics only; it does not store info about the training dataset.
    .. versionadded:: 2.0.0
    """
    # No extra behavior beyond LDAModel: this subclass exists so that
    # locally-held models get their own concrete type with ML read/write
    # support mixed in.
    pass
@inherit_doc
class LDA(JavaEstimator, _LDAParams, JavaMLReadable, JavaMLWritable):
    """
    Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
    Terminology:
    - "term" = "word": an element of the vocabulary
    - "token": instance of a term appearing in a document
    - "topic": multinomial distribution over terms representing some concept
    - "document": one piece of text, corresponding to one row in the input data
    Original LDA paper (journal version):
    Blei, Ng, and Jordan.  "Latent Dirichlet Allocation."  JMLR, 2003.
    Input data (featuresCol):
    LDA is given a collection of documents as input data, via the featuresCol parameter.
    Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
    count for the corresponding term (word) in the document.  Feature transformers such as
    :py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
    can be useful for converting text to word count vectors.
    .. versionadded:: 2.0.0
    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors, SparseVector
    >>> from pyspark.ml.clustering import LDA
    >>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
    ...      [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
    >>> lda = LDA(k=2, seed=1, optimizer="em")
    >>> lda.setMaxIter(10)
    LDA...
    >>> lda.getMaxIter()
    10
    >>> lda.clear(lda.maxIter)
    >>> model = lda.fit(df)
    >>> model.setSeed(1)
    DistributedLDAModel...
    >>> model.getTopicDistributionCol()
    'topicDistribution'
    >>> model.isDistributed()
    True
    >>> localModel = model.toLocal()
    >>> localModel.isDistributed()
    False
    >>> model.vocabSize()
    2
    >>> model.describeTopics().show()
    +-----+-----------+--------------------+
    |topic|termIndices|         termWeights|
    +-----+-----------+--------------------+
    |    0|     [1, 0]|[0.50401530077160...|
    |    1|     [0, 1]|[0.50401530077160...|
    +-----+-----------+--------------------+
    ...
    >>> model.topicsMatrix()
    DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
    >>> lda_path = temp_path + "/lda"
    >>> lda.save(lda_path)
    >>> sameLDA = LDA.load(lda_path)
    >>> distributed_model_path = temp_path + "/lda_distributed_model"
    >>> model.save(distributed_model_path)
    >>> sameModel = DistributedLDAModel.load(distributed_model_path)
    >>> local_model_path = temp_path + "/lda_local_model"
    >>> localModel.save(local_model_path)
    >>> sameLocalModel = LocalLDAModel.load(local_model_path)
    >>> model.transform(df).take(1) == sameLocalModel.transform(df).take(1)
    True
    """
    @keyword_only
    def __init__(self, *, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
                 k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                 subsamplingRate=0.05, optimizeDocConcentration=True,
                 docConcentration=None, topicConcentration=None,
                 topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
        """
        __init__(self, \\*, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
                  k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
                  subsamplingRate=0.05, optimizeDocConcentration=True,\
                  docConcentration=None, topicConcentration=None,\
                  topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
        """
        super(LDA, self).__init__()
        # Wrap the JVM-side estimator; fitting happens in Scala via Py4J.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    def _create_model(self, java_model):
        # "em" fits a DistributedLDAModel; any other optimizer ("online")
        # yields a LocalLDAModel.
        if self.getOptimizer() == "em":
            return DistributedLDAModel(java_model)
        else:
            return LocalLDAModel(java_model)
    @keyword_only
    @since("2.0.0")
    def setParams(self, *, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
                  k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                  subsamplingRate=0.05, optimizeDocConcentration=True,
                  docConcentration=None, topicConcentration=None,
                  topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
        """
        setParams(self, \\*, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
                  k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
                  subsamplingRate=0.05, optimizeDocConcentration=True,\
                  docConcentration=None, topicConcentration=None,\
                  topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
        Sets params for LDA.
        """
        # Only kwargs explicitly passed by the caller (captured by
        # @keyword_only) are forwarded to _set.
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("2.0.0")
    def setCheckpointInterval(self, value):
        """
        Sets the value of :py:attr:`checkpointInterval`.
        """
        return self._set(checkpointInterval=value)
    @since("2.0.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)
    @since("2.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        >>> algo = LDA().setK(10)
        >>> algo.getK()
        10
        """
        return self._set(k=value)
    @since("2.0.0")
    def setOptimizer(self, value):
        """
        Sets the value of :py:attr:`optimizer`.
        Currently only support 'em' and 'online'.
        Examples
        --------
        >>> algo = LDA().setOptimizer("em")
        >>> algo.getOptimizer()
        'em'
        """
        return self._set(optimizer=value)
    @since("2.0.0")
    def setLearningOffset(self, value):
        """
        Sets the value of :py:attr:`learningOffset`.
        Examples
        --------
        >>> algo = LDA().setLearningOffset(100)
        >>> algo.getLearningOffset()
        100.0
        """
        return self._set(learningOffset=value)
    @since("2.0.0")
    def setLearningDecay(self, value):
        """
        Sets the value of :py:attr:`learningDecay`.
        Examples
        --------
        >>> algo = LDA().setLearningDecay(0.1)
        >>> algo.getLearningDecay()
        0.1...
        """
        return self._set(learningDecay=value)
    @since("2.0.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.
        Examples
        --------
        >>> algo = LDA().setSubsamplingRate(0.1)
        >>> algo.getSubsamplingRate()
        0.1...
        """
        return self._set(subsamplingRate=value)
    @since("2.0.0")
    def setOptimizeDocConcentration(self, value):
        """
        Sets the value of :py:attr:`optimizeDocConcentration`.
        Examples
        --------
        >>> algo = LDA().setOptimizeDocConcentration(True)
        >>> algo.getOptimizeDocConcentration()
        True
        """
        return self._set(optimizeDocConcentration=value)
    @since("2.0.0")
    def setDocConcentration(self, value):
        """
        Sets the value of :py:attr:`docConcentration`.
        Examples
        --------
        >>> algo = LDA().setDocConcentration([0.1, 0.2])
        >>> algo.getDocConcentration()
        [0.1..., 0.2...]
        """
        return self._set(docConcentration=value)
    @since("2.0.0")
    def setTopicConcentration(self, value):
        """
        Sets the value of :py:attr:`topicConcentration`.
        Examples
        --------
        >>> algo = LDA().setTopicConcentration(0.5)
        >>> algo.getTopicConcentration()
        0.5...
        """
        return self._set(topicConcentration=value)
    @since("2.0.0")
    def setTopicDistributionCol(self, value):
        """
        Sets the value of :py:attr:`topicDistributionCol`.
        Examples
        --------
        >>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
        >>> algo.getTopicDistributionCol()
        'topicDistributionCol'
        """
        return self._set(topicDistributionCol=value)
    @since("2.0.0")
    def setKeepLastCheckpoint(self, value):
        """
        Sets the value of :py:attr:`keepLastCheckpoint`.
        Examples
        --------
        >>> algo = LDA().setKeepLastCheckpoint(False)
        >>> algo.getKeepLastCheckpoint()
        False
        """
        return self._set(keepLastCheckpoint=value)
    @since("2.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)
    @since("2.0.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)
@inherit_doc
class _PowerIterationClusteringParams(HasMaxIter, HasWeightCol):
    """
    Params for :py:class:`PowerIterationClustering`.
    .. versionadded:: 3.0.0
    """
    # Class-level Param declarations; Params._dummy() is the usual
    # placeholder parent for shared definitions.
    k = Param(Params._dummy(), "k",
              "The number of clusters to create. Must be > 1.",
              typeConverter=TypeConverters.toInt)
    initMode = Param(Params._dummy(), "initMode",
                     "The initialization algorithm. This can be either " +
                     "'random' to use a random vector as vertex properties, or 'degree' to use " +
                     "a normalized sum of similarities with other vertices.  Supported options: " +
                     "'random' and 'degree'.",
                     typeConverter=TypeConverters.toString)
    srcCol = Param(Params._dummy(), "srcCol",
                   "Name of the input column for source vertex IDs.",
                   typeConverter=TypeConverters.toString)
    dstCol = Param(Params._dummy(), "dstCol",
                   "Name of the input column for destination vertex IDs.",
                   typeConverter=TypeConverters.toString)
    def __init__(self, *args):
        super(_PowerIterationClusteringParams, self).__init__(*args)
        self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
    @since("2.4.0")
    def getK(self):
        """
        Gets the value of :py:attr:`k` or its default value.
        """
        return self.getOrDefault(self.k)
    @since("2.4.0")
    def getInitMode(self):
        """
        Gets the value of :py:attr:`initMode` or its default value.
        """
        return self.getOrDefault(self.initMode)
    @since("2.4.0")
    def getSrcCol(self):
        """
        Gets the value of :py:attr:`srcCol` or its default value.
        """
        return self.getOrDefault(self.srcCol)
    @since("2.4.0")
    def getDstCol(self):
        """
        Gets the value of :py:attr:`dstCol` or its default value.
        """
        return self.getOrDefault(self.dstCol)
@inherit_doc
class PowerIterationClustering(_PowerIterationClusteringParams, JavaParams, JavaMLReadable,
                               JavaMLWritable):
    """
    Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
    `Lin and Cohen <http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf>`_. From the
    abstract: PIC finds a very low-dimensional embedding of a dataset using truncated power
    iteration on a normalized pair-wise similarity matrix of the data.
    This class is not yet an Estimator/Transformer, use :py:func:`assignClusters` method
    to run the PowerIterationClustering algorithm.
    .. versionadded:: 2.4.0
    Notes
    -----
    See `Wikipedia on Spectral clustering <http://en.wikipedia.org/wiki/Spectral_clustering>`_
    Examples
    --------
    >>> data = [(1, 0, 0.5),
    ...         (2, 0, 0.5), (2, 1, 0.7),
    ...         (3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9),
    ...         (4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1),
    ...         (5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
    >>> df = spark.createDataFrame(data).toDF("src", "dst", "weight").repartition(1)
    >>> pic = PowerIterationClustering(k=2, weightCol="weight")
    >>> pic.setMaxIter(40)
    PowerIterationClustering...
    >>> assignments = pic.assignClusters(df)
    >>> assignments.sort(assignments.id).show(truncate=False)
    +---+-------+
    |id |cluster|
    +---+-------+
    |0  |0      |
    |1  |0      |
    |2  |0      |
    |3  |0      |
    |4  |0      |
    |5  |1      |
    +---+-------+
    ...
    >>> pic_path = temp_path + "/pic"
    >>> pic.save(pic_path)
    >>> pic2 = PowerIterationClustering.load(pic_path)
    >>> pic2.getK()
    2
    >>> pic2.getMaxIter()
    40
    >>> pic2.assignClusters(df).take(6) == assignments.take(6)
    True
    """
    @keyword_only
    def __init__(self, *, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
                 weightCol=None):
        """
        __init__(self, \\*, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
                 weightCol=None)
        """
        super(PowerIterationClustering, self).__init__()
        # Wrap the JVM-side implementation; the algorithm runs in Scala.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("2.4.0")
    def setParams(self, *, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
                  weightCol=None):
        """
        setParams(self, \\*, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
                  weightCol=None)
        Sets params for PowerIterationClustering.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("2.4.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)
    @since("2.4.0")
    def setInitMode(self, value):
        """
        Sets the value of :py:attr:`initMode`.
        """
        return self._set(initMode=value)
    @since("2.4.0")
    def setSrcCol(self, value):
        """
        Sets the value of :py:attr:`srcCol`.
        """
        return self._set(srcCol=value)
    @since("2.4.0")
    def setDstCol(self, value):
        """
        Sets the value of :py:attr:`dstCol`.
        """
        return self._set(dstCol=value)
    @since("2.4.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)
    @since("2.4.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
    @since("2.4.0")
    def assignClusters(self, dataset):
        """
        Run the PIC algorithm and returns a cluster assignment for each input vertex.
        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            A dataset with columns src, dst, weight representing the affinity matrix,
            which is the matrix A in the PIC paper. Suppose the src column value is i,
            the dst column value is j, the weight column value is similarity s,,ij,,
            which must be nonnegative. This is a symmetric matrix and hence
            s,,ij,, = s,,ji,,. For any (i, j) with nonzero similarity, there should be
            either (i, j, s,,ij,,) or (j, i, s,,ji,,) in the input. Rows with i = j are
            ignored, because we assume s,,ij,, = 0.0.
        Returns
        -------
        :py:class:`pyspark.sql.DataFrame`
            A dataset that contains columns of vertex id and the corresponding cluster for
            the id. The schema of it will be:
            - id: Long
            - cluster: Int
        """
        # Push any Python-side param changes to the JVM object before
        # invoking the algorithm there.
        self._transfer_params_to_java()
        jdf = self._java_obj.assignClusters(dataset._jdf)
        # Re-wrap the returned Java DataFrame in the caller's SQL context.
        return DataFrame(jdf, dataset.sql_ctx)
if __name__ == "__main__":
    import doctest
    import numpy
    import pyspark.ml.clustering
    from pyspark.sql import SparkSession
    try:
        # NumPy 1.14+ changed its default array repr; pin the legacy format
        # so the doctest outputs above keep matching.
        numpy.set_printoptions(legacy='1.13')
    except TypeError:
        # Older numpy has no "legacy" kwarg; its default output already matches.
        pass
    globs = pyspark.ml.clustering.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.clustering tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    import tempfile
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        # Always remove the temp dir, even if the doctests raise.
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        # NOTE(review): relies on a module-level "import sys" outside this
        # view -- confirm it exists at the top of the file.
        sys.exit(-1)
| apache-2.0 |
Shaswat27/scipy | scipy/signal/windows.py | 11 | 53970 | """The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import fftpack, linalg, special
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'exponential', 'tukey', 'get_window']
def boxcar(M, sym=True):
    """Return a boxcar or rectangular window.
    Included for completeness, this is equivalent to no window at all.
    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        Whether the window is symmetric. (Has no effect for boxcar.)
    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1.
    Examples
    --------
    Plot the window and its frequency response:
    >>> from scipy import signal
    >>> from scipy.fftpack import fft, fftshift
    >>> import matplotlib.pyplot as plt
    >>> window = signal.boxcar(51)
    >>> plt.plot(window)
    >>> plt.title("Boxcar window")
    >>> plt.ylabel("Amplitude")
    >>> plt.xlabel("Sample")
    >>> plt.figure()
    >>> A = fft(window, 2048) / (len(window)/2.0)
    >>> freq = np.linspace(-0.5, 0.5, len(A))
    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
    >>> plt.plot(freq, response)
    >>> plt.axis([-0.5, 0.5, -120, 0])
    >>> plt.title("Frequency response of the boxcar window")
    >>> plt.ylabel("Normalized magnitude [dB]")
    >>> plt.xlabel("Normalized frequency [cycles per sample]")
    """
    # Bug fix: np.ones raises ValueError for negative M, but the documented
    # contract (and every other window in this module) is to return an empty
    # array when M is zero or less.
    if M < 1:
        return np.array([])
    return np.ones(M, float)
def triang(M, sym=True):
    """Return a triangular window.
    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.
    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # A periodic (non-symmetric) window of even length is built as a
    # symmetric window one sample longer, with the last sample dropped.
    periodic_even = not sym and M % 2 == 0
    if periodic_even:
        M = M + 1
    half = np.arange(1, (M + 1) // 2 + 1)
    if M % 2:
        # Odd length: the rising ramp peaks at exactly 1 in the centre.
        ramp = 2 * half / (M + 1.0)
        w = np.concatenate((ramp, ramp[-2::-1]))
    else:
        # Even length: the two centre samples share the (sub-unity) maximum.
        ramp = (2 * half - 1.0) / M
        w = np.concatenate((ramp, ramp[::-1]))
    if periodic_even:
        w = w[:-1]
    return w
def parzen(M, sym=True):
    """Return a Parzen window.
    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.
    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    periodic_even = not sym and M % 2 == 0
    if periodic_even:
        M = M + 1
    # Sample positions centred on zero; the two cubic pieces of the Parzen
    # window are selected per-sample with np.where (both formulas depend
    # only on |n|, so this reproduces the mirrored assembly exactly).
    n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
    scaled = np.abs(n) / (M / 2.0)
    central = 1 - 6 * scaled ** 2.0 + 6 * scaled ** 3.0
    outer = 2 * (1 - scaled) ** 3.0
    w = np.where(np.abs(n) <= (M - 1) / 4.0, central, outer)
    if periodic_even:
        w = w[:-1]
    return w
def bohman(M, sym=True):
    """Return a Bohman window.
    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.
    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    periodic_even = not sym and M % 2 == 0
    if periodic_even:
        M = M + 1
    # |x| over (-1, 1), excluding the endpoints, which are pinned to 0.
    x = np.abs(np.linspace(-1, 1, M)[1:-1])
    interior = (1 - x) * np.cos(np.pi * x) + 1.0 / np.pi * np.sin(np.pi * x)
    w = np.concatenate(([0], interior, [0]))
    if periodic_even:
        w = w[:-1]
    return w
def blackman(M, sym=True):
    r"""Return a Blackman window.

    The Blackman window is a taper built from the first three terms of a
    cosine series:

    .. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)

    It was designed to have close to the minimal leakage possible and is
    only slightly worse than a Kaiser window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design. When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal
           Processing. Upper Saddle River, NJ: Prentice-Hall, 1999,
           pp. 468-471.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # For a periodic window of even length, build the symmetric window
    # one sample longer and drop the final point at the end.
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1
    theta = 2.0 * np.pi * np.arange(0, M) / (M - 1)
    window = 0.42 - 0.5 * np.cos(theta) + 0.08 * np.cos(2.0 * theta)
    return window[:-1] if needs_trim else window
def nuttall(M, sym=True):
    """Return a minimum 4-term Blackman-Harris window according to Nuttall.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design. When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1
    # Nuttall's 4-term cosine-series coefficients (alternating signs).
    coeffs = (0.3635819, 0.4891775, 0.1365995, 0.0106411)
    theta = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    window = (coeffs[0] - coeffs[1] * np.cos(theta) +
              coeffs[2] * np.cos(2 * theta) - coeffs[3] * np.cos(3 * theta))
    return window[:-1] if needs_trim else window
def blackmanharris(M, sym=True):
    """Return a minimum 4-term Blackman-Harris window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design. When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1
    # Blackman-Harris 4-term cosine-series coefficients (alternating signs).
    coeffs = (0.35875, 0.48829, 0.14128, 0.01168)
    theta = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    window = (coeffs[0] - coeffs[1] * np.cos(theta) +
              coeffs[2] * np.cos(2 * theta) - coeffs[3] * np.cos(3 * theta))
    return window[:-1] if needs_trim else window
def flattop(M, sym=True):
    """Return a flat top window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design. When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1
    # 5-term cosine-series coefficients (alternating signs).
    coeffs = (0.2156, 0.4160, 0.2781, 0.0836, 0.0069)
    theta = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    window = (coeffs[0] - coeffs[1] * np.cos(theta) +
              coeffs[2] * np.cos(2 * theta) - coeffs[3] * np.cos(3 * theta) +
              coeffs[4] * np.cos(4 * theta))
    return window[:-1] if needs_trim else window
def bartlett(M, sym=True):
    r"""Return a Bartlett window.

    The Bartlett window is very similar to a triangular window, except
    that the end points are at zero:

    .. math:: w(n) = \frac{2}{M-1} \left(
              \frac{M-1}{2} - \left|n - \frac{M-1}{2}\right| \right)

    It is often used for tapering a signal without generating too much
    ripple in the frequency domain.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design. When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The triangular window, with the first and last samples equal to
        zero and the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.
    .. [2] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
           Processing", Prentice-Hall, 1999, pp. 468-471.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1
    n = np.arange(0, M)
    # Rising ramp; the falling half mirrors it around the midpoint.
    ramp_up = 2.0 * n / (M - 1)
    window = np.where(n <= (M - 1) / 2.0, ramp_up, 2.0 - ramp_up)
    return window[:-1] if needs_trim else window
def hann(M, sym=True):
    r"""Return a Hann window.

    The Hann window is a raised-cosine taper whose ends touch zero:

    .. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
              \qquad 0 \leq n \leq M-1

    Named for Julius von Hann, it is sometimes erroneously called the
    "Hanning" window (confusion with the very similar Hamming window).

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design. When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1
    theta = 2.0 * np.pi * np.arange(0, M) / (M - 1)
    window = 0.5 - 0.5 * np.cos(theta)
    return window[:-1] if needs_trim else window


# Backwards-compatible alias for the widespread (historical) name.
hanning = hann
def tukey(M, alpha=0.5, sym=True):
    r"""Return a Tukey window, also known as a tapered cosine window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    alpha : float, optional
        Shape parameter of the Tukey window, representing the fraction of the
        window inside the cosine tapered region.
        If zero, the Tukey window is equivalent to a rectangular window.
        If one, the Tukey window is equivalent to a Hann window.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
           Analysis with the Discrete Fourier Transform". Proceedings of the
           IEEE 66 (1): 51-83. doi:10.1109/PROC.1978.10837
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function#Tukey_window

    Examples
    --------
    Plot the window and its frequency response:

    >>> from scipy import signal
    >>> from scipy.fftpack import fft, fftshift
    >>> import matplotlib.pyplot as plt

    >>> window = signal.tukey(51)
    >>> plt.plot(window)
    >>> plt.title("Tukey window")
    >>> plt.ylabel("Amplitude")
    >>> plt.xlabel("Sample")
    >>> plt.ylim([0, 1.1])

    >>> plt.figure()
    >>> A = fft(window, 2048) / (len(window)/2.0)
    >>> freq = np.linspace(-0.5, 0.5, len(A))
    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
    >>> plt.plot(freq, response)
    >>> plt.axis([-0.5, 0.5, -120, 0])
    >>> plt.title("Frequency response of the Tukey window")
    >>> plt.ylabel("Normalized magnitude [dB]")
    >>> plt.xlabel("Normalized frequency [cycles per sample]")
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Degenerate shape parameters reduce to the limiting windows:
    # alpha <= 0 is rectangular, alpha >= 1 is a Hann window.
    if alpha <= 0:
        return np.ones(M, 'd')
    elif alpha >= 1.0:
        return hann(M, sym=sym)
    odd = M % 2
    if not sym and not odd:
        # Periodic window: compute the symmetric window one sample
        # longer and drop the last point below.
        M = M + 1
    n = np.arange(0, M)
    # Number of samples in each cosine-tapered edge region.
    width = int(np.floor(alpha*(M-1)/2.0))
    n1 = n[0:width+1]            # rising taper
    n2 = n[width+1:M-width-1]    # flat unity top
    n3 = n[M-width-1:]           # falling taper
    w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1))))
    w2 = np.ones(n2.shape)
    w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1))))
    w = np.concatenate((w1, w2, w3))
    if not sym and not odd:
        w = w[:-1]
    return w
def barthann(M, sym=True):
    """Return a modified Bartlett-Hann window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design. When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1
    # Normalized distance of each sample from the window midpoint.
    dist = np.abs(np.arange(0, M) / (M - 1.0) - 0.5)
    window = 0.62 - 0.48 * dist + 0.38 * np.cos(2 * np.pi * dist)
    return window[:-1] if needs_trim else window
def hamming(M, sym=True):
    r"""Return a Hamming window.

    The Hamming window is a raised-cosine taper with non-zero endpoints,
    optimized to minimize the nearest side lobe:

    .. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
              \qquad 0 \leq n \leq M-1

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design. When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1
    theta = 2.0 * np.pi * np.arange(0, M) / (M - 1)
    window = 0.54 - 0.46 * np.cos(theta)
    return window[:-1] if needs_trim else window
def kaiser(M, beta, sym=True):
    r"""Return a Kaiser window.

    The Kaiser window is a taper formed by using a Bessel function:

    .. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
              \right)/I_0(\beta)

    with :math:`-\frac{M-1}{2} \leq n \leq \frac{M-1}{2}`, where
    :math:`I_0` is the modified zeroth-order Bessel function.  It is a
    very good approximation to the Slepian (DPSS) window and can mimic
    many other windows by varying `beta` (0: rectangular, 5: ~Hamming,
    6: ~Hann, 8.6: ~Blackman).

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    beta : float
        Shape parameter, determines trade-off between main-lobe width
        and side lobe level. As beta gets large, the window narrows.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design. When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
           digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
           John Wiley and Sons, New York, (1966).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1
    # Normalized deviation from the window center, in [-1, 1].
    half_span = (M - 1) / 2.0
    deviation = (np.arange(0, M) - half_span) / half_span
    window = (special.i0(beta * np.sqrt(1 - deviation ** 2.0)) /
              special.i0(beta))
    return window[:-1] if needs_trim else window
def gaussian(M, std, sym=True):
    r"""Return a Gaussian window.

    The Gaussian window is defined as

    .. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    std : float
        The standard deviation, sigma.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design. When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1
    # Sample offsets from the window midpoint.
    offsets = np.arange(0, M) - (M - 1.0) / 2.0
    window = np.exp(-offsets ** 2 / (2 * std * std))
    return window[:-1] if needs_trim else window
def general_gaussian(M, p, sig, sym=True):
    r"""Return a window with a generalized Gaussian shape.

    The generalized Gaussian window is defined as

    .. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }

    and its half-power point is at :math:`(2 \log(2))^{1/(2 p)} \sigma`.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    p : float
        Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
        the same shape as the Laplace distribution.
    sig : float
        The standard deviation, sigma.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design. When False, generates a periodic window, for use
        in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    needs_trim = not sym and M % 2 == 0
    if needs_trim:
        M += 1
    # Sample offsets from the window midpoint.
    offsets = np.arange(0, M) - (M - 1.0) / 2.0
    window = np.exp(-0.5 * np.abs(offsets / sig) ** (2 * p))
    return window[:-1] if needs_trim else window
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
    r"""Return a Dolph-Chebyshev window.
    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    at : float
        Attenuation (in dB).
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.
    Returns
    -------
    w : ndarray
        The window, with the maximum value always normalized to 1
    Notes
    -----
    This window optimizes for the narrowest main lobe width for a given order
    `M` and sidelobe equiripple attenuation `at`, using Chebyshev
    polynomials. It was originally developed by Dolph to optimize the
    directionality of radio antenna arrays.
    Unlike most windows, the Dolph-Chebyshev is defined in terms of its
    frequency response:
    .. math:: W(k) = \frac
              {\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
              {\cosh[M \cosh^{-1}(\beta)]}
    where
    .. math:: \beta = \cosh \left [\frac{1}{M}
              \cosh^{-1}(10^\frac{A}{20}) \right ]
    and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
    The time domain window is then generated using the IFFT, so
    power-of-two `M` are the fastest to generate, and prime number `M` are
    the slowest.
    The equiripple condition in the frequency domain creates impulses in the
    time domain, which appear at the ends of the window.
    References
    ----------
    .. [1] C. Dolph, "A current distribution for broadside arrays which
           optimizes the relationship between beam width and side-lobe level",
           Proceedings of the IEEE, Vol. 34, Issue 6
    .. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
           American Meteorological Society (April 1997)
           http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
    .. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
           discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
           No. 1, January 1978
    Examples
    --------
    Plot the window and its frequency response:
    >>> from scipy import signal
    >>> from scipy.fftpack import fft, fftshift
    >>> import matplotlib.pyplot as plt
    >>> window = signal.chebwin(51, at=100)
    >>> plt.plot(window)
    >>> plt.title("Dolph-Chebyshev window (100 dB)")
    >>> plt.ylabel("Amplitude")
    >>> plt.xlabel("Sample")
    >>> plt.figure()
    >>> A = fft(window, 2048) / (len(window)/2.0)
    >>> freq = np.linspace(-0.5, 0.5, len(A))
    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
    >>> plt.plot(freq, response)
    >>> plt.axis([-0.5, 0.5, -120, 0])
    >>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
    >>> plt.ylabel("Normalized magnitude [dB]")
    >>> plt.xlabel("Normalized frequency [cycles per sample]")
    """
    # Warn (rather than fail) for low attenuation: the result is still a
    # valid window, just a poor choice for spectral analysis.
    if np.abs(at) < 45:
        warnings.warn("This window is not suitable for spectral analysis "
                      "for attenuation values lower than about 45dB because "
                      "the equivalent noise bandwidth of a Chebyshev window "
                      "does not grow monotonically with increasing sidelobe "
                      "attenuation when the attenuation is smaller than "
                      "about 45 dB.")
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    odd = M % 2
    # Periodic (sym=False) even-length window: compute the symmetric
    # window one sample longer and drop the last point at the end.
    if not sym and not odd:
        M = M + 1
    # compute the parameter beta
    order = M - 1.0
    beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
    # Frequency sample points of the window's DFT.
    k = np.r_[0:M] * 1.0
    x = beta * np.cos(np.pi * k / M)
    # Find the window's DFT coefficients
    # Use analytic definition of Chebyshev polynomial instead of expansion
    # from scipy.special. Using the expansion in scipy.special leads to errors.
    # The three branches are the |x|>1 (hyperbolic) and |x|<=1 (trigonometric)
    # continuations of the Chebyshev polynomial T_order(x).
    p = np.zeros(x.shape)
    p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
    p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
    p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
    # Appropriate IDFT and filling up
    # depending on even/odd M
    if M % 2:
        # Odd length: the real part of the FFT gives the half-window,
        # which is then mirrored around the center sample.
        w = np.real(fftpack.fft(p))
        n = (M + 1) // 2
        w = w[:n]
        w = np.concatenate((w[n - 1:0:-1], w))
    else:
        # Even length: apply a half-sample linear phase shift before the
        # transform so the real part yields a correctly centered window.
        p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
        w = np.real(fftpack.fft(p))
        n = M // 2 + 1
        w = np.concatenate((w[n - 1:0:-1], w[1:n]))
    # Peak-normalize so the maximum value is exactly 1.
    w = w / max(w)
    if not sym and not odd:
        w = w[:-1]
    return w
def slepian(M, width, sym=True):
    """Return a digital Slepian (DPSS) window.
    Used to maximize the energy concentration in the main lobe. Also called
    the digital prolate spheroidal sequence (DPSS).
    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    width : float
        Bandwidth
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.
    Returns
    -------
    w : ndarray
        The window, with the maximum value always normalized to 1
    Examples
    --------
    Plot the window and its frequency response:
    >>> from scipy import signal
    >>> from scipy.fftpack import fft, fftshift
    >>> import matplotlib.pyplot as plt
    >>> window = signal.slepian(51, width=0.3)
    >>> plt.plot(window)
    >>> plt.title("Slepian (DPSS) window (BW=0.3)")
    >>> plt.ylabel("Amplitude")
    >>> plt.xlabel("Sample")
    >>> plt.figure()
    >>> A = fft(window, 2048) / (len(window)/2.0)
    >>> freq = np.linspace(-0.5, 0.5, len(A))
    >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
    >>> plt.plot(freq, response)
    >>> plt.axis([-0.5, 0.5, -120, 0])
    >>> plt.title("Frequency response of the Slepian window (BW=0.3)")
    >>> plt.ylabel("Normalized magnitude [dB]")
    >>> plt.xlabel("Normalized frequency [cycles per sample]")
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    odd = M % 2
    # Periodic (sym=False) even-length window: compute the symmetric
    # window one sample longer and drop the last point at the end.
    if not sym and not odd:
        M = M + 1
    # our width is the full bandwidth
    width = width / 2
    # to match the old version
    # NOTE(review): the second halving is a deliberate backward-compat
    # quirk, so the effective half-bandwidth used below is width / 4 of
    # the caller's value — do not "simplify" the two divisions away.
    width = width / 2
    m = np.arange(M, dtype='d')
    # Build a symmetric tridiagonal matrix in banded storage (row 0:
    # off-diagonal, row 1: main diagonal) and take the eigenvector of
    # its largest eigenvalue (index M-1) as the window.
    # NOTE(review): presumably this is the classic DPSS tridiagonal
    # formulation — confirm against the DPSS literature.
    H = np.zeros((2, M))
    H[0, 1:] = m[1:] * (M - m[1:]) / 2
    H[1, :] = ((M - 1 - 2 * m) / 2)**2 * np.cos(2 * np.pi * width)
    _, win = linalg.eig_banded(H, select='i', select_range=(M-1, M-1))
    # Peak-normalize the (1-column) eigenvector so its maximum is 1.
    win = win.ravel() / win.max()
    if not sym and not odd:
        win = win[:-1]
    return win
def cosine(M, sym=True):
    """Return a window with a simple cosine shape.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in filter
        design.
        When False, generates a periodic window, for use in spectral analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the value 1
        does not appear if `M` is even and `sym` is True).

    Notes
    -----
    .. versionadded:: 0.13.0
    """
    # Degenerate sizes: no points, or a single unit sample.
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # A periodic (non-symmetric) window of even length is built by
    # computing one extra point and discarding it afterwards.
    needs_trim = (not sym) and (M % 2 == 0)
    if needs_trim:
        M += 1
    # Half-sample-shifted samples of half a sine period.
    win = np.sin(np.pi / M * (np.arange(M) + 0.5))
    return win[:-1] if needs_trim else win
def exponential(M, center=None, tau=1., sym=True):
    r"""Return an exponential (or Poisson) window.

    The window is defined as

    .. math:: w(n) = e^{-|n - center| / \tau}

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.
    center : float, optional
        Location of the window peak; defaults to ``(M-1) / 2``.  Must be
        left at its default for symmetric windows.
    tau : float, optional
        Decay constant.  For ``center = 0`` use ``tau = -(M-1) / ln(x)``
        if ``x`` is the fraction of the window remaining at the end.
    sym : bool, optional
        When True (default), generates a symmetric window, for use in
        filter design.
        When False, generates a periodic window, for use in spectral
        analysis.

    Returns
    -------
    w : ndarray
        The window, with the maximum value normalized to 1 (though the
        value 1 does not appear if `M` is even and `sym` is True).

    References
    ----------
    S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
    Technical Review 3, Bruel & Kjaer, 1987.
    """
    # A symmetric window must peak at its midpoint, so an explicit
    # center is rejected in that case.
    if sym and center is not None:
        raise ValueError("If sym==True, center must be None.")
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # Periodic windows of even length: compute one extra sample and
    # drop it before returning.
    trim_last = (not sym) and (M % 2 == 0)
    if trim_last:
        M = M + 1
    if center is None:
        center = (M - 1) / 2
    samples = np.arange(0, M)
    win = np.exp(-np.abs(samples - center) / tau)
    return win[:-1] if trim_last else win
# Map groups of accepted name aliases to (window function, needs_params).
# The boolean marks windows that take extra parameters and therefore
# cannot be requested by a bare name string in `get_window`; they must
# be passed as a tuple of (name, params...).
_win_equiv_raw = {
    ('barthann', 'brthan', 'bth'): (barthann, False),
    ('bartlett', 'bart', 'brt'): (bartlett, False),
    ('blackman', 'black', 'blk'): (blackman, False),
    ('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
    ('bohman', 'bman', 'bmn'): (bohman, False),
    ('boxcar', 'box', 'ones',
        'rect', 'rectangular'): (boxcar, False),
    ('chebwin', 'cheb'): (chebwin, True),
    ('cosine', 'halfcosine'): (cosine, False),
    ('exponential', 'poisson'): (exponential, True),
    ('flattop', 'flat', 'flt'): (flattop, False),
    ('gaussian', 'gauss', 'gss'): (gaussian, True),
    ('general gaussian', 'general_gaussian',
        'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
    ('hamming', 'hamm', 'ham'): (hamming, False),
    ('hanning', 'hann', 'han'): (hann, False),
    ('kaiser', 'ksr'): (kaiser, True),
    ('nuttall', 'nutl', 'nut'): (nuttall, False),
    ('parzen', 'parz', 'par'): (parzen, False),
    ('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
    ('triangle', 'triang', 'tri'): (triang, False),
    ('tukey', 'tuk'): (tukey, True),
}

# Fill dict with all valid window name strings:
# flattened alias -> window-function lookup table used by `get_window`.
_win_equiv = {}
for k, v in _win_equiv_raw.items():
    for key in k:
        _win_equiv[key] = v[0]

# Keep track of which windows need additional parameters; names in this
# set are rejected by `get_window` unless supplied as a tuple.
_needs_param = set()
for k, v in _win_equiv_raw.items():
    if v[1]:
        _needs_param.update(k)
def get_window(window, Nx, fftbins=True):
    """
    Return a window.

    Parameters
    ----------
    window : string, float, or tuple
        The type of window to create. See below for more details.
    Nx : int
        The number of samples in the window.
    fftbins : bool, optional
        If True, create a "periodic" window ready to use with `ifftshift`
        and be multiplied by the result of an fft (SEE ALSO `fftfreq`).

    Returns
    -------
    get_window : ndarray
        Returns a window of length `Nx` and type `window`

    Notes
    -----
    Window types:

        boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen,
        bohman, blackmanharris, nuttall, barthann, kaiser (needs beta),
        gaussian (needs std), general_gaussian (needs power, width),
        slepian (needs width), chebwin (needs attenuation)
        exponential (needs decay scale), tukey (needs taper fraction)

    If the window requires no parameters, then `window` can be a string.

    If the window requires parameters, then `window` must be a tuple
    with the first argument the string name of the window, and the next
    arguments the needed parameters.

    If `window` is a floating point number, it is interpreted as the beta
    parameter of the kaiser window.

    Each of the window types listed above is also the name of
    a function that can be called directly to create a window of
    that type.

    Examples
    --------
    >>> from scipy import signal
    >>> signal.get_window('triang', 7)
    array([ 0.25,  0.5 ,  0.75,  1.  ,  0.75,  0.5 ,  0.25])
    >>> signal.get_window(('kaiser', 4.0), 9)
    array([ 0.08848053,  0.32578323,  0.63343178,  0.89640418,  1.        ,
            0.89640418,  0.63343178,  0.32578323,  0.08848053])
    >>> signal.get_window(4.0, 9)
    array([ 0.08848053,  0.32578323,  0.63343178,  0.89640418,  1.        ,
            0.89640418,  0.63343178,  0.32578323,  0.08848053])
    """
    # fftbins=True means a periodic (non-symmetric) window.
    sym = not fftbins
    try:
        # A bare number is shorthand for a Kaiser window with that beta.
        beta = float(window)
    except (TypeError, ValueError):
        args = ()
        if isinstance(window, tuple):
            # Tuple form: (name, param1, param2, ...).
            winstr = window[0]
            if len(window) > 1:
                args = window[1:]
        elif isinstance(window, string_types):
            # NOTE(review): `string_types` presumably comes from a
            # six-style py2/py3 compat shim imported elsewhere in this
            # module -- confirm against the file header.
            if window in _needs_param:
                raise ValueError("The '" + window + "' window needs one or "
                                 "more parameters -- pass a tuple.")
            else:
                winstr = window
        else:
            raise ValueError("%s as window type is not supported." %
                             str(type(window)))
        try:
            winfunc = _win_equiv[winstr]
        except KeyError:
            raise ValueError("Unknown window type.")
        # All window functions take (M, *params, sym).
        params = (Nx,) + args + (sym,)
    else:
        winfunc = kaiser
        params = (Nx, beta, sym)
    return winfunc(*params)
| bsd-3-clause |
monarch-initiative/dipper | setup.py | 2 | 1401 | #!/usr/bin/env python3
from setuptools import setup, find_packages
import os
import subprocess  # NOTE(review): imported but unused here -- confirm before removing

directory = os.path.dirname(os.path.abspath(__file__))

# long_description: read the project README so PyPI shows it.
readme_path = os.path.join(directory, 'README.md')
with open(readme_path) as read_file:
    long_description = read_file.read()

setup(
    name='dipper',
    version='1.0.0',
    author='The Monarch Initiative',
    author_email='info@monarchinitiative.org',
    url='https://github.com/monarch-initiative/dipper',
    description='Library for transforming data from open genomic databases to RDF',
    # Fix: the README was read into long_description but never passed
    # to setup(), so the package page had no long description.
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=find_packages(),
    license='BSD',
    install_requires=[
        'rdflib',
        'isodate',
        'roman',
        'pyyaml',
        'requests',
        # Fix: 'ontobio' was previously listed twice; keep one entry.
        'ontobio',
        'psycopg2',
        'python-docx',
        'pysftp',
        'beautifulsoup4',
        'intermine',
        'numpy',
        'pandas',
    ],
    include_package_data=True,
    keywords='ontology graph obo owl sparql rdf',
    classifiers=[
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Visualization'
    ],
    scripts=['./dipper-etl.py']
)
| bsd-3-clause |
peterwilletts24/Python-Scripts | plot_scripts/EMBRACE/rad_flux/plot_from_pp_2201_diff_8km.py | 2 | 5598 | """
Load pp, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.analysis.cartography
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import scipy.interpolate
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
# Experiment identifiers to plot; each maps to a model-run directory.
experiment_ids = ['dklyu']

# Output directory for the saved figures.
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'

# Helper modules loaded directly from source files on the shared filesystem.
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/unrotate_pole.py')

# Basename of the .pp diagnostic file to load and plot.
pp_file = '2201_mean'

# Degrees trimmed from each edge of the plotted map domain.
degs_crop_top = 3.7
degs_crop_bottom = 3.5
degs_crop_left = 2
degs_crop_right = 3

# Colour-scale limits and colourbar tick spacing for the difference plot.
min_contour = -50
max_contour = 50
tick_interval=20
#
# cmap= cm.s3pcpn_l
divisor=10  # for lat/lon rounding
def main():
    """Plot each experiment's field minus the global-run field.

    Saves two PNGs per experiment (with and without a title).  Runs on
    Python 2 (print statements).
    """
    # Load diff cube (the global-run reference field).
    gl = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/dkmb/dkmbq/%s.pp' % pp_file
    glob = iris.load_cube(gl)
    for experiment_id in experiment_ids:
        # Experiment ids share a directory named by all-but-last char.
        expmin1 = experiment_id[:-1]
        pfile = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file)
        pcube = iris.load_cube(pfile)
        lat = pcube.coord('grid_latitude').points
        lon = pcube.coord('grid_longitude').points
        cs = pcube.coord_system('CoordSystem')
        # Convert rotated-pole coordinates to true lat/lon when needed.
        if isinstance(cs, iris.coord_systems.RotatedGeogCS):
            print ' %s - Unrotate pole %s' % (experiment_id,cs)
            lons, lats = np.meshgrid(lon, lat)
            lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
            lon=lons[0]
            lat=lats[:,0]
            #pcube.remove_coord('grid_latitude')
            #pcube.remove_coord('grid_longitude')
            #pcube.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord)
            #pcube.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord)
        # Round the domain limits outward to multiples of `divisor` so
        # the gridline labels land on round numbers.
        lon_min=np.min(lon)
        lon_max=np.max(lon)
        lon_low_tick=lon_min -(lon_min%divisor)
        lon_high_tick=math.ceil(lon_max/divisor)*divisor
        lat_min=np.min(lat)
        lat_max=np.max(lat)
        lat_low_tick=lat_min - (lat_min%divisor)
        lat_high_tick=math.ceil(lat_max/divisor)*divisor
        # Experiment field minus the global run.
        pcubediff=pcube-glob
        plt.figure(figsize=(8,8))
        cmap= cmap=plt.cm.RdBu_r
        ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_min+degs_crop_left,lon_max-degs_crop_right,lat_min+degs_crop_bottom,lat_max-degs_crop_top))
        clevs = np.linspace(min_contour, max_contour,9)
        cont = iplt.contourf(pcubediff, clevs, cmap=cmap, extend='both')
        #plt.clabel(cont, fmt='%d')
        #ax.stock_img()
        ax.coastlines(resolution='110m', color='#262626')
        gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
        gl.xlabels_top = False
        gl.ylabels_right = False
        #gl.xlines = False
        dx, dy = 10, 10
        gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
        gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
        gl.xformatter = LONGITUDE_FORMATTER
        gl.yformatter = LATITUDE_FORMATTER
        gl.xlabel_style = {'size': 12, 'color':'#262626'}
        #gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
        gl.ylabel_style = {'size': 12, 'color':'#262626'}
        cbar = plt.colorbar(cont, orientation='horizontal', pad=0.05, extend='both', format = '%d')
        #cbar.set_label('')
        cbar.set_label(pcube.units, fontsize=10, color='#262626')
        cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
        ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
        cbar.set_ticklabels(['%d' % i for i in ticks])
        main_title='%s - Difference' % pcube.standard_name.title().replace('_',' ')
        # Wrap the model description to ~68-char lines and strip quote
        # and parenthesis characters for the plot title.
        model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
        model_info = re.sub(r'[(\']', ' ', model_info)
        model_info = re.sub(r'[\',)]', ' ', model_info)
        print model_info
        # Save an untitled version first, then add the title and save again.
        if not os.path.exists('%s%s/%s' % (save_path, experiment_id, pp_file)): os.makedirs('%s%s/%s' % (save_path, experiment_id, pp_file))
        plt.savefig('%s%s/%s/%s_%s_notitle_diff_8km.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
        plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
        #plt.show()
        plt.savefig('%s%s/%s/%s_%s_diff_8km.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
        plt.close()


if __name__ == '__main__':
    main()
| mit |
hypergravity/bopy | bopy/spec/dataset.py | 1 | 17097 | # -*- coding: utf-8 -*-
"""
migrated from TheCannon package
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import sys
from corner import corner
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import rc
# from astropy.table import Table
from .find_continuum_pixels import *
from .continuum_normalization import (_cont_norm_gaussian_smooth,
_cont_norm_running_quantile,
_cont_norm_running_quantile_regions,
_find_cont_fitfunc,
_find_cont_fitfunc_regions,
_cont_norm,
_cont_norm_regions)
from .find_continuum_pixels import _find_contpix, _find_contpix_regions
rc('text', usetex=True)
rc('font', family='serif')
# if python3
PY3 = sys.version_info[0] > 2
if PY3:
basestring = (str, bytes)
else:
basestring = (str, unicode)
class Dataset(object):
""" A class to represent Cannon input: a dataset of spectra and labels """
def __init__(self, wl, tr_ID, tr_flux, tr_ivar, tr_label, test_ID, test_flux, test_ivar):
print("Loading dataset")
print("This may take a while...")
self.wl = wl
self.tr_ID = tr_ID
self.tr_flux = tr_flux
self.tr_ivar = tr_ivar
self.tr_label = tr_label
self.test_ID = test_ID
self.test_flux = test_flux
self.test_ivar = test_ivar
self.ranges = None
# calculate SNR
self.tr_SNR = np.array(
[self._SNR(*s) for s in zip(tr_flux, tr_ivar)])
self.test_SNR = np.array(
[self._SNR(*s) for s in zip(test_flux, test_ivar)])
def _SNR(self, flux, ivar):
""" Calculate the SNR of a spectrum, ignoring bad pixels
Parameters
----------
flux: numpy ndarray
pixel intensities
ivar: numpy ndarray
inverse variances corresponding to flux
Returns
-------
SNR: float
"""
take = ivar != 0
SNR = float(np.median(flux[take]*(ivar[take]**0.5)))
return SNR
def set_label_names(self, names):
""" Set the label names for plotting
Parameters
----------
names: ndarray or list
The names of the labels used for plotting, ex. in LaTeX syntax
"""
self._label_names = names
def get_plotting_labels(self):
""" Return the label names used make plots
Returns
-------
label_names: ndarray
The label names
"""
if self._label_names is None:
print("No label names yet!")
return None
else:
return self._label_names
def smooth_dataset(self):
""" Bins down all of the spectra and updates the dataset """
output = smooth_spectra(self.wl, self.tr_flux, self.tr_ivar)
self.wl = output[:,0,:]
self.tr_flux = output[:,1,:]
self.tr_ivar = output[:,2,:]
output = smooth_spectra(self.wl, self.test_flux, self.test_ivar)
self.test_flux = output[:,1,:]
self.test_ivar = output[:,2,:]
def diagnostics_SNR(self, figname="SNRdist.png"):
""" Plots SNR distributions of ref and test object spectra
Parameters
----------
(optional) figname: string
Filename to use for the output saved plot
"""
print("Diagnostic for SNRs of reference and survey objects")
data = self.test_SNR
plt.hist(data, bins=int(np.sqrt(len(data))), alpha=0.5, facecolor='r',
label="Survey Objects")
data = self.tr_SNR
plt.hist(data, bins=int(np.sqrt(len(data))), alpha=0.5, color='b',
label="Ref Objects")
plt.legend(loc='upper right')
#plt.xscale('log')
plt.title("SNR Comparison Between Reference and Survey Objects")
#plt.xlabel("log(Formal SNR)")
plt.xlabel("Formal SNR")
plt.ylabel("Number of Objects")
plt.savefig(figname)
plt.close()
print("Saved fig %s" %figname)
def diagnostics_ref_labels(self, figname="ref_labels_triangle.png"):
""" Plots all training labels against each other
Parameters
----------
(optional) figname: string
Filename of the saved output plot
"""
self._label_triangle_plot(self.tr_label, figname)
def _label_triangle_plot(self, label_vals, figname):
"""Make a triangle plot for the selected labels
Parameters
----------
label_vals: numpy ndarray
values of the labels
figname: str
if provided, save the figure into the given file
"""
labels = [r"$%s$" % l for l in self.get_plotting_labels()]
print("Plotting every label against every other")
fig = corner(label_vals, labels=labels, show_titles=True,
title_args={"fontsize":12})
fig.savefig(figname)
print("Saved fig %s" % figname)
plt.close(fig)
def make_contmask(self, fluxes, ivars, frac):
""" Identify continuum pixels using training spectra
Does this for each region of the spectrum if dataset.ranges is not None
Parameters
----------
fluxes: ndarray
Flux data values
ivars: ndarray
Inverse variances corresponding to flux data values
frac: float
The fraction of pixels that should be identified as continuum
Returns
-------
contmask: ndarray
Mask with True indicating that the pixel is continuum
"""
print("Finding continuum pixels...")
if self.ranges is None:
print("assuming continuous spectra")
contmask = _find_contpix(self.wl, fluxes, ivars, frac)
else:
print("taking spectra in %s regions" %len(self.ranges))
contmask = _find_contpix_regions(
self.wl, fluxes, ivars, frac, self.ranges)
print("%s pixels returned as continuum" %sum(contmask))
return contmask
def set_continuum(self, contmask):
""" Set the contmask attribute
Parameters
----------
contmask: ndarray
Mask with True indicating that the pixel is continuum
"""
self.contmask = contmask
def fit_continuum(self, deg, ffunc):
""" Fit a continuum to the continuum pixels
Parameters
----------
deg: int
Degree of the fitting function
ffunc: str
Type of fitting function, 'sinusoid' or 'chebyshev'
Returns
-------
tr_cont: ndarray
Flux values corresponding to the fitted continuum of training objects
test_cont: ndarray
Flux values corresponding to the fitted continuum of test objects
"""
print("Fitting Continuum...")
if self.ranges == None:
tr_cont = _find_cont_fitfunc(
self.tr_flux, self.tr_ivar, self.contmask, deg, ffunc)
test_cont = _find_cont_fitfunc(
self.test_flux, self.test_ivar, self.contmask, deg, ffunc)
else:
print("Fitting Continuum in %s Regions..." %len(self.ranges))
tr_cont = _find_cont_fitfunc_regions(self.tr_flux, self.tr_ivar,
self.contmask, deg, self.ranges, ffunc)
test_cont = _find_cont_fitfunc_regions(
self.test_flux, self.test_ivar,
self.contmask, deg, self.ranges, ffunc)
return tr_cont, test_cont
def continuum_normalize_training_q(self, q, delta_lambda):
""" Continuum normalize the training set using a running quantile
Parameters
----------
q: float
The quantile cut
delta_lambda: float
The width of the pixel range used to calculate the median
"""
print("Continuum normalizing the tr set using running quantile...")
if self.ranges is None:
return _cont_norm_running_quantile(
self.wl, self.tr_flux, self.tr_ivar,
q=q, delta_lambda=delta_lambda)
else:
return _cont_norm_running_quantile_regions(
self.wl, self.tr_flux, self.tr_ivar,
q=q, delta_lambda=delta_lambda, ranges=self.ranges)
def continuum_normalize(self, cont):
"""
Continuum normalize spectra, in chunks if spectrum has regions
Parameters
----------
cont: ndarray
Flux values corresponding to the continuum
Returns
-------
norm_tr_flux: ndarray
Normalized flux values for the training objects
norm_tr_ivar: ndarray
Rescaled inverse variance values for the training objects
norm_test_flux: ndarray
Normalized flux values for the test objects
norm_test_ivar: numpy ndarray
Rescaled inverse variance values for the test objects
"""
tr_cont, test_cont = cont
if self.ranges is None:
print("assuming continuous spectra")
norm_tr_flux, norm_tr_ivar = _cont_norm(
self.tr_flux, self.tr_ivar, tr_cont)
norm_test_flux, norm_test_ivar = _cont_norm(
self.test_flux, self.test_ivar, test_cont)
else:
print("taking spectra in %s regions" %(len(self.ranges)))
norm_tr_flux, norm_tr_ivar = _cont_norm_regions(
self.tr_flux, self.tr_ivar, tr_cont, self.ranges)
norm_test_flux, norm_test_ivar = _cont_norm_regions(
self.test_flux, self.test_ivar, test_cont, self.ranges)
return norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar
def continuum_normalize_gaussian_smoothing(self, L):
""" Continuum normalize using a Gaussian-weighted smoothed spectrum
Parameters
----------
dataset: Dataset
the dataset to continuum normalize
L: float
the width of the Gaussian used for weighting
"""
norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar = \
_cont_norm_gaussian_smooth(self, L)
self.tr_flux = norm_tr_flux
self.tr_ivar = norm_tr_ivar
self.test_flux = norm_test_flux
self.test_ivar = norm_test_ivar
def diagnostics_test_step_flagstars(self):
"""
Write files listing stars whose inferred labels lie outside 2 standard deviations from the reference label space
"""
label_names = self.get_plotting_labels()
nlabels = len(label_names)
reference_labels = self.tr_label
test_labels = self.test_label_vals
test_IDs = np.array(self.test_ID)
mean = np.mean(reference_labels, 0)
stdev = np.std(reference_labels, 0)
lower = mean - 2 * stdev
upper = mean + 2 * stdev
for i in range(nlabels):
label_name = label_names[i]
test_vals = test_labels[:,i]
warning = np.logical_or(test_vals < lower[i], test_vals > upper[i])
filename = "flagged_stars_%s.txt" % i
with open(filename, 'w') as output:
for star in test_IDs[warning]:
output.write('{0:s}\n'.format(star))
print("Reference label %s" % label_name)
print("flagged %s stars beyond 2-sig of ref labels" % sum(warning))
print("Saved list %s" % filename)
def diagnostics_survey_labels(self, figname="survey_labels_triangle.png"):
""" Plot all survey labels against each other
Parameters
----------
(optional) figname: str
Filename of saved output plot
"""
self._label_triangle_plot(self.test_label_vals, figname)
def diagnostics_1to1(self, figname="1to1_label"):
""" Plots survey labels vs. training labels, color-coded by survey SNR """
snr = self.test_SNR
label_names = self.get_plotting_labels()
nlabels = len(label_names)
reference_labels = self.tr_label
test_labels = self.test_label_vals
for i in range(nlabels):
name = label_names[i]
orig = reference_labels[:,i]
cannon = test_labels[:,i]
# calculate bias and scatter
scatter = np.round(np.std(orig-cannon),5)
bias = np.round(np.mean(orig-cannon),5)
low = np.minimum(min(orig), min(cannon))
high = np.maximum(max(orig), max(cannon))
fig = plt.figure(figsize=(10,6))
gs = gridspec.GridSpec(1,2,width_ratios=[2,1], wspace=0.3)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax1.plot([low, high], [low, high], 'k-', linewidth=2.0, label="x=y")
ax1.set_xlim(low, high)
ax1.set_ylim(low, high)
ax1.legend(fontsize=14, loc='lower right')
pl = ax1.scatter(orig, cannon, marker='x', c=snr,
vmin=50, vmax=200, alpha=0.7)
cb = plt.colorbar(pl, ax=ax1, orientation='horizontal')
cb.set_label('SNR from Test Set', fontsize=12)
textstr = 'Scatter: %s \nBias: %s' %(scatter, bias)
ax1.text(0.05, 0.95, textstr, transform=ax1.transAxes,
fontsize=14, verticalalignment='top')
ax1.tick_params(axis='x', labelsize=14)
ax1.tick_params(axis='y', labelsize=14)
ax1.set_xlabel("Reference Value", fontsize=14)
ax1.set_ylabel("Cannon Test Value", fontsize=14)
ax1.set_title("1-1 Plot of Label " + r"$%s$" % name)
diff = cannon-orig
npoints = len(diff)
mu = np.mean(diff)
sig = np.std(diff)
ax2.hist(diff)
#ax2.hist(diff, range=[-3*sig,3*sig], color='k', bins=np.sqrt(npoints),
# orientation='horizontal', alpha=0.3, histtype='stepfilled')
ax2.tick_params(axis='x', labelsize=14)
ax2.tick_params(axis='y', labelsize=14)
ax2.set_xlabel("Count", fontsize=14)
ax2.set_ylabel("Difference", fontsize=14)
ax2.axhline(y=0, c='k', lw=3, label='Difference=0')
ax2.set_title("Training Versus Test Labels for $%s$" %name,
fontsize=14)
ax2.legend(fontsize=14)
figname_full = "%s_%s.png" %(figname, i)
plt.savefig(figname_full)
print("Diagnostic for label output vs. input")
print("Saved fig %s" % figname_full)
plt.close()
def set_test_label_vals(self, vals):
""" Set test label values
Parameters
----------
vals: ndarray
Test label values
"""
self.test_label_vals = vals
def diagnostics_best_fit_spectra(self, *args, **kwargs):
""" Plot results of best-fit spectra for ten random test objects """
# overlay_spectra(model, self)
print('-------------------------------------------------------------')
print('@Cham: This method is deprecated due to in-complete migration')
print('@Cham: I am so sorry about that ... ')
print('-------------------------------------------------------------')
return None
# ###################################
# some general methods are moved here
# ###################################
def bin_flux(flux, ivar):
    """Combine two neighboring flux values into one binned value.

    Uses an inverse-variance-weighted mean; when every weight is zero
    the plain average of the two fluxes is returned instead.
    """
    total_weight = np.sum(ivar)
    if total_weight == 0:
        return np.sum(flux) / 2.
    return np.average(flux, weights=ivar)
def smooth_spectrum(wl, flux, ivar):
    """ Bins down one spectrum

    Adjacent pixel pairs are merged: wavelengths are averaged, fluxes are
    combined with `bin_flux` (inverse-variance weighting), and inverse
    variances are combined in quadrature.

    Parameters
    ----------
    wl: numpy ndarray
        wavelengths
    flux: numpy ndarray
        flux values
    ivar: numpy ndarray
        inverse variances associated with fluxes

    Returns
    -------
    wl: numpy ndarray
        updated binned pixel wavelengths
    flux: numpy ndarray
        updated binned flux values
    ivar: numpy ndarray
        updated binned inverse variances
    """
    # if odd, discard the last point
    if len(wl)%2 == 1:
        wl = np.delete(wl, -1)
        flux = np.delete(flux, -1)
        ivar = np.delete(ivar, -1)

    # Pair up neighbouring pixels: each row of the reshaped arrays holds
    # one pair to be merged.
    wl = wl.reshape(-1,2)
    ivar = ivar.reshape(-1,2)
    flux = flux.reshape(-1,2)
    wl_binned = np.mean(wl, axis=1)
    # NOTE(review): sqrt(ivar1**2 + ivar2**2) is not the standard
    # inverse-variance combination for a weighted mean (that would be
    # ivar1 + ivar2) -- confirm this quadrature rule is intentional.
    ivar_binned = np.sqrt(np.sum(ivar**2, axis=1))
    flux_binned = np.array([bin_flux(f,w) for f,w in zip(flux, ivar)])
    return wl_binned, flux_binned, ivar_binned
def smooth_spectra(wl, fluxes, ivars):
    """Bin down a block of spectra, one spectrum at a time.

    Returns an array of shape (n_spectra, 3, n_binned_pixels) where the
    middle axis stacks (wl, flux, ivar) as produced by `smooth_spectrum`.
    """
    smoothed = [smooth_spectrum(wl, f, iv) for f, iv in zip(fluxes, ivars)]
    return np.asarray(smoothed)
| bsd-3-clause |
caisq/tensorflow | tensorflow/contrib/losses/python/metric_learning/metric_loss_ops_test.py | 41 | 20535 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for triplet_semihard_loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.losses.python import metric_learning as metric_loss_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance_np(feature, squared=False):
  """Computes the pairwise distance matrix in numpy.

  Args:
    feature: 2-D numpy array of size [number of data, feature dimension]
    squared: Boolean. If true, output is the pairwise squared euclidean
      distance matrix; else, output is the pairwise euclidean distance
      matrix.

  Returns:
    pairwise_distances: 2-D numpy array of size
      [number of data, number of data].
  """
  num_data = feature.shape[0]
  pairwise_distances = np.zeros((num_data, num_data))
  # Fill the strict upper triangle, then mirror it below the diagonal;
  # the diagonal itself stays zero.
  rows, cols = np.triu_indices(num_data, 1)
  dists = np.linalg.norm(feature[cols] - feature[rows], axis=1)
  if squared:
    dists = dists ** 2.
  pairwise_distances[rows, cols] = dists
  return pairwise_distances + pairwise_distances.T
class ContrastiveLossTest(test.TestCase):
  """Checks `contrastive_loss` against a NumPy reference implementation."""

  def testContrastive(self):
    with self.test_session():
      num_data = 10
      feat_dim = 6
      margin = 1.0

      # Random anchor/positive embeddings and binary pair labels.
      embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
      embeddings_positive = np.random.rand(num_data, feat_dim).astype(
          np.float32)
      labels = np.random.randint(0, 2, size=(num_data,)).astype(np.float32)

      # Compute the loss in NP: squared distance for positive pairs
      # (label 1), squared hinge on (margin - distance) for negatives.
      dist = np.sqrt(
          np.sum(np.square(embeddings_anchor - embeddings_positive), axis=1))
      loss_np = np.mean(
          labels * np.square(dist) +
          (1.0 - labels) * np.square(np.maximum(margin - dist, 0.0)))

      # Compute the loss with TF
      loss_tf = metric_loss_ops.contrastive_loss(
          labels=ops.convert_to_tensor(labels),
          embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
          embeddings_positive=ops.convert_to_tensor(embeddings_positive),
          margin=margin)
      loss_tf = loss_tf.eval()
      self.assertAllClose(loss_np, loss_tf)
class TripletSemiHardLossTest(test.TestCase):
def testTripletSemiHard(self):
with self.test_session():
num_data = 10
feat_dim = 6
margin = 1.0
num_classes = 4
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
labels = np.random.randint(
0, num_classes, size=(num_data)).astype(np.float32)
# Reshape labels to compute adjacency matrix.
labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
# Compute the loss in NP.
adjacency = np.equal(labels_reshaped, labels_reshaped.T)
pdist_matrix = pairwise_distance_np(embedding, squared=True)
loss_np = 0.0
num_positives = 0.0
for i in range(num_data):
for j in range(num_data):
if adjacency[i][j] > 0.0 and i != j:
num_positives += 1.0
pos_distance = pdist_matrix[i][j]
neg_distances = []
for k in range(num_data):
if adjacency[i][k] == 0:
neg_distances.append(pdist_matrix[i][k])
# Sort by distance.
neg_distances.sort()
chosen_neg_distance = neg_distances[0]
for l in range(len(neg_distances)):
chosen_neg_distance = neg_distances[l]
if chosen_neg_distance > pos_distance:
break
loss_np += np.maximum(
0.0, margin - chosen_neg_distance + pos_distance)
loss_np /= num_positives
# Compute the loss in TF.
loss_tf = metric_loss_ops.triplet_semihard_loss(
labels=ops.convert_to_tensor(labels),
embeddings=ops.convert_to_tensor(embedding),
margin=margin)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
class LiftedStructLossTest(test.TestCase):
    """Compares lifted_struct_loss against a NumPy reference computation."""

    def testLiftedStruct(self):
        with self.test_session():
            num_data = 10
            feat_dim = 6
            margin = 1.0
            num_classes = 4
            embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
            labels = np.random.randint(
                0, num_classes, size=(num_data)).astype(np.float32)
            # Reshape labels to compute adjacency matrix.
            labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
            # Compute the loss in NP
            adjacency = np.equal(labels_reshaped, labels_reshaped.T)
            pdist_matrix = pairwise_distance_np(embedding)
            loss_np = 0.0
            num_constraints = 0.0
            for i in range(num_data):
                for j in range(num_data):
                    if adjacency[i][j] > 0.0 and i != j:
                        d_pos = pdist_matrix[i][j]
                        # Gather (margin - distance) terms for every negative
                        # of both endpoints of the positive pair (i, j).
                        negs = []
                        for k in range(num_data):
                            if not adjacency[i][k]:
                                negs.append(margin - pdist_matrix[i][k])
                        for l in range(num_data):
                            if not adjacency[j][l]:
                                negs.append(margin - pdist_matrix[j][l])
                        negs = np.array(negs)
                        # Numerically stable log-sum-exp (soft maximum).
                        max_elem = np.max(negs)
                        negs -= max_elem
                        negs = np.exp(negs)
                        soft_maximum = np.log(np.sum(negs)) + max_elem
                        num_constraints += 1.0
                        this_loss = max(soft_maximum + d_pos, 0)
                        loss_np += this_loss * this_loss
            # Each positive pair is visited as (i, j) and (j, i), hence the /2.
            loss_np = loss_np / num_constraints / 2.0
            # Compute the loss in TF
            loss_tf = metric_loss_ops.lifted_struct_loss(
                labels=ops.convert_to_tensor(labels),
                embeddings=ops.convert_to_tensor(embedding),
                margin=margin)
            loss_tf = loss_tf.eval()
            self.assertAllClose(loss_np, loss_tf)
def convert_to_list_of_sparse_tensor(np_matrix):
    """Converts a dense 0/1 matrix into one 1-D SparseTensor per row.

    Each returned SparseTensor has dense_shape (ncols,) and value 1.0 at each
    column index where the corresponding row of `np_matrix` equals 1.

    NOTE(review): a row with no set bits produces an empty `indices` array of
    shape (0,) rather than (0, 1) — callers here appear to guarantee at least
    one set bit per row; confirm before reusing elsewhere.
    """
    list_of_sparse_tensors = []
    nrows, ncols = np_matrix.shape
    for i in range(nrows):
        sp_indices = []
        for j in range(ncols):
            if np_matrix[i][j] == 1:
                sp_indices.append([j])
        num_non_zeros = len(sp_indices)
        list_of_sparse_tensors.append(sparse_tensor.SparseTensor(
            indices=np.array(sp_indices),
            values=np.ones((num_non_zeros,)),
            dense_shape=np.array([ncols,])))
    return list_of_sparse_tensors
class NpairsLossTest(test.TestCase):
    """Compares npairs_loss against a NumPy/TF reference computation."""

    def testNpairs(self):
        with self.test_session():
            num_data = 15
            feat_dim = 6
            num_classes = 5
            reg_lambda = 0.02
            embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
            embeddings_positive = np.random.rand(num_data, feat_dim).astype(
                np.float32)
            labels = np.random.randint(
                0, num_classes, size=(num_data)).astype(np.float32)
            # Reshape labels to compute adjacency matrix.
            labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
            # Compute the loss in NP
            # L2 regularization term over both embedding towers.
            reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
            reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
            reg_term *= 0.25 * reg_lambda
            similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
            # Each row of labels_remapped is normalized into a probability
            # distribution over the same-class positives.
            labels_remapped = np.equal(
                labels_reshaped, labels_reshaped.T).astype(np.float32)
            labels_remapped /= np.sum(labels_remapped, axis=1, keepdims=True)
            xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
                logits=ops.convert_to_tensor(similarity_matrix),
                labels=ops.convert_to_tensor(labels_remapped))).eval()
            loss_np = xent_loss + reg_term
            # Compute the loss in TF
            loss_tf = metric_loss_ops.npairs_loss(
                labels=ops.convert_to_tensor(labels),
                embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
                embeddings_positive=ops.convert_to_tensor(embeddings_positive),
                reg_lambda=reg_lambda)
            loss_tf = loss_tf.eval()
            self.assertAllClose(loss_np, loss_tf)
class NpairsLossMultiLabelTest(test.TestCase):
    """Tests for npairs_loss_multilabel."""

    def testNpairsMultiLabelLossWithSingleLabelEqualsNpairsLoss(self):
        """With one-hot labels the multilabel loss must reduce to npairs_loss."""
        with self.test_session():
            num_data = 15
            feat_dim = 6
            reg_lambda = 0.02
            embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
            embeddings_positive = np.random.rand(num_data, feat_dim).astype(
                np.float32)
            labels = np.arange(num_data)
            labels = np.reshape(labels, -1)
            # Compute vanila npairs loss.
            loss_npairs = metric_loss_ops.npairs_loss(
                labels=ops.convert_to_tensor(labels),
                embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
                embeddings_positive=ops.convert_to_tensor(embeddings_positive),
                reg_lambda=reg_lambda).eval()
            # Compute npairs multilabel loss.
            labels_one_hot = np.identity(num_data)
            loss_npairs_multilabel = metric_loss_ops.npairs_loss_multilabel(
                sparse_labels=convert_to_list_of_sparse_tensor(labels_one_hot),
                embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
                embeddings_positive=ops.convert_to_tensor(embeddings_positive),
                reg_lambda=reg_lambda).eval()
            self.assertAllClose(loss_npairs, loss_npairs_multilabel)

    def testNpairsMultiLabel(self):
        """Compares npairs_loss_multilabel against a NumPy/TF reference."""
        with self.test_session():
            num_data = 15
            feat_dim = 6
            num_classes = 10
            reg_lambda = 0.02
            embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
            embeddings_positive = np.random.rand(num_data, feat_dim).astype(
                np.float32)
            labels = np.random.randint(0, 2, (num_data, num_classes))
            # set entire column to one so that each row has at least one bit set.
            labels[:, -1] = 1
            # Compute the loss in NP
            reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
            reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
            reg_term *= 0.25 * reg_lambda
            similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)
            # Label similarity via inner products of the multi-hot label rows.
            # Use the builtin ``float``: the ``np.float`` alias was deprecated
            # in NumPy 1.20 and removed in NumPy 1.24.
            labels_remapped = np.dot(labels, labels.T).astype(float)
            labels_remapped /= np.sum(labels_remapped, 1, keepdims=True)
            xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
                logits=ops.convert_to_tensor(similarity_matrix),
                labels=ops.convert_to_tensor(labels_remapped))).eval()
            loss_np = xent_loss + reg_term
            # Compute the loss in TF
            loss_tf = metric_loss_ops.npairs_loss_multilabel(
                sparse_labels=convert_to_list_of_sparse_tensor(labels),
                embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
                embeddings_positive=ops.convert_to_tensor(embeddings_positive),
                reg_lambda=reg_lambda)
            loss_tf = loss_tf.eval()
            self.assertAllClose(loss_np, loss_tf)
def compute_ground_truth_cluster_score(feat, y):
    """Returns the ground-truth facility-location score as np.float32.

    For each unique label c, the best medoid within cluster c is the point
    minimizing the summed pairwise distances to the rest of that cluster;
    the score accumulates the negated minima over all clusters.

    Args:
      feat: 2-D array of features, one row per sample.
      y: 1-D array of cluster labels aligned with the rows of `feat`.
    """
    y_unique = np.unique(y)
    score_gt_np = 0.0
    for c in y_unique:
        feat_subset = feat[y == c, :]
        pdist_subset = pairwise_distance_np(feat_subset)
        score_gt_np += -1.0 * np.min(np.sum(pdist_subset, axis=0))
    # np.float32(...) instead of .astype: when y is empty, score_gt_np is
    # still the plain Python float 0.0, which has no .astype method.
    return np.float32(score_gt_np)
def compute_cluster_loss_numpy(feat,
                               y,
                               margin_multiplier=1.0,
                               enable_pam_finetuning=True):
    """Computes the structured clustering loss in NumPy.

    Fits a loss-augmented facility-location model to `feat` (optionally
    refined with PAM swaps) and returns the hinge of the gap between the
    augmented score and the ground-truth cluster score, as float32.
    """
    solver = ForwardGreedyFacility(n_clusters=np.unique(y).size)
    if enable_pam_finetuning:
        solver = solver.pam_augmented_fit(feat, y, margin_multiplier)
    else:
        solver = solver.loss_augmented_fit(feat, y, margin_multiplier)
    gap = solver.score_aug_ - compute_ground_truth_cluster_score(feat, y)
    return np.maximum(np.float32(0.0), gap)
class ForwardGreedyFacility(object):
    """Greedy forward selection for the (loss-augmented) facility-location
    problem, used as a NumPy reference for the structured clustering loss.

    Attributes set by fitting:
      labels_: cluster assignment index for each training point.
      cluster_centers_: feature vectors of the chosen medoids.
      center_ics_: indices of the chosen medoids.
      subset_: same indices, exposed by loss_augmented_fit.
      score_: facility score, -sum_i min_{j in S} d(x_i, x_j).
      score_aug_: score_ plus loss_mult * (1 - NMI vs. ground truth).
    """

    def __init__(self, n_clusters=8):
        self.n_clusters = n_clusters
        self.center_ics_ = None

    def _check_init_args(self):
        # Check n_clusters: the condition enforces a strictly positive int.
        if (self.n_clusters is None or self.n_clusters <= 0 or
                not isinstance(self.n_clusters, int)):
            raise ValueError('n_clusters has to be a positive integer.')

    def loss_augmented_fit(self, feat, y, loss_mult):
        """Fit K-Medoids to the provided data via greedy forward selection.

        Each greedy step adds the candidate medoid that maximizes the
        facility score plus loss_mult * (1 - NMI against ground truth y).
        """
        self._check_init_args()
        # Check that the array is good and attempt to convert it to
        # Numpy array if possible.
        feat = self._check_array(feat)
        # Apply distance metric to get the distance matrix.
        pdists = pairwise_distance_np(feat)
        num_data = feat.shape[0]
        candidate_ids = list(range(num_data))
        candidate_scores = np.zeros(num_data,)
        subset = []
        k = 0
        while k < self.n_clusters:
            candidate_scores = []
            for i in candidate_ids:
                # push i to subset.
                subset.append(i)
                marginal_cost = -1.0 * np.sum(np.min(pdists[:, subset], axis=1))
                loss = 1.0 - metrics.normalized_mutual_info_score(
                    y, self._get_cluster_ics(pdists, subset))
                candidate_scores.append(marginal_cost + loss_mult * loss)
                # remove i from subset.
                subset.pop()
            # push i_star to subset.
            i_star = candidate_ids[np.argmax(candidate_scores)]
            subset.append(i_star)
            # remove i_star from candidate indices.
            candidate_ids.remove(i_star)
            k += 1
        # Expose labels_ which are the assignments of
        # the training data to clusters.
        self.labels_ = self._get_cluster_ics(pdists, subset)
        # Expose cluster centers, i.e. medoids.
        self.cluster_centers_ = feat.take(subset, axis=0)
        # Expose indices of chosen cluster centers.
        self.center_ics_ = subset
        # Expose the score = -\sum_{i \in V} min_{j \in S} || x_i - x_j ||
        self.score_ = np.float32(-1.0) * self._get_facility_distance(pdists, subset)
        self.score_aug_ = self.score_ + loss_mult * (
            1.0 - metrics.normalized_mutual_info_score(
                y, self._get_cluster_ics(pdists, subset)))
        self.score_aug_ = self.score_aug_.astype(np.float32)
        # Expose the chosen cluster indices.
        self.subset_ = subset
        return self

    def _augmented_update_medoid_ics_in_place(self, pdists, y_gt, cluster_ics,
                                              medoid_ics, loss_mult):
        """Performs one loss-augmented PAM swap pass, updating medoid_ics in place."""
        for cluster_idx in range(self.n_clusters):
            # y_pred = self._get_cluster_ics(D, medoid_ics)
            # Don't prematurely do the assignment step.
            # Do this after we've updated all cluster medoids.
            y_pred = cluster_ics
            if sum(y_pred == cluster_idx) == 0:
                # Cluster is empty.
                continue
            curr_score = (
                -1.0 * np.sum(
                    pdists[medoid_ics[cluster_idx], y_pred == cluster_idx]) +
                loss_mult * (1.0 - metrics.normalized_mutual_info_score(
                    y_gt, y_pred)))
            pdist_in = pdists[y_pred == cluster_idx, :]
            pdist_in = pdist_in[:, y_pred == cluster_idx]
            all_scores_fac = np.sum(-1.0 * pdist_in, axis=1)
            all_scores_loss = []
            for i in range(y_pred.size):
                if y_pred[i] != cluster_idx:
                    continue
                # remove this cluster's current centroid
                medoid_ics_i = medoid_ics[:cluster_idx] + medoid_ics[cluster_idx + 1:]
                # add this new candidate to the centroid list
                medoid_ics_i += [i]
                y_pred_i = self._get_cluster_ics(pdists, medoid_ics_i)
                all_scores_loss.append(loss_mult * (
                    1.0 - metrics.normalized_mutual_info_score(y_gt, y_pred_i)))
            all_scores = all_scores_fac + all_scores_loss
            max_score_idx = np.argmax(all_scores)
            max_score = all_scores[max_score_idx]
            if max_score > curr_score:
                medoid_ics[cluster_idx] = np.where(
                    y_pred == cluster_idx)[0][max_score_idx]

    def pam_augmented_fit(self, feat, y, loss_mult):
        """Fits via loss_augmented_fit, then refines medoids with PAM swaps."""
        pam_max_iter = 5
        self._check_init_args()
        feat = self._check_array(feat)
        pdists = pairwise_distance_np(feat)
        self.loss_augmented_fit(feat, y, loss_mult)
        print('PAM -1 (before PAM): score: %f, score_aug: %f' % (
            self.score_, self.score_aug_))
        # Initialize from loss augmented facility location
        subset = self.center_ics_
        for iter_ in range(pam_max_iter):
            # update the cluster assignment
            cluster_ics = self._get_cluster_ics(pdists, subset)
            # update the medoid for each clusters
            self._augmented_update_medoid_ics_in_place(pdists, y, cluster_ics, subset,
                                                       loss_mult)
            self.score_ = np.float32(-1.0) * self._get_facility_distance(
                pdists, subset)
            self.score_aug_ = self.score_ + loss_mult * (
                1.0 - metrics.normalized_mutual_info_score(
                    y, self._get_cluster_ics(pdists, subset)))
            self.score_aug_ = self.score_aug_.astype(np.float32)
            print('PAM iter: %d, score: %f, score_aug: %f' % (iter_, self.score_,
                                                              self.score_aug_))
        self.center_ics_ = subset
        self.labels_ = cluster_ics
        return self

    def _check_array(self, feat):
        # The number of requested medoids cannot exceed the number of samples;
        # the original message stated this relationship backwards.
        if self.n_clusters > feat.shape[0]:
            raise ValueError('The number of medoids ({}) '.format(
                self.n_clusters) + 'must not exceed the number ' +
                'of samples ({}).'.format(feat.shape[0]))
        return feat

    def _get_cluster_ics(self, pdists, subset):
        """Returns cluster indices for pdist and current medoid indices."""
        # Assign data points to clusters based on
        # which cluster assignment yields
        # the smallest distance`
        cluster_ics = np.argmin(pdists[subset, :], axis=0)
        return cluster_ics

    def _get_facility_distance(self, pdists, subset):
        # Sum over points of the distance to the nearest chosen medoid.
        return np.sum(np.min(pdists[subset, :], axis=0))
class ClusterLossTest(test.TestCase):
    """Compares cluster_loss against compute_cluster_loss_numpy."""

    def _genClusters(self, n_samples, n_clusters):
        """Generates per-dimension standardized Gaussian-blob data and labels."""
        blobs = datasets.make_blobs(
            n_samples=n_samples, centers=n_clusters)
        embedding, labels = blobs
        # Standardize each feature dimension to zero mean, unit variance.
        embedding = (embedding - embedding.mean(axis=0)) / embedding.std(axis=0)
        embedding = embedding.astype(np.float32)
        return embedding, labels

    def testClusteringLossPAMOff(self):
        # Skip silently when sklearn is unavailable (NMI is needed).
        if not HAS_SKLEARN:
            return
        with self.test_session():
            margin_multiplier = 10.0
            embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
            loss_np = compute_cluster_loss_numpy(
                embeddings, labels, margin_multiplier, enable_pam_finetuning=False)
            loss_tf = metric_loss_ops.cluster_loss(
                labels=ops.convert_to_tensor(labels),
                embeddings=ops.convert_to_tensor(embeddings),
                margin_multiplier=margin_multiplier,
                enable_pam_finetuning=False)
            loss_tf = loss_tf.eval()
            self.assertAllClose(loss_np, loss_tf)

    def testClusteringLossPAMOn(self):
        # Same comparison with the PAM refinement path enabled.
        if not HAS_SKLEARN:
            return
        with self.test_session():
            margin_multiplier = 10.0
            embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)
            loss_np = compute_cluster_loss_numpy(
                embeddings, labels, margin_multiplier, enable_pam_finetuning=True)
            loss_tf = metric_loss_ops.cluster_loss(
                labels=ops.convert_to_tensor(labels),
                embeddings=ops.convert_to_tensor(embeddings),
                margin_multiplier=margin_multiplier,
                enable_pam_finetuning=True)
            loss_tf = loss_tf.eval()
            self.assertAllClose(loss_np, loss_tf)
if __name__ == '__main__':
    # Run all test cases in this module when executed directly.
    test.main()
| apache-2.0 |
rstoneback/pysat | pysat/tests/test_ssnl_plot.py | 2 | 3184 | """
tests the pysat averaging code
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
import pysat
from pysat.ssnl import plot
class TestBasics():
    """Functional checks for pysat.ssnl.plot.scatterplot."""

    def setup(self):
        """Runs before every method to create a clean testing setup."""
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean')
        # Restrict loading to a single day to keep each test fast.
        self.testInst.bounds = (pysat.datetime(2008, 1, 1),
                                pysat.datetime(2008, 1, 1))

    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.testInst
        plt.close()

    def test_scatterplot_w_ioff(self):
        """Check if scatterplot generates"""
        plt.ioff()
        figs = plot.scatterplot(self.testInst, 'longitude', 'latitude',
                                'slt', [0.0, 24.0])
        axes = figs[0].get_axes()
        assert len(figs) == 1
        # presumably 3 axes = two data panels plus a colorbar — verify
        # against the scatterplot implementation.
        assert len(axes) == 3
        # Interactive mode must remain off after plotting.
        assert not mpl.is_interactive()

    def test_scatterplot_w_ion(self):
        """Check if scatterplot generates and resets to interactive mode"""
        plt.ion()
        figs = plot.scatterplot(self.testInst, 'longitude', 'latitude',
                                'slt', [0.0, 24.0])
        axes = figs[0].get_axes()
        assert len(figs) == 1
        assert len(axes) == 3
        assert mpl.is_interactive()

    def test_scatterplot_w_limits(self):
        """Check if scatterplot generates with appropriate limits"""
        figs = plot.scatterplot(self.testInst, 'longitude', 'latitude',
                                'slt', [0.0, 24.0],
                                xlim=[0, 360], ylim=[-80, 80])
        axes = figs[0].get_axes()
        assert len(figs) == 1
        assert len(axes) == 3
        # The first two axes must honor the requested limits.
        assert axes[0].get_xlim() == (0, 360)
        assert axes[1].get_xlim() == (0, 360)
        assert axes[0].get_ylim() == (-80, 80)
        assert axes[1].get_ylim() == (-80, 80)

    def test_multiple_scatterplots(self):
        """Check if multiple scatterplots generate"""
        # Passing a list of data labels should yield one figure per label.
        figs = plot.scatterplot(self.testInst, 'longitude', 'latitude',
                                ['slt', 'mlt'], [0.0, 24.0])
        axes = figs[0].get_axes()
        axes2 = figs[1].get_axes()
        assert len(figs) == 2
        assert len(axes) == 3
        assert len(axes2) == 3
class TestDeprecation():
    """Checks that pysat.ssnl.plot.scatterplot emits a DeprecationWarning."""

    def setup(self):
        """Runs before every method to create a clean testing setup."""
        warnings.simplefilter("always")

    def teardown(self):
        """Runs after every method to clean up previous testing."""

    def test_deprecation_warning_scatterplot(self):
        """Test if scatterplot in ssnl is deprecated"""
        with warnings.catch_warnings(record=True) as war:
            try:
                plot.scatterplot(None, 'longitude', 'latitude', ['slt', 'mlt'],
                                 [0.0, 24.0])
            except TypeError:
                # Setting inst to None should produce a TypeError after
                # warning is generated
                pass
        assert len(war) >= 1
        # Search every captured warning rather than assuming the deprecation
        # notice is first in the list: unrelated warnings may precede it.
        assert any(issubclass(w.category, DeprecationWarning) for w in war)
| bsd-3-clause |
droundy/deft | papers/histogram/figs/ising-N128-lndos-comparison.py | 1 | 1853 | from __future__ import division, print_function
import sys, os, matplotlib
import numpy as np
matplotlib.rcParams['text.usetex'] = True
matplotlib.rc('font', family='serif')
if 'noshow' in sys.argv:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import colors
import readnew
plt.figure(figsize=(5, 4))
#fname = 'data/s000/periodic-ww1.30-ff0.30-N50-samc-100000-movie/005000-lndos.dat'
fname = 'data/ising-sad-128-s1-reference-lndos.dat'
e, lndos = readnew.e_lndos(fname)
emax = 108
emin = -32770
Tmin = 1.5
eminimportant = -32770
eSmax = 0
Smin = -11326
Sminimportant = -11000
#Sminimportant = 0
plt.plot(e, (e - eminimportant)/.2 + Sminimportant - Smin, 'r:')
ei = np.arange(eminimportant, eSmax, 0.1)
B = 1/(2*Tmin*(eSmax - eminimportant))
Stop = Sminimportant + B*(eSmax - eminimportant)**2
plt.fill_between(ei,
Stop - B*(ei - eSmax)**2 - Smin,
ei*0 + Sminimportant - Smin,
color='tab:blue', alpha=0.25,
label=r'quadratic $\Delta S_{\textrm{tot}}$')
interesting = (e > eminimportant)*(e < eSmax)
plt.rcParams['hatch.color'] = 'tab:green'
plt.rcParams['hatch.linewidth'] = 3
plt.fill_between(e[interesting],
lndos[interesting] - Smin,
e[interesting]*0 + Sminimportant - Smin,
#color='tab:green',
hatch='\\\\\\', label=r'actual $\Delta S_{\textrm{tot}}$',
facecolor='none', edgecolor='tab:green', linewidth=0)
plt.plot(e, lndos - Smin, color='tab:green', label='$S$')
plt.axvline(eminimportant, linestyle=':', color='tab:gray')
plt.axvline(eSmax, linestyle=':', color='tab:gray')
plt.ylabel('$S/k_B$')
plt.ylim(0, (Stop-Smin)*1.05)
plt.xlim(emin, emax)
plt.xlabel('$E$')
plt.legend(loc='lower right')
plt.tight_layout()
#plt.savefig('figs/N50-lndos-comparison.pdf')
plt.show()
| gpl-2.0 |
Akshay0724/scikit-learn | sklearn/covariance/tests/test_covariance.py | 79 | 12193 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
# Shared fixtures for the covariance tests: X is the diabetes feature matrix
# (n_samples, n_features); X_1d is its first column, used in 1-D edge cases.
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
    """End-to-end checks for EmpiricalCovariance / empirical_covariance."""
    # Tests Covariance module on a simple dataset.
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    # A fitted estimator reports zero error against its own covariance
    # under every supported norm/scaling/squaring option.
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, squared=False), 0)
    assert_raises(NotImplementedError,
                  cov.error_norm, emp_cov, norm='foo')
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    assert_greater(np.amin(mahal_dist), 0)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
    # test with one sample
    # Create X with 1 sample and 5 features
    X_1sample = np.arange(5).reshape(1, 5)
    cov = EmpiricalCovariance()
    # A single sample warns and yields an all-zero covariance.
    assert_warns(UserWarning, cov.fit, X_1sample)
    assert_array_almost_equal(cov.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))
    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)
    # test centered case
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
    """Checks ShrunkCovariance against the shrunk_covariance function."""
    # Tests ShrunkCovariance module on a simple dataset.
    # compare shrunk covariance obtained from data and from MLE estimate
    cov = ShrunkCovariance(shrinkage=0.5)
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
        cov.covariance_, 4)
    # same test with shrinkage not provided
    cov = ShrunkCovariance()
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
    # same test with shrinkage = 0 (<==> empirical_covariance)
    cov = ShrunkCovariance(shrinkage=0.)
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = ShrunkCovariance(shrinkage=0.3)
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
    cov.fit(X)
    assert(cov.precision_ is None)
def test_ledoit_wolf():
    """Checks LedoitWolf estimator, ledoit_wolf and ledoit_wolf_shrinkage
    for consistency, for centered and non-centered data, n_features = 1,
    and the single-sample warning case."""
    # Tests LedoitWolf module on a simple dataset.
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_centered)
    shrinkage_ = lw.shrinkage_
    score_ = lw.score(X_centered)
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
                                              assume_centered=True),
                        shrinkage_)
    # Blocked computation must agree with the default one.
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
                                              block_size=6),
                        shrinkage_)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False, assume_centered=True)
    lw.fit(X_centered)
    assert_almost_equal(lw.score(X_centered), score_, 4)
    assert(lw.precision_ is None)
    # Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    lw = LedoitWolf()
    lw.fit(X)
    assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
    assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
    assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
    assert_almost_equal(lw.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf()
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
    # test with one sample
    # warning should be raised when using only 1 sample
    X_1sample = np.arange(5).reshape(1, 5)
    lw = LedoitWolf()
    assert_warns(UserWarning, lw.fit, X_1sample)
    assert_array_almost_equal(lw.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))
    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False)
    lw.fit(X)
    assert_almost_equal(lw.score(X), score_, 4)
    assert(lw.precision_ is None)
def _naive_ledoit_wolf_shrinkage(X):
    """Direct (unblocked) Ledoit-Wolf shrinkage coefficient.

    Implements beta and delta as given at the start of section 3.2 of
    "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for
    Large-Dimensional Covariance Matrices", and returns beta / delta.
    """
    n, p = X.shape
    cov = empirical_covariance(X, assume_centered=False)
    mu = np.trace(cov) / p
    # delta: mean squared deviation of cov from mu * identity.
    deviation = cov.copy()
    deviation.flat[::p + 1] -= mu  # subtract mu along the diagonal
    delta = np.sum(deviation ** 2) / p
    # beta: estimation-error term, clipped at delta.
    sq = X ** 2
    beta_hat = np.sum(np.dot(sq.T, sq) / n - cov ** 2) / (p * n)
    return min(beta_hat, delta) / delta
def test_ledoit_wolf_small():
    """Blocked LW shrinkage matches the naive reference on a narrow matrix."""
    # Compare our blocked implementation to the naive implementation
    X_small = X[:, :4]
    lw = LedoitWolf()
    lw.fit(X_small)
    shrinkage_ = lw.shrinkage_
    assert_almost_equal(shrinkage_, _naive_ledoit_wolf_shrinkage(X_small))
def test_ledoit_wolf_large():
    """LedoitWolf handles n_features greater than block_size consistently."""
    # test that ledoit_wolf doesn't error on data that is wider than block_size
    rng = np.random.RandomState(0)
    # use a number of features that is larger than the block-size
    X = rng.normal(size=(10, 20))
    lw = LedoitWolf(block_size=10).fit(X)
    # check that covariance is about diagonal (random normal noise)
    assert_almost_equal(lw.covariance_, np.eye(20), 0)
    cov = lw.covariance_
    # check that the result is consistent with not splitting data into blocks.
    lw = LedoitWolf(block_size=25).fit(X)
    assert_almost_equal(lw.covariance_, cov)
def test_oas():
    """Checks the OAS estimator against the oas function and ShrunkCovariance,
    for centered and non-centered data, n_features = 1, and one sample."""
    # Tests OAS module on a simple dataset.
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    oa = OAS(assume_centered=True)
    oa.fit(X_centered)
    shrinkage_ = oa.shrinkage_
    score_ = oa.score(X_centered)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
                                                 assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0:1]
    oa = OAS(assume_centered=True)
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False, assume_centered=True)
    oa.fit(X_centered)
    assert_almost_equal(oa.score(X_centered), score_, 4)
    assert(oa.precision_ is None)
    # Same tests without assuming centered data--------------------------------
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X)
    assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
    assert_almost_equal(oa.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS()
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
    # test with one sample
    # warning should be raised when using only 1 sample
    X_1sample = np.arange(5).reshape(1, 5)
    oa = OAS()
    assert_warns(UserWarning, oa.fit, X_1sample)
    assert_array_almost_equal(oa.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))
    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X)
    assert_almost_equal(oa.score(X), score_, 4)
    assert(oa.precision_ is None)
| bsd-3-clause |
torypages/luigi | examples/pyspark_wc.py | 56 | 3361 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
from luigi.s3 import S3Target
from luigi.contrib.spark import SparkSubmitTask, PySparkTask
class InlinePySparkWordCount(PySparkTask):
    """
    This task runs a :py:class:`luigi.contrib.spark.PySparkTask` task
    over the target data in :py:meth:`wordcount.input` (a file in S3) and
    writes the result into its :py:meth:`wordcount.output` target (a file in S3).
    This class uses :py:meth:`luigi.contrib.spark.PySparkTask.main`.
    Example luigi configuration::
        [spark]
        spark-submit: /usr/local/spark/bin/spark-submit
        master: spark://spark.example.org:7077
        # py-packages: numpy, pandas
    """
    # Spark resource settings picked up by the PySparkTask base class.
    driver_memory = '2g'
    executor_memory = '3g'

    def input(self):
        # Source text file in S3.
        return S3Target("s3n://bucket.example.org/wordcount.input")

    def output(self):
        # Destination for the word-count results in S3.
        return S3Target('s3n://bucket.example.org/wordcount.output')

    def main(self, sc, *args):
        # Classic word count: tokenize each line, emit (word, 1) pairs, and
        # sum the counts per word before writing the result back out.
        sc.textFile(self.input().path) \
            .flatMap(lambda line: line.split()) \
            .map(lambda word: (word, 1)) \
            .reduceByKey(lambda a, b: a + b) \
            .saveAsTextFile(self.output().path)
class PySparkWordCount(SparkSubmitTask):
    """
    This task is the same as :py:class:`InlinePySparkWordCount` above but uses
    an external python driver file specified in :py:meth:`app`
    It runs a :py:class:`luigi.contrib.spark.SparkSubmitTask` task
    over the target data in :py:meth:`wordcount.input` (a file in S3) and
    writes the result into its :py:meth:`wordcount.output` target (a file in S3).
    This class uses :py:meth:`luigi.contrib.spark.SparkSubmitTask.run`.
    Example luigi configuration::
        [spark]
        spark-submit: /usr/local/spark/bin/spark-submit
        master: spark://spark.example.org:7077
        deploy-mode: client
    """
    # Spark resource settings picked up by the SparkSubmitTask base class.
    driver_memory = '2g'
    executor_memory = '3g'
    total_executor_cores = luigi.IntParameter(default=100)
    # Job name shown in the Spark UI and the external driver script to submit.
    name = "PySpark Word Count"
    app = 'wordcount.py'

    def app_options(self):
        # These are passed to the Spark main args in the defined order.
        return [self.input().path, self.output().path]

    def input(self):
        return S3Target("s3n://bucket.example.org/wordcount.input")

    def output(self):
        return S3Target('s3n://bucket.example.org/wordcount.output')
'''
// Corresponding example Spark Job, running Word count with Spark's Python API
// This file would have to be saved into wordcount.py
import sys
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext()
sc.textFile(sys.argv[1]) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(sys.argv[2])
'''
| apache-2.0 |
jmmease/pandas | pandas/tests/indexing/test_callable.py | 14 | 8721 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestIndexingCallable(object):
def test_frame_loc_ix_callable(self):
# GH 11485
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': list('aabb'),
'C': [1, 2, 3, 4]})
# iloc cannot use boolean Series (see GH3635)
# return bool indexer
res = df.loc[lambda x: x.A > 2]
tm.assert_frame_equal(res, df.loc[df.A > 2])
res = df.loc[lambda x: x.A > 2]
tm.assert_frame_equal(res, df.loc[df.A > 2])
res = df.loc[lambda x: x.A > 2, ]
tm.assert_frame_equal(res, df.loc[df.A > 2, ])
res = df.loc[lambda x: x.A > 2, ]
tm.assert_frame_equal(res, df.loc[df.A > 2, ])
res = df.loc[lambda x: x.B == 'b', :]
tm.assert_frame_equal(res, df.loc[df.B == 'b', :])
res = df.loc[lambda x: x.B == 'b', :]
tm.assert_frame_equal(res, df.loc[df.B == 'b', :])
res = df.loc[lambda x: x.A > 2, lambda x: x.columns == 'B']
tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]])
res = df.loc[lambda x: x.A > 2, lambda x: x.columns == 'B']
tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]])
res = df.loc[lambda x: x.A > 2, lambda x: 'B']
tm.assert_series_equal(res, df.loc[df.A > 2, 'B'])
res = df.loc[lambda x: x.A > 2, lambda x: 'B']
tm.assert_series_equal(res, df.loc[df.A > 2, 'B'])
res = df.loc[lambda x: x.A > 2, lambda x: ['A', 'B']]
tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']])
res = df.loc[lambda x: x.A > 2, lambda x: ['A', 'B']]
tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']])
res = df.loc[lambda x: x.A == 2, lambda x: ['A', 'B']]
tm.assert_frame_equal(res, df.loc[df.A == 2, ['A', 'B']])
res = df.loc[lambda x: x.A == 2, lambda x: ['A', 'B']]
tm.assert_frame_equal(res, df.loc[df.A == 2, ['A', 'B']])
# scalar
res = df.loc[lambda x: 1, lambda x: 'A']
assert res == df.loc[1, 'A']
res = df.loc[lambda x: 1, lambda x: 'A']
assert res == df.loc[1, 'A']
def test_frame_loc_ix_callable_mixture(self):
# GH 11485
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': list('aabb'),
'C': [1, 2, 3, 4]})
res = df.loc[lambda x: x.A > 2, ['A', 'B']]
tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']])
res = df.loc[lambda x: x.A > 2, ['A', 'B']]
tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']])
res = df.loc[[2, 3], lambda x: ['A', 'B']]
tm.assert_frame_equal(res, df.loc[[2, 3], ['A', 'B']])
res = df.loc[[2, 3], lambda x: ['A', 'B']]
tm.assert_frame_equal(res, df.loc[[2, 3], ['A', 'B']])
res = df.loc[3, lambda x: ['A', 'B']]
tm.assert_series_equal(res, df.loc[3, ['A', 'B']])
res = df.loc[3, lambda x: ['A', 'B']]
tm.assert_series_equal(res, df.loc[3, ['A', 'B']])
def test_frame_loc_callable(self):
# GH 11485
df = pd.DataFrame({'X': [1, 2, 3, 4],
'Y': list('aabb')},
index=list('ABCD'))
# return label
res = df.loc[lambda x: ['A', 'C']]
tm.assert_frame_equal(res, df.loc[['A', 'C']])
res = df.loc[lambda x: ['A', 'C'], ]
tm.assert_frame_equal(res, df.loc[['A', 'C'], ])
res = df.loc[lambda x: ['A', 'C'], :]
tm.assert_frame_equal(res, df.loc[['A', 'C'], :])
res = df.loc[lambda x: ['A', 'C'], lambda x: 'X']
tm.assert_series_equal(res, df.loc[['A', 'C'], 'X'])
res = df.loc[lambda x: ['A', 'C'], lambda x: ['X']]
tm.assert_frame_equal(res, df.loc[['A', 'C'], ['X']])
# mixture
res = df.loc[['A', 'C'], lambda x: 'X']
tm.assert_series_equal(res, df.loc[['A', 'C'], 'X'])
res = df.loc[['A', 'C'], lambda x: ['X']]
tm.assert_frame_equal(res, df.loc[['A', 'C'], ['X']])
res = df.loc[lambda x: ['A', 'C'], 'X']
tm.assert_series_equal(res, df.loc[['A', 'C'], 'X'])
res = df.loc[lambda x: ['A', 'C'], ['X']]
tm.assert_frame_equal(res, df.loc[['A', 'C'], ['X']])
def test_frame_loc_callable_setitem(self):
# GH 11485
df = pd.DataFrame({'X': [1, 2, 3, 4],
'Y': list('aabb')},
index=list('ABCD'))
# return label
res = df.copy()
res.loc[lambda x: ['A', 'C']] = -20
exp = df.copy()
exp.loc[['A', 'C']] = -20
tm.assert_frame_equal(res, exp)
res = df.copy()
res.loc[lambda x: ['A', 'C'], :] = 20
exp = df.copy()
exp.loc[['A', 'C'], :] = 20
tm.assert_frame_equal(res, exp)
res = df.copy()
res.loc[lambda x: ['A', 'C'], lambda x: 'X'] = -1
exp = df.copy()
exp.loc[['A', 'C'], 'X'] = -1
tm.assert_frame_equal(res, exp)
res = df.copy()
res.loc[lambda x: ['A', 'C'], lambda x: ['X']] = [5, 10]
exp = df.copy()
exp.loc[['A', 'C'], ['X']] = [5, 10]
tm.assert_frame_equal(res, exp)
# mixture
res = df.copy()
res.loc[['A', 'C'], lambda x: 'X'] = np.array([-1, -2])
exp = df.copy()
exp.loc[['A', 'C'], 'X'] = np.array([-1, -2])
tm.assert_frame_equal(res, exp)
res = df.copy()
res.loc[['A', 'C'], lambda x: ['X']] = 10
exp = df.copy()
exp.loc[['A', 'C'], ['X']] = 10
tm.assert_frame_equal(res, exp)
res = df.copy()
res.loc[lambda x: ['A', 'C'], 'X'] = -2
exp = df.copy()
exp.loc[['A', 'C'], 'X'] = -2
tm.assert_frame_equal(res, exp)
res = df.copy()
res.loc[lambda x: ['A', 'C'], ['X']] = -4
exp = df.copy()
exp.loc[['A', 'C'], ['X']] = -4
tm.assert_frame_equal(res, exp)
def test_frame_iloc_callable(self):
# GH 11485
df = pd.DataFrame({'X': [1, 2, 3, 4],
'Y': list('aabb')},
index=list('ABCD'))
# return location
res = df.iloc[lambda x: [1, 3]]
tm.assert_frame_equal(res, df.iloc[[1, 3]])
res = df.iloc[lambda x: [1, 3], :]
tm.assert_frame_equal(res, df.iloc[[1, 3], :])
res = df.iloc[lambda x: [1, 3], lambda x: 0]
tm.assert_series_equal(res, df.iloc[[1, 3], 0])
res = df.iloc[lambda x: [1, 3], lambda x: [0]]
tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
# mixture
res = df.iloc[[1, 3], lambda x: 0]
tm.assert_series_equal(res, df.iloc[[1, 3], 0])
res = df.iloc[[1, 3], lambda x: [0]]
tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
res = df.iloc[lambda x: [1, 3], 0]
tm.assert_series_equal(res, df.iloc[[1, 3], 0])
res = df.iloc[lambda x: [1, 3], [0]]
tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
def test_frame_iloc_callable_setitem(self):
# GH 11485
df = pd.DataFrame({'X': [1, 2, 3, 4],
'Y': list('aabb')},
index=list('ABCD'))
# return location
res = df.copy()
res.iloc[lambda x: [1, 3]] = 0
exp = df.copy()
exp.iloc[[1, 3]] = 0
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], :] = -1
exp = df.copy()
exp.iloc[[1, 3], :] = -1
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], lambda x: 0] = 5
exp = df.copy()
exp.iloc[[1, 3], 0] = 5
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], lambda x: [0]] = 25
exp = df.copy()
exp.iloc[[1, 3], [0]] = 25
tm.assert_frame_equal(res, exp)
# mixture
res = df.copy()
res.iloc[[1, 3], lambda x: 0] = -3
exp = df.copy()
exp.iloc[[1, 3], 0] = -3
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[[1, 3], lambda x: [0]] = -5
exp = df.copy()
exp.iloc[[1, 3], [0]] = -5
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], 0] = 10
exp = df.copy()
exp.iloc[[1, 3], 0] = 10
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], [0]] = [-5, -5]
exp = df.copy()
exp.iloc[[1, 3], [0]] = [-5, -5]
tm.assert_frame_equal(res, exp)
| bsd-3-clause |
benoitsteiner/tensorflow-xsmm | tensorflow/examples/learn/hdf5_classification.py | 75 | 2899 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, hdf5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Note that we are saving and load iris data as h5 format as a simple
# demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
silky/sms-tools | lectures/09-Sound-description/plots-code/mfcc.py | 25 | 1103 | import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
mfcc = ess.MFCC(numberCoefficients = 12)
x = ess.MonoLoader(filename = '../../../sounds/speech-male.wav', sampleRate = fs)()
mfccs = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
mfcc_bands, mfcc_coeffs = mfcc(mX)
mfccs.append(mfcc_coeffs)
mfccs = np.array(mfccs)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (speech-male.wav)')
plt.subplot(2,1,2)
numFrames = int(mfccs[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.pcolormesh(frmTime, 1+np.arange(12), np.transpose(mfccs[:,1:]))
plt.ylabel('coefficients')
plt.title('MFCCs')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('mfcc.png')
plt.show()
| agpl-3.0 |
quchunguang/test | testpy/testmatplotlib.py | 1 | 1277 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
def f(t):
return np.exp(-t) * np.cos(2*np.pi*t)
def simple_plot1():
plt.plot([1, 2, 3, 4])
plt.ylabel('some numbers')
plt.show()
def simple_plot2():
plt.plot([1, 2, 3, 4], [1, 4, 9, 16], 'ro')
plt.axis([0, 6, 0, 20])
plt.show()
def simple_plot3():
# evenly sampled time at 200ms intervals
t = np.arange(0., 5., 0.2)
# red dashes, blue squares and green triangles
plt.plot(t, t, 'r--', t, t**2, 'bs', t, t**3, 'g^')
plt.show()
def subplot():
t1 = np.arange(0.0, 5.0, 0.1)
t2 = np.arange(0.0, 5.0, 0.02)
plt.figure(1)
plt.subplot(211)
plt.plot(t1, f(t1), 'bo', t2, f(t2), 'k')
plt.subplot(212)
plt.plot(t2, np.cos(2*np.pi*t2), 'r--')
plt.show()
def hist():
mu, sigma = 100, 15
x = mu + sigma * np.random.randn(10000, 1)
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title('Histogram of IQ')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
if __name__ == '__main__':
print norm.pdf(0)
hist()
| mit |
petebachant/TurbineDAQ-project-template | Modules/processing.py | 1 | 30074 | # -*- coding: utf-8 -*-
"""
This module contains classes and functions for processing data.
"""
from __future__ import division, print_function
import numpy as np
from pxl import timeseries as ts
from pxl.timeseries import loadhdf
import matplotlib.pyplot as plt
import multiprocessing as mp
import scipy.stats
from scipy.stats import nanmean, nanstd
from pxl import fdiff
import progressbar
import json
import os
import sys
import pandas as pd
if sys.version_info[0] == 3:
from urllib.request import urlretrieve
else:
from urllib import urlretrieve
def read_turbine_properties():
with open("Config/turbine_properties.json") as f:
turbine_properties = json.load(f)
return turbine_properties
# Read turbine property constants
turbine_name = "RVAT"
turbine_properties = read_turbine_properties()[turbine_name]
H = turbine_properties["height"]
D = turbine_properties["diameter"]
A = D*H
R = D/2
rho = 1000.0
nu = 1e-6
# Directory constants
raw_data_dir = os.path.join("Data", "Raw")
processed_data_dir = os.path.join("Data", "Processed")
def calc_b_vec(vel):
"""Calculates the systematic error of a Vectrino measurement (in m/s)
from their published specs. Returns half the +/- value as b."""
return 0.5*(0.005*np.abs(vel) + 0.001)
def calc_uncertainty(quantity, b):
return np.sqrt(nanstd(quantity)**2 + b**2)
def calc_tare_torque(rpm):
"""Returns tare torque array given RPM array."""
return 0.00104768276035*rpm - 0.848866229797
def calc_re_c(u_infty, c=0.14, tsr=3.1):
"""
Calculates the average blade chord Reynolds number based on free
stream velocity and tip speed ratio.
"""
return tsr*u_infty*c/nu
class Run(object):
"""Object that represents a single turbine tow"""
def __init__(self, section, nrun):
self.section = section
self.nrun = int(nrun)
section_raw_dir = os.path.join("Data", "Raw", section)
self.raw_dir = os.path.join(section_raw_dir, str(self.nrun))
self.loaded = False
self.t2found = False
self.not_loadable = False
self.wake_calculated = False
self.load()
# Do all processing if all data was loaded successfully
if self.loaded:
self.subtract_tare_drag()
self.add_tare_torque()
self.calc_perf_instantaneous()
self.make_trimmed()
self.calc_perf_per_rev()
self.calc_perf_stats()
self.calc_perf_uncertainty()
self.calc_perf_exp_uncertainty()
else:
print("Cannot load Run data")
def load(self):
"""Loads the data from the run into memory."""
self.loaded = True
try:
with open("Config/raw_data_urls.json") as f:
raw_data_urls = json.load(f)
except IOError:
raw_data_urls = {}
# Load metadata if it exists
fpath_metadata = os.path.join(self.raw_dir, "metadata.json")
if os.path.isfile(fpath_metadata):
self.load_metadata()
elif make_remote_name(fpath_metadata) in raw_data_urls:
self.download_raw("metadata.json")
self.load_metadata()
else:
self.loaded = False
# Load NI data if it exists
fpath_nidata = os.path.join(self.raw_dir, "nidata.h5")
if os.path.isfile(fpath_nidata):
self.load_nidata()
elif make_remote_name(fpath_nidata) in raw_data_urls:
self.download_raw("nidata.h5")
self.load_nidata()
else:
self.loaded = False
# Load ACS data if it exists
fpath_acsdata = os.path.join(self.raw_dir, "acsdata.h5")
if os.path.isfile(fpath_acsdata):
self.load_acsdata()
elif make_remote_name(fpath_acsdata) in raw_data_urls:
self.download_raw("acsdata.h5")
self.load_acsdata()
else:
self.loaded = False
def load_metadata(self):
"""Loads run metadata."""
with open(os.path.join(self.raw_dir, "metadata.json")) as f:
self.metadata = json.load(f)
self.tow_speed_nom = np.round(self.metadata["Tow speed (m/s)"], decimals=1)
self.tsr_nom = self.metadata["Tip speed ratio"]
def load_nidata(self):
nidata = loadhdf(os.path.join(self.raw_dir, "nidata.h5"))
self.time_ni = nidata["time"]
self.sr_ni = (1.0/(self.time_ni[1] - self.time_ni[0]))
self.carriage_pos = nidata["carriage_pos"]
self.tow_speed_ni = fdiff.second_order_diff(self.carriage_pos, self.time_ni)
self.tow_speed_ni = ts.smooth(self.tow_speed_ni, 100)
self.tow_speed_ref = self.tow_speed_ni.copy()
self.tow_speed_ref[np.abs(self.tow_speed_ref) < 0.01] = np.nan
self.torque = nidata["torque_trans"]
self.torque_arm = nidata["torque_arm"]
self.drag = nidata["drag_left"] + nidata["drag_right"]
# Remove offsets from drag, not torque
t0 = 2
self.drag = self.drag - np.mean(self.drag[0:self.sr_ni*t0])
# Compute RPM and omega
self.angle = nidata["turbine_angle"]
self.rpm_ni = fdiff.second_order_diff(self.angle, self.time_ni)/6.0
self.rpm_ni = ts.smooth(self.rpm_ni, 8)
self.omega_ni = self.rpm_ni*2*np.pi/60.0
self.omega = self.omega_ni
self.tow_speed = self.tow_speed_ref
def load_acsdata(self):
fpath = os.path.join(self.raw_dir, "acsdata.h5")
acsdata = loadhdf(fpath)
self.tow_speed_acs = acsdata["carriage_vel"]
self.rpm_acs = acsdata["turbine_rpm"]
self.rpm_acs = ts.sigmafilter(self.rpm_acs, 3, 3)
self.omega_acs = self.rpm_acs*2*np.pi/60.0
self.time_acs = acsdata["time"]
if len(self.time_acs) != len(self.omega_acs):
newlen = np.min((len(self.time_acs), len(self.omega_acs)))
self.time_acs = self.time_acs[:newlen]
self.omega_acs = self.omega_acs[:newlen]
self.omega_acs_interp = np.interp(self.time_ni, self.time_acs, self.omega_acs)
self.rpm_acs_interp = self.omega_acs_interp*60.0/(2*np.pi)
def download_raw(self, name):
download_raw(self.section, self.nrun, name)
def subtract_tare_drag(self):
df = pd.read_csv(os.path.join("Data", "Processed", "Tare-drag.csv"))
self.tare_drag = df.tare_drag[df.tow_speed==self.tow_speed_nom].values[0]
self.drag = self.drag - self.tare_drag
def add_tare_torque(self):
rpm_ref = self.rpm_ni
# Add tare torque
self.tare_torque = calc_tare_torque(rpm_ref)
self.torque += self.tare_torque
def calc_perf_instantaneous(self):
omega_ref = self.omega_ni
# Compute power
self.power = self.torque*omega_ref
self.tsr = omega_ref*R/self.tow_speed_ref
# Compute power, drag, and torque coefficients
self.cp = self.power/(0.5*rho*A*self.tow_speed_ref**3)
self.cd = self.drag/(0.5*rho*A*self.tow_speed_ref**2)
self.ct = self.torque/(0.5*rho*A*R*self.tow_speed_ref**2)
def make_trimmed(self):
"""Trim all time series and replace the full run names with names with
the '_all' suffix."""
# Put in some guesses for t1 and t2
stpath = "Config/Steady times/{}.csv".format(self.tow_speed_nom)
s_times = pd.read_csv(stpath)
s_times = s_times[s_times.tsr==self.tsr_nom].iloc[0]
self.t1, self.t2 = s_times.t1, s_times.t2
self.find_t2()
# Trim performance quantities
self.time_ni_all = self.time_ni
self.time_perf_all = self.time_ni
self.time_ni = self.time_ni_all[self.t1*self.sr_ni:self.t2*self.sr_ni]
self.time_perf = self.time_ni
self.angle_all = self.angle
self.angle = self.angle_all[self.t1*self.sr_ni:self.t2*self.sr_ni]
self.torque_all = self.torque
self.torque = self.torque_all[self.t1*self.sr_ni:self.t2*self.sr_ni]
self.torque_arm_all = self.torque_arm
self.torque_arm = self.torque_arm_all[self.t1*self.sr_ni:self.t2*self.sr_ni]
self.omega_all = self.omega
self.omega = self.omega_all[self.t1*self.sr_ni:self.t2*self.sr_ni]
self.tow_speed_all = self.tow_speed
self.tow_speed = self.tow_speed_all[self.t1*self.sr_ni:self.t2*self.sr_ni]
self.tsr_all = self.tsr
self.tsr = self.tsr_all[self.t1*self.sr_ni:self.t2*self.sr_ni]
self.cp_all = self.cp
self.cp = self.cp_all[self.t1*self.sr_ni:self.t2*self.sr_ni]
self.ct_all = self.ct
self.ct = self.ct_all[self.t1*self.sr_ni:self.t2*self.sr_ni]
self.cd_all = self.cd
self.cd = self.cd_all[self.t1*self.sr_ni:self.t2*self.sr_ni]
self.rpm_ni_all = self.rpm_ni
self.rpm_ni = self.rpm_ni_all[self.t1*self.sr_ni:self.t2*self.sr_ni]
self.rpm = self.rpm_ni
self.rpm_all = self.rpm_ni_all
self.drag_all = self.drag
self.drag = self.drag_all[self.t1*self.sr_ni:self.t2*self.sr_ni]
def find_t2(self):
sr = self.sr_ni
angle1 = self.angle[sr*self.t1]
angle2 = self.angle[sr*self.t2]
n3rdrevs = np.floor((angle2-angle1)/120.0)
self.n_revs = int(np.floor((angle2-angle1)/360.0))
self.n_blade_pass = int(n3rdrevs)
angle2 = angle1 + n3rdrevs*120
t2i = np.where(np.round(self.angle)==np.round(angle2))[0][0]
t2 = self.time_ni[t2i]
self.t2 = np.round(t2, decimals=2)
self.t2found = True
self.t1_wake = self.t1
self.t2_wake = self.t2
def calc_perf_stats(self):
"""Calculates mean performance based on trimmed time series."""
self.mean_tsr, self.std_tsr = nanmean(self.tsr), nanstd(self.tsr)
self.mean_cp, self.std_cp = nanmean(self.cp), nanstd(self.cp)
self.mean_cd, self.std_cd = nanmean(self.cd), nanstd(self.cd)
self.mean_ct, self.std_ct = nanmean(self.ct), nanstd(self.ct)
self.mean_u_enc = nanmean(self.tow_speed)
self.mean_tow_speed = self.mean_u_enc
self.std_u_enc = nanstd(self.tow_speed)
self.std_tow_speed = self.std_u_enc
def print_perf_stats(self):
print("tow_speed_nom =", self.tow_speed_nom)
print("mean_tow_speed_enc =", self.mean_u_enc)
print("std_tow_speed_enc =", self.std_u_enc)
print("TSR = {:.2f} +/- {:.2f}".format(self.mean_tsr, self.exp_unc_tsr))
print("C_P = {:.2f} +/- {:.2f}".format(self.mean_cp, self.exp_unc_cp))
print("C_D = {:.2f} +/- {:.2f}".format(self.mean_cd, self.exp_unc_cd))
def calc_perf_uncertainty(self):
"""See uncertainty IPython notebook for equations."""
# Systematic uncertainty estimates
b_torque = 0.5/2
b_angle = 3.14e-5/2
b_car_pos = 0.5e-5/2
b_force = 0.28/2
# Uncertainty of C_P
omega = self.omega.mean()
torque = self.torque.mean()
u_infty = np.mean(self.tow_speed)
const = 0.5*rho*A
b_cp = np.sqrt((omega/(const*u_infty**3))**2*b_torque**2 + \
(torque/(const*u_infty**3))**2*b_angle**2 + \
(-3*torque*omega/(const*u_infty**4))**2*b_car_pos**2)
self.b_cp = b_cp
self.unc_cp = calc_uncertainty(self.cp_per_rev, b_cp)
# Drag coefficient
drag = self.drag.mean()
b_cd = np.sqrt((1/(const*u_infty**2))**2*b_force**2 + \
(1/(const*u_infty**2))**2*b_force**2 +
(-2*drag/(const*u_infty**3))**2*b_car_pos**2)
self.unc_cd = calc_uncertainty(self.cd_per_rev, b_cd)
self.b_cd = b_cd
# Tip speed ratio
b_tsr = np.sqrt((R/(u_infty))**2*b_angle**2 + \
(-omega*R/(u_infty**2))**2*b_car_pos**2)
self.unc_tsr = calc_uncertainty(self.tsr_per_rev, b_tsr)
self.b_tsr = b_tsr
def calc_perf_exp_uncertainty(self):
"""See uncertainty IPython notebook for equations."""
# Power coefficient
s_cp = self.std_cp_per_rev
nu_s_cp = len(self.cp_per_rev) - 1
b_cp = self.b_cp
b_cp_rel_unc = 0.25 # A guess
nu_b_cp = 0.5*b_cp_rel_unc**(-2)
nu_cp = ((s_cp**2 + b_cp**2)**2)/(s_cp**4/nu_s_cp + b_cp**4/nu_b_cp)
t = scipy.stats.t.interval(alpha=0.95, df=nu_cp)[-1]
self.exp_unc_cp = t*self.unc_cp
self.dof_cp = nu_cp
# Drag coefficient
s_cd = self.std_cd_per_rev
nu_s_cd = len(self.cd_per_rev) - 1
b_cd = self.b_cd
b_cd_rel_unc = 0.25 # A guess
nu_b_cd = 0.5*b_cd_rel_unc**(-2)
nu_cd = ((s_cd**2 + b_cd**2)**2)/(s_cd**4/nu_s_cd + b_cd**4/nu_b_cd)
t = scipy.stats.t.interval(alpha=0.95, df=nu_cd)[-1]
self.exp_unc_cd = t*self.unc_cd
self.dof_cd = nu_cd
# Tip speed ratio
s_tsr = self.std_tsr_per_rev
nu_s_tsr = len(self.tsr_per_rev) - 1
b_tsr = self.b_tsr
b_tsr_rel_unc = 0.25 # A guess
nu_b_tsr = 0.5*b_tsr_rel_unc**(-2)
nu_tsr = ((s_tsr**2 + b_tsr**2)**2)/(s_tsr**4/nu_s_tsr + b_tsr**4/nu_b_tsr)
t = scipy.stats.t.interval(alpha=0.95, df=nu_tsr)[-1]
self.exp_unc_tsr = t*self.unc_tsr
self.dof_tsr = nu_tsr
def calc_perf_per_rev(self):
"""Computes mean power coefficient over each revolution."""
angle = self.angle*1
angle -= angle[0]
cp = np.zeros(self.n_revs)
cd = np.zeros(self.n_revs)
tsr = np.zeros(self.n_revs)
torque = np.zeros(self.n_revs)
omega = np.zeros(self.n_revs)
start_angle = 0.0
for n in range(self.n_revs):
end_angle = start_angle + 360
ind = np.logical_and(angle >= start_angle, end_angle > angle)
cp[n] = self.cp[ind].mean()
cd[n] = self.cd[ind].mean()
tsr[n] = self.tsr[ind].mean()
torque[n] = self.torque[ind].mean()
omega[n] = self.omega[ind].mean()
start_angle += 360
self.cp_per_rev = cp
self.std_cp_per_rev = cp.std()
self.cd_per_rev = cd
self.std_cd_per_rev = cd.std()
self.tsr_per_rev = tsr
self.std_tsr_per_rev = tsr.std()
self.torque_per_rev = torque
self.std_torque_per_rev = torque.std()
@property
def cp_conf_interval(self, alpha=0.95):
self.calc_perf_per_rev()
t_val = scipy.stats.t.interval(alpha=alpha, df=self.n_revs-1)[1]
std = self.std_cp_per_rev
return t_val*std/np.sqrt(self.n_revs)
@property
def summary(self):
s = pd.Series()
s["run"] = self.nrun
if self.loaded:
s["tow_speed_nom"] = self.tow_speed_nom
s["tsr_nom"] = self.tsr_nom
s["mean_tow_speed"] = self.mean_u_enc
s["std_tow_speed"] = self.std_u_enc
s["t1"] = self.t1
s["t2"] = self.t2
s["n_blade_pass"] = self.n_blade_pass
s["n_revs"] = self.n_revs
s["mean_tsr"] = self.mean_tsr
s["mean_cp"] = self.mean_cp
s["mean_cd"] = self.mean_cd
s["std_tsr"] = self.std_tsr
s["std_cp"] = self.std_cp
s["std_cd"] = self.std_cd
s["std_tsr_per_rev"] = self.std_tsr_per_rev
s["std_cp_per_rev"] = self.std_cp_per_rev
s["std_cd_per_rev"] = self.std_cd_per_rev
s["exp_unc_tsr"] = self.exp_unc_tsr
s["exp_unc_cp"] = self.exp_unc_cp
s["exp_unc_cd"] = self.exp_unc_cd
s["dof_tsr"] = self.dof_tsr
s["dof_cp"] = self.dof_cp
s["dof_cd"] = self.dof_cd
else:
s["tow_speed_nom"] = np.nan
s["tsr_nom"] = np.nan
s["mean_tow_speed"] = np.nan
s["std_tow_speed"] = np.nan
s["t1"] = np.nan
s["t2"] = np.nan
s["n_blade_pass"] = np.nan
s["n_revs"] = np.nan
s["mean_tsr"] = np.nan
s["mean_cp"] = np.nan
s["mean_cd"] = np.nan
s["std_tsr"] = np.nan
s["std_cp"] = np.nan
s["std_cd"] = np.nan
s["std_tsr_per_rev"] = np.nan
s["std_cp_per_rev"] = np.nan
s["std_cd_per_rev"] = np.nan
s["exp_unc_tsr"] = np.nan
s["exp_unc_cp"] = np.nan
s["exp_unc_cd"] = np.nan
s["dof_tsr"] = np.nan
s["dof_cp"] = np.nan
s["dof_cd"] = np.nan
return s
def plot_perf(self, quantity="power coefficient"):
"""Plots the run's data"""
if not self.loaded:
self.load()
if quantity == "drag":
quantity = self.drag
ylabel = "Drag (N)"
ylim = None
elif quantity == "torque":
quantity = self.torque
ylabel = "Torque (Nm)"
ylim = None
elif quantity.lower == "power coefficient" or "cp" or "c_p":
quantity = self.cp
ylabel = "$C_P$"
ylim = (-1, 1)
plt.figure()
plt.plot(self.time_ni, quantity, 'k')
plt.xlabel("Time (s)")
plt.ylabel(ylabel)
plt.ylim(ylim)
plt.tight_layout()
def plot_wake(self):
"""Plot streamwise velocity over experiment."""
if not self.loaded:
self.load()
plt.figure()
self.filter_wake()
plt.plot(self.time_vec, self.u, 'k')
plt.xlabel("Time (s)")
plt.ylabel("$u$ (m/s)")
def plot_acs(self):
if not self.loaded:
self.load()
plt.figure()
plt.plot(self.time_acs, self.rpm_acs)
plt.hold(True)
plt.plot(self.time_ni, self.rpm_ni)
plt.figure()
plt.plot(self.time_ni, self.tow_speed_ni)
plt.hold(True)
plt.plot(self.time_acs, self.tow_speed_acs)
plt.show()
def plot_carriage_vel(self):
if not self.loaded:
self.load()
plt.figure()
plt.plot(self.time_ni, self.tow_speed_ni)
plt.tight_layout()
plt.show()
class Section(object):
def __init__(self, name):
self.name = name
self.processed_path = os.path.join(processed_data_dir, name+".csv")
self.test_plan_path = os.path.join("Config", "Test plan", name+".csv")
self.load()
def load(self):
self.test_plan = pd.read_csv(self.test_plan_path, index_col="run")
try:
self.data = pd.read_csv(self.processed_path, index_col="run")
except IOError:
self.data = pd.DataFrame()
@property
def mean_cp(self):
return self.data.mean_cp
def process(self, nproc=2, nruns="all", save=True):
"""Process an entire section of data."""
self.process_parallel(nproc=nproc, nruns=nruns)
self.data.index.name = "run"
self.data = self.data.sort()
if save:
self.data.to_csv(self.processed_path, na_rep="NaN", index=True)
def process_parallel(self, nproc=2, nruns="all"):
s = self.name
runs = self.test_plan.index.values
if nruns != "all":
if nruns == "new":
try:
runs = runs[np.where(np.isnan(self.data.mean_cp))]
self.data = self.data.iloc[np.where(~np.isnan(self.data.mean_cp))]
except AttributeError:
pass
else:
runs = runs[:nruns]
if len(runs) > 0:
pool = mp.Pool(processes=nproc)
results = [pool.apply_async(process_run, args=(s,n)) for n in runs]
output = [p.get() for p in results]
pool.close()
self.newdata = pd.DataFrame(output)
self.newdata.set_index("run", inplace=True)
self.newdata = self.newdata.sort()
else:
self.newdata = pd.DataFrame()
if nruns == "all":
self.data = self.newdata
else:
self.data = self.data.append(self.newdata)
def process_run(section, nrun):
print("Processing {} run {}".format(section, nrun))
run = Run(section, nrun)
return run.summary
def process_latest_run(section):
"""
Automatically detects the most recently acquired run and processes it,
printing a summary to the shell.
"""
print("Processing latest run in", section)
raw_dir = os.path.join("Data", "Raw", section)
dirlist = [os.path.join(raw_dir, d) for d in os.listdir(raw_dir) \
if os.path.isdir(os.path.join(raw_dir, d))]
dirlist = sorted(dirlist, key=os.path.getmtime, reverse=True)
for d in dirlist:
try:
nrun = int(os.path.split(d)[-1])
break
except ValueError:
print(d, "is not a properly formatted directory")
print("\nSummary for {} run {}:".format(section, nrun))
print(Run(section, nrun).summary)
def load_test_plan_section(section):
df = pd.read_csv(os.path.join("Config", "Test plan", section+".csv"))
df = df.dropna(how="all", axis=1).dropna(how="all", axis=0)
if "Run" in df:
df["Run"] = df["Run"].astype(int)
return df
def batch_process_section(name):
s = Section(name)
s.process()
def batch_process_all(sections=["Curve-0", "Curve-1"]):
"""Batch processes all sections."""
for section in sections:
print("Processing {}".format(section))
batch_process_section(section)
def process_tare_drag(nrun, plot=False):
"""Processes a single tare drag run."""
print("Processing tare drag run", nrun)
times = {0.2: (15, 120),
0.3: (10, 77),
0.4: (10, 56),
0.5: (8, 47),
0.6: (10, 40),
0.7: (8, 33),
0.8: (5, 31),
0.9: (8, 27),
1.0: (6, 24),
1.1: (9, 22),
1.2: (8, 21),
1.3: (7, 19),
1.4: (6, 18)}
rdpath = os.path.join(raw_data_dir, "Tare-drag", str(nrun))
with open(os.path.join(rdpath, "metadata.json")) as f:
metadata = json.load(f)
speed = float(metadata["Tow speed (m/s)"])
nidata = loadhdf(os.path.join(rdpath, "nidata.h5"))
time_ni = nidata["time"]
drag = nidata["drag_left"] + nidata["drag_right"]
drag = drag - np.mean(drag[:2000])
t1, t2 = times[speed]
meandrag, x = ts.calcstats(drag, t1, t2, 2000)
print("Tare drag =", meandrag, "N at", speed, "m/s")
if plot:
plt.figure()
plt.plot(time_ni, drag, 'k')
plt.show()
return speed, meandrag
def batch_process_tare_drag(plot=False):
"""Processes all tare drag data."""
runs = os.listdir("Data/Raw/Tare-drag")
runs = sorted([int(run) for run in runs])
speed = np.zeros(len(runs))
taredrag = np.zeros(len(runs))
for n in range(len(runs)):
speed[n], taredrag[n] = process_tare_drag(runs[n])
data = pd.DataFrame()
data["run"] = runs
data["tow_speed"] = speed
data["tare_drag"] = taredrag
data.to_csv("Data/Processed/Tare-drag.csv", index=False)
if plot:
plt.figure()
plt.plot(speed, taredrag, "-ok", markerfacecolor="None")
plt.xlabel("Tow speed (m/s)")
plt.ylabel("Tare drag (N)")
plt.tight_layout()
plt.show()
def process_tare_torque(nrun, plot=False):
"""Processes a single tare torque run."""
print("Processing tare torque run", nrun)
times = {0 : (35, 86),
1 : (12, 52),
2 : (11, 32),
3 : (7, 30)}
nidata = loadhdf("Data/Raw/Tare-torque/" + str(nrun) + "/nidata.h5")
# Compute RPM
time_ni = nidata["time"]
angle = nidata["turbine_angle"]
rpm_ni = fdiff.second_order_diff(angle, time_ni)/6.0
rpm_ni = ts.smooth(rpm_ni, 8)
try:
t1, t2 = times[nrun]
except KeyError:
t1, t2 = times[3]
meanrpm, _ = ts.calcstats(rpm_ni, t1, t2, 2000)
torque = nidata["torque_trans"]
meantorque, _ = ts.calcstats(torque, t1, t2, 2000)
print("Tare torque =", meantorque, "Nm at", meanrpm, "RPM")
if plot:
plt.figure()
plt.plot(time_ni, torque)
plt.xlabel("Time (s)")
plt.ylabel("Torque (Nm)")
plt.tight_layout()
plt.show()
return meanrpm, -meantorque
def process_strut_torque(nrun, zero_torque=0.0, plot=False, covers=False,
                         verbose=False):
    """Processes a single strut torque run.

    Parameters
    ----------
    nrun : int
        Row index into the strut torque test plan.
    zero_torque : float
        Offset (Nm) subtracted from the mean torque before computing
        coefficients.
    plot : bool
        If True, plot the torque time series.
    covers : bool
        If True, read data from the "Strut-torque-covers" dataset.
    verbose : bool
        If True, print progress and summary quantities.

    Returns
    -------
    pandas.Series
        Summary containing run number, reference TSR, power coefficient,
        mean torque, and mean RPM.
    """
    # NOTE(review): test-plan parameters always come from Strut-torque.csv,
    # even when covers=True -- confirm this is intended.
    testplan = pd.read_csv("Config/Test plan/Strut-torque.csv",
                           index_col="run")
    ref_speed = testplan.ref_speed.iloc[nrun]
    tsr_nom = testplan.tsr.iloc[nrun]
    revs = testplan.revs.iloc[nrun]
    # Nominal RPM from nominal TSR; R is the module-level turbine radius
    rpm_nom = tsr_nom*ref_speed/R/(2*np.pi)*60
    # Run duration (s) required to complete the requested revolutions
    dur = revs/rpm_nom*60
    if covers:
        if verbose:
            print("Processing strut torque with covers run", nrun)
        nidata = loadhdf("Data/Raw/Strut-torque-covers/" + str(nrun) + \
                         "/nidata.h5")
    else:
        if verbose:
            print("Processing strut torque run", nrun)
        nidata = loadhdf("Data/Raw/Strut-torque/" + str(nrun) + "/nidata.h5")
    # Compute RPM
    time_ni = nidata["time"]
    angle = nidata["turbine_angle"]
    # deg/s divided by 6 gives RPM -- assumes angle is in degrees, TODO confirm
    rpm_ni = fdiff.second_order_diff(angle, time_ni)/6.0
    rpm_ni = ts.smooth(rpm_ni, 8)
    # Stats window from t = 9 s (presumably past spin-up -- confirm) to run end
    t1, t2 = 9, dur
    meanrpm, _ = ts.calcstats(rpm_ni, t1, t2, 2000)
    torque = nidata["torque_trans"]
    # Add back the RPM-dependent tare torque
    torque += calc_tare_torque(rpm_ni)
    meantorque, _ = ts.calcstats(torque, t1, t2, 2000)
    # Achieved tip speed ratio from the measured mean RPM
    tsr_ref = meanrpm/60.0*2*np.pi*R/ref_speed
    if verbose:
        print("Reference TSR =", np.round(tsr_ref, decimals=4))
        print("Strut torque =", meantorque, "Nm at", meanrpm, "RPM")
    if plot:
        plt.figure()
        plt.plot(time_ni, torque)
        plt.xlabel("Time (s)")
        plt.ylabel("Torque (Nm)")
        plt.tight_layout()
        plt.show()
    meantorque -= zero_torque
    # Torque and power coefficients; rho, A, R are module-level constants
    ct = meantorque/(0.5*rho*A*R*ref_speed**2)
    cp = ct*tsr_ref
    summary = pd.Series()
    summary["run"] = nrun
    summary["tsr_ref"] = tsr_ref
    summary["cp"] = cp
    summary["mean_torque"] = meantorque
    summary["mean_rpm"] = meanrpm
    return summary
def batch_process_tare_torque(plot=False):
    """Processes all tare torque data.

    Writes per-run results to ``Data/Processed/Tare-torque.csv`` and
    prints a linear fit of tare torque vs. RPM.
    """
    runs = os.listdir("Data/Raw/Tare-torque")
    runs = sorted([int(run) for run in runs])
    rpm = np.zeros(len(runs))
    taretorque = np.zeros(len(runs))
    for n in range(len(runs)):
        rpm[n], taretorque[n] = process_tare_torque(runs[n])
    df = pd.DataFrame()
    df["run"] = runs
    df["rpm"] = rpm
    df["tare_torque"] = taretorque
    df.to_csv("Data/Processed/Tare-torque.csv", index=False)
    # Linear fit: tare_torque = m*rpm + b
    m, b = np.polyfit(rpm, taretorque, 1)
    print("tare_torque = "+str(m)+"*rpm +", b)
    if plot:
        plt.figure()
        plt.plot(rpm, taretorque, "-ok", markerfacecolor="None")
        plt.plot(rpm, m*rpm + b)
        plt.xlabel("RPM")
        plt.ylabel("Tare torque (Nm)")
        plt.tight_layout()
        plt.show()
def batch_process_strut_torque(covers=False):
    """Process every run in the strut torque test plan.

    With ``covers=True`` the "-covers" test plan and output file are
    used instead. Results are written to ``Data/Processed/<section>.csv``.
    """
    section = "Strut-torque"
    if covers:
        section += "-covers"
    testplan = pd.read_csv("Config/Test plan/" + section + ".csv")
    summaries = [process_strut_torque(run, covers=covers)
                 for run in testplan.run]
    pd.DataFrame(summaries).to_csv("Data/Processed/" + section + ".csv",
                                   index=False)
def make_remote_name(local_path):
    """Build the remote file name from the last three path components.

    E.g. ``Data/Raw/Tare-drag/5/nidata.h5`` -> ``Tare-drag_5_nidata.h5``.

    Fix: the original split on a hard-coded ``"\\"``, which only works on
    Windows even though callers build paths with ``os.path.join``. Use
    ``os.path.normpath`` + ``os.sep`` so the split matches the platform's
    separator (and forward slashes are normalized on Windows).
    """
    parts = os.path.normpath(local_path).split(os.sep)
    return "_".join(parts[-3:])
def download_raw(section, nrun, name):
    """
    Downloads a run's raw data. `name` can be either the file name with
    extension, or

      * `"metadata"` -- Metadata in JSON format
      * `"nidata"` -- Data from the NI DAQ system
      * `"acsdata"` -- Data from the tow tank's motion controller
      * `"vecdata"` -- Data from the Nortek Vectrino

    The file is saved under ``Data/Raw/<section>/<nrun>/``; the download
    URL is looked up in ``Config/raw_data_urls.json``.
    """
    # Map shorthand names to actual file names
    if name == "metadata":
        filename = "metadata.json"
    elif name in ["vecdata", "nidata", "acsdata"]:
        filename = name + ".h5"
    else:
        filename = name
    print("Downloading", filename, "from", section, "run", nrun)
    local_dir = os.path.join("Data", "Raw", section, str(nrun))
    if not os.path.isdir(local_dir):
        os.makedirs(local_dir)
    local_path = os.path.join(local_dir, filename)
    remote_name = make_remote_name(local_path)
    # URL table maps remote names to download URLs
    with open("Config/raw_data_urls.json") as f:
        urls = json.load(f)
    url = urls[remote_name]
    pbar = progressbar.ProgressBar()
    def download_progress(blocks_transferred, block_size, total_size):
        # urlretrieve reporthook; the bar may reject out-of-range or
        # pre-start updates, so swallow only those progressbar errors
        percent = int(blocks_transferred*block_size*100/total_size)
        try:
            pbar.update(percent)
        except ValueError:
            pass
        except AssertionError:
            pass
    pbar.start()
    urlretrieve(url, local_path, reporthook=download_progress)
    pbar.finish()
def make_perf_table(section_name="Curve-0", table_name="Perf"):
    """Write a performance summary table to ``Tables/<table_name>.csv``.

    Columns: run number, tow speed, tip speed ratio, power coefficient,
    and drag coefficient, rounded to three decimals.
    """
    perf = pd.read_csv("Data/Processed/{}.csv".format(section_name))
    table = pd.DataFrame()
    table["Run"] = perf.run
    table["Tow speed (m/s)"] = np.round(perf.mean_tow_speed, decimals=3)
    table[r"$\lambda$"] = np.round(perf.mean_tsr, decimals=3)
    table[r"$C_P$"] = np.round(perf.mean_cp, decimals=3)
    table[r"$C_D$"] = np.round(perf.mean_cd, decimals=3)
    table.to_csv("Tables/{}.csv".format(table_name), index=False)
def make_re_dep_table(section_name="Re-dep", table_name="Re-dep", c=0.14):
    """
    Generates a summary table for the Reynolds number dependence data.

    `c` is presumably the blade chord length in metres; `D` and `nu` are
    module-level constants (turbine diameter, kinematic viscosity) --
    TODO confirm units.
    """
    df = pd.read_csv("Data/Processed/{}.csv".format(section_name))
    df2 = pd.DataFrame()
    df2["Run"] = df.run
    # Diameter- and chord-based Reynolds numbers; the 1.9 factor looks
    # like a blade-relative velocity multiplier -- confirm against report
    df2[r"$Re_D$"] = np.round(df.mean_tow_speed*D/nu, decimals=3)
    df2[r"$Re_c$"] = np.round(df.mean_tow_speed*1.9*c/nu, decimals=3)
    df2[r"$C_P$"] = np.round(df.mean_cp, decimals=3)
    df2[r"$C_D$"] = np.round(df.mean_cd, decimals=3)
    df2.to_csv("Tables/{}.csv".format(table_name), index=False)
| mit |
habi/GlobalDiagnostiX | readAptinaRAW.py | 1 | 1313 | # -*- coding: utf-8 -*-
"""
This script reads the RAW files from the Aptina cameras as numpy arrays,
ready for display or further use.
Made to help Valerie Duay get up to speed :)
"""
import os
import numpy
import matplotlib.pyplot as plt
Directory = '/scratch/tmp/DevWareX/MT9M001/DSL949A-NIR/'
Folder = '1394629994_MT9M001_DSL949A-NIR_0.0_0.0f_040ms_090mm_to150mm'
File = 'MT9M001_1280x1024_DSL949A-NIR_0.0_0.0f_040ms_090mm_to150mm_090mm.raw'
Size = [int(File.split('_')[1].split('x')[1]),
int(File.split('_')[1].split('x')[0])]
# fromfile
FileToLoad = os.path.join(Directory, Folder, File)
FromFile = numpy.fromfile(FileToLoad, dtype=numpy.uint16).reshape(Size)
# FromFile -= numpy.mean(FromFile)
MemMap = numpy.memmap(FileToLoad, dtype=numpy.uint16, shape=(Size[0], Size[1]))
# MemMap -= numpy.mean(MemMap)
plt.figure(File)
plt.subplot(121)
plt.imshow(FromFile, cmap='gray')
plt.title('numpy.fromfile > leaves file')
plt.subplot(122)
plt.imshow(MemMap, cmap='gray')
plt.title('numpy.memmap > destroys file')
plt.show()
print 'Only use "numpy.memmap" for displaying files! If you perform some',\
'calculations on the files (e.g "File -= numpy.mean(File)") these',\
'calculations are immediately saved to disk, essentially destroying the',\
'file! In this case use "numpy.fromfile"!'
| unlicense |
daniel20162016/my-first | read_xml_all/good_version_read_xml_1/read_wav_xml_good_1.py | 2 | 2673 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:45:22 2016
@author: wang
"""
from matplotlib import pylab as plt
from numpy import fft, fromstring, int16, linspace
import wave
from good_read_xml_1 import*
# open a wave file
#filename = 'francois_filon_pure_1.wav'
#filename_1 ='francois_filon_pure_1.xml'
#word ='je'
def read_wav_xml_good_1(filename, filename_1, word):
    """Read a wave file and locate a word's boundaries in samples.

    Fixes applied: removed dead code (unused ``xtimes``/``linspace``,
    ``t_total``, channel/width locals, aliases), the no-op ``1*``
    multiplication, and the misleading "filter the signal" comments --
    no filtering was ever performed.

    Parameters
    ----------
    filename : str
        Path to the wave file.
    filename_1 : str
        Path to the XML annotation file read by ``good_read_xml_1``.
    word : str
        Word to look up in the annotation.

    Returns
    -------
    wave_signal_float : numpy.ndarray
        Waveform samples converted from int16 to float.
    framerate : int
        Sample rate in Hz.
    word_start_point, word_length_point, word_end_point : float
        Start, length, and end of the word, in samples
        (annotation times in seconds multiplied by the sample rate).
    """
    # 'rb' reads the wave file as a binary file
    file_read = wave.open(filename, 'rb')
    # getparams() -> (nchannels, sampwidth, framerate, nframes, ...)
    params = file_read.getparams()
    framerate = params[2]
    nframes = params[3]
    wave_signal_string = file_read.readframes(nframes)
    file_read.close()
    # Raw bytes -> int16 samples -> float waveform
    wave_signal_int16 = fromstring(wave_signal_string, int16)
    wave_signal_float = wave_signal_int16.astype(float)
    # Word timing (seconds) from the XML annotation, converted to samples
    word_start_time, word_length_time, word_end_time = good_read_xml_1(filename_1, word)
    word_start_point = word_start_time*framerate
    word_length_point = word_length_time*framerate
    word_end_point = word_end_time*framerate
    return wave_signal_float, framerate, word_start_point, word_length_point, word_end_point
#print word_start_point
#print word_end_point
# save result of a wave file
#
#filename = 'out_01_filter1.wav'
#mode = 'wb'
#file_write = wave.open(filename,mode)
#
#file_write.setframerate(framerate)
#file_write.setnchannels(nchannels)
#file_write.setsampwidth(sampwith)
#
## reconversion into string
#wave_signal_int16 = wave_signal_float.astype(int16)
#wave_signal_string= wave_signal_int16.tostring()
##print type(wave_signal_string)
#
#file_write.writeframes(b''.join(wave_signal_string))
#file_write.close() | mit |
tum-camp/survival-support-vector-machine | survival/svm/naive_survival_svm.py | 1 | 6322 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import itertools
import numpy
from scipy.misc import comb
from sklearn.svm import LinearSVC
from sklearn.utils import check_random_state
from ..base import SurvivalAnalysisMixin
from ..util import check_arrays_survival
class NaiveSurvivalSVM(LinearSVC, SurvivalAnalysisMixin):
    """Naive version of linear Survival Support Vector Machine.

    Uses regular linear support vector classifier (liblinear).
    A new set of samples is created by building the difference between any two feature
    vectors in the original data, thus this version requires `O(n_samples^2)` space.

    See :class:`survival.svm.HingeLossSurvivalSVM` for the kernel naive survival SVM.

    .. math::

          \\min_{\\mathbf{w}}\\quad
          \\frac{1}{2} \\lVert \\mathbf{w} \\rVert_2^2
          + \\gamma \\sum_{i = 1}^n \\xi_i \\\\
          \\text{subject to}\\quad
          \\mathbf{w}^\\top \\mathbf{x}_i - \\mathbf{w}^\\top \\mathbf{x}_j \\geq 1 - \\xi_{ij},\\quad
          \\forall (i, j) \\in \\mathcal{P}, \\\\
          \\xi_i \\geq 0,\\quad \\forall (i, j) \\in \\mathcal{P}.

          \\mathcal{P} = \\{ (i, j) \\mid y_i > y_j \\land \\delta_j = 1 \\}_{i,j=1,\\dots,n}.

    Parameters
    ----------
    alpha : float, positive (default=1.0)
        Weight of penalizing the squared hinge loss in the objective function (default: 1)

    loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
        Specifies the loss function. 'hinge' is the standard SVM loss
        (used e.g. by the SVC class) while 'squared_hinge' is the
        square of the hinge loss.

    penalty : string, 'l1' or 'l2' (default='l2')
        Specifies the norm used in the penalization. The 'l2'
        penalty is the standard used in SVC. The 'l1' leads to `coef_`
        vectors that are sparse.

    dual : bool, (default=True)
        Select the algorithm to either solve the dual or primal
        optimization problem. Prefer dual=False when n_samples > n_features.

    tol : float, optional (default=1e-4)
        Tolerance for stopping criteria.

    verbose : int (default=0)
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in liblinear that, if enabled, may not work
        properly in a multithreaded context.

    random_state : int seed, RandomState instance, or None (default=None)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    max_iter : int (default=1000)
        The maximum number of iterations to be run.

    References
    ----------
    .. [1] Van Belle, V., Pelckmans, K., Suykens, J. A., & Van Huffel, S.
           Support Vector Machines for Survival Analysis. In Proc. of the 3rd Int. Conf.
           on Computational Intelligence in Medicine and Healthcare (CIMED). 1-8. 2007

    .. [2] Evers, L., Messow, C.M.,
           "Sparse kernel methods for high-dimensional survival data",
           Bioinformatics 24(14), 1632-8, 2008.
    """
    def __init__(self, penalty='l2', loss='squared_hinge', dual=False, tol=1e-4,
                 alpha=1.0, verbose=0, random_state=None, max_iter=1000):
        # fit_intercept=False: the pairwise differences below cancel any
        # constant offset, so an intercept would be meaningless
        super().__init__(penalty=penalty,
                         loss=loss,
                         dual=dual,
                         tol=tol,
                         verbose=verbose,
                         random_state=random_state,
                         max_iter=max_iter,
                         fit_intercept=False)
        self.alpha = alpha

    def _get_survival_pairs(self, X, y, random_state):
        """Build difference vectors for all comparable sample pairs.

        Returns (x_pairs, y_pairs): feature differences and +/-1 labels
        indicating which sample of the pair survived longer.
        """
        X, event, time = check_arrays_survival(X, y)
        # Shuffle sample order so pair construction does not depend on
        # the input ordering
        idx = numpy.arange(X.shape[0], dtype=int)
        random_state.shuffle(idx)

        # n_pairs = n choose 2 is an upper bound; arrays are trimmed below
        # NOTE(review): scipy.misc.comb was removed in SciPy >= 1.0;
        # modern SciPy provides scipy.special.comb instead.
        n_pairs = int(comb(X.shape[0], 2))
        x_pairs = numpy.empty((n_pairs, X.shape[1]), dtype=float)
        y_pairs = numpy.empty(n_pairs, dtype=numpy.int8)
        k = 0
        for xi, xj in itertools.combinations(idx, 2):
            # A pair is comparable only if the earlier time is an
            # observed event (not censored)
            if time[xi] > time[xj] and event[xj]:
                x_pairs[k, :] = X[xi, :] - X[xj, :]
                y_pairs[k] = 1
                k += 1
            elif time[xi] < time[xj] and event[xi]:
                x_pairs[k, :] = X[xi, :] - X[xj, :]
                y_pairs[k] = -1
                k += 1
            elif time[xi] == time[xj] and (event[xi] or event[xj]):
                # Tied times: keep the pair if at least one event observed
                x_pairs[k, :] = X[xi, :] - X[xj, :]
                y_pairs[k] = 1 if event[xj] else -1
                k += 1

        # Trim to the number of comparable pairs actually generated
        x_pairs.resize((k, X.shape[1]))
        y_pairs.resize(k)

        return x_pairs, y_pairs

    def fit(self, X, y):
        """Build a survival support vector machine model from training data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Data matrix.

        y : structured array, shape = [n_samples]
            A structured array containing the binary event indicator
            as first field, and time of event or time of censoring as
            second field.

        Returns
        -------
        self
        """
        random_state = check_random_state(self.random_state)
        x_pairs, y_pairs = self._get_survival_pairs(X, y, random_state)
        # liblinear's C parameter plays the role of alpha here
        self.C = self.alpha
        # Classify pair differences with a regular linear SVC
        return super().fit(x_pairs, y_pairs)

    def predict(self, X):
        """Rank samples according to survival times

        Lower ranks indicate shorter survival, higher ranks longer survival.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            Predicted ranks.
        """
        # Negated so that larger decision values (longer survival)
        # translate into higher ranks
        return -self.decision_function(X)
| gpl-3.0 |
eranchetz/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/units.py | 70 | 4810 | """
The classes here provide support for using custom classes with
matplotlib, eg those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, eg a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation,
rather a units implementation must provide a ConversionInterface, and
then register with the Registry converter dictionary. For example,
here is a complete implementation which supports plotting with native
datetime objects
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
def convert(value, unit):
'convert value to a scalar or array'
return dates.date2num(value)
convert = staticmethod(convert)
def axisinfo(unit):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return 'date'
default_units = staticmethod(default_units)
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
import numpy as np
from matplotlib.cbook import iterable, is_numlike
class AxisInfo:
    """Bundle of default tick locators, formatters, and label for an axis."""

    def __init__(self, majloc=None, minloc=None,
                 majfmt=None, minfmt=None, label=None):
        """Store the axis defaults.

        majloc/minloc are TickLocators for the major and minor ticks;
        majfmt/minfmt are TickFormatters for the major and minor ticks;
        label is the default axis label. Any entry left as None makes
        the axis fall back to its own default.
        """
        self.majloc, self.minloc = majloc, minloc
        self.majfmt, self.minfmt = majfmt, minfmt
        self.label = label
class ConversionInterface:
    """
    The minimal interface for a converter to take custom instances (or
    sequences) and convert them to values mpl can use
    """
    def axisinfo(unit):
        'return an units.AxisInfo instance for unit'
        return None
    axisinfo = staticmethod(axisinfo)

    def default_units(x):
        'return the default unit for x or None'
        return None
    default_units = staticmethod(default_units)

    def convert(obj, unit):
        """
        convert obj using unit.  If obj is a sequence, return the
        converted sequence.  The output must be a sequence of scalars
        that can be used by the numpy array layer
        """
        return obj
    convert = staticmethod(convert)

    def is_numlike(x):
        """
        The matplotlib datalim, autoscaling, locators etc work with
        scalars which are the units converted to floats given the
        current unit.  The converter may be passed these floats, or
        arrays of them, even when units are set.  Derived conversion
        interfaces may opt to pass plain-ol unitless numbers through
        the conversion interface and this is a helper function for
        them.
        """
        # NOTE: the is_numlike called below resolves to the module-level
        # function imported from matplotlib.cbook, NOT to this static
        # method -- class attributes do not shadow module globals here.
        if iterable(x):
            for thisx in x:
                # only the first element of a sequence is examined
                return is_numlike(thisx)
        else:
            return is_numlike(x)
    is_numlike = staticmethod(is_numlike)
class Registry(dict):
    """
    register types with conversion interface

    Maps a class to the ConversionInterface instance that handles it.
    """
    def __init__(self):
        dict.__init__(self)
        # cache keyed by id(x); currently disabled (see DISABLED lines)
        self._cached = {}

    def get_converter(self, x):
        'get the converter interface instance for x, or None'

        if not len(self): return None # nothing registered
        #DISABLED    idx = id(x)
        #DISABLED    cached = self._cached.get(idx)
        #DISABLED    if cached is not None: return cached

        converter = None
        classx = getattr(x, '__class__', None)

        # direct lookup by the value's class first
        if classx is not None:
            converter = self.get(classx)

        if converter is None and iterable(x):
            # if this is anything but an object array, we'll assume
            # there are no custom units
            if isinstance(x, np.ndarray) and x.dtype != np.object:
                return None

            for thisx in x:
                # only the first element determines the converter
                converter = self.get_converter( thisx )
                return converter

        #DISABLED    self._cached[idx] = converter
        return converter


# module-level singleton used by the rest of matplotlib
registry = Registry()
| agpl-3.0 |
bsipocz/statsmodels | statsmodels/examples/ex_lowess.py | 34 | 2827 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:26:06 2011
Author: Chris Jordan Squire
extracted from test suite by josef-pktd
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
# this is just to check direct import
import statsmodels.nonparametric.smoothers_lowess
statsmodels.nonparametric.smoothers_lowess.lowess
x = np.arange(20.)
#standard normal noise
noise = np.array([-0.76741118, -0.30754369,
0.39950921, -0.46352422, -1.67081778,
0.6595567 , 0.66367639, -2.04388585,
0.8123281 , 1.45977518,
1.21428038, 1.29296866, 0.78028477,
-0.2402853 , -0.21721302,
0.24549405, 0.25987014, -0.90709034,
-1.45688216, -0.31780505])
y = x + noise
expected_lowess = np.array([[ 0. , -0.58337912],
[ 1. , 0.61951246],
[ 2. , 1.82221628],
[ 3. , 3.02536876],
[ 4. , 4.22667951],
[ 5. , 5.42387723],
[ 6. , 6.60834945],
[ 7. , 7.7797691 ],
[ 8. , 8.91824348],
[ 9. , 9.94997506],
[ 10. , 10.89697569],
[ 11. , 11.78746276],
[ 12. , 12.62356492],
[ 13. , 13.41538492],
[ 14. , 14.15745254],
[ 15. , 14.92343948],
[ 16. , 15.70019862],
[ 17. , 16.48167846],
[ 18. , 17.26380699],
[ 19. , 18.0466769 ]])
actual_lowess = lowess(y, x)
print(actual_lowess)
print(np.max(np.abs(actual_lowess-expected_lowess)))
plt.plot(y, 'o')
plt.plot(actual_lowess[:,1])
plt.plot(expected_lowess[:,1])
import os.path
import statsmodels.nonparametric.tests.results
rpath = os.path.split(statsmodels.nonparametric.tests.results.__file__)[0]
rfile = os.path.join(rpath, 'test_lowess_frac.csv')
test_data = np.genfromtxt(open(rfile, 'rb'),
delimiter = ',', names = True)
expected_lowess_23 = np.array([test_data['x'], test_data['out_2_3']]).T
expected_lowess_15 = np.array([test_data['x'], test_data['out_1_5']]).T
actual_lowess_23 = lowess(test_data['y'], test_data['x'] ,frac = 2./3)
actual_lowess_15 = lowess(test_data['y'], test_data['x'] ,frac = 1./5)
#plt.show()
| bsd-3-clause |
verdverm/pypge | experiments/post_process/scripts/timing_stats_05.py | 1 | 1221 | import pandas as pd
import sys

# Usage: timing_stats_05.py <results_file> <column> [<column> ...]
# The column list must include "problem", "elapsed_seconds",
# "evald_models", "best_r2", and "ave_size".
pgefile = sys.argv[1]

df = pd.read_csv(pgefile, delim_whitespace=True)
# Keep only the columns named on the command line
df2 = df[sys.argv[2:]]

pge = df2.groupby("problem")

# Speed-up / model-count ratios. Assumes each problem group has exactly
# four rows in a fixed configuration order (iloc 0-3) -- TODO confirm
# which configurations those indices correspond to.
for name, grp in pge:
    # print(name)
    # print(grp)
    ac_t = grp["elapsed_seconds"].iloc[2]/grp["elapsed_seconds"].iloc[0]
    ac_m = grp["evald_models"].iloc[2]/grp["evald_models"].iloc[0]
    ac_r = ac_m / ac_t
    bd_t = grp["elapsed_seconds"].iloc[3]/grp["elapsed_seconds"].iloc[1]
    bd_m = grp["evald_models"].iloc[3]/grp["evald_models"].iloc[1]
    bd_r = bd_m / bd_t
    # LaTeX table row: problem & ratios
    fstr = "{:21s}  &  {:.2f} & {:.2f} & {:.2f}  &  {:.2f} & {:.2f} & {:.2f}"
    out = fstr.format(name, ac_t, ac_m, ac_r, bd_t, bd_m, bd_r)
    print(out)
    # print(name, ac_t)
    # print(name, ac_t, ac_m, ac_r, bd_t, bd_m, bd_r)

print("\n\n")

# Best R^2 per configuration, one LaTeX row per problem
for name, grp in pge:
    # print(name)
    # print(grp)
    r2 = grp["best_r2"]
    fstr = "{:21s}  &  {:.3f} & {:.3f} & {:.3f} & {:.3f}"
    out = fstr.format(name, r2.iloc[0], r2.iloc[1], r2.iloc[2], r2.iloc[3])
    print(out)

print("\n\n")

# Average model size per configuration, one LaTeX row per problem
for name, grp in pge:
    # print(name)
    # print(grp)
    r2 = grp["ave_size"]
    fstr = "{:21s}  &  {:.0f} & {:.0f} & {:.0f} & {:.0f}"
    out = fstr.format(name, r2.iloc[0], r2.iloc[1], r2.iloc[2], r2.iloc[3])
    print(out)
| mit |
jmetzen/scikit-learn | benchmarks/bench_covertype.py | 120 | 7381 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
                mmap_mode='r')


@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
    """Load the data, then cache and memmap the train/test split"""
    ######################################################################
    ## Load dataset
    print("Loading dataset...")
    data = fetch_covtype(download_if_missing=True, shuffle=True,
                         random_state=random_state)
    X = check_array(data['data'], dtype=dtype, order=order)
    # Binary task: class 1 (spruce/fir) vs. everything else
    y = (data['target'] != 1).astype(np.int)

    ## Create train-test split (as [Joachims, 2006])
    print("Creating train-test split...")
    n_train = 522911
    X_train = X[:n_train]
    y_train = y[:n_train]
    X_test = X[n_train:]
    y_test = y[n_train:]

    ## Standardize first 10 features (the numerical ones)
    # Remaining features are binary indicators, so left untouched by
    # setting their mean to 0 and std to 1
    mean = X_train.mean(axis=0)
    std = X_train.std(axis=0)
    mean[10:] = 0.0
    std[10:] = 1.0
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std
    return X_train, X_test, y_train, y_test
# Candidate estimators keyed by CLI name; selected via --classifiers
ESTIMATORS = {
    'GBRT': GradientBoostingClassifier(n_estimators=250),
    'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
    'RandomForest': RandomForestClassifier(n_estimators=20),
    'CART': DecisionTreeClassifier(min_samples_split=5),
    'SGD': SGDClassifier(alpha=0.001, n_iter=2),
    'GaussianNB': GaussianNB(),
    'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
                           tol=1e-3),
    'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
jblackburne/scikit-learn | sklearn/tests/test_multioutput.py | 39 | 6609 | import numpy as np
import scipy.sparse as sp
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier
from sklearn.linear_model import Lasso
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier
def test_multi_target_regression():
    """MultiOutputRegressor matches per-target GradientBoostingRegressors."""
    X, y = datasets.make_regression(n_targets=3)
    X_train, y_train = X[:50], y[:50]
    X_test, y_test = X[50:], y[50:]

    # Reference: one independent regressor per target column
    references = np.zeros_like(y_test)
    for n in range(3):
        rgr = GradientBoostingRegressor(random_state=0)
        rgr.fit(X_train, y_train[:, n])
        references[:,n] = rgr.predict(X_test)

    rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr.fit(X_train, y_train)
    y_pred = rgr.predict(X_test)

    assert_almost_equal(references, y_pred)
def test_multi_target_regression_one_target():
    """Fitting on a single-target y must raise ValueError."""
    # Test multi target regression raises
    X, y = datasets.make_regression(n_targets=1)
    X_train, y_train = X[:50], y[:50]
    X_test, y_test = X[50:], y[50:]

    rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    assert_raises(ValueError, rgr.fit, X_train, y_train)
def test_multi_target_sparse_regression():
    """Sparse and dense X give identical predictions for every format."""
    X, y = datasets.make_regression(n_targets=3)
    X_train, y_train = X[:50], y[:50]
    X_test, y_test = X[50:], y[50:]

    for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
                   sp.lil_matrix]:
        rgr = MultiOutputRegressor(Lasso(random_state=0))
        rgr_sparse = MultiOutputRegressor(Lasso(random_state=0))

        rgr.fit(X_train, y_train)
        rgr_sparse.fit(sparse(X_train), y_train)

        assert_almost_equal(rgr.predict(X_test), rgr_sparse.predict(sparse(X_test)))
def test_multi_target_sample_weights_api():
    """sample_weight is rejected iff the base estimator lacks support."""
    X = [[1,2,3], [4,5,6]]
    y = [[3.141, 2.718], [2.718, 3.141]]
    w = [0.8, 0.6]

    rgr = MultiOutputRegressor(Lasso())
    assert_raises_regex(ValueError, "does not support sample weights",
                        rgr.fit, X, y, w)

    # no exception should be raised if the base estimator supports weights
    rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr.fit(X, y, w)
def test_multi_target_sample_weights():
    """Integer weights are equivalent to repeating samples."""
    # weighted regressor
    Xw = [[1,2,3], [4,5,6]]
    yw = [[3.141, 2.718], [2.718, 3.141]]
    w = [2., 1.]
    rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr_w.fit(Xw, yw, w)

    # unweighted, but with repeated samples
    X = [[1,2,3], [1,2,3], [4,5,6]]
    y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]
    rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
    rgr.fit(X, y)

    X_test = [[1.5,2.5,3.5], [3.5,4.5,5.5]]
    assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test))
# Import the data
iris = datasets.load_iris()
# create a multiple targets by randomized shuffling and concatenating y.
# Shared fixtures for the classification tests below.
X = iris.data
y1 = iris.target
y2 = shuffle(y1, random_state=1)
y3 = shuffle(y1, random_state=2)
y = np.column_stack((y1, y2, y3))
n_samples, n_features = X.shape
n_outputs = y.shape[1]
n_classes = len(np.unique(y1))
def test_multi_output_classification():
    # test if multi_target initializes correctly with base estimator and fit
    # assert predictions work as expected for predict, predict_proba and score
    forest = RandomForestClassifier(n_estimators=10, random_state=1)
    multi_target_forest = MultiOutputClassifier(forest)

    # train the multi_target_forest and also get the predictions.
    multi_target_forest.fit(X, y)

    predictions = multi_target_forest.predict(X)
    assert_equal((n_samples, n_outputs), predictions.shape)

    predict_proba = multi_target_forest.predict_proba(X)
    assert_equal((n_samples, n_classes, n_outputs), predict_proba.shape)

    # the most probable class must agree with predict
    assert_array_equal(np.argmax(predict_proba, axis=1), predictions)

    # train the forest with each column and assert that predictions are equal
    for i in range(3):
        forest_ = clone(forest)  # create a clone with the same state
        forest_.fit(X, y[:, i])
        assert_equal(list(forest_.predict(X)), list(predictions[:, i]))
        assert_array_equal(list(forest_.predict_proba(X)),
                           list(predict_proba[:, :, i]))
def test_multiclass_multioutput_estimator():
    """Check that a meta-estimator (OvR) nests correctly inside MultiOutput."""
    base_ovr = OneVsRestClassifier(LinearSVC(random_state=0))
    meta = MultiOutputClassifier(base_ovr)
    meta.fit(X, y)
    joint_pred = meta.predict(X)
    assert_equal((n_samples, n_outputs), joint_pred.shape)
    # Fitting a fresh OvR clone per output column must reproduce each
    # column of the joint prediction.
    for col in range(3):
        single = clone(base_ovr)
        single.fit(X, y[:, col])
        assert_equal(list(single.predict(X)),
                     list(joint_pred[:, col]))
def test_multi_output_classification_sample_weights():
    """Weighting a sample by w must be equivalent to repeating it w times."""
    # Classifier fitted with an explicit per-sample weight vector.
    X_weighted = [[1, 2, 3], [4, 5, 6]]
    y_weighted = [[3, 2], [2, 3]]
    sample_weight = np.asarray([2., 1.])
    forest_w = RandomForestClassifier(n_estimators=10, random_state=1)
    clf_weighted = MultiOutputClassifier(forest_w)
    clf_weighted.fit(X_weighted, y_weighted, sample_weight)
    # The unweighted equivalent: duplicate the doubly-weighted first sample.
    X_repeated = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
    y_repeated = [[3, 2], [3, 2], [2, 3]]
    forest_r = RandomForestClassifier(n_estimators=10, random_state=1)
    clf_repeated = MultiOutputClassifier(forest_r)
    clf_repeated.fit(X_repeated, y_repeated)
    X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
    assert_almost_equal(clf_repeated.predict(X_test),
                        clf_weighted.predict(X_test))
def test_multi_output_exceptions():
    # NotFittedError when fit is not done but score, predict and
    # predict_proba are called
    moc = MultiOutputClassifier(LinearSVC(random_state=0))
    assert_raises(NotFittedError, moc.predict, y)
    assert_raises(NotFittedError, moc.predict_proba, y)
    assert_raises(NotFittedError, moc.score, X, y)
    # ValueError when number of outputs is different
    # for fit and score
    y_new = np.column_stack((y1, y2))  # two outputs instead of three
    moc.fit(X, y)
    assert_raises(ValueError, moc.score, X, y_new)
| bsd-3-clause |
JeremyRubin/Graffiti-codes | Graffiti-server/Processor.py | 1 | 3841 | from App import *
import ast
import numpy
import matplotlib.pyplot as plt
import datetime
import scipy
from scipy import signal, integrate
from numpy import trapz
class Processor(object):
    """Process accelerometer data sent from the phone.

    Wiener-filters the raw x/y/z acceleration streams, converts the
    nanosecond timestamps into second-deltas, and exposes :meth:`run`
    as the analysis entry point (peak indices in the x/y magnitude).
    """
    def __init__(self, data):
        # `data` arrives as a Python-literal string, e.g. a repr'd dict with
        # keys "dataX"/"dataY"/"dataZ"/"timestamps" (and optionally "msg").
        data = ast.literal_eval(data)
        # Apply wiener filter to the data
        self.x = signal.wiener([ float(x) for x in data["dataX"] ])
        self.y = signal.wiener([ float(y) for y in data["dataY"]])
        self.z = signal.wiener([ float(z) for z in data["dataZ"]])
        t = data["timestamps"]
        # set a message if one is included
        try:
            self.msg = data['msg']
        except KeyError:
            self.msg = False
        #convert timestamps into deltas
        # timestamps are nanoseconds; *10**-9 rescales to seconds
        self.t = [(int(x)-int(t[0]))*10**-9 for x in t]
    def mag(self,x,y,p,q):
        # given two vectors x and y (and a constant adjustment p and q,
        # compute the magnitude at each time
        # NOTE(review): this is the SQUARED magnitude (no sqrt).  Callers only
        # threshold/compare the values, so it is self-consistent, but the
        # name is misleading.
        mag = []
        for ind, el in enumerate(x):
            mag.append((float(el)-p)**2+(float(y[ind])-q)**2)
        return mag
    def smooth(self, x, length):
        # for length vaues of x, smooth the results by averaging over neighbors
        # Could be improved for sure
        # Trailing moving average over up to `smooth_rate` preceding samples;
        # near the start of the series fewer samples are available, hence `ct`.
        smth = []
        smooth_rate = 30
        for index in xrange(length):
            val = 0
            ct = 1
            for s in xrange(smooth_rate):
                if s >= index:
                    continue
                ct+=1
                val+=x[index-s]
            smth.append(val/ct)
        return smth
    def peaks(self, a, b, show=False):
        # run several of the functions
        # Pipeline: squared magnitude -> moving average -> threshold crossings
        # at the mean.  When `show` is a matplotlib format string, the smoothed
        # curve and its mean are plotted for debugging.
        mag = self.mag(a,b,0,0)
        smooth = self.smooth(mag, len(self.t))
        avg = (self.avg(smooth))
        if show:
            plt.plot(self.t,[avg for x in xrange(len(self.t))],show+'--')
            plt.plot(self.t,smooth, show)
        return (smooth, self.function_crosses(smooth, avg, True))
    def avg(self,x):
        # avg an array
        return sum(x)/len(x)
    def function_crosses(self,function, boundry, preserve):
        # Find all of the crosses over the boundry for a dataset
        # Returns the index of each upward crossing of `boundry`.
        # NOTE(review): `preserve` is accepted but never used; `passes` is
        # counted but never returned.
        switch = False
        passes = 0
        passIndex =[]
        for index, el in enumerate(function):
            if (switch == False) and (el> boundry):
                switch = True
                passes+=1
                passIndex.append(index)
            else:
                pass
            if el < boundry:
                switch = False
        return passIndex
    def run(self):
        # run the tests and return results
        # Only the x/y channel pair is analysed; the trailing zeros appear to
        # be placeholders for future per-axis results -- TODO confirm callers.
        (smoothXY, xy) = self.peaks(self.x,self.y, show=None)
        return (xy,0,0)
    """
    Ignore this stuff for now
    """
    def calibrate(self, x):
        # NOTE(review): the loop adds 0 on every iteration and shadows the
        # `x` argument, so this always returns 0 -- unfinished code (see the
        # "Ignore this stuff for now" marker above).
        y = 0
        for x in xrange(100):
            y+=0
        return y/100
    def splitter(self, indexes, array):
        # split an array based on indices
        base = 0
        result = []
        for index in indexes:
            result.append(array[base:index])
            base = index
        return result
    def calcLength(self, x):
        # calculate length using a trapezoidal integration
        # Double integration of acceleration over time; presumably yields a
        # displacement-like quantity -- TODO confirm units.
        return trapz(trapz(x,self.t),self.t)
    def function_up_downs(self, function, boundry):
        # Like function_crosses, but a rise is only recorded after two falls
        # below the boundary have been seen since the last recorded rise.
        switch = False
        secSwitch = True
        passes = 0
        ct = 0
        passIndex = []
        for index, el in enumerate(function):
            if (switch == False) and (el > boundry):
                switch = True
                if secSwitch:
                    passIndex.append(index)
                    secSwitch = False
            if el < boundry:
                switch = False
                ct+=1
                if ct == 2:
                    secSwitch = True
                    ct = 0
        return passIndex
| mit |
dsquareindia/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 41 | 12562 | # Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
    """Turn X into a sequence or ndarray, avoiding a copy if possible.

    A single mapping (i.e. one sample) is wrapped in a one-element list;
    anything else is delegated to ``tosequence``.
    """
    return [X] if isinstance(X, Mapping) else tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
    """Transforms lists of feature-value mappings to vectors.
    This transformer turns lists of mappings (dict-like objects) of feature
    names to feature values into Numpy arrays or scipy.sparse matrices for use
    with scikit-learn estimators.
    When feature values are strings, this transformer will do a binary one-hot
    (aka one-of-K) coding: one boolean-valued feature is constructed for each
    of the possible string values that the feature can take on. For instance,
    a feature "f" that can take on the values "ham" and "spam" will become two
    features in the output, one signifying "f=ham", the other "f=spam".
    However, note that this transformer will only do a binary one-hot encoding
    when feature values are of type string. If categorical features are
    represented as numeric values such as int, the DictVectorizer can be
    followed by OneHotEncoder to complete binary one-hot encoding.
    Features that do not occur in a sample (mapping) will have a zero value
    in the resulting array/matrix.
    Read more in the :ref:`User Guide <dict_feature_extraction>`.
    Parameters
    ----------
    dtype : callable, optional
        The type of feature values. Passed to Numpy array/scipy.sparse matrix
        constructors as the dtype argument.
    separator : string, optional
        Separator string used when constructing new features for one-hot
        coding.
    sparse : boolean, optional.
        Whether transform should produce scipy.sparse matrices.
        True by default.
    sort : boolean, optional.
        Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
        True by default.
    Attributes
    ----------
    vocabulary_ : dict
        A dictionary mapping feature names to feature indices.
    feature_names_ : list
        A list of length n_features containing the feature names (e.g., "f=ham"
        and "f=spam").
    Examples
    --------
    >>> from sklearn.feature_extraction import DictVectorizer
    >>> v = DictVectorizer(sparse=False)
    >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
    >>> X = v.fit_transform(D)
    >>> X
    array([[ 2.,  0.,  1.],
           [ 0.,  1.,  3.]])
    >>> v.inverse_transform(X) == \
        [{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
    True
    >>> v.transform({'foo': 4, 'unseen_feature': 3})
    array([[ 0.,  0.,  4.]])
    See also
    --------
    FeatureHasher : performs vectorization using only a hash function.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
        encoded as columns of integers.
    """
    def __init__(self, dtype=np.float64, separator="=", sparse=True,
                 sort=True):
        self.dtype = dtype
        self.separator = separator
        self.sparse = sparse
        self.sort = sort
    def fit(self, X, y=None):
        """Learn a list of feature name -> indices mappings.
        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)
        Returns
        -------
        self
        """
        feature_names = []
        vocab = {}
        # First pass: record every feature name in order of first appearance;
        # string values are expanded to "name<separator>value" one-hot names.
        for x in X:
            for f, v in six.iteritems(x):
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                if f not in vocab:
                    feature_names.append(f)
                    vocab[f] = len(vocab)
        if self.sort:
            # Re-index so that indices follow the lexicographic name order.
            feature_names.sort()
            vocab = dict((f, i) for i, f in enumerate(feature_names))
        self.feature_names_ = feature_names
        self.vocabulary_ = vocab
        return self
    def _transform(self, X, fitting):
        # Shared implementation of transform/fit_transform.  Builds a CSR
        # matrix incrementally and densifies at the end if sparse=False.
        # Sanity check: Python's array has no way of explicitly requesting the
        # signed 32-bit integers that scipy.sparse needs, so we use the next
        # best thing: typecode "i" (int). However, if that gives larger or
        # smaller integers than 32-bit ones, np.frombuffer screws up.
        assert array("i").itemsize == 4, (
            "sizeof(int) != 4 on your platform; please report this at"
            " https://github.com/scikit-learn/scikit-learn/issues and"
            " include the output from platform.platform() in your bug report")
        dtype = self.dtype
        if fitting:
            feature_names = []
            vocab = {}
        else:
            feature_names = self.feature_names_
            vocab = self.vocabulary_
        # Process everything as sparse regardless of setting
        X = [X] if isinstance(X, Mapping) else X
        indices = array("i")
        indptr = array("i", [0])
        # XXX we could change values to an array.array as well, but it
        # would require (heuristic) conversion of dtype to typecode...
        values = []
        # collect all the possible feature names and build sparse matrix at
        # same time
        for x in X:
            for f, v in six.iteritems(x):
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                    v = 1
                if f in vocab:
                    indices.append(vocab[f])
                    values.append(dtype(v))
                else:
                    # When not fitting, features unseen during fit are
                    # silently dropped.
                    if fitting:
                        feature_names.append(f)
                        vocab[f] = len(vocab)
                        indices.append(vocab[f])
                        values.append(dtype(v))
            indptr.append(len(indices))
        if len(indptr) == 1:
            raise ValueError("Sample sequence X is empty.")
        indices = frombuffer_empty(indices, dtype=np.intc)
        indptr = np.frombuffer(indptr, dtype=np.intc)
        shape = (len(indptr) - 1, len(vocab))
        result_matrix = sp.csr_matrix((values, indices, indptr),
                                      shape=shape, dtype=dtype)
        # Sort everything if asked
        if fitting and self.sort:
            feature_names.sort()
            # map_index[new_index] = old_index: permute the columns so that
            # column order matches the sorted feature names.
            map_index = np.empty(len(feature_names), dtype=np.int32)
            for new_val, f in enumerate(feature_names):
                map_index[new_val] = vocab[f]
                vocab[f] = new_val
            result_matrix = result_matrix[:, map_index]
        if self.sparse:
            result_matrix.sort_indices()
        else:
            result_matrix = result_matrix.toarray()
        if fitting:
            self.feature_names_ = feature_names
            self.vocabulary_ = vocab
        return result_matrix
    def fit_transform(self, X, y=None):
        """Learn a list of feature name -> indices mappings and transform X.
        Like fit(X) followed by transform(X), but does not require
        materializing X in memory.
        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)
        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        return self._transform(X, fitting=True)
    def inverse_transform(self, X, dict_type=dict):
        """Transform array or sparse matrix X back to feature mappings.
        X must have been produced by this DictVectorizer's transform or
        fit_transform method; it may only have passed through transformers
        that preserve the number of features and their order.
        In the case of one-hot/one-of-K coding, the constructed feature
        names and values are returned rather than the original ones.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Sample matrix.
        dict_type : callable, optional
            Constructor for feature mappings. Must conform to the
            collections.Mapping API.
        Returns
        -------
        D : list of dict_type objects, length = n_samples
            Feature mappings for the samples in X.
        """
        # COO matrix is not subscriptable
        X = check_array(X, accept_sparse=['csr', 'csc'])
        n_samples = X.shape[0]
        names = self.feature_names_
        dicts = [dict_type() for _ in xrange(n_samples)]
        if sp.issparse(X):
            for i, j in zip(*X.nonzero()):
                dicts[i][names[j]] = X[i, j]
        else:
            # Dense path: only non-zero entries are reported.
            for i, d in enumerate(dicts):
                for j, v in enumerate(X[i, :]):
                    if v != 0:
                        d[names[j]] = X[i, j]
        return dicts
    def transform(self, X, y=None):
        """Transform feature->value dicts to array or sparse matrix.
        Named features not encountered during fit or fit_transform will be
        silently ignored.
        Parameters
        ----------
        X : Mapping or iterable over Mappings, length = n_samples
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)
        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        if self.sparse:
            return self._transform(X, fitting=False)
        else:
            # Dense path: fill a pre-allocated zero matrix directly.
            dtype = self.dtype
            vocab = self.vocabulary_
            X = _tosequence(X)
            Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
            for i, x in enumerate(X):
                for f, v in six.iteritems(x):
                    if isinstance(v, six.string_types):
                        f = "%s%s%s" % (f, self.separator, v)
                        v = 1
                    try:
                        Xa[i, vocab[f]] = dtype(v)
                    except KeyError:
                        # Feature not seen during fit: silently ignore.
                        pass
            return Xa
    def get_feature_names(self):
        """Returns a list of feature names, ordered by their indices.
        If one-of-K coding is applied to categorical features, this will
        include the constructed feature names but not the original ones.
        """
        return self.feature_names_
    def restrict(self, support, indices=False):
        """Restrict the features to those in support using feature selection.
        This function modifies the estimator in-place.
        Parameters
        ----------
        support : array-like
            Boolean mask or list of indices (as returned by the get_support
            member of feature selectors).
        indices : boolean, optional
            Whether support is a list of indices.
        Returns
        -------
        self
        Examples
        --------
        >>> from sklearn.feature_extraction import DictVectorizer
        >>> from sklearn.feature_selection import SelectKBest, chi2
        >>> v = DictVectorizer()
        >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
        >>> X = v.fit_transform(D)
        >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
        >>> v.get_feature_names()
        ['bar', 'baz', 'foo']
        >>> v.restrict(support.get_support())  # doctest: +ELLIPSIS
        DictVectorizer(dtype=..., separator='=', sort=True,
                sparse=True)
        >>> v.get_feature_names()
        ['bar', 'foo']
        """
        if not indices:
            support = np.where(support)[0]
        names = self.feature_names_
        # Rebuild the vocabulary with compacted indices in support order.
        new_vocab = {}
        for i in support:
            new_vocab[names[i]] = len(new_vocab)
        self.vocabulary_ = new_vocab
        self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
                                                    key=itemgetter(1))]
        return self
| bsd-3-clause |
tvaroska/autopandas | autopandas/transformers.py | 1 | 3679 | """
Collection of transformers for autopandas
"""
import numpy as np
import pandas as pd

from sklearn.base import TransformerMixin
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
class DataFrameImputer(TransformerMixin):
    """Impute missing values in a pandas DataFrame.

    Columns of dtype object are imputed with the most frequent value in the
    column; float columns with the column mean; all remaining (numeric)
    columns with the column median.

    Credits http://stackoverflow.com/a/25562948/1575066
    """
    # BUG FIX: this class uses ``pd.Series`` but the module never imported
    # pandas, so ``fit`` raised NameError at runtime.  The pandas import is
    # now added at module level.  The class description, previously buried in
    # an otherwise empty ``__init__`` docstring, has been promoted to the
    # class docstring; the no-op ``__init__`` is dropped (the implicit
    # default takes the same zero arguments).
    def fit(self, X, y=None):
        """Learn one fill value per column of ``X``.

        Parameters
        ----------
        X : pandas.DataFrame
            Training data; may contain missing values.
        y : ignored

        Returns
        -------
        self
        """
        # Mode for object columns, mean for float columns, median otherwise.
        self.fill = pd.Series([
            X[c].value_counts().index[0]
            if X[c].dtype == np.dtype('O') else
            X[c].mean() if X[c].dtype == np.dtype(float) else X[c].median()
            for c in X],
            index=X.columns)
        return self

    def transform(self, X, y=None):
        """Return a copy of ``X`` with missing values filled column-wise."""
        return X.fillna(self.fill, inplace=False)
class LabelEncoderFix(LabelEncoder):
    """LabelEncoder that stringifies labels before encoding.

    Casting to ``str`` lets mixed-type columns (e.g. numbers mixed with
    NaN) be encoded without errors.  NOTE(review): marked "to be deleted"
    by the original author -- presumably a temporary workaround.
    """
    def fit(self, y):
        # assumes y is a numpy array / pandas Series (has .astype) -- TODO confirm
        return super().fit(y.astype(str))
    def transform(self, y):
        return super().transform(y.astype(str))
    def fit_transform(self, y):
        return super().fit_transform(y.astype(str))
class CategoryTransformer(TransformerMixin):
    """One-hot encode a categorical column.

    ``fit`` records the unique values (and their counts) seen in the input;
    ``transform`` emits, for every row, a 0/1 indicator vector over the
    values learned for the *first* fitted column.

    NOTE(review): although ``fit`` can record statistics for several
    columns, ``transform`` only ever consults ``self.fill[0]``, so
    multi-column input is not actually one-hot encoded per column --
    verify against callers before relying on 2-d input.
    """
    def __init__(self, **kwargs):
        # One [most_frequent_index, unique_values, counts] triple per
        # fitted column.
        self.fill = []
        super().__init__()

    def unique(self, data):
        """Return (unique values, counts) for ``data``.

        Implemented by hand because the numpy equivalent fails when the
        data contains nan.
        """
        unique = []
        counts = []
        for row in data:
            if row in unique:
                counts[unique.index(row)] += 1
            else:
                unique.append(row)
                counts.append(1)
        return unique, counts

    def fit(self, X, y=None):
        """Record unique values and their counts for each column of ``X``.

        Raises
        ------
        NotImplementedError
            If a target ``y`` is supplied (supervised fitting is not
            implemented).
        """
        # BUG FIX: the original used ``y != None``, which performs an
        # elementwise comparison for array-like targets and raises
        # "truth value of an array is ambiguous" instead of the intended
        # NotImplementedError.  Identity comparison is the correct test.
        if y is not None:
            raise NotImplementedError
        self.fill = []
        if len(X.shape) == 1 or X.shape[1] == 1:
            unique, counts = self.unique(X)
            ind = np.argmax(counts)
            self.fill.append([ind, unique, counts])
        else:
            columns = X.shape[1]
            for column in range(columns):
                unique, counts = self.unique(X[column])
                ind = np.argmax(counts)
                self.fill.append([ind, unique, counts])
        return self

    def transform(self, X, y=None):
        """Return a 0/1 indicator array for the first fitted column."""
        ret = []
        for rowdata in X:
            resdata = []
            # One indicator slot per value learned during fit.
            for i in range(len(self.fill[0][1])):
                if rowdata == self.fill[0][1][i]:
                    resdata.append(1)
                else:
                    resdata.append(0)
            ret.append(resdata)
        return np.array(ret)
class LinearImputer(TransformerMixin):
    """Impute the first column of a 2-d array by linear regression.

    ``fit`` learns column 0 as a linear function of the remaining columns,
    using only fully-observed rows; ``transform`` fills NaN values in
    column 0 of a copy of the input with the model's predictions.
    """
    def __init__(self, **kwargs):
        # Trained regression model; created in fit().
        self.model = None
        super().__init__(**kwargs)
    def fit(self, X, y=None):
        if y is None:
            # Keep only rows with no NaN anywhere for training.
            train = X[~np.isnan(X).any(axis=1)]
        else:
            # NOTE(review): rows are filtered on NaNs in X only, even though
            # y is concatenated in -- presumably y is assumed NaN-free;
            # TODO confirm.
            train = np.concatenate((X, y), axis = 1)[~np.isnan(X).any(axis=1)]
        # Column 0 is the regression target; the rest are features.
        x_train = train[:, 1:]
        y_train = train[:, 0:1]
        self.model = LinearRegression()
        self.model.fit(x_train, y_train)
        return self
    def transform(self, X, *_):
        # Work on a copy; fill each missing leading value with a prediction
        # from the remaining columns of the same row.
        result = np.array(X, copy=True)
        for row in result:
            if np.isnan(row[0]):
                row[0] = self.model.predict(row[1:].reshape(1,-1))
        return result
| mit |
Jimmy-Morzaria/scikit-learn | benchmarks/bench_multilabel_metrics.py | 86 | 7286 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
# Metric name -> callable; f1 variants are pre-bound to their averaging mode.
METRICS = {
    'f1': partial(f1_score, average='micro'),
    'f1-by-sample': partial(f1_score, average='samples'),
    'accuracy': accuracy_score,
    'hamming': hamming_loss,
    'jaccard': jaccard_similarity_score,
}
# Format name -> converter from a dense indicator matrix to that
# multilabel representation.
FORMATS = {
    'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
    'dense': lambda y: y,
    'csr': lambda y: sp.csr_matrix(y),
    'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
              formats=tuple(v for k, v in sorted(FORMATS.items())),
              samples=1000, classes=4, density=.2,
              n_times=5):
    """Times metric calculations for a number of inputs
    Parameters
    ----------
    metrics : array-like of callables (1d or 0d)
        The metric functions to time.
    formats : array-like of callables (1d or 0d)
        These may transform a dense indicator matrix into multilabel
        representation.
    samples : array-like of ints (1d or 0d)
        The number of samples to generate as input.
    classes : array-like of ints (1d or 0d)
        The number of classes in the input.
    density : array-like of ints (1d or 0d)
        The density of positive labels in the input.
    n_times : int
        Time calling the metric n_times times.
    Returns
    -------
    array of floats shaped like (metrics, formats, samples, classes, density)
        Time in seconds.
    """
    # Scalar arguments are promoted to length-1 arrays so a single code
    # path handles both scalars and parameter sweeps.
    metrics = np.atleast_1d(metrics)
    samples = np.atleast_1d(samples)
    classes = np.atleast_1d(classes)
    density = np.atleast_1d(density)
    formats = np.atleast_1d(formats)
    out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
                    len(density)), dtype=float)
    it = itertools.product(samples, classes, density)
    for i, (s, c, d) in enumerate(it):
        # Two independent random labelings serve as y_true / y_pred.
        _, y_true = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   return_indicator=True,
                                                   random_state=42)
        _, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   return_indicator=True,
                                                   random_state=84)
        for j, f in enumerate(formats):
            f_true = f(y_true)
            f_pred = f(y_pred)
            for k, metric in enumerate(metrics):
                t = timeit(partial(metric, f_true, f_pred), number=n_times)
                # .flat[i] works because itertools.product iterates the last
                # three axes in C (row-major) order.
                out[k, j].flat[i] = t
    return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
          format_markers=('x', '|', 'o', '+'),
          metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
    """
    Plot the results by metric, format and some other variable given by
    x_label
    """
    fig = plt.figure('scikit-learn multilabel metrics benchmarks')
    plt.title(title)
    ax = fig.add_subplot(111)
    for i, metric in enumerate(metrics):
        for j, format in enumerate(formats):
            # One line per (metric, format) pair: marker encodes the format,
            # colour encodes the metric (cycled if there are many metrics).
            ax.plot(x_ticks, results[i, j].flat,
                    label='{}, {}'.format(metric, format),
                    marker=format_markers[j],
                    color=metric_colors[i % len(metric_colors)])
    ax.set_xlabel(x_label)
    ax.set_ylabel('Time (s)')
    ax.legend()
    plt.show()
if __name__ == "__main__":
    # Command-line driver: parse the benchmark parameters, run the timing
    # grid, print a table and optionally plot one swept parameter.
    ap = argparse.ArgumentParser()
    ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
                    help='Specifies metrics to benchmark, defaults to all. '
                         'Choices are: {}'.format(sorted(METRICS)))
    ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
                    help='Specifies multilabel formats to benchmark '
                         '(defaults to all).')
    ap.add_argument('--samples', type=int, default=1000,
                    help='The number of samples to generate')
    ap.add_argument('--classes', type=int, default=10,
                    help='The number of classes')
    ap.add_argument('--density', type=float, default=.2,
                    help='The average density of labels per sample')
    ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
                    default=None,
                    help='Plot time with respect to this parameter varying '
                         'up to the specified value')
    ap.add_argument('--n-steps', default=10, type=int,
                    help='Plot this many points for each metric')
    ap.add_argument('--n-times',
                    default=5, type=int,
                    help="Time performance over n_times trials")
    args = ap.parse_args()
    if args.plot is not None:
        # Replace the scalar value of the plotted parameter with a sweep of
        # n_steps values up to it; integer parameters are rounded and
        # de-duplicated.
        max_val = getattr(args, args.plot)
        if args.plot in ('classes', 'samples'):
            min_val = 2
        else:
            min_val = 0
        steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
        if args.plot in ('classes', 'samples'):
            steps = np.unique(np.round(steps).astype(int))
        setattr(args, args.plot, steps)
    if args.metrics is None:
        args.metrics = sorted(METRICS)
    if args.formats is None:
        args.formats = sorted(FORMATS)
    results = benchmark([METRICS[k] for k in args.metrics],
                        [FORMATS[k] for k in args.formats],
                        args.samples, args.classes, args.density,
                        args.n_times)
    _tabulate(results, args.metrics, args.formats)
    if args.plot is not None:
        print('Displaying plot', file=sys.stderr)
        title = ('Multilabel metrics with %s' %
                 ', '.join('{0}={1}'.format(field, getattr(args, field))
                           for field in ['samples', 'classes', 'density']
                           if args.plot != field))
        _plot(results, args.metrics, args.formats, title, steps, args.plot)
equialgo/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02  # fine step width for decision surface contours
plot_step_coarser = 0.5  # step widths for coarse classifier guesses
RANDOM_SEED = 13  # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
          RandomForestClassifier(n_estimators=n_estimators),
          ExtraTreesClassifier(n_estimators=n_estimators),
          AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
                             n_estimators=n_estimators)]
# One row of subplots per feature pair, one column per model.
for pair in ([0, 1], [0, 2], [2, 3]):
    for model in models:
        # We only take the two corresponding features
        X = iris.data[:, pair]
        y = iris.target
        # Shuffle
        idx = np.arange(X.shape[0])
        np.random.seed(RANDOM_SEED)
        np.random.shuffle(idx)
        X = X[idx]
        y = y[idx]
        # Standardize
        mean = X.mean(axis=0)
        std = X.std(axis=0)
        X = (X - mean) / std
        # Train
        # NOTE(review): the clone(model) result is immediately overwritten;
        # model itself is fitted and then reused for plotting below, so the
        # clone is effectively dead code -- confirm before "fixing", since
        # the plotting code relies on model being fitted.
        clf = clone(model)
        clf = model.fit(X, y)
        scores = clf.score(X, y)
        # Create a title for each column and the console by using str() and
        # slicing away useless parts of the string
        model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
        model_details = model_title
        if hasattr(model, "estimators_"):
            model_details += " with {} estimators".format(len(model.estimators_))
        print( model_details + " with features", pair, "has a score of", scores )
        plt.subplot(3, 4, plot_idx)
        if plot_idx <= len(models):
            # Add a title at the top of each column
            plt.title(model_title)
        # Now plot the decision boundary using a fine mesh as input to a
        # filled contour plot
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                             np.arange(y_min, y_max, plot_step))
        # Plot either a single DecisionTreeClassifier or alpha blend the
        # decision surfaces of the ensemble of classifiers
        if isinstance(model, DecisionTreeClassifier):
            Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            cs = plt.contourf(xx, yy, Z, cmap=cmap)
        else:
            # Choose alpha blend level with respect to the number of estimators
            # that are in use (noting that AdaBoost can use fewer estimators
            # than its maximum if it achieves a good enough fit early on)
            estimator_alpha = 1.0 / len(model.estimators_)
            for tree in model.estimators_:
                Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
                Z = Z.reshape(xx.shape)
                cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
        # Build a coarser grid to plot a set of ensemble classifications
        # to show how these are different to what we see in the decision
        # surfaces. These points are regularly space and do not have a black outline
        xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
                                             np.arange(y_min, y_max, plot_step_coarser))
        Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
        cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
        # Plot the training points, these are clustered together and have a
        # black outline
        for i, c in zip(xrange(n_classes), plot_colors):
            idx = np.where(y == i)
            plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
                        cmap=cmap)
        plot_idx += 1  # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
francisco-dlp/hyperspy | hyperspy/_signals/signal1d.py | 1 | 58255 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
import math
import matplotlib.pyplot as plt
import numpy as np
import dask.array as da
import scipy.interpolate
import scipy as sp
from scipy.signal import savgol_filter
from scipy.ndimage.filters import gaussian_filter1d
try:
from statsmodels.nonparametric.smoothers_lowess import lowess
statsmodels_installed = True
except BaseException:
statsmodels_installed = False
from hyperspy.signal import BaseSignal
from hyperspy._signals.common_signal1d import CommonSignal1D
from hyperspy.signal_tools import SpikesRemoval
from hyperspy.models.model1d import Model1D
from hyperspy.misc.utils import signal_range_from_roi
from hyperspy.defaults_parser import preferences
from hyperspy.signal_tools import (
Signal1DCalibration,
SmoothingSavitzkyGolay,
SmoothingLowess,
SmoothingTV,
ButterworthFilter)
from hyperspy.ui_registry import DISPLAY_DT, TOOLKIT_DT
from hyperspy.misc.tv_denoise import _tv_denoise_1d
from hyperspy.signal_tools import BackgroundRemoval
from hyperspy.decorators import interactive_range_selector
from hyperspy.signal_tools import IntegrateArea
from hyperspy import components1d
from hyperspy._signals.lazy import LazySignal
from hyperspy.docstrings.signal1d import CROP_PARAMETER_DOC
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG, PARALLEL_ARG
_logger = logging.getLogger(__name__)
def find_peaks_ohaver(y, x=None, slope_thresh=0., amp_thresh=None,
                      medfilt_radius=5, maxpeakn=30000, peakgroup=10,
                      subchannel=True,):
    """Find peaks along a 1D line.
    Function to locate the positive peaks in a noisy x-y data set.
    Detects peaks by looking for downward zero-crossings in the first
    derivative that exceed 'slope_thresh'.
    Returns an array containing position, height, and width of each peak.
    Sorted by position.
    'slope_thresh' and 'amp_thresh', control sensitivity: higher values
    will neglect wider peaks (slope) and smaller features (amp),
    respectively.
    Parameters
    ----------
    y : array
        1D input array, e.g. a spectrum
    x : array (optional)
        1D array describing the calibration of y (must have same shape as y)
    slope_thresh : float (optional)
        1st derivative threshold to count the peak;
        higher values will neglect broader features;
        default is set to 0.
    amp_thresh : float (optional)
        intensity threshold below which peaks are ignored;
        higher values will neglect smaller features;
        default is set to 10% of max(y).
    medfilt_radius : int (optional)
        median filter window to apply to smooth the data
        (see scipy.signal.medfilt);
        if 0, no filter will be applied;
        default is set to 5.
    peakgroup : int (optional)
        number of points around the "top part" of the peak that
        are taken to estimate the peak height; for spikes or
        very narrow peaks, keep PeakGroup=1 or 2; for broad or
        noisy peaks, make PeakGroup larger to reduce the effect
        of noise;
        default is set to 10.
    maxpeakn : int (optional)
        number of maximum detectable peaks;
        default is set to 30000.
    subchannel : bool (optional)
        default is set to True.
    Returns
    -------
    P : structured array of shape (npeaks)
        contains fields: 'position', 'width', and 'height' for each peak.
    Examples
    --------
    >>> x = np.arange(0,50,0.01)
    >>> y = np.cos(x)
    >>> peaks = find_peaks_ohaver(y, x, 0, 0)
    Notes
    -----
    Original code from T. C. O'Haver, 1995.
    Version 2 Last revised Oct 27, 2006 Converted to Python by
    Michael Sarahan, Feb 2011.
    Revised to handle edges better. MCS, Mar 2011
    """
    if x is None:
        x = np.arange(len(y), dtype=np.int64)
    if not amp_thresh:
        amp_thresh = 0.1 * y.max()
    # Cast to a plain int: np.round returns a numpy scalar, which is not
    # safe to use as an array size or range() bound on all numpy versions.
    peakgroup = int(round(peakgroup))
    if medfilt_radius:
        d = np.gradient(scipy.signal.medfilt(y, medfilt_radius))
    else:
        d = np.gradient(y)
    n = np.round(peakgroup / 2 + 1)
    # NOTE: `np.float` was removed in NumPy 1.24; it was an alias for the
    # builtin `float`, so using `float` here is behavior-preserving.
    peak_dt = np.dtype([('position', float),
                        ('height', float),
                        ('width', float)])
    P = np.array([], dtype=peak_dt)
    peak = 0
    for j in range(len(y) - 4):
        if np.sign(d[j]) > np.sign(d[j + 1]):  # Detects zero-crossing
            if np.sign(d[j + 1]) == 0:
                continue
            # if slope of derivative is larger than slope_thresh
            if d[j] - d[j + 1] > slope_thresh:
                # if height of peak is larger than amp_thresh
                if y[j] > amp_thresh:
                    # the next section is very slow, and actually messes
                    # things up for images (discrete pixels),
                    # so by default, don't do subchannel precision in the
                    # 1D peakfind step.
                    if subchannel:
                        xx = np.zeros(peakgroup)
                        yy = np.zeros(peakgroup)
                        s = 0
                        for k in range(peakgroup):
                            groupindex = int(j + k - n + 1)
                            if groupindex < 1:
                                xx = xx[1:]
                                yy = yy[1:]
                                s += 1
                                continue
                            elif groupindex > y.shape[0] - 1:
                                xx = xx[:groupindex - 1]
                                yy = yy[:groupindex - 1]
                                break
                            xx[k - s] = x[groupindex]
                            yy[k - s] = y[groupindex]
                        avg = np.average(xx)
                        stdev = np.std(xx)
                        xxf = (xx - avg) / stdev
                        # Fit parabola to log10 of sub-group with
                        # centering and scaling
                        yynz = yy != 0
                        coef = np.polyfit(
                            xxf[yynz], np.log10(np.abs(yy[yynz])), 2)
                        c1 = coef[2]
                        c2 = coef[1]
                        c3 = coef[0]
                        with np.errstate(invalid='ignore'):
                            width = np.linalg.norm(stdev * 2.35703 /
                                                   (np.sqrt(2) * np.sqrt(-1 *
                                                                         c3)))
                        # if the peak is too narrow for least-squares
                        # technique to work well, just use the max value
                        # of y in the sub-group of points near peak.
                        if peakgroup < 7:
                            height = np.max(yy)
                            position = xx[np.argmin(np.abs(yy - height))]
                        else:
                            position = - ((stdev * c2 / (2 * c3)) - avg)
                            height = np.exp(c1 - c3 * (c2 / (2 * c3)) ** 2)
                    # Fill results array P. One row for each peak
                    # detected, containing the
                    # peak position (x-value) and peak height (y-value).
                    else:
                        position = x[j]
                        height = y[j]
                        # no way to know peak width without
                        # the above measurements.
                        width = 0
                    if (not np.isnan(position) and 0 < position < x[-1]):
                        P = np.hstack((P,
                                       np.array([(position, height, width)],
                                                dtype=peak_dt)))
                        peak += 1
    # return only the part of the array that contains peaks
    # (not the whole maxpeakn x 3 array)
    if len(P) > maxpeakn:
        minh = np.sort(P['height'])[-maxpeakn]
        P = P[P['height'] >= minh]
    # Sorts the values as a function of position
    P.sort(0)
    return P
def interpolate1D(number_of_interpolation_points, data):
    """Linearly up-sample a 1D array.

    Places ``number_of_interpolation_points - 1`` interpolated samples
    between every pair of original samples, so the output length is
    ``len(data) * ip - (ip - 1)``.

    Parameters
    ----------
    number_of_interpolation_points : int
        The up-sampling factor.
    data : array
        The 1D data to interpolate.

    Returns
    -------
    numpy array with the interpolated values.
    """
    factor = number_of_interpolation_points
    n_channels = len(data)
    # Both axes span the same arbitrary [0, 100] range; only the
    # relative sample positions matter for the interpolation.
    coarse_axis = np.linspace(0, 100, n_channels)
    fine_axis = np.linspace(0, 100, n_channels * factor - (factor - 1))
    return scipy.interpolate.interp1d(coarse_axis, data)(fine_axis)
def _estimate_shift1D(data, **kwargs):
mask = kwargs.get('mask', None)
ref = kwargs.get('ref', None)
interpolate = kwargs.get('interpolate', True)
ip = kwargs.get('ip', 5)
data_slice = kwargs.get('data_slice', slice(None))
if bool(mask):
# asarray is required for consistensy as argmax
# returns a numpy scalar array
return np.asarray(np.nan)
data = data[data_slice]
if interpolate is True:
data = interpolate1D(ip, data)
return np.argmax(np.correlate(ref, data, 'full')) - len(ref) + 1
def _shift1D(data, **kwargs):
shift = kwargs.get('shift', 0.)
original_axis = kwargs.get('original_axis', None)
fill_value = kwargs.get('fill_value', np.nan)
kind = kwargs.get('kind', 'linear')
offset = kwargs.get('offset', 0.)
scale = kwargs.get('scale', 1.)
size = kwargs.get('size', 2)
if np.isnan(shift) or shift == 0:
return data
axis = np.linspace(offset, offset + scale * (size - 1), size)
si = sp.interpolate.interp1d(original_axis,
data,
bounds_error=False,
fill_value=fill_value,
kind=kind)
offset = float(offset - shift)
axis = np.linspace(offset, offset + scale * (size - 1), size)
return si(axis)
class Signal1D(BaseSignal, CommonSignal1D):
"""
"""
_signal_dimension = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.axes_manager.signal_dimension != 1:
self.axes_manager.set_signal_dimension(1)
def _spikes_diagnosis(self, signal_mask=None,
navigation_mask=None):
"""Plots a histogram to help in choosing the threshold for
spikes removal.
Parameters
----------
signal_mask : boolean array
Restricts the operation to the signal locations not marked
as True (masked)
navigation_mask : boolean array
Restricts the operation to the navigation locations not
marked as True (masked).
See also
--------
spikes_removal_tool
"""
self._check_signal_dimension_equals_one()
dc = self.data
if signal_mask is not None:
dc = dc[..., ~signal_mask]
if navigation_mask is not None:
dc = dc[~navigation_mask, :]
der = np.abs(np.diff(dc, 1, -1))
n = ((~navigation_mask).sum() if navigation_mask else
self.axes_manager.navigation_size)
# arbitrary cutoff for number of spectra necessary before histogram
# data is compressed by finding maxima of each spectrum
tmp = BaseSignal(der) if n < 2000 else BaseSignal(
np.ravel(der.max(-1)))
# get histogram signal using smart binning and plot
tmph = tmp.get_histogram()
tmph.plot()
# Customize plot appearance
plt.gca().set_title('')
plt.gca().fill_between(tmph.axes_manager[0].axis,
tmph.data,
facecolor='#fddbc7',
interpolate=True,
color='none')
ax = tmph._plot.signal_plot.ax
axl = tmph._plot.signal_plot.ax_lines[0]
axl.set_line_properties(color='#b2182b')
plt.xlabel('Derivative magnitude')
plt.ylabel('Log(Counts)')
ax.set_yscale('log')
ax.set_ylim(10 ** -1, plt.ylim()[1])
ax.set_xlim(plt.xlim()[0], 1.1 * plt.xlim()[1])
plt.draw()
def spikes_removal_tool(self, signal_mask=None,
navigation_mask=None, display=True, toolkit=None):
"""Graphical interface to remove spikes from EELS spectra.
Parameters
----------
signal_mask : boolean array
Restricts the operation to the signal locations not marked
as True (masked)
navigation_mask : boolean array
Restricts the operation to the navigation locations not
marked as True (masked)
%s
%s
See also
--------
`_spikes_diagnosis`
"""
self._check_signal_dimension_equals_one()
sr = SpikesRemoval(self,
navigation_mask=navigation_mask,
signal_mask=signal_mask)
return sr.gui(display=display, toolkit=toolkit)
spikes_removal_tool.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
def create_model(self, dictionary=None):
"""Create a model for the current data.
Returns
-------
model : `Model1D` instance.
"""
model = Model1D(self, dictionary=dictionary)
return model
    def shift1D(self,
                shift_array,
                interpolation_method='linear',
                crop=True,
                expand=False,
                fill_value=np.nan,
                parallel=None,
                show_progressbar=None):
        """Shift the data in place over the signal axis by the amount specified
        by an array.
        Parameters
        ----------
        shift_array : numpy array
            An array containing the shifting amount. It must have
            `axes_manager._navigation_shape_in_array` shape.
        interpolation_method : str or int
            Specifies the kind of interpolation as a string ('linear',
            'nearest', 'zero', 'slinear', 'quadratic, 'cubic') or as an
            integer specifying the order of the spline interpolator to
            use.
        %s
        expand : bool
            If True, the data will be expanded to fit all data after alignment.
            Overrides `crop`.
        fill_value : float
            If crop is False fill the data outside of the original
            interval with the given value where needed.
        %s
        %s
        Raises
        ------
        SignalDimensionError
            If the signal dimension is not 1.
        """
        if not np.any(shift_array):
            # Nothing to do, the shift array is filled with zeros
            return
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        axis = self.axes_manager.signal_axes[0]
        # Figure out min/max shifts, and translate to shifts in index as well
        minimum, maximum = np.nanmin(shift_array), np.nanmax(shift_array)
        # ihigh/ilow bound the index range that remains valid for every
        # navigation position after shifting (used for cropping below).
        if minimum < 0:
            ihigh = 1 + axis.value2index(
                axis.high_value + minimum,
                rounding=math.floor)
        else:
            ihigh = axis.high_index + 1
        if maximum > 0:
            ilow = axis.value2index(axis.offset + maximum,
                                    rounding=math.ceil)
        else:
            ilow = axis.low_index
        if expand:
            # Grow the signal axis so that no shifted data falls outside it:
            # pre-pad by the most negative shift, post-pad by the most
            # positive one, filling new channels with `fill_value`.
            if self._lazy:
                ind = axis.index_in_array
                pre_shape = list(self.data.shape)
                post_shape = list(self.data.shape)
                pre_chunks = list(self.data.chunks)
                post_chunks = list(self.data.chunks)
                pre_shape[ind] = axis.high_index - ihigh + 1
                post_shape[ind] = ilow - axis.low_index
                # Re-chunk the padding along the signal axis so no chunk
                # exceeds the existing maximum chunk size.
                for chunks, shape in zip((pre_chunks, post_chunks),
                                         (pre_shape, post_shape)):
                    maxsize = min(np.max(chunks[ind]), shape[ind])
                    num = np.ceil(shape[ind] / maxsize)
                    chunks[ind] = tuple(len(ar) for ar in
                                        np.array_split(np.arange(shape[ind]),
                                                       num))
                pre_array = da.full(tuple(pre_shape),
                                    fill_value,
                                    chunks=tuple(pre_chunks))
                post_array = da.full(tuple(post_shape),
                                     fill_value,
                                     chunks=tuple(post_chunks))
                self.data = da.concatenate((pre_array, self.data, post_array),
                                           axis=ind)
            else:
                padding = []
                for i in range(self.data.ndim):
                    if i == axis.index_in_array:
                        padding.append((axis.high_index - ihigh + 1,
                                        ilow - axis.low_index))
                    else:
                        padding.append((0, 0))
                self.data = np.pad(self.data, padding, mode='constant',
                                   constant_values=(fill_value,))
            # Update the axis calibration to match the padded data.
            axis.offset += minimum
            axis.size += axis.high_index - ihigh + 1 + ilow - axis.low_index
        # Apply the per-navigation-position shift via interpolation.
        self._map_iterate(_shift1D, (('shift', shift_array.ravel()),),
                          original_axis=axis.axis,
                          fill_value=fill_value,
                          kind=interpolation_method,
                          offset=axis.offset,
                          scale=axis.scale,
                          size=axis.size,
                          show_progressbar=show_progressbar,
                          parallel=parallel,
                          ragged=False)
        if crop and not expand:
            # Discard the channels that are not valid for every position.
            _logger.debug("Cropping %s from index %i to %i"
                          % (self, ilow, ihigh))
            self.crop(axis.index_in_axes_manager,
                      ilow,
                      ihigh)
        self.events.data_changed.trigger(obj=self)
    shift1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG, PARALLEL_ARG)
def interpolate_in_between(self, start, end, delta=3, parallel=None,
show_progressbar=None, **kwargs):
"""Replace the data in a given range by interpolation.
The operation is performed in place.
Parameters
----------
start, end : int or float
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
delta : int or float
The windows around the (start, end) to use for interpolation
%s
%s
All extra keyword arguments are passed to
`scipy.interpolate.interp1d`. See the function documentation
for details.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
i1 = axis._get_index(start)
i2 = axis._get_index(end)
if isinstance(delta, float):
delta = int(delta / axis.scale)
i0 = int(np.clip(i1 - delta, 0, np.inf))
i3 = int(np.clip(i2 + delta, 0, axis.size))
def interpolating_function(dat):
dat_int = sp.interpolate.interp1d(
list(range(i0, i1)) + list(range(i2, i3)),
dat[i0:i1].tolist() + dat[i2:i3].tolist(),
**kwargs)
dat[i1:i2] = dat_int(list(range(i1, i2)))
return dat
self._map_iterate(interpolating_function, ragged=False,
parallel=parallel, show_progressbar=show_progressbar)
self.events.data_changed.trigger(obj=self)
interpolate_in_between.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG)
def _check_navigation_mask(self, mask):
if mask is not None:
if not isinstance(mask, BaseSignal):
raise ValueError("mask must be a BaseSignal instance.")
elif mask.axes_manager.signal_dimension not in (0, 1):
raise ValueError("mask must be a BaseSignal "
"with signal_dimension equal to 1")
elif (mask.axes_manager.navigation_dimension !=
self.axes_manager.navigation_dimension):
raise ValueError("mask must be a BaseSignal with the same "
"navigation_dimension as the current signal.")
def estimate_shift1D(self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
mask=None,
show_progressbar=None,
parallel=None):
"""Estimate the shifts in the current signal axis using
cross-correlation.
This method can only estimate the shift by comparing
unidimensional features that should not change the position in
the signal axis. To decrease the memory usage, the time of
computation and the accuracy of the results it is convenient to
select the feature of interest providing sensible values for
`start` and `end`. By default interpolation is used to obtain
subpixel precision.
Parameters
----------
start, end : int, float or None
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
as eference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
mask : `BaseSignal` of bool.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
%s
%s
Returns
-------
An array with the result of the estimation in the axis units. \
Although the computation is performed in batches if the signal is \
lazy, the result is computed in memory because it depends on the \
current state of the axes that could change later on in the workflow.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
ip = number_of_interpolation_points + 1
axis = self.axes_manager.signal_axes[0]
self._check_navigation_mask(mask)
# we compute for now
if isinstance(start, da.Array):
start = start.compute()
if isinstance(end, da.Array):
end = end.compute()
i1, i2 = axis._get_index(start), axis._get_index(end)
if reference_indices is None:
reference_indices = self.axes_manager.indices
ref = self.inav[reference_indices].data[i1:i2]
if interpolate is True:
ref = interpolate1D(ip, ref)
iterating_kwargs = ()
if mask is not None:
iterating_kwargs += (('mask', mask),)
shift_signal = self._map_iterate(
_estimate_shift1D,
iterating_kwargs=iterating_kwargs,
data_slice=slice(i1, i2),
ref=ref,
ip=ip,
interpolate=interpolate,
ragged=False,
parallel=parallel,
inplace=False,
show_progressbar=show_progressbar,)
shift_array = shift_signal.data
if max_shift is not None:
if interpolate is True:
max_shift *= ip
shift_array.clip(-max_shift, max_shift)
if interpolate is True:
shift_array = shift_array / ip
shift_array *= axis.scale
if self._lazy:
# We must compute right now because otherwise any changes to the
# axes_manager of the signal later in the workflow may result in
# a wrong shift_array
shift_array = shift_array.compute()
return shift_array
estimate_shift1D.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG)
def align1D(self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
interpolation_method='linear',
crop=True,
expand=False,
fill_value=np.nan,
also_align=None,
mask=None,
show_progressbar=None):
"""Estimate the shifts in the signal axis using
cross-correlation and use the estimation to align the data in place.
This method can only estimate the shift by comparing
unidimensional
features that should not change the position.
To decrease memory usage, time of computation and improve
accuracy it is convenient to select the feature of interest
setting the `start` and `end` keywords. By default interpolation is
used to obtain subpixel precision.
Parameters
----------
start, end : int, float or None
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
as eference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
interpolation_method : str or int
Specifies the kind of interpolation as a string ('linear',
'nearest', 'zero', 'slinear', 'quadratic, 'cubic') or as an
integer specifying the order of the spline interpolator to
use.
%s
expand : bool
If True, the data will be expanded to fit all data after alignment.
Overrides `crop`.
fill_value : float
If crop is False fill the data outside of the original
interval with the given value where needed.
also_align : list of signals, None
A list of BaseSignal instances that has exactly the same
dimensions as this one and that will be aligned using the shift map
estimated using the this signal.
mask : `BaseSignal` or bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
%s
Returns
-------
An array with the result of the estimation.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
See also
--------
`estimate_shift1D`
"""
if also_align is None:
also_align = []
self._check_signal_dimension_equals_one()
if self._lazy:
_logger.warning('In order to properly expand, the lazy '
'reference signal will be read twice (once to '
'estimate shifts, and second time to shift '
'appropriatelly), which might take a long time. '
'Use expand=False to only pass through the data '
'once.')
shift_array = self.estimate_shift1D(
start=start,
end=end,
reference_indices=reference_indices,
max_shift=max_shift,
interpolate=interpolate,
number_of_interpolation_points=number_of_interpolation_points,
mask=mask,
show_progressbar=show_progressbar)
signals_to_shift = [self] + also_align
for signal in signals_to_shift:
signal.shift1D(shift_array=shift_array,
interpolation_method=interpolation_method,
crop=crop,
fill_value=fill_value,
expand=expand,
show_progressbar=show_progressbar)
align1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG)
def integrate_in_range(self, signal_range='interactive',
display=True, toolkit=None):
"""Sums the spectrum over an energy range, giving the integrated
area.
The energy range can either be selected through a GUI or the command
line.
Parameters
----------
signal_range : a tuple of this form (l, r) or "interactive"
l and r are the left and right limits of the range. They can be
numbers or None, where None indicates the extremes of the interval.
If l and r are floats the `signal_range` will be in axis units (for
example eV). If l and r are integers the `signal_range` will be in
index units. When `signal_range` is "interactive" (default) the
range is selected using a GUI.
Returns
--------
integrated_spectrum : `BaseSignal` subclass
See Also
--------
`integrate_simpson`
Examples
--------
Using the GUI
>>> s = hs.signals.Signal1D(range(1000))
>>> s.integrate_in_range() #doctest: +SKIP
Using the CLI
>>> s_int = s.integrate_in_range(signal_range=(560,None))
Selecting a range in the axis units, by specifying the
signal range with floats.
>>> s_int = s.integrate_in_range(signal_range=(560.,590.))
Selecting a range using the index, by specifying the
signal range with integers.
>>> s_int = s.integrate_in_range(signal_range=(100,120))
"""
from hyperspy.misc.utils import deprecation_warning
msg = (
"The `Signal1D.integrate_in_range` method is deprecated and will "
"be removed in v2.0. Use a `roi.SpanRoi` followed by `integrate1D` "
"instead.")
deprecation_warning(msg)
signal_range = signal_range_from_roi(signal_range)
if signal_range == 'interactive':
self_copy = self.deepcopy()
ia = IntegrateArea(self_copy, signal_range)
ia.gui(display=display, toolkit=toolkit)
integrated_signal1D = self_copy
else:
integrated_signal1D = self._integrate_in_range_commandline(
signal_range)
return integrated_signal1D
def _integrate_in_range_commandline(self, signal_range):
signal_range = signal_range_from_roi(signal_range)
e1 = signal_range[0]
e2 = signal_range[1]
integrated_signal1D = self.isig[e1:e2].integrate1D(-1)
return integrated_signal1D
def calibrate(self, display=True, toolkit=None):
"""
Calibrate the spectral dimension using a gui.
It displays a window where the new calibration can be set by:
* setting the offset, units and scale directly
* selecting a range by dragging the mouse on the spectrum figure
and setting the new values for the given range limits
Parameters
----------
%s
%s
Notes
-----
For this method to work the output_dimension must be 1.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
calibration = Signal1DCalibration(self)
return calibration.gui(display=display, toolkit=toolkit)
calibrate.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
def smooth_savitzky_golay(self,
polynomial_order=None,
window_length=None,
differential_order=0,
parallel=None, display=True, toolkit=None):
"""
Apply a Savitzky-Golay filter to the data in place.
If `polynomial_order` or `window_length` or `differential_order` are
None the method is run in interactive mode.
Parameters
----------
polynomial_order : int, optional
The order of the polynomial used to fit the samples.
`polyorder` must be less than `window_length`.
window_length : int, optional
The length of the filter window (i.e. the number of coefficients).
`window_length` must be a positive odd integer.
differential_order: int, optional
The order of the derivative to compute. This must be a
nonnegative integer. The default is 0, which means to filter
the data without differentiating.
%s
%s
%s
Notes
-----
More information about the filter in `scipy.signal.savgol_filter`.
"""
self._check_signal_dimension_equals_one()
if (polynomial_order is not None and
window_length is not None):
axis = self.axes_manager.signal_axes[0]
self.map(savgol_filter, window_length=window_length,
polyorder=polynomial_order, deriv=differential_order,
delta=axis.scale, ragged=False, parallel=parallel)
else:
# Interactive mode
smoother = SmoothingSavitzkyGolay(self)
smoother.differential_order = differential_order
if polynomial_order is not None:
smoother.polynomial_order = polynomial_order
if window_length is not None:
smoother.window_length = window_length
return smoother.gui(display=display, toolkit=toolkit)
smooth_savitzky_golay.__doc__ %= (PARALLEL_ARG, DISPLAY_DT, TOOLKIT_DT)
def smooth_lowess(self,
smoothing_parameter=None,
number_of_iterations=None,
show_progressbar=None,
parallel=None, display=True, toolkit=None):
"""
Lowess data smoothing in place.
If `smoothing_parameter` or `number_of_iterations` are None the method
is run in interactive mode.
Parameters
----------
smoothing_parameter: float or None
Between 0 and 1. The fraction of the data used
when estimating each y-value.
number_of_iterations: int or None
The number of residual-based reweightings
to perform.
%s
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
ImportError
If statsmodels is not installed.
Notes
-----
This method uses the lowess algorithm from the `statsmodels` library,
which needs to be installed to use this method.
"""
if not statsmodels_installed:
raise ImportError("statsmodels is not installed. This package is "
"required for this feature.")
self._check_signal_dimension_equals_one()
if smoothing_parameter is None or number_of_iterations is None:
smoother = SmoothingLowess(self)
if smoothing_parameter is not None:
smoother.smoothing_parameter = smoothing_parameter
if number_of_iterations is not None:
smoother.number_of_iterations = number_of_iterations
return smoother.gui(display=display, toolkit=toolkit)
else:
self.map(lowess,
exog=self.axes_manager[-1].axis,
frac=smoothing_parameter,
it=number_of_iterations,
is_sorted=True,
return_sorted=False,
show_progressbar=show_progressbar,
ragged=False,
parallel=parallel)
smooth_lowess.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, DISPLAY_DT,
TOOLKIT_DT)
def smooth_tv(self, smoothing_parameter=None, show_progressbar=None,
parallel=None, display=True, toolkit=None):
"""
Total variation data smoothing in place.
Parameters
----------
smoothing_parameter: float or None
Denoising weight relative to L2 minimization. If None the method
is run in interactive mode.
%s
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if smoothing_parameter is None:
smoother = SmoothingTV(self)
return smoother.gui(display=display, toolkit=toolkit)
else:
self.map(_tv_denoise_1d, weight=smoothing_parameter,
ragged=False,
show_progressbar=show_progressbar,
parallel=parallel)
smooth_tv.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, DISPLAY_DT,
TOOLKIT_DT)
def filter_butterworth(self,
cutoff_frequency_ratio=None,
type='low',
order=2, display=True, toolkit=None):
"""
Butterworth filter in place.
Parameters
----------
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
smoother = ButterworthFilter(self)
if cutoff_frequency_ratio is not None:
smoother.cutoff_frequency_ratio = cutoff_frequency_ratio
smoother.type = type
smoother.order = order
smoother.apply()
else:
return smoother.gui(display=display, toolkit=toolkit)
filter_butterworth.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
    def _remove_background_cli(
            self, signal_range, background_estimator, fast=True,
            zero_fill=False, show_progressbar=None):
        """Subtract a fitted background from the signal and return the
        result as a new signal (command-line path of `remove_background`).

        Parameters
        ----------
        signal_range : tuple or ROI
            (left, right) limits of the fitting window.
        background_estimator : component instance
            The background component to fit (e.g. PowerLaw).
        fast : bool
            If True (and the signal is not lazy), use the component's
            parameter estimation instead of a full non-linear fit.
        zero_fill : bool
            If True, zero all channels below the lower fitting limit.
        show_progressbar : bool or None
            Passed through to the model evaluation/fit.
        """
        signal_range = signal_range_from_roi(signal_range)
        from hyperspy.models.model1d import Model1D
        model = Model1D(self)
        model.append(background_estimator)
        # Seed the component parameters from the data inside the range
        # (for every navigation position).
        background_estimator.estimate_parameters(
            self,
            signal_range[0],
            signal_range[1],
            only_current=False)
        if fast and not self._lazy:
            try:
                # Fast path: evaluate the estimated component directly.
                axis = self.axes_manager.signal_axes[0].axis
                result = self - background_estimator.function_nd(axis)
            except MemoryError:
                # Fall back to evaluating through the model, which can
                # work in batches.
                result = self - model.as_signal(
                    show_progressbar=show_progressbar)
        else:
            # Accurate path: non-linear least-squares fit restricted to
            # the requested range.
            model.set_signal_range(signal_range[0], signal_range[1])
            model.multifit(show_progressbar=show_progressbar)
            model.reset_signal_range()
            result = self - model.as_signal(show_progressbar=show_progressbar)
        if zero_fill:
            if self._lazy:
                # Lazy data does not support in-place slice assignment;
                # rebuild the array with a zero block prepended instead.
                low_idx = result.axes_manager[-1].value2index(signal_range[0])
                z = da.zeros(low_idx, chunks=(low_idx,))
                cropped_da = result.data[low_idx:]
                result.data = da.concatenate([z, cropped_da])
            else:
                result.isig[:signal_range[0]] = 0
        return result
def remove_background(
self,
signal_range='interactive',
background_type='Power Law',
polynomial_order=2,
fast=True,
zero_fill=False,
plot_remainder=True,
show_progressbar=None, display=True, toolkit=None):
"""
Remove the background, either in place using a gui or returned as a new
spectrum using the command line.
Parameters
----------
signal_range : "interactive", tuple of ints or floats, optional
If this argument is not specified, the signal range has to be
selected using a GUI. And the original spectrum will be replaced.
If tuple is given, the a spectrum will be returned.
background_type : str
The type of component which should be used to fit the background.
Possible components: PowerLaw, Gaussian, Offset, Polynomial
If Polynomial is used, the polynomial order can be specified
polynomial_order : int, default 2
Specify the polynomial order if a Polynomial background is used.
fast : bool
If True, perform an approximative estimation of the parameters.
If False, the signal is fitted using non-linear least squares
afterwards.This is slower compared to the estimation but
possibly more accurate.
zero_fill : bool
If True, all spectral channels lower than the lower bound of the
fitting range will be set to zero (this is the default behavior
of Gatan's DigitalMicrograph). Setting this value to False
allows for inspection of the quality of background fit throughout
the pre-fitting region.
plot_remainder : bool
If True, add a (green) line previewing the remainder signal after
background removal. This preview is obtained from a Fast calculation
so the result may be different if a NLLS calculation is finally
performed.
%s
%s
%s
Examples
--------
Using gui, replaces spectrum s
>>> s = hs.signals.Signal1D(range(1000))
>>> s.remove_background() #doctest: +SKIP
Using command line, returns a spectrum
>>> s1 = s.remove_background(signal_range=(400,450), background_type='PowerLaw')
Using a full model to fit the background
>>> s1 = s.remove_background(signal_range=(400,450), fast=False)
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if signal_range == 'interactive':
br = BackgroundRemoval(self, background_type=background_type,
polynomial_order=polynomial_order,
fast=fast,
plot_remainder=plot_remainder,
show_progressbar=show_progressbar,
zero_fill=zero_fill)
return br.gui(display=display, toolkit=toolkit)
else:
if background_type in ('PowerLaw', 'Power Law'):
background_estimator = components1d.PowerLaw()
elif background_type == 'Gaussian':
background_estimator = components1d.Gaussian()
elif background_type == 'Offset':
background_estimator = components1d.Offset()
elif background_type == 'Polynomial':
background_estimator = components1d.Polynomial(
polynomial_order)
else:
raise ValueError(
"Background type: " +
background_type +
" not recognized")
spectra = self._remove_background_cli(
signal_range=signal_range,
background_estimator=background_estimator,
fast=fast,
zero_fill=zero_fill,
show_progressbar=show_progressbar)
return spectra
remove_background.__doc__ %= (SHOW_PROGRESSBAR_ARG, DISPLAY_DT, TOOLKIT_DT)
@interactive_range_selector
def crop_signal1D(self, left_value=None, right_value=None,):
"""Crop in place the spectral dimension.
Parameters
----------
left_value, righ_value : int, float or None
If int the values are taken as indices. If float they are
converted to indices using the spectral axis calibration.
If left_value is None crops from the beginning of the axis.
If right_value is None crops up to the end of the axis. If
both are
None the interactive cropping interface is activated
enabling
cropping the spectrum using a span selector in the signal
plot.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
try:
left_value, right_value = signal_range_from_roi(left_value)
except TypeError:
# It was not a ROI, we carry on
pass
self.crop(axis=self.axes_manager.signal_axes[0].index_in_axes_manager,
start=left_value, end=right_value)
def gaussian_filter(self, FWHM):
    """Applies a Gaussian filter in the spectral dimension in place.

    Parameters
    ----------
    FWHM : float
        The Full Width at Half Maximum of the gaussian in the
        spectral axis units

    Raises
    ------
    ValueError
        If FWHM is equal or less than zero.
    SignalDimensionError
        If the signal dimension is not 1.
    """
    self._check_signal_dimension_equals_one()
    if FWHM <= 0:
        raise ValueError("FWHM must be greater than zero")
    signal_axis = self.axes_manager.signal_axes[0]
    # Convert the FWHM from axis units to pixels, then to a Gaussian
    # standard deviation (FWHM = 2*sqrt(2*ln 2)*sigma ~= 2.35482*sigma).
    sigma = (FWHM / signal_axis.scale) / 2.35482
    self.map(gaussian_filter1d, sigma=sigma, ragged=False)
def hanning_taper(self, side='both', channels=None, offset=0):
    """Apply a hanning taper to the data in place.

    Parameters
    ----------
    side : 'left', 'right' or 'both'
        Specify which side to use.
    channels : None or int
        The number of channels to taper. If None 2% of the total
        number of channels are tapered (with a minimum of 20).
    offset : int
        Number of channels at the very edge that are zeroed out
        before the taper begins.

    Returns
    -------
    channels

    Raises
    ------
    SignalDimensionError
        If the signal dimension is not 1.
    """
    if not np.issubdtype(self.data.dtype, np.floating):
        raise TypeError("The data dtype should be `float`. It can be "
                        "changed by using the `change_dtype('float')` "
                        "method of the signal.")

    # TODO: generalize it
    self._check_signal_dimension_equals_one()
    if channels is None:
        # Default taper width: 2% of the signal length, minimum 20.
        channels = int(round(len(self()) * 0.02))
        if channels < 20:
            channels = 20
    dc = self._data_aligned_with_axes
    if self._lazy and offset != 0:
        # Lazy arrays cannot be zeroed in place, so pre-build a dask
        # block of zeros to concatenate at the offset edge(s).
        shp = dc.shape
        if len(shp) == 1:
            nav_shape = ()
            nav_chunks = ()
        else:
            nav_shape = shp[:-1]
            nav_chunks = dc.chunks[:-1]
        zeros = da.zeros(nav_shape + (offset,),
                         chunks=nav_chunks + ((offset,),))

    if side == 'left' or side == 'both':
        if self._lazy:
            # Rebuild the array: [zeros] + tapered head + untouched rest.
            tapered = dc[..., offset:channels + offset]
            tapered *= np.hanning(2 * channels)[:channels]
            therest = dc[..., channels + offset:]
            thelist = [] if offset == 0 else [zeros]
            thelist.extend([tapered, therest])
            dc = da.concatenate(thelist, axis=-1)
        else:
            # In-memory data is modified in place.
            dc[..., offset:channels + offset] *= (
                np.hanning(2 * channels)[:channels])
            dc[..., :offset] *= 0.
    if side == 'right' or side == 'both':
        # rl is the (exclusive) right limit of the tapered slice.
        rl = None if offset == 0 else -offset
        if self._lazy:
            therest = dc[..., :-channels - offset]
            tapered = dc[..., -channels - offset:rl]
            tapered *= np.hanning(2 * channels)[-channels:]
            thelist = [therest, tapered]
            if offset != 0:
                thelist.append(zeros)
            dc = da.concatenate(thelist, axis=-1)
        else:
            dc[..., -channels - offset:rl] *= (
                np.hanning(2 * channels)[-channels:])
            if offset != 0:
                dc[..., -offset:] *= 0.
    if self._lazy:
        # Lazy path built a new dask graph; store it back on the signal.
        self.data = dc
    self.events.data_changed.trigger(obj=self)
    return channels
def find_peaks1D_ohaver(self, xdim=None, slope_thresh=0, amp_thresh=None,
                        subchannel=True, medfilt_radius=5, maxpeakn=30000,
                        peakgroup=10, parallel=None):
    """Find positive peaks along a 1D Signal. It detects peaks by looking
    for downward zero-crossings in the first derivative that exceed
    'slope_thresh'.

    'slope_thresh' and 'amp_thresh', control sensitivity: higher
    values will neglect broad peaks (slope) and smaller features (amp),
    respectively.

    `peakgroup` is the number of points around the top of the peak
    that are taken to estimate the peak height. For spikes or very
    narrow peaks, set `peakgroup` to 1 or 2; for broad or noisy peaks,
    make `peakgroup` larger to reduce the effect of noise.

    Parameters
    ----------
    slope_thresh : float, optional
        1st derivative threshold to count the peak;
        higher values will neglect broader features;
        default is set to 0.
    amp_thresh : float, optional
        intensity threshold below which peaks are ignored;
        higher values will neglect smaller features;
        default is set to 10%% of max(y).
    medfilt_radius : int, optional
        median filter window to apply to smooth the data
        (see scipy.signal.medfilt);
        if 0, no filter will be applied;
        default is set to 5.
    peakgroup : int, optional
        number of points around the "top part" of the peak
        that are taken to estimate the peak height;
        default is set to 10
    maxpeakn : int, optional
        number of maximum detectable peaks;
        default is set to 30000.
    subchannel : bool, optional
        default is set to True.
    %s

    Returns
    -------
    structured array of shape (npeaks) containing fields: 'position',
    'width', and 'height' for each peak.

    Raises
    ------
    SignalDimensionError
        If the signal dimension is not 1.
    """
    # TODO: add scipy.signal.find_peaks_cwt
    self._check_signal_dimension_equals_one()
    axis = self.axes_manager.signal_axes[0].axis
    # ragged=True because each spectrum may yield a different number
    # of peaks.
    peaks = self.map(find_peaks_ohaver,
                     x=axis,
                     slope_thresh=slope_thresh,
                     amp_thresh=amp_thresh,
                     medfilt_radius=medfilt_radius,
                     maxpeakn=maxpeakn,
                     peakgroup=peakgroup,
                     subchannel=subchannel,
                     ragged=True,
                     parallel=parallel,
                     inplace=False)
    return peaks.data
# Fill the %s placeholder with the shared ``parallel`` description.
find_peaks1D_ohaver.__doc__ %= PARALLEL_ARG
def estimate_peak_width(self,
                        factor=0.5,
                        window=None,
                        return_interval=False,
                        parallel=None,
                        show_progressbar=None):
    """Estimate the width of the highest intensity of peak
    of the spectra at a given fraction of its maximum.

    It can be used with asymmetric peaks. For accurate results any
    background must be previously subtracted.
    The estimation is performed by interpolation using cubic splines.

    Parameters
    ----------
    factor : 0 < float < 1
        The default, 0.5, estimates the FWHM.
    window : None or float
        The size of the window centred at the peak maximum
        used to perform the estimation.
        The window size must be chosen with care: if it is narrower
        than the width of the peak at some positions or if it is
        so wide that it includes other more intense peaks this
        method cannot compute the width and a NaN is stored instead.
    return_interval: bool
        If True, returns 2 extra signals with the positions of the
        desired height fraction at the left and right of the
        peak.
    %s
    %s

    Returns
    -------
    width or [width, left, right], depending on the value of
    `return_interval`.
    """
    if show_progressbar is None:
        show_progressbar = preferences.General.show_progressbar
    self._check_signal_dimension_equals_one()
    if not 0 < factor < 1:
        raise ValueError("factor must be between 0 and 1.")
    axis = self.axes_manager.signal_axes[0]
    # x = axis.axis
    maxval = self.axes_manager.navigation_size
    show_progressbar = show_progressbar and maxval > 0

    def estimating_function(spectrum,
                            window=None,
                            factor=0.5,
                            axis=None):
        # Per-spectrum worker: returns the two x positions where the
        # (optionally windowed) spectrum crosses factor * max, or
        # [nan, nan] when the crossing count is not exactly two.
        x = axis.axis
        if window is not None:
            vmax = axis.index2value(spectrum.argmax())
            slices = axis._get_array_slices(
                slice(vmax - window * 0.5, vmax + window * 0.5))
            spectrum = spectrum[slices]
            x = x[slices]
        # Subtract the threshold so the crossings become spline roots.
        spline = scipy.interpolate.UnivariateSpline(
            x,
            spectrum - factor * spectrum.max(),
            s=0)
        roots = spline.roots()
        if len(roots) == 2:
            return np.array(roots)
        else:
            return np.full((2,), np.nan)

    both = self._map_iterate(estimating_function,
                             window=window,
                             factor=factor,
                             axis=axis,
                             ragged=False,
                             inplace=False,
                             parallel=parallel,
                             show_progressbar=show_progressbar)
    left, right = both.T.split()
    width = right - left
    if factor == 0.5:
        width.metadata.General.title = (
            self.metadata.General.title + " FWHM")
        left.metadata.General.title = (
            self.metadata.General.title + " FWHM left position")
        right.metadata.General.title = (
            self.metadata.General.title + " FWHM right position")
    else:
        width.metadata.General.title = (
            self.metadata.General.title +
            " full-width at %.1f maximum" % factor)
        left.metadata.General.title = (
            self.metadata.General.title +
            " full-width at %.1f maximum left position" % factor)
        right.metadata.General.title = (
            self.metadata.General.title +
            " full-width at %.1f maximum right position" % factor)
    for signal in (left, width, right):
        # The results are scalar per navigation position.
        signal.axes_manager.set_signal_dimension(0)
        signal.set_signal_type("")
    if return_interval is True:
        return [width, left, right]
    else:
        return width
estimate_peak_width.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG)
class LazySignal1D(LazySignal, Signal1D):
    """Lazy (dask-backed) variant of Signal1D."""

    _lazy = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Lazy signals are constructed generically; pin the signal
        # dimension to 1 here.
        self.axes_manager.set_signal_dimension(1)
| gpl-3.0 |
nesterione/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
    # Two obvious clusters encoded directly in a precomputed affinity
    # matrix: samples 0-2 and samples 3-6.
    S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])

    # Exercise both eigen solvers, both label-assignment strategies, and
    # dense vs. sparse affinity input.
    for eigen_solver in ('arpack', 'lobpcg'):
        for assign_labels in ('kmeans', 'discretize'):
            for mat in (S, sparse.csr_matrix(S)):
                model = SpectralClustering(random_state=0, n_clusters=2,
                                           affinity='precomputed',
                                           eigen_solver=eigen_solver,
                                           assign_labels=assign_labels
                                           ).fit(mat)
                labels = model.labels_
                # Cluster ids are arbitrary; normalize so sample 0 is 1.
                if labels[0] == 0:
                    labels = 1 - labels

                assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])

                # The fitted model must survive a pickle round-trip.
                model_copy = loads(dumps(model))
                assert_equal(model_copy.n_clusters, model.n_clusters)
                assert_equal(model_copy.eigen_solver, model.eigen_solver)
                assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
    # Test the amg mode of SpectralClustering
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    # The amg solver requires the optional pyamg package; branch on its
    # availability instead of skipping the test outright.
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False
    if amg_loaded:
        labels = spectral_clustering(S, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # We don't care too much that it's good, just that it *worked*.
        # There does have to be some lower limit on the performance though.
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        # Without pyamg installed, requesting the amg solver must raise.
        assert_raises(ValueError, spectral_embedding, S,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
    # Test that SpectralClustering fails with an unknown mode set.
    blob_centers = np.array([[0., 0., 0.],
                             [10., 10., 10.],
                             [20., 20., 20.]])
    X, true_labels = make_blobs(n_samples=100, centers=blob_centers,
                                cluster_std=1., random_state=42)
    # Turn pairwise distances into a sparse similarity matrix.
    distances = pairwise_distances(X)
    similarity = sparse.coo_matrix(np.max(distances) - distances)
    assert_raises(ValueError, spectral_clustering, similarity, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    # Test that SpectralClustering fails with an unknown assign_labels set.
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    # An unrecognized assign_labels value must raise ValueError.
    assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    # Spectral clustering must accept a sparse precomputed affinity and
    # still recover two well-separated blobs perfectly.
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)

    affinity = np.maximum(rbf_kernel(X, gamma=1) - 1e-4, 0)
    affinity = sparse.coo_matrix(affinity)

    model = SpectralClustering(random_state=0, n_clusters=2,
                               affinity='precomputed').fit(affinity)
    assert_equal(adjusted_rand_score(y, model.labels_), 1)
def test_affinities():
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                            random_state=0)
    # The tiny blobs produce a disconnected knn graph, which warns.
    assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
    assert_equal(adjusted_rand_score(y, sp.labels_), 1)

    # Default rbf affinity with an explicit gamma.
    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)

    X = check_random_state(10).rand(10, 5) * 10

    # Smoke-test every registered kernel: only check that fitting runs
    # and produces one label per sample.
    kernels_available = kernel_metrics()
    for kern in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kern != 'additive_chi2':
            sp = SpectralClustering(n_clusters=2, affinity=kern,
                                    random_state=0)
            labels = sp.fit(X).labels_
            assert_equal((X.shape[0],), labels.shape)

    # A constant callable affinity is also accepted.
    sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()

    sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    """Check discretize() recovers noisy one-hot assignments.

    Builds a noisy class-indicator matrix for several sizes and class
    counts and requires high agreement with the true labels.
    """
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # random class labels in the inclusive range [0, n_class].
            # randint's upper bound is exclusive, so n_class + 1 draws
            # exactly the values the removed random_integers(0, n_class)
            # produced (random_integers was implemented as
            # randint(low, high + 1), so the random stream is identical).
            y_true = random_state.randint(0, n_class + 1, n_samples)
            # plain float instead of np.float, which NumPy 1.24 removed
            y_true = np.array(y_true, float)
            # noise class assignment matrix
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                             (np.arange(n_samples),
                                              y_true)),
                                            shape=(n_samples,
                                                   n_class + 1))
            y_true_noisy = (y_indicator.toarray()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
greysAcademicCode/batch-iv-analysis | batch_iv_analysis/gui.py | 1 | 54573 | from batch_iv_analysis.batch_iv_analysis_UI import Ui_batch_iv_analysis
# needed for file watching
import time
# for performance tuning
#import cProfile, pstats, io
#pr = cProfile.Profile()
import math
#TODO: make area editable
from collections import OrderedDict
from itertools import zip_longest
import os, sys, inspect, csv
import scipy.io as sio # for savemat
import numpy as np
import h5py
from PyQt5.QtCore import QSettings, Qt, QSignalMapper, QFileSystemWatcher, QDir, QFileInfo, QObject, pyqtSignal, QRunnable
from PyQt5.QtWidgets import QApplication, QMainWindow, QDialog, QFileDialog, QTableWidgetItem, QCheckBox, QPushButton, QItemDelegate
import matplotlib.pyplot as plt
plt.switch_backend("Qt5Agg")
class Object(object):
    """Empty class used as a plain attribute bag."""
    pass
def runGUI(analyzer, args=None):
    """Create the Qt application, show the main window and run the
    event loop until the user quits; exits the process with Qt's
    return code."""
    app = QApplication(sys.argv)
    window = MainWindow(analyzer)
    window.show()
    sys.exit(app.exec_())
class customSignals(QObject):
    # NOTE(review): presumably emitted by background fit workers so the
    # result is delivered to the GUI thread (it is connected to
    # MainWindow._processFitResult) — confirm against the emit site.
    newFitResult = pyqtSignal(object)
    #populateRow = pyqtSignal(object)
    #analysisResult = pyqtSignal(dict)
    #sloppy = pyqtSignal(bool)
#mySignals = customSignals()
class col:
    """Descriptor for one results-table column."""
    header = ''    # text shown in the column header
    position = 0   # column index in the table
    tooltip = ''   # hover text for the header cell
class FloatDelegate(QItemDelegate):
    """Table-item delegate that renders numeric cells rounded to a fixed
    number of significant figures; non-numeric cells fall back to the
    default rendering."""

    def __init__(self, sigFigs, parent=None):
        QItemDelegate.__init__(self, parent=parent)
        self.sigFigs = sigFigs  # significant figures used when formatting

    def paint(self, painter, option, index):
        value = index.model().data(index, Qt.DisplayRole)
        try:
            float(value)  # raises if the cell is not numeric
            painter.drawText(option.rect, Qt.AlignLeft | Qt.AlignVCenter,
                             MainWindow.to_precision(value, self.sigFigs))
        except (TypeError, ValueError):
            # Was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; float() can only raise
            # TypeError or ValueError here.
            QItemDelegate.paint(self, painter, option, index)
class MainWindow(QMainWindow):
# NOTE(review): these are *class* attributes, so the mutable ones
# (fileNames, bounds, cols) are shared by every MainWindow instance.
workingDirectory = ''  # directory currently being read/watched
fileNames = []  # files seen so far
supportedExtensions = ['*.csv','*.tsv','*.txt','*.liv1','*.liv2','*.div1','*.div2', '*.h5']

# [lower, upper] fit bounds for each characteristic-equation parameter
bounds = {}
bounds['I0'] = [0, np.inf]
bounds['Iph'] = [0, np.inf]
bounds['Rs'] = [0, np.inf]
bounds['Rsh'] = [0, np.inf]
bounds['n'] = [0, np.inf]

symbolCalcsNotDone = True

# voltage window applied when analyzing curves
upperVLim = float('inf')
lowerVLim = float('-inf')

analyzer = None  # ivAnalyzer instance, assigned in __init__

uid = 0 # unique identifier associated with each file

# for table
#rows = 0 #this variable keeps track of how many rows there are in the results table
cols = OrderedDict()  # column key -> col() descriptor, in display order
#nextRow = 0
def closeEvent(self, event):
    # Intentionally a no-op; pool shutdown is currently disabled.
    pass
    #self.pool.shutdown(wait=False)
def __init__(self, analyzer):
    """Build the main window: restore persisted settings, define the
    results-table columns, wire up all GUI signal handlers and
    configure the supplied ivAnalyzer."""
    QMainWindow.__init__(self)

    self.settings = QSettings(QSettings.IniFormat, QSettings.UserScope, "mutovis", "batch-iv-analysis")
    print('Using settings file: {:}'.format(self.settings.fileName()))

    self.analyzer = analyzer

    #how long status messages show for
    self.messageDuration = 2500#ms

    # Set up the user interface from Designer.
    self.ui = Ui_batch_iv_analysis()
    self.ui.setupUi(self)
    # render numeric cells with 4 significant figures
    self.ui.tableWidget.setItemDelegate(FloatDelegate(4))

    # populate column headers
    # NOTE(review): 'jph' and 'j0' are assigned twice below; the later
    # assignments overwrite the earlier entries.
    thisKey = 'plotBtn'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'Draw Plot'
    self.cols[thisKey].tooltip = 'Click this button to draw a plot for that row'

    thisKey = 'exportBtn'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'Export'
    self.cols[thisKey].tooltip = 'Click this button to export\ninterpolated data points from fits'

    thisKey = 'file'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'File'
    self.cols[thisKey].tooltip = 'File name\nHover to see header from data file'

    thisKey = 'substrate'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'Subs'
    self.cols[thisKey].tooltip = 'Substrate position'

    thisKey = 'pixel'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'Pix'
    self.cols[thisKey].tooltip = 'Pixel number'

    thisKey = 'ssPCE'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'ssPCE\n[%]'
    self.cols[thisKey].tooltip = 'Final value taken during max power point tracking stage'

    thisKey = 'ssVoc'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'ssV_oc\n[mV]'
    self.cols[thisKey].tooltip = 'Final value taken during V_oc dwell stage'

    thisKey = 'ssJsc'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'ssJ_sc\n[mA/cm^2]'
    self.cols[thisKey].tooltip = 'Final value taken during J_sc dwell stage'

    thisKey = 'ssff'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'ssFF\n[%]'
    self.cols[thisKey].tooltip = 'Fill factor as found from the "steady state" Mpp, V_oc and I_sc'

    thisKey = 'direction'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'Dir'
    self.cols[thisKey].tooltip = 'Scan direction'

    thisKey = 'pce_spline'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'PCE\n[%]'
    self.cols[thisKey].tooltip = 'Power conversion efficiency as found from spline fit'

    thisKey = 'pmax_a_spline'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'P_max\n[mW/cm^2]'
    self.cols[thisKey].tooltip = 'Maximum power density as found from spline fit'

    thisKey = 'voc_spline'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'V_oc\n[mV]'
    self.cols[thisKey].tooltip = 'Open-circuit voltage as found from spline fit I=0 crossing'

    thisKey = 'jsc_spline'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'J_sc\n[mA/cm^2]'
    self.cols[thisKey].tooltip = 'Short-circuit current density as found from spline spline fit V=0 crossing'

    thisKey = 'ff_spline'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'FF\n[%]'
    self.cols[thisKey].tooltip = 'Fill factor as found from spline fit'

    thisKey = 'area'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'Area\n[cm^2]'
    self.cols[thisKey].tooltip = 'Device area'

    thisKey = 'suns'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'Suns\n'
    self.cols[thisKey].tooltip = 'Illumination intensity'

    thisKey = 'vmax_spline'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'V_max\n[mV]'
    self.cols[thisKey].tooltip = 'Voltage at maximum power point as found from spline fit'

    thisKey = 'isc_spline'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'I_sc\n[mA]'
    self.cols[thisKey].tooltip = 'Short-circuit current as found from spline V=0 crossing'

    thisKey = 'SSE'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'SSE\n[mA^2]'
    self.cols[thisKey].tooltip = 'Sum of the square of the errors between the data points and the fit to the char. eqn. (a measure of fit goodness)'

    thisKey = 'n'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'n'
    self.cols[thisKey].tooltip = 'Diode ideality factor as found from characteristic equation fit'

    thisKey = 'rs_a'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'R_s\n[ohm*cm^2]'
    self.cols[thisKey].tooltip = 'Specific series resistance as found from characteristic equation fit'

    thisKey = 'rsh_a'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'R_sh\n[ohm*cm^2]'
    self.cols[thisKey].tooltip = 'Specific shunt resistance as found from characteristic equation fit'

    thisKey = 'jph'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'J_ph\n[mA/cm^2]'
    self.cols[thisKey].tooltip = 'Photogenerated current density as found from characteristic equation fit'

    thisKey = 'j0'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'J_0\n[nA/cm^2]'
    self.cols[thisKey].tooltip = 'Reverse saturation current density as found from characteristic equation fit'

    thisKey = 'pce_fit'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'PCE_fit\n[%]'
    self.cols[thisKey].tooltip = 'Power conversion efficiency as found from characteristic equation fit'

    # thisKey = 'pmax_fit'
    # self.cols[thisKey] = col()
    # self.cols[thisKey].header = 'P_max_fit\n[mW]'
    # self.cols[thisKey].tooltip = 'Maximum power as found from characteristic equation fit'

    thisKey = 'pmax_a_fit'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'P_max_fit\n[mW/cm^2]'
    self.cols[thisKey].tooltip = 'Maximum power density as found from characteristic equation fit'

    thisKey = 'vmax_fit'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'V_max_fit\n[mV]'
    self.cols[thisKey].tooltip = 'Voltage at maximum power point as found from characteristic equation fit'

    thisKey = 'voc_fit'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'V_oc_fit\n[mV]'
    self.cols[thisKey].tooltip = 'Open-circuit voltage as found from characteristic equation fit I=0 crossing'

    thisKey = 'ff_fit'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'FF_fit\n[%]'
    self.cols[thisKey].tooltip = 'Fill factor as found from characteristic equation fit'

    thisKey = 'jsc_fit'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'J_sc_fit\n[mA/cm^2]'
    self.cols[thisKey].tooltip = 'Short-circuit current density as found from characteristic equation fit V=0 crossing'

    thisKey = 'isc_fit'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'I_sc_fit\n[mA]'
    self.cols[thisKey].tooltip = 'Short-circuit current as found from characteristic equation fit V=0 crossing'

    thisKey = 'iph'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'I_ph\n[mA]'
    self.cols[thisKey].tooltip = 'Photogenerated current as found from characteristic equation fit'

    thisKey = 'jph'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'J_ph\n[mA/cm^2]'
    self.cols[thisKey].tooltip = 'Photogenerated current density as found from characteristic equation fit'

    thisKey = 'i0'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'I_0\n[nA]'
    self.cols[thisKey].tooltip = 'Reverse saturation current as found from characteristic equation fit'

    thisKey = 'j0'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'J_0\n[nA/cm^2]'
    self.cols[thisKey].tooltip = 'Reverse saturation current density as found from characteristic equation fit'

    thisKey = 'rs'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'R_s\n[ohm]'
    self.cols[thisKey].tooltip = 'Series resistance as found from characteristic equation fit'

    thisKey = 'rsh'
    self.cols[thisKey] = col()
    self.cols[thisKey].header = 'R_sh\n[ohm]'
    self.cols[thisKey].tooltip = 'Shunt resistance as found from characteristic equation fit'

    # load setting for lower voltage cuttoff
    if not self.settings.contains('lowerVoltageCutoff'):
        self.ui.lowerVoltageCutoffLineEdit.setText('-inf')
        self.settings.setValue('lowerVoltageCutoff','-inf')
    else:
        self.ui.lowerVoltageCutoffLineEdit.setText(self.settings.value('lowerVoltageCutoff'))
        self.lowerVLim=float(self.settings.value('lowerVoltageCutoff'))
    self.ui.lowerVoltageCutoffLineEdit.editingFinished.connect(self.handleLowerLimChange)

    # load setting for upper voltage cuttoff
    if not self.settings.contains('upperVoltageCutoff'):
        self.ui.upperVoltageCutoffLineEdit.setText('inf')
        self.settings.setValue('upperVoltageCutoff','inf')
    else:
        self.ui.upperVoltageCutoffLineEdit.setText(self.settings.value('upperVoltageCutoff'))
        self.upperVLim=float(self.settings.value('upperVoltageCutoff'))
    self.ui.upperVoltageCutoffLineEdit.editingFinished.connect(self.handleUpperLimChange)

    # load setting for fast vs accurate calculations
    # (QSettings stores booleans as the strings 'true'/'false')
    if not self.settings.contains('fastAndSloppy'):
        self.ui.doFastAndSloppyMathCheckBox.setChecked(True)
        self.settings.setValue('fastAndSloppy',True)
    else:
        self.ui.doFastAndSloppyMathCheckBox.setChecked(self.settings.value('fastAndSloppy') == 'true')
    self.ui.doFastAndSloppyMathCheckBox.stateChanged.connect(self.handleMathChange)

    # load setting for multiprocessing
    if not self.settings.contains('multiprocessing'):
        self.ui.useMultithreadingModeCheckBox.setChecked(False)
        self.settings.setValue('multiprocessing',False)
    else:
        value = (self.settings.value('multiprocessing') == 'true')
        self.ui.useMultithreadingModeCheckBox.setChecked(value)
        # thread-count spinner only makes sense in multiprocessing mode
        self.ui.analysisThreadsSpinBox.setEnabled(value)
    self.ui.useMultithreadingModeCheckBox.stateChanged.connect(self.handleMultiprocessingChange)

    # load setting for fitting eqn or not
    if not self.settings.contains('fitToEqn'):
        self.ui.attemptCharEqnFitCheckBox.setChecked(False)
        self.settings.setValue('fitToEqn',False)
    else:
        self.ui.attemptCharEqnFitCheckBox.setChecked(self.settings.value('fitToEqn') == 'true')
    self.ui.attemptCharEqnFitCheckBox.stateChanged.connect(self.handleEqnFitChange)

    # set defaults (lower/upper bound strings for each fit parameter)
    I0_lb_string = "0" if not self.settings.contains('I0_lb') else self.settings.value('I0_lb')
    Iph_lb_string = "0" if not self.settings.contains('Iph_lb') else self.settings.value('Iph_lb')
    Rs_lb_string = "0" if not self.settings.contains('Rs_lb') else self.settings.value('Rs_lb')
    Rsh_lb_string = "0" if not self.settings.contains('Rsh_lb') else self.settings.value('Rsh_lb')
    n_lb_string = "0" if not self.settings.contains('n_lb') else self.settings.value('n_lb')

    I0_ub_string = "inf" if not self.settings.contains('I0_ub') else self.settings.value('I0_ub')
    Iph_ub_string = "inf" if not self.settings.contains('Iph_ub') else self.settings.value('Iph_ub')
    Rs_ub_string = "inf" if not self.settings.contains('Rs_ub') else self.settings.value('Rs_ub')
    Rsh_ub_string = "inf" if not self.settings.contains('Rsh_ub') else self.settings.value('Rsh_ub')
    n_ub_string = "inf" if not self.settings.contains('n_ub') else self.settings.value('n_ub')

    if self.settings.contains('fitMethod'):
        self.ui.fitMethodComboBox.setCurrentIndex(int(self.settings.value('fitMethod')))
    else:
        self.settings.setValue('fitMethod',self.ui.fitMethodComboBox.currentIndex())

    if self.settings.contains('verbosity'):
        self.ui.verbositySpinBox.setValue(int(self.settings.value('verbosity')))
    else:
        self.settings.setValue('verbosity',self.ui.verbositySpinBox.value())

    if self.settings.contains('threads'):
        self.ui.analysisThreadsSpinBox.setValue(int(self.settings.value('threads')))
    else:
        self.settings.setValue('threads',self.ui.analysisThreadsSpinBox.value())
    self.ui.analysisThreadsSpinBox.valueChanged.connect(self.handleNThreadChange)

    # NOTE(review): np.float was removed in NumPy 1.24; these ten calls
    # should become plain float() when this code is next touched.
    self.bounds['I0'][0] = np.float(I0_lb_string)
    self.bounds['Iph'][0] = np.float(Iph_lb_string)
    self.bounds['Rs'][0] = np.float(Rs_lb_string)
    self.bounds['Rsh'][0] = np.float(Rsh_lb_string)
    self.bounds['n'][0] = np.float(n_lb_string)
    self.bounds['I0'][1] = np.float(I0_ub_string)
    self.bounds['Iph'][1] = np.float(Iph_ub_string)
    self.bounds['Rs'][1] = np.float(Rs_ub_string)
    self.bounds['Rsh'][1] = np.float(Rsh_ub_string)
    self.bounds['n'][1] = np.float(n_ub_string)

    self.ui.I0_lb.setText(I0_lb_string)
    self.ui.Iph_lb.setText(Iph_lb_string)
    self.ui.Rs_lb.setText(Rs_lb_string)
    self.ui.Rsh_lb.setText(Rsh_lb_string)
    self.ui.n_lb.setText(n_lb_string)
    self.ui.I0_ub.setText(I0_ub_string)
    self.ui.Iph_ub.setText(Iph_ub_string)
    self.ui.Rs_ub.setText(Rs_ub_string)
    self.ui.Rsh_ub.setText(Rsh_ub_string)
    self.ui.n_ub.setText(n_ub_string)

    # connect the bounds change handler
    self.ui.I0_lb.editingFinished.connect(self.handleConstraintsChange)
    self.ui.Iph_lb.editingFinished.connect(self.handleConstraintsChange)
    self.ui.Rs_lb.editingFinished.connect(self.handleConstraintsChange)
    self.ui.Rsh_lb.editingFinished.connect(self.handleConstraintsChange)
    self.ui.n_lb.editingFinished.connect(self.handleConstraintsChange)
    self.ui.I0_ub.editingFinished.connect(self.handleConstraintsChange)
    self.ui.Iph_ub.editingFinished.connect(self.handleConstraintsChange)
    self.ui.Rs_ub.editingFinished.connect(self.handleConstraintsChange)
    self.ui.Rsh_ub.editingFinished.connect(self.handleConstraintsChange)
    self.ui.n_ub.editingFinished.connect(self.handleConstraintsChange)

    self.ui.fitMethodComboBox.currentIndexChanged.connect(self.handleFitMethodChange)
    self.ui.resetSettingsButton.clicked.connect(self.resetDefaults)
    self.ui.verbositySpinBox.valueChanged.connect(self.handleVerbosityChange)

    #insert cols
    for item in self.cols:
        blankItem = QTableWidgetItem()
        thisCol = list(self.cols.keys()).index(item)
        self.ui.tableWidget.insertColumn(thisCol)
        blankItem.setToolTip(self.cols[item].tooltip)
        blankItem.setText(self.cols[item].header)
        self.ui.tableWidget.setHorizontalHeaderItem(thisCol,blankItem)

    #file system watcher
    self.watcher = QFileSystemWatcher(self)
    self.watcher.directoryChanged.connect(self.handleWatchUpdate)
    self.watcher.fileChanged.connect(self.handleH5FileUpdate)

    #connect signals generated by gui elements to proper functions
    self.ui.actionOpen.triggered.connect(self.openCall)
    self.ui.actionEnable_Watching.triggered.connect(self.watchCall)
    self.ui.actionSave.triggered.connect(self.handleSave)
    self.ui.actionWatch_2.triggered.connect(self.handleWatchAction)
    self.ui.statusbar.messageChanged.connect(self.statusChanged)
    self.ui.actionClear_Table.triggered.connect(self.clearTableCall)

    #override showMessage for the statusbar
    self.oldShowMessage = self.ui.statusbar.showMessage
    self.ui.statusbar.showMessage = self.myShowMessage

    self.mySignals = customSignals()
    self.mySignals.newFitResult.connect(self._processFitResult)
    #self.mySignals.populateRow.connect(self.populateRow)
    #mySignals.sloppy.connect(self.handleMathFinished)
    #mySignals.analysisResult.connect(self.processFitResult)

    #if self.analyzer.isFastAndSloppy is None:
    #  self.analyzer.__dict__['isFastAndSloppy'] = self.ui.doFastAndSloppyMathCheckBox.isChecked()
    #if self.analyzer.poolWorkers is None:
    #  self.analyzer.__dict__['poolWorkers'] = self.ui.analysisThreadsSpinBox.value()
    #if self.analyzer.multiprocess is None:
    #  self.analyzer.__dict__['multiprocess'] = self.ui.useMultithreadingModeCheckBox.isChecked()

    # push the GUI state into the analyzer, then let it initialize
    self.analyzer.__dict__['multiprocess'] = self.ui.useMultithreadingModeCheckBox.isChecked()
    self.analyzer.__dict__['poolWorkers'] = self.ui.analysisThreadsSpinBox.value()
    self.analyzer.__dict__['isFastAndSloppy'] = self.ui.doFastAndSloppyMathCheckBox.isChecked()
    self.analyzer.setup()

    #self.analyzer = ivAnalyzer(beFastAndSloppy=beFastAndSloppy, multiprocess=multiprocess, poolWorkers=poolWorkers)

    # do symbolic calcs now if needed
    #if self.ui.attemptCharEqnFitCheckBox.isChecked():
    #  if self.multiprocess:
    #    submission = self.pool.submit(ivAnalyzer)
    #    #submission = self.pool.submit(self.analyzer.doSymbolicManipulations,fastAndSloppy=self.ui.doFastAndSloppyMathCheckBox.isChecked())
    #    submission.add_done_callback(self.handleMathFinished)
    #    #self.analyzer.doSymbolicManipulations(fastAndSloppy=self.ui.doFastAndSloppyMathCheckBox.isChecked())
    #    #doSymbolicManipulations(fastAndSloppy=self.ui.doFastAndSloppyMathCheckBox.isChecked())
    #  else:
    #    self.handleMathFinished(ivAnalyzer())

    #def handleMathFinished(self,submission):
    #def handleMathFinished(self,thing):
    #self.symbolCalcsNotDone = False
    #if self.multiprocess:
    #  self.analyzer = thing.result()
    #else:
    #  self.analyzer = thing
    #print("One-time symbolic manipulations done!")
    #self.analyzer.numericalize(beFastAndSloppy=self.ui.doFastAndSloppyMathCheckBox.isChecked())
    #print("Fast and sloppy mode =", self.analyzer.isFastAndSloppy)
    #print(self.analyzer)
    #self.analyzer.I_eqn = submission.result()['I_eqn']
    #self.analyzer.P_prime = submission.result()['P_prime']
    #self.analyzer.slns = submission.result()['slns']
    #self.analyzer.electricalModelVarsOnly = submission.result()['electricalModelVarsOnly']
    #print(self.analyzer.I_eqn)
def distillAnalysisParams(self):
analysisParams = {}
analysisParams['lowerVLim'] = self.lowerVLim
analysisParams['upperVLim'] = self.upperVLim
analysisParams['doFit'] = self.ui.attemptCharEqnFitCheckBox.isChecked()
analysisParams['bounds'] = self.bounds
analysisParams['uid'] = self.uid # unique identifier
self.uid = self.uid + 1
if self.ui.fitMethodComboBox.currentIndex() == 0:
analysisParams['method'] = 'trf'
elif self.ui.fitMethodComboBox.currentIndex() == 1:
analysisParams['method'] = 'dogbox'
elif self.ui.fitMethodComboBox.currentIndex() == 2:
analysisParams['method'] = 'lm'
analysisParams['verbose'] = self.ui.verbositySpinBox.value()
return analysisParams
def updatePoolStatus(self):
self.myShowMessage(self.analyzer.getPoolStatusString())
    def resetDefaults(self):
        """Restore every analysis control in the GUI to its default value."""
        self.ui.attemptCharEqnFitCheckBox.setChecked(True)
        self.ui.doFastAndSloppyMathCheckBox.setChecked(True)
        # set the text, then emit editingFinished so the change handlers
        # actually pick up the new cutoff values
        self.ui.lowerVoltageCutoffLineEdit.setText('-inf')
        self.ui.lowerVoltageCutoffLineEdit.editingFinished.emit()
        self.ui.upperVoltageCutoffLineEdit.setText('inf')
        self.ui.upperVoltageCutoffLineEdit.editingFinished.emit()
        self.ui.fitMethodComboBox.setCurrentIndex(2)  # index 2 == 'lm' in distillAnalysisParams
        self.ui.verbositySpinBox.setValue(0)
        self.ui.analysisThreadsSpinBox.setValue(8)
        self.ui.analysisThreadsSpinBox.setEnabled(True)
        self.ui.useMultithreadingModeCheckBox.setChecked(True)
# let's make sure to print messages for the statusbar also in the console
def myShowMessage(*args, **kwargs):
print('Menubar Message:',args[1])
return args[0].oldShowMessage(*args[1:], **kwargs)
    def exportInterp(self,row):
        """Export one row's measured and interpolated curves to exports/<name>.csv."""
        # these will be the col names for the output file
        colnames = []
        cols = () # data columns for output file
        # the graph payload for this row is cached on the plot-button cell
        thisGraphData = self.ui.tableWidget.item(row,list(self.cols.keys()).index('plotBtn')).data(Qt.UserRole)
        colname = 'Interpolated Voltage [V]'
        fitX = thisGraphData["fitX"]
        cols = cols + ([(colname,x) for x in fitX], )
        colnames.append(colname)
        colname = 'Spline Fit Current Density [mA/cm^2]'
        splineY = thisGraphData["splineY"]
        cols = cols + ([(colname,x) for x in splineY], )
        colnames.append(colname)
        colname = 'Char. Eqn. Fit Current Density [mA/cm^2]'
        modelY = thisGraphData["modelY"]
        if not np.isnan(modelY[0]): # only include this col if the fit has been done
            cols = cols + ([(colname,x) for x in modelY], )
            colnames.append(colname)
        colname = 'Device Voltage[V]'
        v = thisGraphData["v"]
        cols = cols + ([(colname,x) for x in v], )
        colnames.append(colname)
        colname = 'Measured CurrentDensity[mA/cm^2]'
        j = thisGraphData["j"]
        cols = cols + ([(colname,x) for x in j], )
        colnames.append(colname)
        # make sure the exports subdirectory exists
        destinationFolder = os.path.join(self.workingDirectory,'exports')
        QDestinationFolder = QDir(destinationFolder)
        if not QDestinationFolder.exists():
            QDir().mkdir(destinationFolder)
        # data origin
        file = str(self.ui.tableWidget.item(row,list(self.cols.keys()).index('file')).text())
        subs = str(self.ui.tableWidget.item(row,list(self.cols.keys()).index('substrate')).text())
        pix = str(self.ui.tableWidget.item(row,list(self.cols.keys()).index('pixel')).text())
        # '?' marks an unknown substrate/pixel; leave it out of the file name
        if subs == '?':
            subs = ''
        else:
            subs = '_' + subs
        if pix == '?':
            pix = ''
        else:
            pix = '_' + pix
        saveFile = os.path.join(destinationFolder,file+subs+pix+'.csv')
        # get the column data ready to be written
        # columns can have different lengths; zip_longest pads short ones with
        # None and filter(None, ...) drops that padding per output row
        data = [dict(filter(None, a)) for a in zip_longest(*cols)]
        try:
            with open(saveFile, 'w') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=colnames)
                writer.writeheader()
                writer.writerows(data)
            self.goodMessage()
            self.ui.statusbar.showMessage("Exported " + saveFile,5000)
        except:
            self.badMessage()
            self.ui.statusbar.showMessage("Could not export " + saveFile,self.messageDuration)
def handleUpperLimChange(self):
lineEdit = self.sender()
try:
self.upperVLim = float(lineEdit.text())
self.settings.setValue('upperVoltageCutoff',lineEdit.text())
except:
pass
def handleFitMethodChange(self):
comboBox = self.sender()
self.settings.setValue('fitMethod',comboBox.currentIndex())
def handleVerbosityChange(self):
spinBox = self.sender()
self.settings.setValue('verbosity',spinBox.value())
def handleNThreadChange(self):
spinBox = self.sender()
value = spinBox.value()
self.settings.setValue('threads',value)
self.analyzer.poolWorkers = value
def handleConstraintsChange(self):
lineEdit = self.sender()
name = lineEdit.objectName()
nameSplit = name.split('_')
try:
text = lineEdit.text()
value = float(text)
if nameSplit[1] == 'lb':
self.bounds[nameSplit[0]][0] = value
else: # upper bound
self.bounds[nameSplit[0]][1] = value
self.settings.setValue(name,text)
except:
pass
def handleLowerLimChange(self):
lineEdit = self.sender()
try:
self.lowerVLim = float(lineEdit.text())
self.settings.setValue('lowerVoltageCutoff',lineEdit.text())
except:
pass
def handleMathChange(self):
checkBox = self.sender()
self.settings.setValue('fastAndSloppy',checkBox.isChecked())
self.analyzer.isFastAndSloppy = checkBox.isChecked()
#self.analyzer.numericalize(beFastAndSloppy=checkBox.isChecked())
#print("Fast and sloppy mode =", self.analyzer.isFastAndSloppy)
def handleEqnFitChange(self):
checkBox = self.sender()
self.settings.setValue('fitToEqn',checkBox.isChecked())
def handleMultiprocessingChange(self):
checkBox = self.sender()
value = checkBox.isChecked()
self.settings.setValue('multiprocessing',value)
self.ui.analysisThreadsSpinBox.setEnabled(value)
self.analyzer.multiprocess = value
def handleButton(self):
btn = self.sender()
#kinda hacky:
row = self.ui.tableWidget.indexAt(btn.pos()).row()
col = self.ui.tableWidget.indexAt(btn.pos()).column()
if col == 0:
self.rowGraph(row)
elif col == 1:
self.exportInterp(row)
elif col == self.getCol('ssVoc'):
self.ssVocGraph(row)
elif col == self.getCol('ssJsc'):
self.ssJscGraph(row)
elif col == self.getCol('ssPCE'):
self.mpptGraph(row)
    def ssVocGraph(self, row):
        """Plot the steady-state open-circuit voltage vs. time for one row."""
        thisGraphData = self.ui.tableWidget.item(row, self.getCol('plotBtn')).data(Qt.UserRole)
        filename = str(self.ui.tableWidget.item(row, self.getCol('file')).text())
        substrate = str(self.ui.tableWidget.item(row, self.getCol('substrate')).text())
        pixel = str(self.ui.tableWidget.item(row, self.getCol('pixel')).text())
        # measurement tuples are unpacked as (v, i, t, s); i and s are unused here
        measurements = thisGraphData['ssVoc']
        v = np.array([e[0] for e in measurements])
        i = np.array([e[1] for e in measurements])
        t = np.array([e[2] for e in measurements])
        s = np.array([int(e[3]) for e in measurements])
        x = t - t[0]  # time relative to the first sample
        y = abs(v * 1000)  # V -> mV
        plt.plot(x, y, c='b', marker='o', ls="None",label='Voc')
        plt.title("{:}, Pixel {:}{:}".format(filename, substrate, pixel))
        plt.ylabel('Open-circuit voltage [mV]')
        plt.xlabel('Time [s]')
        plt.grid()
        plt.show()
    def ssJscGraph(self, row):
        """Plot the steady-state short-circuit current density vs. time for one row."""
        thisGraphData = self.ui.tableWidget.item(row, self.getCol('plotBtn')).data(Qt.UserRole)
        filename = str(self.ui.tableWidget.item(row, self.getCol('file')).text())
        substrate = str(self.ui.tableWidget.item(row, self.getCol('substrate')).text())
        pixel = str(self.ui.tableWidget.item(row, self.getCol('pixel')).text())
        area = self.ui.tableWidget.item(row, self.getCol('area')).data(Qt.UserRole)
        areacm = area * 1e4  # m^2 -> cm^2 (see sanitizeRow for the same conversion)
        # measurement tuples are unpacked as (v, i, t, s); v and s are unused here
        measurements = thisGraphData['ssIsc']
        v = np.array([e[0] for e in measurements])
        i = np.array([e[1] for e in measurements])
        t = np.array([e[2] for e in measurements])
        s = np.array([int(e[3]) for e in measurements])
        x = t - t[0]  # time relative to the first sample
        y = abs(i * 1000) / areacm  # A -> mA/cm^2
        plt.plot(x, y, c='b', marker='o', ls="None",label='Jsc')
        plt.title("{:}, Pixel {:}{:}".format(filename, substrate, pixel))
        plt.ylabel('Short-circuit current density [mA/cm^2]')
        plt.xlabel('Time [s]')
        plt.grid()
        plt.show()
    def mpptGraph(self, row):
        """Plot max-power-point tracking data: power density and voltage vs. time."""
        thisGraphData = self.ui.tableWidget.item(row, self.getCol('plotBtn')).data(Qt.UserRole)
        filename = str(self.ui.tableWidget.item(row, self.getCol('file')).text())
        substrate = str(self.ui.tableWidget.item(row, self.getCol('substrate')).text())
        pixel = str(self.ui.tableWidget.item(row, self.getCol('pixel')).text())
        area = self.ui.tableWidget.item(row, self.getCol('area')).data(Qt.UserRole)
        areacm = area * 1e4  # m^2 -> cm^2
        # measurement tuples are unpacked as (v, i, t, s); s is unused here
        measurements = thisGraphData['mppt']
        v = np.array([e[0] for e in measurements])
        i = np.array([e[1] for e in measurements])
        t = np.array([e[2] for e in measurements])
        s = np.array([int(e[3]) for e in measurements])
        x = t - t[0]  # time relative to the first sample
        y = abs(i*v * 1000) / areacm  # W -> mW/cm^2
        # two y-axes: power density (left, blue) and voltage (right, red)
        fig, ax1 = plt.subplots()
        ax1.plot(x, y, c='b', marker='o', ls="None",label='power_density')
        ax1.set_ylabel('Power Density [mW/cm^2]', color='b')
        ax1.tick_params('y', colors='b')
        ax1.grid(b=True, axis='y',color='b', alpha=0.3)
        ax1.grid(b=True, axis='x')
        ax2 = ax1.twinx()
        ax2.plot(x, v, c='r', marker='.', ls="None",label='voltage')
        ax2.set_ylabel('Voltage [V]', color='r')
        ax2.tick_params('y', colors='r')
        ax2.grid(b=True, axis='y',color='r', alpha=0.3)
        plt.title("{:}, Pixel {:}{:}".format(filename, substrate, pixel))
        plt.xlabel('Time [s]')
        # plt.grid()
        plt.show()
    def rowGraph(self,row):
        """Plot one row's J-V curve, spline fit, characteristic-equation fit,
        and (when available) annotated steady-state Voc/Jsc/MPP markers."""
        thisGraphData = self.ui.tableWidget.item(row, self.getCol('plotBtn')).data(Qt.UserRole)
        filename = str(self.ui.tableWidget.item(row, self.getCol('file')).text())
        substrate = str(self.ui.tableWidget.item(row, self.getCol('substrate')).text())
        pixel = str(self.ui.tableWidget.item(row, self.getCol('pixel')).text())
        direction = str(self.ui.tableWidget.item(row, self.getCol('direction')).text())
        # work out which steady-state markers were recorded for this row
        if 'ssJscValue' in thisGraphData:
            ss_current = True
        else:
            ss_current = False
        if 'ssJmpp' in thisGraphData and 'ssVmpp' in thisGraphData:
            ss_power = True
        else:
            ss_power = False
        if 'ssVocValue' in thisGraphData:
            ss_voltage = True
        else:
            ss_voltage = False
        v = thisGraphData["v"]
        i = thisGraphData["j"]
        # color-code the raw data by sweep direction
        if direction == 'Fwd.':
            plt.plot(v, i, c='b', marker='o', ls="None",label='J-V Data (Fwd.)')
        else:
            plt.plot(v, i, c='r', marker='o', ls="None",label='J-V Data (Rev.)')
        if ss_power:
            plt.scatter(abs(thisGraphData["ssVmpp"]), 1000*abs(thisGraphData["ssJmpp"]), c='g',marker='x',s=100)
        if ss_voltage:
            plt.scatter(abs(thisGraphData["ssVocValue"]), 0, c='g',marker='x',s=100)
        if ss_current:
            plt.scatter(0, 1000*abs(thisGraphData["ssJscValue"]), c='g',marker='x',s=100)
        fitX = thisGraphData["fitX"]
        modelY = thisGraphData["modelY"]
        # NOTE(review): the assignment above is redundant; modelY is
        # immediately replaced by the complex-cast copy on the next line
        modelY = np.array(thisGraphData["modelY"]).astype(complex)
        splineY = thisGraphData["splineY"]
        if not np.isnan(modelY[0]):  # only draw the char. eqn. fit if it was computed
            plt.plot(fitX, modelY,c='k', label='CharEqn Best Fit')
        plt.plot(fitX, splineY,c='g', label='Spline Fit')
        plt.autoscale(axis='x', tight=True)
        plt.grid()
        # annotate the steady-state markers with their numeric values
        if ss_voltage:
            plt.annotate(
                abs(thisGraphData["ssVocValue"]).__format__('0.4f')+ ' V',
                xy = (abs(thisGraphData["ssVocValue"]), 0), xytext = (40, 20),
                textcoords = 'offset points', ha = 'right', va = 'bottom',
                bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
                arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
        if ss_current:
            plt.annotate(
                abs(float(1000*thisGraphData["ssJscValue"])).__format__('0.4f') + ' mA/cm^2',
                xy = (0, abs(1000*thisGraphData["ssJscValue"])), xytext = (40, 20),
                textcoords = 'offset points', ha = 'right', va = 'bottom',
                bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
                arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
        if ss_power:
            plt.annotate(
                abs(float(1000*thisGraphData["ssJmpp"]*thisGraphData["ssVmpp"])).__format__('0.4f') + 'mW/cm^2 @(' + abs(float(thisGraphData["ssVmpp"])).__format__('0.4f') + ',' + abs(float(thisGraphData["ssJmpp"])).__format__('0.4f') + ')',
                xy = (abs(thisGraphData["ssVmpp"]), abs(1000*thisGraphData["ssJmpp"])), xytext = (80, 40),
                textcoords = 'offset points', ha = 'right', va = 'bottom',
                bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
                arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
        plt.ylabel('Current Density [mA/cm^2]')
        plt.xlabel('Voltage [V]')
        plt.title("{:}, Pixel {:}{:}".format(filename, substrate, pixel))
        ax = plt.gca()
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles, labels, loc=3)
        # ax.grid()
        plt.draw()
        plt.show()
# this is how we save the table data to a .csv or .mat file
def handleSave(self):
if self.settings.contains('lastFolder'):
saveDir = self.settings.value('lastFolder')
else:
saveDir = '.'
path = QFileDialog.getSaveFileName(self, caption='Set Export File',filter="Comma separated values (*.csv);;MATLAB formatted data (*.mat)", directory=saveDir)
if str(path[0]) == '':
return
elif '.csv' in str(path[1]): # let's write a .csv
fullPath = str(path[0])
if not fullPath.endswith('.csv'):
fullPath = fullPath + '.csv'
with open(fullPath, 'w',newline='') as stream:
writer = csv.writer(stream, dialect="excel")
rowdata = []
for column in range(self.ui.tableWidget.columnCount()):
item = self.ui.tableWidget.horizontalHeaderItem(column)
if item is not None:
rowdata.append(str(item.text()).replace('\n',' '))
else:
rowdata.append(b'')
writer.writerow(rowdata[2:])
for row in range(self.ui.tableWidget.rowCount()):
rowdata = []
for column in range(self.ui.tableWidget.columnCount()):
item = self.ui.tableWidget.item(row, column)
if item is not None:
rowdata.append(str(item.text()))
else:
rowdata.append('')
writer.writerow(rowdata[2:])
stream.close()
print('Table data successfully written to', fullPath)
elif '.mat' in str(path[1]):# let's write a .mat file
fullPath = str(path[0])
if not fullPath.endswith('.mat'):
fullPath = fullPath + '.mat'
#let's make a dict out of the table:
tableDict = {}
fieldsToInclude= ('pce_spline','pmax_a_spline','voc_spline','isc_spline','ff_spline','vmax_spline','SSE','pce_fit','pmax_a_fit','voc_fit','isc_fit','ff_fit','vmax_fit','rs','rsh','iph','i0','n','area','suns')
#how many padding zeros should we use for the MATLAB variable names?
ndigits = str(len(str(self.ui.tableWidget.rowCount())))
for row in range(self.ui.tableWidget.rowCount()):
rowDict = {}
rowDict['file'] = self.ui.tableWidget.item(row, list(self.cols.keys()).index('file')).data(Qt.DisplayRole)
for field in fieldsToInclude:
rowDict[field] = self.ui.tableWidget.item(row, list(self.cols.keys()).index(field)).data(Qt.UserRole)
rowDict['i'] = self.ui.tableWidget.item(row, list(self.cols.keys()).index('plotBtn')).data(Qt.UserRole)['i']/rowDict['area']
rowDict['v'] = self.ui.tableWidget.item(row, list(self.cols.keys()).index('plotBtn')).data(Qt.UserRole)['v']
tableDict['thing'+format(row, '0'+ndigits)] = rowDict
# save our dict as a .mat file
sio.savemat(fullPath, tableDict)
print('Table data successfully written to', fullPath)
# takes cell data and modifies it for display
def sanitizeRow(self,row):
ignoreCols = ['plotBtn','exportBtn','file']
cols = list(self.cols.keys())
for coli in range(len(cols)):
thisCol = cols[coli]
if thisCol not in ignoreCols:
thisTableItem = self.ui.tableWidget.item(row,coli)
if thisTableItem is not None:
value = thisTableItem.data(Qt.UserRole)
if value is not None and not np.isnan(value):
saneValue = float(np.real(value))
if thisCol == 'SSE':
displayValue = saneValue*1000**2 # A^2 to mA^2
elif thisCol in ['ff_spline','ff_fit','pce_spline','ssPCE','ssff','pce_fit']:
displayValue = saneValue*100 # to percent
elif thisCol in ['ssVoc','voc_spline','voc_fit','vmax_spline','vmax_fit','isc_spline','isc','iph']:
displayValue = saneValue*1e3 # to milli-
elif thisCol in ['jsc_spline','jsc','ssJsc','jph','pmax_a_spline','pmax_a_fit','jsc_fit']:
displayValue = saneValue*1e3*1e-4 # to milli- per cm^2
elif thisCol in ['area','rs_a','rsh_a']:
displayValue = saneValue*1e4 #m^2 to cm^2
elif thisCol in ['i0']:
displayValue = saneValue*1e9 # to nano-
elif thisCol in ['j0']:
displayValue = saneValue*1e9 # to nano-
displayValue = saneValue*1e-4 #1/m^2 to 1/cm^2
else:
displayValue = saneValue
displayValue = MainWindow.to_precision(displayValue,4)
self.ui.tableWidget.item(row,coli).setData(Qt.DisplayRole,float(displayValue))
self.ui.tableWidget.resizeColumnToContents(coli)
self.ui.tableWidget.viewport().update()
# returns table column number given name
def getCol(self,colName):
return list(self.cols.keys()).index(colName)
# returns row number associated with a unique identifier
def getRowByUID(self,uid):
nRows = self.ui.tableWidget.rowCount()
fileCol = self.getCol('file')
row = None
for i in range(nRows):
thisCellItem = self.ui.tableWidget.item(i,fileCol)
if thisCellItem.data(Qt.UserRole) == uid:
row = i
break
return row
def clearTableCall(self):
for ii in range(self.ui.tableWidget.rowCount()):
self.ui.tableWidget.removeRow(0)
#self.ui.tableWidget.clear()
#self.ui.tableWidget.clearContents()
self.fileNames = []
def newFiles(self, fullPaths):
self.analyzer.processFiles(fullPaths, self.processFitResult, self.primeRow)
    def primeRow(self, fullPath, fileData):
        """Primes a new row in the table.

        Waits for the file to be fully written, inserts a fresh table row,
        fills in the identifying columns and any steady-state values already
        present in ``fileData``, and returns the analysis-parameter dict
        (including the uid used to find this row again later).
        """
        #analysisParams = []
        #for i in range(len(fullPaths)):
        # grab settings from gui
        #analysisParams.append(self.distillAnalysisParams())
        params = self.distillAnalysisParams()
        #wait here for the file to be completely written to disk and closed before trying to read it
        fi = QFileInfo(fullPath)
        while (not fi.isWritable()):
            time.sleep(0.01)
            fi.refresh()
        # insert filename into table immediately
        thisRow = self.ui.tableWidget.rowCount()
        self.ui.tableWidget.setSortingEnabled(False) # fix strange sort behavior
        self.ui.tableWidget.insertRow(thisRow)
        # pre-create an item for every cell so tableInsert can set data on it
        for ii in range(self.ui.tableWidget.columnCount()):
            self.ui.tableWidget.setItem(thisRow,ii,QTableWidgetItem())
        fileName = os.path.basename(fullPath)
        self.tableInsert(thisRow,'file', fileName, role=Qt.DisplayRole)
        # the uid is stashed under UserRole on the same 'file' cell (see getRowByUID)
        self.tableInsert(thisRow,'file', params['uid'])
        self.tableInsert(thisRow,'substrate', fileData.substrate, role=Qt.DisplayRole)
        self.tableInsert(thisRow,'pixel', fileData.pixel, role=Qt.DisplayRole)
        self.tableInsert(thisRow,'direction', 'Rev.' if fileData.reverseSweep else 'Fwd.', role=Qt.DisplayRole)
        areacm = fileData.area * 1e4  # m^2 -> cm^2
        # collect any steady-state measurements for later plotting
        graphData = {}
        if hasattr(fileData, 'mppt'):
            graphData['mppt'] = fileData.mppt
        if hasattr(fileData, 'ssVoc'):
            graphData['ssVoc'] = fileData.ssVoc
        if hasattr(fileData, 'ssIsc'):
            graphData['ssIsc'] = fileData.ssIsc
        if hasattr(fileData, 'Impp'):
            # graphData['ssImpp'] = fileData.Impp
            graphData['ssJmpp'] = fileData.Impp / areacm
        if hasattr(fileData, 'Vmpp'):
            graphData['ssVmpp'] = fileData.Vmpp
        if hasattr(fileData, 'Voc'):
            self.tableInsert(thisRow,'ssVoc', fileData.Voc)
            graphData['ssVocValue'] = fileData.Voc
        if hasattr(fileData, 'ssPmax'):
            self.tableInsert(thisRow,'ssPCE', fileData.ssPmax / fileData.area / self.analyzer.stdIrridance / fileData.suns)
            # graphData['ssPmax'] = fileData.ssPmax / fileData.area
        if hasattr(fileData, 'Isc'):
            self.tableInsert(thisRow,'ssJsc', fileData.Isc / fileData.area)
            # graphData['ssIscValue'] = fileData.Isc
            graphData['ssJscValue'] = fileData.Isc / areacm
        self.tableInsert(thisRow,'plotBtn', graphData)
        self.tableInsert(thisRow,'suns', fileData.suns)
        self.tableInsert(thisRow,'area', fileData.area) # in m^2
        if hasattr(fileData, 'Isc') and hasattr(fileData, 'Voc') and hasattr(fileData, 'ssPmax'):
            self.tableInsert(thisRow,'ssff', abs(fileData.ssPmax/(fileData.Isc*fileData.Voc)))
        self.ui.tableWidget.setSortingEnabled(True) # fix strange sort behavior
        self.fileNames.append(fileName)
        return params
def tableInsert(self,thisRow,colName,value,role=Qt.UserRole):
thisCol = self.getCol(colName)
thisItem = self.ui.tableWidget.item(thisRow,thisCol)
thisItem.setData(role,value)
self.ui.tableWidget.resizeColumnToContents(thisCol)
    def processFitResult(self,result):
        """Accept a finished fit result and forward it to the GUI thread.

        ``result`` may be either a plain result object (single-process mode)
        or a concurrent Future (multiprocessing mode); the Future case is
        detected by duck typing and unwrapped first. The actual table update
        happens in _processFitResult via the newFitResult signal so it runs
        on the GUI thread.
        """
        try:# this handles the multiprocessing case
            # only a Future has .done()/.exception()/.result(); a plain result
            # raises here and falls through to the emit below
            if result.done():
                exception = result.exception(timeout=0)
                if exception is None:
                    result = result.result()
                else:
                    print('Error during file processing:', result.exception(timeout=0))
                    return
            else:
                print("Somehow the future isn't 'done'")
                return
        except:
            pass  # not a Future: use the result object as-is
        self.mySignals.newFitResult.emit(result)
        #self._processFitResult(result)
    def _processFitResult(self,result):
        """Unpack an analyzer result into a fitData record and fill its table row.

        Runs on the GUI thread (connected to the newFitResult signal). Fields
        that only exist when the characteristic-equation fit succeeded are
        defaulted to NaN so downstream display code can test for them.
        """
        if self.ui.useMultithreadingModeCheckBox.isChecked():
            self.updatePoolStatus()
        # locate the table row this result belongs to via its uid
        uid = result.params['uid']
        thisRow = self.getRowByUID(uid)
        #print('Got new fit result, UID:',result['params']['uid'])
        #print(result['fitResult'])
        #
        #thisItem = QTableWidgetItem()
        #thisItem.setData
        fitData = Object()
        # spline-derived quantities (always present)
        fitData.pmax_spline = result.pmpp
        fitData.vmax_spline = result.vmpp
        fitData.isc_spline = result.isc
        fitData.voc_spline = result.voc
        fitData.row = thisRow
        fitData.v = result.v
        fitData.i = result.i
        fitData.x = result.x
        fitData.splineCurrent = result.splineCurrent
        # characteristic-equation fit quantities (NaN when the fit was skipped/failed)
        fitData.SSE = result.sse if hasattr(result,'sse') else np.nan
        fitData.eqnCurrent = result.eqnCurrent if hasattr(result,'eqnCurrent') else np.array([np.nan])
        fitData.n = result.n if hasattr(result,'n') else np.nan
        fitData.rs = result.rs if hasattr(result,'rs') else np.nan
        fitData.rsh = result.rsh if hasattr(result,'rsh') else np.nan
        fitData.i0 = result.i0 if hasattr(result,'i0') else np.nan
        fitData.iph = result.iph if hasattr(result,'iph') else np.nan
        fitData.pmax_fit = result.pmax_fit if hasattr(result,'pmax_fit') else np.nan
        fitData.isc_fit = result.isc_fit if hasattr(result,'isc_fit') else np.nan
        fitData.voc_fit = result.voc_fit if hasattr(result,'voc_fit') else np.nan
        fitData.vmax_fit = result.vmax_fit if hasattr(result,'vmax_fit') else np.nan
        #print('got new fit result:',uid)
        self.populateRow(fitData)
        #self.mySignals.populateRow.emit(rowData)
        #self.tableInsert(thisRow, 'pce_spline', getattr(result, 'pce'))
        #item = self.ui.tableWidget.item(thisRow,self.getCol(thisThing))
        #thisItem = QTableWidgetItem()
        #value = result['insert'][thisThing]
        #role = Qt.UserRole
        #item.setData(role,value)
        #insert = lambda colName,value: self.ui.tableWidget.item(thisRow,self.getCol(colName)).setData(Qt.UserRole,value)
        #thisThing = 'pce_spline'
        #insert(thisThing,result['insert'][thisThing])
    def populateRow(self,fitData):
        """Fill a table row with fit results and wire up its buttons.

        ``fitData`` carries raw (SI-unit) results from the analyzer; derived
        quantities (PCE, fill factor, area-normalized values) are computed
        here, inserted into the table and sanitized for display.
        """
        self.ui.tableWidget.setSortingEnabled(False) # fix strange sort behavior
        # add in the export button
        exportBtn = QPushButton(self.ui.tableWidget)
        exportBtn.setText('Export')
        exportBtn.clicked.connect(self.handleButton)
        exportCol = self.getCol('exportBtn')
        self.ui.tableWidget.setCellWidget(fitData.row, exportCol, exportBtn)
        # add in the plot button
        plotBtn = QPushButton(self.ui.tableWidget)
        plotBtn.setText('Plot')
        plotBtn.clicked.connect(self.handleButton)
        plotCol = self.getCol('plotBtn')
        self.ui.tableWidget.setCellWidget(fitData.row, plotCol, plotBtn)
        # reuse any graph data primeRow already stashed on the plot cell
        if self.ui.tableWidget.item(fitData.row, plotCol).data(Qt.UserRole) == None:
            graphData = {}
        else:
            graphData = self.ui.tableWidget.item(fitData.row, plotCol).data(Qt.UserRole)
        # copy fit data over to row data
        rowData = fitData
        # retrieve area and intensity from the table
        area = self.ui.tableWidget.item(fitData.row, self.getCol('area')).data(Qt.UserRole) # in m^2
        suns = self.ui.tableWidget.item(fitData.row, self.getCol('suns')).data(Qt.UserRole)
        areacm = area * 1e4 # area in cm^2
        if area == 0 or suns == 0:
            # can't normalize by a zero area or intensity
            print ("div by zer")
            return
        # derived row data values:
        rowData.pce_spline = rowData.pmax_spline / area / self.analyzer.stdIrridance / suns
        rowData.pmax_a_spline = rowData.pmax_spline / area
        rowData.ff_spline = rowData.pmax_spline / (rowData.isc_spline*rowData.voc_spline)
        rowData.jsc_spline = rowData.isc_spline / area
        rowData.rs_a = rowData.rs*area
        rowData.rsh_a = rowData.rsh*area
        rowData.jph = rowData.iph/area
        rowData.j0 = rowData.i0/area
        rowData.pce_fit = rowData.pmax_fit / area / self.analyzer.stdIrridance / suns
        rowData.ff_fit = rowData.pmax_fit/(rowData.isc_fit*rowData.voc_fit)
        rowData.jsc_fit = rowData.isc_fit/area
        rowData.pmax_a_fit = rowData.pmax_fit / area
        # stash the curves for the plot/export buttons
        graphData["v"] = rowData.v
        graphData["i"] = rowData.i * 1000 # in mA
        graphData["j"] = rowData.i/areacm * 1000 # in mA/cm^2
        graphData["vsTime"] = False
        # graphData["Vmax"] = rowData.vmax_spline
        # graphData["Imax"] = rowData.pmax_spline/rowData.vmax_spline * 1000 # in mA
        # graphData["Jmax"] = rowData.pmax_spline/rowData.vmax_spline/areacm * 1000 # in mA/cm^2
        # graphData["Voc"] = rowData.voc_spline
        # graphData["Isc"] = rowData.isc_spline * 1000 # in mA
        # graphData["Jsc"] = rowData.isc_spline/areacm * 1000 # in mA/cm^2
        graphData["fitX"] = rowData.x
        graphData["modelY"] = rowData.eqnCurrent/areacm * 1000 # in mA/cm^2
        graphData["splineY"] = rowData.splineCurrent/areacm * 1000 # in mA/cm^2
        self.ui.tableWidget.item(rowData.row, plotCol).setData(Qt.UserRole, graphData)
        # insert every remaining fitData attribute into its same-named column
        for key,value in rowData.__dict__.items():
            colName = key
            if key not in ['row','i','v','vsTime','x','splineCurrent','eqnCurrent', 'area', 'suns', 'pmax_spline', 'pmax_fit']:
                self.tableInsert(rowData.row, key, value)
        # add in the Voc button
        thisGraphData = self.ui.tableWidget.item(rowData.row, self.getCol('plotBtn')).data(Qt.UserRole)
        if 'ssVoc' in thisGraphData:
            vocBtn = QPushButton(self.ui.tableWidget)
            vocCol = self.getCol('ssVoc')
            voc = abs(self.ui.tableWidget.item(fitData.row, vocCol).data(Qt.UserRole))
            vocBtn.setText("{:}".format(MainWindow.to_precision(voc*1000,4)))
            vocBtn.clicked.connect(self.handleButton)
            self.ui.tableWidget.setCellWidget(rowData.row, vocCol, vocBtn)
        # add in the Jsc button
        if 'ssIsc' in thisGraphData:
            jscBtn = QPushButton(self.ui.tableWidget)
            jscCol = self.getCol('ssJsc')
            jsc = abs(self.ui.tableWidget.item(fitData.row, jscCol).data(Qt.UserRole))
            jscBtn.setText("{:}".format(MainWindow.to_precision(jsc*1000*1e-4,4)))
            jscBtn.clicked.connect(self.handleButton)
            self.ui.tableWidget.setCellWidget(rowData.row, jscCol, jscBtn)
        # add in the PCE button
        if 'mppt' in thisGraphData:
            pceBtn = QPushButton(self.ui.tableWidget)
            pceCol = self.getCol('ssPCE')
            pce = self.ui.tableWidget.item(fitData.row, pceCol).data(Qt.UserRole)
            pceBtn.setText("{:}".format(MainWindow.to_precision(pce*100,4)))
            pceBtn.clicked.connect(self.handleButton)
            self.ui.tableWidget.setCellWidget(rowData.row, pceCol, pceBtn)
        self.sanitizeRow(rowData.row)
        self.ui.tableWidget.setSortingEnabled(True)
    def openCall(self):
        """Let the user pick data files to open; process them and, if watching
        is enabled, re-point the directory watcher at their folder."""
        #remember the last path the user opened
        if self.settings.contains('lastFolder'):
            openDir = self.settings.value('lastFolder')
        else:
            openDir = '.'
        fileNames = QFileDialog.getOpenFileNames(self, directory = openDir, caption="Select one or more files to open", filter = '(*.csv *.tsv *.txt *.liv1 *.liv2 *.div1 *.div2 *.h5);;Folders (*)')
        if len(fileNames[0])>0:#check if user clicked cancel
            self.workingDirectory = os.path.dirname(str(fileNames[0][0]))
            self.settings.setValue('lastFolder',self.workingDirectory)
            fullPaths = fileNames[0]
            self.newFiles(fullPaths)
            if self.ui.actionEnable_Watching.isChecked():
                # drop any previously watched folders, then watch this one
                watchedDirs = self.watcher.directories()
                self.watcher.removePaths(watchedDirs)
                self.watcher.addPath(self.workingDirectory)
                self.handleWatchUpdate(self.workingDirectory)
    #user chose file --> watch
    def handleWatchAction(self):
        """Let the user pick a folder to watch for new data files."""
        #remember the last path th user opened
        if self.settings.contains('lastFolder'):
            openDir = self.settings.value('lastFolder')
        else:
            openDir = '.'
        myDir = QFileDialog.getExistingDirectory(self,directory = openDir, caption="Select folder to watch")
        if len(myDir)>0:#check if user clicked cancel
            self.workingDirectory = str(myDir)
            self.settings.setValue('lastFolder',self.workingDirectory)
            self.ui.actionEnable_Watching.setChecked(True)
            # drop any previously watched folders, then watch this one
            watchedDirs = self.watcher.directories()
            self.watcher.removePaths(watchedDirs)
            self.watcher.addPath(self.workingDirectory)
            self.handleWatchUpdate(self.workingDirectory)
#user toggeled Tools --> Enable Watching
def watchCall(self):
watchedDirs = self.watcher.directories()
self.watcher.removePaths(watchedDirs)
if self.ui.actionEnable_Watching.isChecked():
if (self.workingDirectory != ''):
self.watcher.addPath(self.workingDirectory)
self.handleWatchUpdate(self.workingDirectory)
    def handleH5FileUpdate(self, path):
        """Re-check a watched .h5 file and process it once it can be opened.

        .h5 files that appear before the writer has finished are watched
        individually (see handleWatchUpdate); once h5py can open the file we
        stop watching it and feed it to the analyzer.
        """
        this_file = path
        file_ready = False
        if this_file.endswith('.h5'):
            if h5py.is_hdf5(this_file):
                try:
                    with h5py.File(this_file, 'r') as h5:
                        file_ready = True
                except:
                    pass  # still being written; keep watching and try again later
        if file_ready:
            self.watcher.removePath(this_file) # file is okay for reading now, stop watching it
            self.newFiles([this_file])
    def handleWatchUpdate(self,path):
        """React to a change in the watched directory: find files we have not
        seen yet and process them.

        Incomplete .h5 files (still being written) are not processed but are
        added to the watcher so handleH5FileUpdate can retry them.
        """
        myDir = QDir(path)
        myDir.setNameFilters(self.supportedExtensions)
        allFilesNow = myDir.entryList()
        allFilesNow = list(allFilesNow)
        allFilesNow = [str(item) for item in allFilesNow]
        # anything present on disk but not yet in our table
        newFiles = list(set(allFilesNow) - set(self.fileNames))
        if newFiles != []:
            # prepend full path
            new_files_fp = []
            for i in range(len(newFiles)):
                this_file = os.path.join(self.workingDirectory,newFiles[i])
                if this_file.endswith('.h5'):
                    if h5py.is_hdf5(this_file):
                        try:
                            with h5py.File(this_file, 'r') as h5:
                                new_files_fp.append(this_file)
                        except:
                            self.watcher.addPath(this_file) # couldn't open the h5 file, that means it's probably not closed yet, so we need to watch it
                else:
                    new_files_fp.append(this_file)
            if new_files_fp != []:
                # process all the new files
                self.newFiles(new_files_fp)
def statusChanged(self,args):
if not args:
# reset the statusbar background
self.ui.statusbar.setStyleSheet("QStatusBar{padding-left:8px;background:rgba(0,0,0,0);color:black;font-weight:bold;}")
def goodMessage(self):
self.ui.statusbar.setStyleSheet("QStatusBar{padding-left:8px;background:rgba(0,128,0,255);color:black;font-weight:bold;}")
def badMessage(self):
self.ui.statusbar.setStyleSheet("QStatusBar{padding-left:8px;background:rgba(255,0,0,255);color:black;font-weight:bold;}")
# yanked from https://github.com/randlet/to-precision
def to_precision(x,p):
"""
returns a string representation of x formatted with a precision of p
Based on the webkit javascript implementation taken from here:
https://code.google.com/p/webkit-mirror/source/browse/JavaScriptCore/kjs/number_object.cpp
"""
if x is None: # catch none
return str(x)
if not np.isfinite(x): # catch nan and inf
return str(x)
if x == 0.:
return "0." + "0"*(p-1)
out = []
if x < 0:
out.append("-")
x = -x
e = int(math.log10(x))
tens = math.pow(10, e - p + 1)
n = math.floor(x/tens)
if n < math.pow(10, p - 1):
e = e -1
tens = math.pow(10, e - p+1)
n = math.floor(x / tens)
if abs((n + 1.) * tens - x) <= abs(n * tens -x):
n = n + 1
if n >= math.pow(10,p):
n = n / 10.
e = e + 1
m = "%.*g" % (p, n)
if e < -2 or e >= p:
out.append(m[0])
if p > 1:
out.append(".")
out.extend(m[1:p])
out.append('e')
if e > 0:
out.append("+")
out.append(str(e))
elif e == (p -1):
out.append(m)
elif e >= 0:
out.append(m[:e+1])
if e+1 < len(m):
out.append(".")
out.extend(m[e+1:])
else:
out.append("0.")
out.extend(["0"]*-(e+1))
out.append(m)
return "".join(out)
| mit |
twankim/weaksemi | main_local.py | 1 | 8601 | # -*- coding: utf-8 -*-
# @Author: twankim
# @Date: 2017-02-24 17:46:51
# @Last Modified by: twankim
# @Last Modified time: 2018-03-09 22:14:15
import numpy as np
import time
import sys
import os
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from ssac import weakSSAC
from gen_data import genData
from utils import *
weak = "local"  # weak-oracle type used throughout this experiment script
delta = 0.99  # confidence parameter passed to calc_eta/calc_beta
base_dir= os.path.join('./results',weak)  # root output directory for results
def main(args):
    """Run the repeated weak-oracle SSAC clustering experiments.

    For each of args.rep repetitions, generate a synthetic Gaussian
    dataset, run SSAC for every (c_dist, eta) combination, accumulate
    accuracy / mean per-cluster accuracy / failure counts, then write CSV
    tables and plots under results/local/<g_min>_<g_max>/.

    NOTE(review): Python 2 code (print statements, xrange).  Block
    structure was reconstructed from a whitespace-mangled dump — confirm
    nesting against the original repository.
    """
    plotted = False
    rep = args.rep
    k = args.k
    n = args.n
    m = args.m
    std = args.std
    # qs = [float(q) for q in args.qs.split(',')]
    etas = [float(eta) for eta in args.etas.split(',')]
    beta = args.beta
    i_plot = np.random.randint(0,rep) # Index of experiment to plot the figure
    verbose = args.verbose
    cs = [float(q) for q in args.cs.split(',')]
    res_acc = np.zeros((rep,len(cs),len(etas))) # Accuracy of clustering
    res_mean_acc = np.zeros((rep,len(cs),len(etas))) # Mean accuracy of clustering (per cluster)
    # res_err = np.zeros((rep,len(qs),len(etas))) # Number of misclustered points
    res_fail = np.zeros((rep,len(cs),len(etas))) # Number of Failure
    gammas = np.zeros(rep)
    nus = np.zeros((rep,len(cs)))
    rhos = np.zeros((rep,len(cs)))
    # Make directories to save results
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)
    res_dir = base_dir + '/{}_{}'.format(args.min_gamma,args.max_gamma)
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)
    for i_rep in xrange(rep):
        # Generate Synthetic data
        # m dimensional, n points, k cluster
        # min_gamma: minimum gamma margin
        if verbose:
            print "({}/{})... Generating data".format(i_rep+1,rep)
        dataset = genData(n,m,k,args.min_gamma,args.max_gamma,std)
        X,y_true,ris = dataset.gen()
        gamma = dataset.gamma
        gammas[i_rep] = gamma
        print "({}/{})... Synthetic data is generated: gamma={}, (n,m,k,std)=({},{},{},{})".format(
              i_rep+1,rep,gamma,n,m,k,std)
        algo = weakSSAC(X,y_true,k,wtype=weak,ris=ris)
        # Test SSAC algorithm for different c's and eta's (fix beta in this case)
        for i_c,c_dist in enumerate(cs):
            assert (c_dist>0.5) & (c_dist<=1.0), "c_dist must be in (0.5,1]"
            # nu/rho are the distance-weak oracle parameters derived from
            # the realized margin gamma and the fraction c_dist.
            nus[i_rep,i_c] = float(gamma) + 1.5*(1-c_dist)
            rhos[i_rep,i_c] = c_dist
            # Calculate proper eta and beta based on parameters including delta
            if verbose:
                print " - Proper eta={}, beta={} (delta={})".format(
                      dataset.calc_eta(delta,weak=weak,nu=nus[i_rep,i_c],rho=rhos[i_rep,i_c]),
                      dataset.calc_beta(delta,weak=weak,nu=nus[i_rep,i_c],rho=rhos[i_rep,i_c]),
                      delta)
            for i_eta,eta in enumerate(etas):
                if verbose:
                    print " <Test: c_dist={}, eta={}, beta={}>".format(c_dist,eta,beta)
                algo.set_params(eta,beta,rho=rhos[i_rep,i_c],nu=nus[i_rep,i_c])
                if not algo.fit():
                    # Algorithm has failed
                    res_fail[i_rep,i_c,i_eta] = 1
                    if not plotted:
                        # Retry plotting on a later repetition instead.
                        i_plot = np.random.randint(i_rep+1,rep) # Index of experiment to plot the figure
                y_pred = algo.y
                mpps = algo.mpps # Estimated cluster centers
                # print " ... Clustering is done. Number of binary search steps = {}\n".format(algo.bs_num)
                # For evaluation & plotting, find best permutation of cluster assignment
                y_pred_perm = find_permutation(dataset,algo)
                # Calculate accuracy and mean accuracy
                res_acc[i_rep,i_c,i_eta] = accuracy(y_true,y_pred_perm)
                res_mean_acc[i_rep,i_c,i_eta] = mean_accuracy(y_true,y_pred_perm)
                # # Calculate number of errors
                # res_err[i_rep,i_c,i_eta] = error(y_true,y_pred_perm)
                # Only 2-D data is plotted, and only for one repetition.
                if (i_rep == i_plot) and (m<=2) and (not plotted):
                    if (i_eta==len(etas)-1) and (i_c==len(cs)-1):
                        plotted = True
                        title = r"SSAC with {} weak oracle ($\eta={}, \beta={}, \nu={:.2f}, \rho={:.2f}$)".format(
                                weak,eta,beta,nus[i_rep,i_c],rhos[i_rep,i_c])
                        f_name = res_dir+'/fig_n{}_m{}_k{}_c{:03d}_e{:d}.png'.format(n,m,k,int(100*c_dist),int(eta))
                        plot_cluster(X,y_true,y_pred_perm,k,mpps,gamma,
                                     title,f_name,verbose)
    # Write result as table
    print_eval("Accuracy(%)",res_acc,etas,
               res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("acc",n,m,k),weak=weak,params=cs)
    print_eval("Mean Accuracy(%)",res_mean_acc,etas,
               res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("meanacc",n,m,k),weak=weak,params=cs)
    # print_eval("# Error(%)",res_err,qs,etas,
    #            res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("err",n,m,k))
    print_eval("# Failures",res_fail,etas,
               res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("fail",n,m,k),
               is_sum=True,weak=weak,params=cs)
    # if args.isplot:
    # Plot Accuracy vs. eta
    fig_name = res_dir+'/fig_{}_n{}_m{}_k{}.pdf'.format("acc",n,m,k)
    plot_eval("Accuracy(%)",res_acc,etas,fig_name,weak=weak,params=cs)
    # Plot Mean Accuracy vs. eta
    fig_name = res_dir+'/fig_{}_n{}_m{}_k{}.pdf'.format("meanacc",n,m,k)
    plot_eval("Mean Accuracy(%)",res_mean_acc,etas,fig_name,weak=weak,params=cs)
    # Plot Failure vs. eta
    fig_name = res_dir+'/fig_{}_n{}_m{}_k{}.pdf'.format("fail",n,m,k)
    plot_eval("# Failures",res_fail,etas,fig_name,is_sum=True,weak=weak,params=cs)
    # Plot histogram of gammas
    fig_name = res_dir+'/fig_gamma_hist.pdf'
    plot_hist(gammas,args.min_gamma,args.max_gamma,fig_name)
    if args.isplot:
        plt.show()
def parse_args():
    """Build the CLI parser for the weak-oracle SSAC experiments and
    return the parsed argparse namespace."""

    def str2bool(v):
        # 'true'/'1' (any case) -> True, everything else -> False.
        return v.lower() in ('true', '1')

    parser = argparse.ArgumentParser(description=
        'Test Semi-Supervised Active Clustering with Weak Oracles: Random-weak model')
    parser.add_argument('-rep', dest='rep', type=int, default=10000,
                        help='Number of experiments to repeat')
    parser.add_argument('-k', dest='k', type=int, default=3,
                        help='Number of clusters in synthetic data')
    parser.add_argument('-n', dest='n', type=int, default=600,
                        help='Number of data points in synthetic data')
    parser.add_argument('-m', dest='m', type=int, default=2,
                        help='Dimension of data points in synthetic data')
    parser.add_argument('-std', dest='std', type=float, default=2.0,
                        help='standard deviation of Gaussian distribution (default:1.5)')
    parser.add_argument('-qs', dest='qs', type=str, default='0.7,0.85,1',
                        help='Probabilities q (not-sure with 1-q) ex) 0.7,0.85,1')
    parser.add_argument('-etas', dest='etas', type=str, default='2,5,10,20,30',
                        help='etas: parameter for sampling (phase 1) ex) 10,50')
    parser.add_argument('-beta', dest='beta', type=int, default=1,
                        help='beta: parameter for sampling (phase 2)')
    parser.add_argument('-g_min', dest='min_gamma', type=float, default=1.0,
                        help='minimum gamma margin (default:1)')
    parser.add_argument('-g_max', dest='max_gamma', type=float, default=1.1,
                        help='minimum gamma margin (default:1)')
    parser.add_argument('-cs', dest='cs', type=str, default='0.6,0.8,1',
                        help='Fractions to set distance-weak parameters (0.5,1] ex) 0.7,0.85,1')
    parser.add_argument('-isplot', dest='isplot', type=str2bool, default=False,
                        help='plot the result: True/False')
    parser.add_argument('-verbose', dest='verbose', type=str2bool, default=False,
                        help='verbose: True/False')
    return parser.parse_args()
if __name__ == '__main__':
    # Script entry point: parse CLI flags, echo them, run the experiment
    # suite, and propagate main()'s return value as the exit status.
    # NOTE(review): Python 2 print statements -- this file predates Python 3.
    args = parse_args()
    print "Called with args:"
    print args
    sys.exit(main(args))
| mit |
denis-gordeev/CNN-aggression-RU | train_tensorflow.py | 1 | 10126 | # -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
import itertools
import csv
import gensim
import re
import nltk.data
import tensorflow
from nltk.tokenize import WordPunctTokenizer
from collections import Counter
from keras.models import Sequential, Graph
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import LSTM, Merge
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from IPython.display import SVG, display
from keras.utils.visualize_util import plot, to_graph
# from keras import backend as K
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    # (pattern, replacement) pairs applied in order: strip unknown
    # characters, split off English contractions, then pad punctuation
    # with spaces and collapse whitespace runs.
    substitutions = (
        (r"[^A-Za-z0-9(),!?\'\`]", " "),
        (r"\'s", " \'s"),
        (r"\'ve", " \'ve"),
        (r"n\'t", " n\'t"),
        (r"\'re", " \'re"),
        (r"\'d", " \'d"),
        (r"\'ll", " \'ll"),
        (r",", " , "),
        (r"!", " ! "),
        (r"\(", " \( "),
        (r"\)", " \) "),
        (r"\?", " \? "),
        (r"\s{2,}", " "),
    )
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string.strip().lower()
def message_to_wordlist(message, lemmas_bool, remove_stopwords=False):
    # Function to convert a document to a sequence of words,
    # optionally removing stop words. Returns a list of words.
    #
    # lemmas_bool selects normalization: 'l' -> lemmatize, 's' -> stem,
    # anything else -> raw tokens.
    # NOTE(review): `morph` (lemmatizer), `stemmer` and `stopwords` are
    # module-level names not defined in this chunk -- confirm they are
    # initialized before this function is called.
    #
    # 1. Remove HTML
    #review_text = BeautifulSoup(review).get_text()
    #
    # 2. Remove messages numbers
    message_text = re.sub(">>\d+","", message)
    message_text = message_text.lower()
    # Normalize Cyrillic 'yo' to 'e' (corpus is Russian).
    message_text = re.sub(u"ё", 'e', message_text, re.UNICODE)
    message_text = clean_str(message_text)
    tokenizer = WordPunctTokenizer()
    # 3. Convert words to lower case and split them
    words = tokenizer.tokenize(message_text)
    lemmas = []
    # 4. Optionally remove stop words (false by default)
    if remove_stopwords:
        stops = set(stopwords.words("english"))
        words = [w for w in words if not w in stops]
    if lemmas_bool == 'l':
        for word in words:
            word_parsed = morph.parse(word)
            if len(word_parsed) > 0:
                lemmas.append(word_parsed[0].normal_form)
    elif lemmas_bool == 's':
        for word in words:
            word = stemmer.stem(word)
            if len(word) > 0:
                lemmas.append(word)
    else:
        lemmas = words
    # 5. Return a list of words
    return(lemmas)
    #return(words)
# Define a function to split a message into parsed sentences
# Define a function to split a message into parsed sentences
def message_to_sentences( message, tokenizer, lemmas_bool, remove_stopwords=False):
    # NOTE(review): despite the name and comments, the word lists of all
    # sentences are concatenated (+=), so the return value is one flat
    # list of words for the whole message.
    sentences = []
    # Function to split a message into parsed sentences. Returns a
    # list of sentences, where each sentence is a list of words
    #
    # 1. Use the NLTK tokenizer to split the paragraph into sentences
    # NOTE(review): str.decode() exists only in Python 2 -- this branch
    # fails on Python 3.
    if type(message) == str:
        message = message.decode('utf-8')
    raw_sentences = tokenizer.tokenize(message.strip())
    #
    # 2. Loop over each sentence
    for raw_sentence in raw_sentences:
        # If a sentence is empty, skip it
        if len(raw_sentence) > 0:
            # Otherwise, call message_to_wordlist to get a list of words
            sentences += message_to_wordlist( raw_sentence,lemmas_bool, remove_stopwords)
    return sentences
def pad_sentences(sentences, padding_word="<PAD/>"):
    """
    Pads all sentences to the same length. The length is defined by the longest sentence.
    Returns padded sentences.
    """
    target_len = max(len(sentence) for sentence in sentences)
    return [sentence + [padding_word] * (target_len - len(sentence))
            for sentence in sentences]
def build_vocab(sentences):
    """
    Builds a vocabulary mapping from word to index based on the sentences.
    Returns vocabulary mapping and inverse vocabulary mapping.
    """
    # Count every token across all sentences; most frequent words get the
    # smallest indices.
    counts = Counter(itertools.chain(*sentences))
    vocabulary_inv = [word for word, _ in counts.most_common()]
    vocabulary = {word: index for index, word in enumerate(vocabulary_inv)}
    return [vocabulary, vocabulary_inv]
def build_input_data(sentences, labels, vocabulary):
    """
    Maps sentences and labels to vectors based on a vocabulary.

    Labels are turned into one-hot pairs: 1 -> [1, 0], anything else
    -> [0, 1].
    """
    x = np.array([[vocabulary[word] for word in sentence]
                  for sentence in sentences])
    onehot = [[1, 0] if label == 1 else [0, 1] for label in labels]
    y = np.array(onehot)
    return [x, y]
def load_data():
    """Load the aggression corpus and prepare CNN inputs.

    Reads 'aggression.csv' (tab-separated, columns 'Text' and
    'Aggression'), tokenizes each message, POS-tags it, pads words and
    tags to a common length, and indexes both against vocabularies.

    Returns [x, y, vocabulary, vocabulary_inv, vocabulary_pos,
    vocabulary_inv_pos, x_pos].
    """
    messages = pd.read_csv( 'aggression.csv', header=0,
                delimiter="\t", quoting = csv.QUOTE_MINIMAL )
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    labels = messages[:]['Aggression']
    messages = messages[:]['Text']
    # Each message becomes a flat list of normalized word tokens.
    messages = [message_to_sentences(message, tokenizer, '') for message in messages]
    pos_data = [nltk.pos_tag(message) for message in messages]
    # Keep only the POS tag of each (word, tag) pair.
    tags = []
    for sent in pos_data:
        sent_tags = []
        for word in sent:
            sent_tags.append(word[1])
        tags.append(sent_tags)
    messages = pad_sentences(messages) # turn to the same length
    tags = pad_sentences(tags)
    vocabulary, vocabulary_inv = build_vocab(messages)
    vocabulary_pos, vocabulary_inv_pos = build_vocab(tags)
    x_pos = np.array([[vocabulary_pos[word] for word in sentence] for sentence in tags])
    x, y = build_input_data(messages, labels, vocabulary)
    return [x, y, vocabulary, vocabulary_inv, vocabulary_pos, vocabulary_inv_pos, x_pos]
# ---------------------------------------------------------------------------
# Script body: build and train a two-branch aggression classifier -- a word
# CNN (old Keras Graph API) merged with a dense network over POS-tag indices.
# NOTE(review): written for Keras 0.x (Graph, Merge, nb_epoch, class_mode);
# this API was removed in Keras 1/2.
# ---------------------------------------------------------------------------
np.random.seed(2)

model_variation = 'CNN-non-static'  # CNN-rand | CNN-non-static | CNN-static
print('Model variation is %s' % model_variation)

# Model Hyperparameters
sequence_length = 287
embedding_dim = 600
filter_sizes = (3, 4)
num_filters = 150
dropout_prob = (0.25, 0.5)
hidden_dims = 150

# Training parameters
batch_size = 32
num_epochs = 100
val_split = 0.1

# Word2Vec parameters, see train_word2vec
min_word_count = 4  # Minimum word count
context = 10  # Context window size

print("Loading data...")
x, y, vocabulary, vocabulary_inv, voc_pos, voc_inv_pos, x_pos = load_data()

if model_variation == 'CNN-non-static' or model_variation == 'CNN-static':
    # Pretrained word2vec vectors; vocabulary words missing from the model
    # get random vectors in [-0.25, 0.25) (dimension matches embedding_dim).
    embedding_model = gensim.models.Word2Vec.load('model')
    model_words = embedding_model.index2word
    embedding_weights = [np.array([embedding_model[w] if w in vocabulary and w in model_words\
                                   else np.random.uniform(-0.25,0.25,600)\
                                   for w in vocabulary_inv])]
    if model_variation=='CNN-static':
        x = embedding_weights[0][x]
elif model_variation=='CNN-rand':
    embedding_weights = None
else:
    raise ValueError('Unknown model variation')

# Shuffle samples; labels are collapsed from one-hot back to class indices.
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices].argmax(axis=1)
x_pos = x_pos[shuffle_indices]
print("Vocabulary Size: {:d}".format(len(vocabulary)))

# Building model
# ==================================================
#
# graph subnet with one input and one output,
# convolutional layers concateneted in parallel
graph = Graph()
graph.add_input(name='input', input_shape=(sequence_length, embedding_dim))
for fsz in filter_sizes:
    conv = Convolution1D(nb_filter=num_filters,
                         filter_length=fsz,
                         border_mode='valid',
                         activation='relu',
                         subsample_length=1)
    pool = MaxPooling1D(pool_length=2)
    graph.add_node(conv, name='conv-%s' % fsz, input='input')
    graph.add_node(pool, name='maxpool-%s' % fsz, input='conv-%s' % fsz)
    graph.add_node(Flatten(), name='flatten-%s' % fsz, input='maxpool-%s' % fsz)
if len(filter_sizes)>1:
    graph.add_output(name='output',
                     inputs=['flatten-%s' % fsz for fsz in filter_sizes],
                     merge_mode='concat')
else:
    graph.add_output(name='output', input='flatten-%s' % filter_sizes[0])

# main sequential model
model = Sequential()
if not model_variation=='CNN-static':
    model.add(Embedding(len(vocabulary), embedding_dim, input_length=sequence_length,
                        weights=embedding_weights))
# model.add(Embedding(len(vocabulary), 1, input_length=sequence_length)
model.add(Dropout(dropout_prob[0], input_shape=(sequence_length, embedding_dim)))
model.add(graph)
model.add(Dense(hidden_dims))
model.add(Dropout(dropout_prob[1]))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# model.compile(loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')

# Second branch: dense network over the POS-tag index sequence.
model_b = Sequential()
model_b.add(Dense(287, init='uniform', input_shape=(sequence_length,)))
model_b.add(Dense(32, init='uniform'))
model_b.add(Activation('relu'))
model_b.add(Dense(2, init='uniform'))
model_b.compile(loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')

# Final classifier over the concatenated branch outputs.
decoder = Sequential()
decoder.add(Merge([model, model_b], mode='concat'))
decoder.add(Dense(2, activation='softmax'))
decoder.compile(loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')

# Training model
# ==================================================
print ("Drawing graph")
graph = to_graph(decoder, show_shape=True)
graph.write_png("model.png")
print ("Training model")
decoder.fit([x_shuffled, x_pos], y_shuffled, batch_size=batch_size,
            nb_epoch=num_epochs, show_accuracy=True,
            validation_split=val_split, verbose=2)
| mit |
AlmightyMegadeth00/kernel_tegra | scripts/tracing/dma-api/trace.py | 96 | 12420 | """Main program and stuff"""
#from pprint import pprint
from sys import stdin
import os.path
import re
from argparse import ArgumentParser
import cPickle as pickle
from collections import namedtuple
from plotting import plotseries, disp_pic
import smmu
class TracelineParser(object):
    """Parse the needed information out of an ftrace line"""
    # Example of the payload this parser handles:
    # <...>-6 [000] d..2 5.287079: dmadebug_iommu_map_page: device=sdhci-tegra.3, addr=0x01048000, size=4096 page=c13e7214 archdata=ed504640

    def __init__(self):
        self.pattern = re.compile("device=(?P<dev>.*), addr=(?P<addr>.*), size=(?P<size>.*) page=(?P<page>.*) archdata=(?P<archdata>.*)")

    def parse(self, args):
        """Return (dev, addr, size, page, archdata) from the payload part.

        addr, page and archdata are parsed as hex; size as decimal.
        """
        fields = self.pattern.match(args)
        return (fields.group("dev"),
                int(fields.group("addr"), 16),
                int(fields.group("size")),
                int(fields.group("page"), 16),
                int(fields.group("archdata"), 16))
def biggest_indices(items, n):
    """Return list of indices of n biggest elements in items.

    Ties are broken by index; the returned indices are ordered by
    ascending element value (smallest of the selected first).
    """
    decorated = sorted((value, index) for index, value in enumerate(items))
    return [index for _, index in decorated[-n:]]
def by_indices(xs, ids):
    """Get elements from the list xs by their indices"""
    return list(map(xs.__getitem__, ids))
"""Event represents one input line"""
Event = namedtuple("Event", ["time", "dev", "data", "delta"])
class Trace(object):
    def __init__(self, args):
        # args: argparse namespace from get_args().
        smmu.VERBOSITY = args.verbosity
        self._args = args
        self.devlist = []   # smmu.Device objects seen in the trace
        self.events = []    # Event tuples, in trace order
        # Metric name -> function used to rank clients for --max-plots.
        # Keys must stay in sync with get_metrics().
        self.metrics = {
            "max_peak": self._usage_peak,
            "activity_rate": self._usage_activity,
            "average_mem": self._usage_avg
        }
        self.traceliner = TracelineParser()
    @staticmethod
    def get_metrics():
        """What filter metrics to get max users"""
        # Must match the keys of self.metrics set up in __init__.
        return ["max_peak", "activity_rate", "average_mem"]
    def show(self):
        """Shuffle events around, build plots, and show them"""
        # With --max-plots, collapse all but the N biggest clients into a
        # single "others" pseudo-device before plotting.
        if self._args.max_plots:
            evs = self.merge_events()
        else:
            evs = self.events
        series, devlist = self.unload(evs)
        if not self._args.no_plots:
            self.plot(series, devlist)
    def _get_usage(self, evs):
        """Return a metric of how active the events in evs are"""
        # Dispatch to the metric selected with --max-metric.
        return self.metrics[self._args.max_metric](evs)
    def _usage_peak(self, evs):
        """Return the biggest peak"""
        # Raises ValueError when evs is empty (max of empty sequence).
        return max(e.data for e in evs)
    def _usage_activity(self, evs):
        """Return the activity count: simply the length of the event list"""
        return len(evs)
def _usage_avg(self, evs):
"""Return the average over all points"""
# FIXME: the data points are not uniform in time, so this might be
# somewhat off.
return float(sum(e.data for e in evs)) / len(e)
    def merge_events(self):
        """Find out biggest users, keep them and flatten others to a single user"""
        sizes = []
        dev_evs = []
        # Rank every device by the selected usage metric.
        for i, dev in enumerate(self.devlist):
            dev_evs.append([e for e in self.events if e.dev == dev])
            sizes.append(self._get_usage(dev_evs[i]))
        # indices of the devices
        biggestix = biggest_indices(sizes, self._args.max_plots)
        print biggestix
        is_big = {}
        for i, dev in enumerate(self.devlist):
            is_big[dev] = i in biggestix
        # Re-tag events of the small clients as belonging to "others".
        evs = []
        for e in self.events:
            if not is_big[e.dev]:
                e = Event(e.time, "others", e.data, e.delta)
            evs.append(e)
        self.devlist.append("others")
        return evs
    def unload(self, events):
        """Prepare the event list for plotting
        series ends up as [([time0], [data0]), ([time1], [data1]), ...]
        """
        # ([x], [y]) for matplotlib
        series = [([], []) for x in self.devlist]
        devidx = dict([(d, i) for i, d in enumerate(self.devlist)])
        for event in events:
            devid = devidx[event.dev]
            series[devid][0].append(event.time)
            series[devid][1].append(event.data) # self.dev_data(event.dev))
        # Drop devices that produced no points so the legend stays clean.
        series_out = []
        devlist_out = []
        for ser, dev in zip(series, self.devlist):
            if len(ser[0]) > 0:
                series_out.append(ser)
                devlist_out.append(dev)
        return series_out, devlist_out
    def plot(self, series, devlist):
        """Display the plots"""
        #series, devlist = flatten_axes(self.series, self.devlist,
        #                               self._args.max_plots)
        # NOTE(review): map() returning a list is Python 2 behavior; on
        # Python 3 this would pass an iterator to plotseries.
        devinfo = (series, map(str, devlist))
        allocfreeinfo = (self.allocsfrees, ["allocd", "freed", "current"])
        plotseries(devinfo, allocfreeinfo)
        #plotseries(devinfo)
    def dev_data(self, dev):
        """what data to plot against time"""
        # NOTE(review): reaches into a private attribute of smmu.Device;
        # presumably the device's current allocation total -- confirm in smmu.
        return dev._cur_alloc
    def _cache_hash(self, filename):
        """The trace files are probably not of the same size"""
        # NOTE(review): the file size is used as a cheap cache key; two
        # different traces of identical size would collide.
        return str(os.path.getsize(filename))
    def load_cache(self):
        """Get the trace data from a database file, if one exists

        Returns True when the cache file was found and loaded, False
        otherwise.
        """
        has = self._cache_hash(self._args.filename)
        try:
            cache = open("trace." + has)
        except IOError:
            pass
        else:
            self._load_cache(pickle.load(cache))
            return True
        return False
    def save_cache(self):
        """Store the raw trace data to a database"""
        data = self._save_cache()
        # NOTE(review): text-mode "w" and no close()/with -- works with
        # cPickle on Python 2 but should be "wb" inside a context manager.
        fh = open("trace." + self._cache_hash(self._args.filename), "w")
        pickle.dump(data, fh)
    def _save_cache(self):
        """Return the internal data that is needed to be pickled"""
        return self.events, self.devlist, self.allocsfrees
    def _load_cache(self, data):
        """Get the data from an unpickled object"""
        # Inverse of _save_cache: restore the three internal collections.
        self.events, self.devlist, self.allocsfrees = data
    def load_events(self):
        """Get the internal data from a trace file or cache

        Reads from stdin when no filename was given; with --cache, a
        previously pickled result is preferred and a fresh parse is
        cached afterwards.
        """
        if self._args.filename:
            if self._args.cache and self.load_cache():
                return
            fh = open(self._args.filename)
        else:
            fh = stdin
        self.parse(fh)
        if self._args.cache and self._args.filename:
            self.save_cache()
    def parse(self, fh):
        """Parse the trace file in fh, store data to self

        Walks every dmadebug_* ftrace line between --start and --end,
        replays the map/unmap calls against per-ASID smmu.Memory objects,
        and accumulates self.events, self.devlist and self.allocsfrees.
        NOTE(review): Python 2 code; indentation reconstructed from a
        whitespace-mangled dump.
        """
        mems = {}           # archdata/ASID -> smmu.Memory
        dev_by_name = {}    # client name -> smmu.Device
        devlist = []
        buf_owners = {}     # mapped addr -> device that mapped it
        events = []
        allocsfrees = [([], []), ([], []), ([], [])] # allocs, frees, current
        allocs = 0
        frees = 0
        curbufs = 0
        mem_bytes = 1024 * 1024 * 1024
        npages = mem_bytes / 4096
        ncols = 512
        # One entry per 4 KiB page; 1 while mapped, 0 while free.
        le_pic = [0] * npages
        lastupd = 0
        for lineidx, line in enumerate(fh):
            # no comments
            if line.startswith("#"):
                continue
            taskpid, cpu, flags, timestamp, func, args = line.strip().split(None, 5)
            func = func[:-len(":")]
            # unneeded events may be there too
            if not func.startswith("dmadebug"):
                continue
            if self._args.verbosity >= 3:
                print line.rstrip()
            timestamp = float(timestamp[:-1])
            if timestamp < self._args.start:
                continue
            if timestamp >= self._args.end:
                break
            devname, addr, size, page, archdata = self.traceliner.parse(args)
            if self._args.processes:
                # Attribute mappings to the requesting process, not device.
                devname = taskpid.split("-")[0]
            mapping = archdata
            try:
                memmap = mems[mapping]
            except KeyError:
                memmap = mem(mapping)
                mems[mapping] = memmap
            try:
                dev = dev_by_name[devname]
            except KeyError:
                dev = smmu.Device(devname, memmap)
                dev_by_name[devname] = dev
                devlist.append(dev)
            allocfuncs = ["dmadebug_map_page", "dmadebug_map_sg", "dmadebug_alloc_coherent"]
            freefuncs = ["dmadebug_unmap_page", "dmadebug_unmap_sg", "dmadebug_free_coherent"]
            ignfuncs = []
            if timestamp-lastupd > 0.1:
                # just some debug prints for now
                lastupd = timestamp
                print lineidx,timestamp
                le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
                #disp_pic(le_pic2)
                # animating the bitmap would be cool
                #for row in le_pic:
                #    for i, a in enumerate(row):
                #        pass
                        #row[i] = 0.09 * a
            if func in allocfuncs:
                pages = dev_by_name[devname].alloc(addr, size)
                for p in pages:
                    le_pic[p] = 1
                buf_owners[addr] = dev_by_name[devname]
                allocs += 1
                curbufs += 1
                allocsfrees[0][0].append(timestamp)
                allocsfrees[0][1].append(allocs)
            elif func in freefuncs:
                if addr not in buf_owners:
                    if self._args.verbosity >= 1:
                        print "warning: %s unmapping unmapped %s" % (dev, addr)
                    buf_owners[addr] = dev
                # fixme: move this to bitmap handling
                # get to know the owners of bits
                # allocs/frees calls should be traced separately from maps?
                # map_pages is traced per page :(
                if buf_owners[addr] != dev and self._args.verbosity >= 2:
                    print "note: %s unmapping [%d,%d) mapped by %s" % (
                        dev, addr, addr+size, buf_owners[addr])
                pages = buf_owners[addr].free(addr, size)
                for p in pages:
                    le_pic[p] = 0
                # NOTE(review): decrementing a counter named "frees" looks
                # suspicious -- the freed-count series goes negative;
                # likely intended `frees += 1`.  Confirm before changing.
                frees -= 1
                curbufs -= 1
                allocsfrees[1][0].append(timestamp)
                allocsfrees[1][1].append(frees)
            elif func not in ignfuncs:
                raise ValueError("unhandled %s" % func)
            allocsfrees[2][0].append(timestamp)
            allocsfrees[2][1].append(curbufs)
            events.append(Event(timestamp, dev, self.dev_data(dev), size))
        self.events = events
        self.devlist = devlist
        self.allocsfrees = allocsfrees
        le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
        # FIXME: not quite ready yet
        disp_pic(le_pic2)
        return
def mem(asid):
    """Create a new memory object for the given asid space"""
    SZ_2G = 2 * 1024 * 1024 * 1024
    SZ_1M = 1 * 1024 * 1024
    # The Tegra SMMU aperture: 2 GiB starting at 0x80000000, minus 1 MiB.
    # arch/arm/mach-tegra/include/mach/iomap.h TEGRA_SMMU_(BASE|SIZE)
    base = 0x80000000
    size = SZ_2G - SZ_1M
    return smmu.Memory(base, size, asid)
def get_args():
    """Eat command line arguments, return argparse namespace for settings"""
    parser = ArgumentParser()
    parser.add_argument("filename", nargs="?",
                        help="trace file dump, stdin if not given")
    parser.add_argument("-s", "--start", type=float, default=0,
                        help="start timestamp")
    parser.add_argument("-e", "--end", type=float, default=1e9,
                        help="end timestamp")
    parser.add_argument("-v", "--verbosity", action="count", default=0,
                        help="amount of extra information: once for warns (dup addrs), "
                             "twice for notices (different client in map/unmap), "
                             "three for echoing all back")
    parser.add_argument("-p", "--processes", action="store_true",
                        help="use processes as memory clients instead of devices")
    parser.add_argument("-n", "--no-plots", action="store_true",
                        help="Don't draw the plots, only read the trace")
    parser.add_argument("-c", "--cache", action="store_true",
                        help="Pickle the data and make a cache file for fast reloading")
    parser.add_argument("-m", "--max-plots", type=int,
                        help="Maximum number of clients to show; show biggest and sum others")
    # Choices come from Trace.get_metrics() so the CLI stays in sync with
    # the metric table in Trace.__init__.
    parser.add_argument("-M", "--max-metric", choices=Trace.get_metrics(),
                        default=Trace.get_metrics()[0],
                        help="Metric to use when choosing clients in --max-plots")
    return parser.parse_args()
def main():
    """Parse CLI arguments, load the trace, and display the plots."""
    args = get_args()
    trace = Trace(args)
    trace.load_events()
    trace.show()

if __name__ == "__main__":
    main()
| gpl-2.0 |
edublancas/python-ds-tools | src/dstools/util.py | 2 | 6039 | import pickle
from pathlib import Path
import yaml
from pydoc import locate
import re
import collections
from functools import wraps
from inspect import signature, _empty, getargspec
from copy import copy
def isiterable(obj):
    """Return True when obj supports iteration (per iter()), else False."""
    try:
        iter(obj)
    except TypeError:
        return False
    return True
def _is_iterable(obj):
"""Determine wheter obj is an interable (excluding strings and mappings)
"""
# FIXME: remove this
iterable = isinstance(obj, collections.Iterable)
string = isinstance(obj, str)
mapping = isinstance(obj, collections.Mapping)
return iterable and not string and not mapping
def _wrap_in_list_if_needed(obj):
    """Return obj unchanged when it is a non-string, non-mapping iterable;
    otherwise wrap it in a single-element list."""
    return obj if _is_iterable(obj) else [obj]
def _unwrap_if_single_element(obj):
if len(obj) == 1:
return obj[0]
else:
return obj
def map_parameters_in_fn_call(args, kwargs, func):
    """
    Based on function signature, parse args to to convert them to key-value
    pairs and merge them with kwargs

    Any parameter found in args that does not match the function signature
    is still passed.

    Missing parameters are filled with their default values
    """
    # Fix: inspect.getargspec was deprecated since 3.0 and removed in
    # Python 3.11; derive the positional parameter names from
    # inspect.signature instead (same list for plain functions).
    params = signature(func).parameters
    args_spec = [name for name, p in params.items()
                 if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)]
    # Get missing parameters in kwargs to look for them in args
    params_missing = set(args_spec) - set(kwargs.keys())
    # Remove self parameter from params missing since it's not used
    # (discard, not remove: 'self' may already be satisfied via kwargs).
    if 'self' in args_spec:
        params_missing.discard('self')
        offset = 1
    else:
        offset = 0
    # Get indexes for those args
    idxs = [args_spec.index(name) for name in params_missing]
    # Parse args: positions past the end of args are simply absent and
    # fall back to defaults below.
    args_parsed = dict()
    for idx in idxs:
        key = args_spec[idx]
        try:
            value = args[idx - offset]
        except IndexError:
            pass
        else:
            args_parsed[key] = value
    parsed = copy(kwargs)
    parsed.update(args_parsed)
    # fill default values
    default = {k: v.default for k, v in params.items()
               if v.default is not v.empty}
    to_add = set(default.keys()) - set(parsed.keys())
    default_to_add = {k: v for k, v in default.items() if k in to_add}
    parsed.update(default_to_add)
    return parsed
def ensure_iterator(param):
    """Ensure a certain parameter or parameters are always an iterator,
    unless is None, in that case, it keeps it as it is

    NOTE(review): despite the name, non-iterable values are wrapped in a
    *list* (via _wrap_in_list_if_needed), not turned into an iterator.
    param may be a single parameter name or a list of names.
    """
    def _ensure_repeated(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Normalize the call into a single kwargs dict so the target
            # parameters can be looked up by name.
            kwargs = map_parameters_in_fn_call(args, kwargs, func)
            params = _wrap_in_list_if_needed(param)
            for p in params:
                if kwargs[p] is not None:
                    kwargs[p] = _wrap_in_list_if_needed(kwargs[p])
            return func(**kwargs)
        return wrapper
    return _ensure_repeated
def ensure_iterator_in_method(param):
    """Ensure a certain parameters is always an iterator, unles is None,
    in that case, it keeps it as it is

    Method variant of ensure_iterator: self is forwarded positionally and
    only a single parameter name is supported.
    """
    def _ensure_repeated(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            kwargs = map_parameters_in_fn_call(args, kwargs, func)
            if kwargs[param] is not None:
                kwargs[param] = _wrap_in_list_if_needed(kwargs[param])
            return func(self, **kwargs)
        return wrapper
    return _ensure_repeated
def class_name(obj):
    """Return the (dotted) class name of obj, extracted from str(type(obj))."""
    type_repr = str(type(obj))  # e.g. "<class 'int'>"
    return re.search(".*'(.+?)'.*", type_repr).group(1)
def instantiate_from_class_string(class_str, kwargs):
    """Resolve the dotted path class_str with pydoc.locate and instantiate
    the resulting class with **kwargs."""
    cls = locate(class_str)
    return cls(**kwargs)
def _can_iterate(obj):
import types
import collections
is_string = isinstance(obj, types.StringTypes)
is_iterable = isinstance(obj, collections.Iterable)
return is_iterable and not is_string
def format_column_names(columns, prefix=None):
    """Normalize column names: lower-case them and replace runs of
    non-alphanumeric characters with '_', optionally prepending prefix.

    columns is a pandas Index (anything with .map); returns the mapped
    names (a pandas Series when prefix is given).
    """
    import re
    import pandas as pd

    # Get rid of non alphanumeric characters and capital letters
    def format_str(s):
        # Bug fix: the original forgot the `return`, so every formatted
        # name silently became None.
        return re.sub('[^0-9a-zA-Z]+', '_', s).lower()

    names = columns.map(format_str)

    if prefix:
        names = pd.Series(names).map(lambda s: '{}_{}'.format(prefix, s))

    return names
def save(obj, path):
    """Serialize obj to path, choosing the format from the file extension.

    Supported suffixes: .npy (numpy), .yaml (PyYAML), .pickle (pickle).
    Raises ValueError for any other extension.
    """
    path = Path(path)

    if path.suffix == '.npy':
        import numpy as np
        np.save(str(path), obj)
    elif path.suffix == '.yaml':
        # Imported lazily (consistent with numpy above) so the module
        # works without PyYAML when YAML files are never used.
        import yaml
        with open(str(path), 'w') as f:
            yaml.dump(obj, f)
    elif path.suffix == '.pickle':
        with open(str(path), 'wb') as file:
            pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)
    else:
        raise ValueError('Do not know how to save file with extension '
                         '{}'.format(path.suffix))
def load(path):
    """Deserialize and return the object stored at path; the format is
    chosen from the file extension (.npy, .yaml, .pickle).

    Raises ValueError for any other extension.
    """
    path = Path(path)

    if path.suffix == '.npy':
        import numpy as np
        return np.load(str(path))
    elif path.suffix == '.yaml':
        # Lazy import, consistent with the numpy branch and with save().
        import yaml
        with open(str(path), 'r') as f:
            return yaml.load(f)
    elif path.suffix == '.pickle':
        with open(str(path), 'rb') as file:
            # Bug fix: pickle.load() takes no 'protocol' argument (the
            # protocol is read from the stream); passing it raised
            # TypeError on every .pickle load.
            return pickle.load(file)
    else:
        # Bug fix: the message said "save" in the load path.
        raise ValueError('Do not know how to load file with extension '
                         '{}'.format(path.suffix))
# def load_yaml(path):
# '''
# Load yaml file and return the contents of it. If ROOT_FOLDER
# environment variable is defined, the function will load the file
# from ROOT_FOLDER/path else from path
# '''
# try:
# base_path = '{}/'.format(os.environ['ROOT_FOLDER'])
# except:
# base_path = ''
# path = "%s%s" % (base_path, path)
# with open(path, 'r') as f:
# text = f.read()
# return yaml.load(text)
# try:
# config = load_yaml('config.yaml')
# except Exception, e:
# pass
# try:
# db_uri = ('{dialect}://{user}:{password}@{host}:{port}}/{database}'
# .format(**config['db']))
# except Exception, e:
# pass
| mit |
mojoboss/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) is compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)

# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause

# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor

# Create the dataset: noisy sum of two sinusoids on [0, 6].
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])

# Fit regression model: a single depth-4 tree vs. 300 AdaBoost.R2-boosted
# trees of the same depth.
regr_1 = DecisionTreeRegressor(max_depth=4)

regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
                           n_estimators=300, random_state=rng)

regr_1.fit(X, y)
regr_2.fit(X, y)

# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)

# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/androidviewclient-13.4.0-py2.7.egg/com/dtmilano/android/plot.py | 2 | 6871 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2012-2017 Diego Torres Milano
Created on mar 11, 2017
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Diego Torres Milano
"""
import sys
import types
from math import ceil
import matplotlib.pyplot as plt
import mpl_toolkits.axisartist as AA
import numpy as np
from mpl_toolkits.axes_grid1 import host_subplot
from com.dtmilano.android.adb.dumpsys import Dumpsys
# Distribution version of the androidviewclient plot helpers.
__version__ = '13.4.1'
# When True, plotting methods dump intermediate values to stderr.
DEBUG = True
# Numeric scalar types accepted by Plot.append() (Python 2 `types` names).
NumberTypes = (types.IntType, types.LongType, types.FloatType)
class Plot:
    """Accumulates numeric samples or :class:`Dumpsys` snapshots and renders
    them with matplotlib: meminfo series as a multi-axis line chart,
    framestats as a histogram.  Python 2 code (`print >>` statements)."""

    def __init__(self):
        # Number of samples appended so far; doubles as the next x value.
        self.n = 0
        # X-axis values: 1..n, one entry per append() call.
        self.na = []
        # Plain numeric samples (filled when append() receives a number).
        self.va = []
        self.ava = {}
        ''' Associative values array '''
        self.aava = {}
        ''' (another) Associative values array '''

    def append(self, value):
        """Append one sample (a number or a Dumpsys object); returns self
        so calls can be chained.

        NOTE(review): values of any other type are silently ignored, which
        leaves ``na`` longer than the value series — confirm intended.
        """
        self.n += 1
        self.na.append(self.n)
        if isinstance(value, NumberTypes):
            self.va.append(value)
        elif isinstance(value, Dumpsys):
            # Lazily create the per-key series on the first Dumpsys sample.
            if not self.ava:
                self.__initAva()
            if not self.aava:
                self.__initAava()
            dumpsys = value
            self.ava[Dumpsys.TOTAL].append(dumpsys.get(Dumpsys.TOTAL))
            self.ava[Dumpsys.ACTIVITIES].append(dumpsys.get(Dumpsys.ACTIVITIES))
            self.ava[Dumpsys.VIEWS].append(dumpsys.get(Dumpsys.VIEWS))
            # self.ava[Dumpsys.VIEW_ROOT_IMPL].append(dumpsys.get(Dumpsys.VIEW_ROOT_IMPL))
            self.aava[Dumpsys.FRAMESTATS].append(dumpsys.get(Dumpsys.FRAMESTATS))
        return self

    def __initAva(self):
        # One series per meminfo key that append() records.
        self.ava[Dumpsys.TOTAL] = []
        self.ava[Dumpsys.ACTIVITIES] = []
        self.ava[Dumpsys.VIEWS] = []
        # self.ava[Dumpsys.VIEW_ROOT_IMPL] = []

    def __initAava(self):
        # Framestats samples are kept separately from the meminfo series.
        self.aava[Dumpsys.FRAMESTATS] = []

    def plot(self, _type=Dumpsys.MEMINFO, filename=None):
        """Render the accumulated samples.

        :param _type: Dumpsys.MEMINFO (line chart, one parasite axis per
            non-TOTAL key) or Dumpsys.FRAMESTATS (histogram).
        :param filename: if given, save the figure there instead of showing
            it interactively.
        :raises RuntimeError: for MEMINFO when nothing has been appended.
        """
        title = "Dumpsys"
        if _type == Dumpsys.FRAMESTATS:
            subtitle = "gfxinfo " + Dumpsys.FRAMESTATS
        else:
            subtitle = _type
        if _type == Dumpsys.MEMINFO:
            if self.ava:
                if DEBUG:
                    print >> sys.stderr, "plot:"
                    for k in self.ava.keys():
                        print >> sys.stderr, " ", k, ":", self.ava[k]
                # Host axis carries TOTAL; every other key gets a twinned
                # parasite y-axis stacked to the right.
                host = host_subplot(111, axes_class=AA.Axes)
                plt.subplots_adjust(right=0.75)
                par = {}
                for k in self.ava.keys():
                    if k != Dumpsys.TOTAL:
                        par[k] = host.twinx()
                # Offset each extra right-hand axis by 60 points so the
                # labels do not overlap (ACTIVITIES keeps the default slot).
                axis = 1
                for k in self.ava.keys():
                    if k != Dumpsys.TOTAL and k != Dumpsys.ACTIVITIES:
                        offset = axis * 60
                        axis += 1
                        new_fixed_axis = par[k].get_grid_helper().new_fixed_axis
                        par[k].axis["right"] = new_fixed_axis(loc="right",
                                                             axes=par[k],
                                                             offset=(offset, 0))
                        par[k].axis["right"].toggle(all=True)
                if DEBUG:
                    print >> sys.stderr, "setting host x lim ", (np.amin(self.na), np.amax(self.na))
                # Pad both axis ranges by one average sample step.
                minx = np.amin(self.na)
                maxx = np.amax(self.na)
                divx = abs(maxx - minx) / (len(self.na) * 1.0)
                host.set_xlim(minx - divx, maxx + divx)
                miny = np.amin(self.ava[Dumpsys.TOTAL])
                maxy = np.amax(self.ava[Dumpsys.TOTAL])
                divy = ceil(abs(maxy - miny) / (len(self.ava[Dumpsys.TOTAL]) * 1.0))
                if DEBUG:
                    print >> sys.stderr, "setting host y lim ", (miny - divy, maxy + divy)
                host.set_ylim(miny - divy, maxy + divy)
                host.set_xlabel('N')
                host.set_ylabel(Dumpsys.TOTAL)
                for k in self.ava.keys():
                    if k != Dumpsys.TOTAL:
                        par[k].set_ylabel(k)
                plots = {}
                if DEBUG:
                    print >> sys.stderr, " host plot", self.na, ":", self.ava[Dumpsys.TOTAL]
                plots[Dumpsys.TOTAL], = host.plot(self.na, self.ava[Dumpsys.TOTAL], label=Dumpsys.TOTAL, linewidth=2)
                for k in self.ava.keys():
                    if k != Dumpsys.TOTAL:
                        if DEBUG:
                            print >> sys.stderr, " ", k, " plot", self.na, ":", self.ava[k]
                        plots[k], = par[k].plot(self.na, self.ava[k], label=k, linewidth=2)
                # Scale each parasite axis independently with the same
                # one-step padding used for the host axis.
                for k in self.ava.keys():
                    if k != Dumpsys.TOTAL:
                        miny = np.amin(self.ava[k])
                        maxy = np.amax(self.ava[k])
                        divy = ceil(abs(maxy - miny) / (len(self.ava[k]) * 1.0))
                        if DEBUG:
                            print >> sys.stderr, "setting", k, "y lim ", (miny - divy, maxy + divy)
                        par[k].set_ylim(miny - divy, maxy + divy)
                host.legend()
                # host.axis["left"].label.set_color(plots[Dumpsys.TOTAL].get_color())
                # for k in self.ava.keys():
                #    if k != Dumpsys.TOTAL:
                #        par[k].axis["right"].label.set_color(plots[k].get_color())
            elif self.va:
                # Only plain numeric samples were appended: simple line plot.
                plt.xlabel('N')
                plt.ylabel('V')
                plt.plot(self.na, self.va, label="A")
            else:
                raise RuntimeError("No values to plot")
        elif _type == Dumpsys.FRAMESTATS:
            n, bins, patches = plt.hist(self.aava[Dumpsys.FRAMESTATS])
            ymax = np.amax(n)
            # Vertical reference lines at the 60 fps and 30 fps frame budgets.
            # NOTE(review): the values are in nanoseconds (1/60 s * 1e9)
            # although the x-axis label below says 'ms' — confirm units.
            x = []
            y = []
            for v in range(int(ceil(ymax)) + 1):
                x.append(1 / 60.0 * 10 ** 9)
                y.append(v)
            plt.plot(x, y, linewidth=2, color='c')
            x = []
            y = []
            for v in range(int(ceil(ymax)) + 1):
                x.append(1 / 30.0 * 10 ** 9)
                y.append(v)
            plt.plot(x, y, linewidth=2, color='r')
            plt.xlabel('ms')
            plt.ylabel('Frames')
        plt.title(title + ' ' + subtitle)
        plt.grid(True)
        plt.draw()
        if filename:
            plt.savefig(filename)
        else:
            plt.show()
| gpl-3.0 |
nmartensen/pandas | pandas/tests/indexes/datetimes/test_tools.py | 1 | 65488 | """ test to_datetime """
import sys
import pytz
import pytest
import locale
import calendar
import dateutil
import numpy as np
from dateutil.parser import parse
from datetime import datetime, date, time
from distutils.version import LooseVersion
import pandas as pd
from pandas._libs import tslib, lib
from pandas.core.tools import datetimes as tools
from pandas.core.tools.datetimes import normalize_date
from pandas.compat import lmap
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import is_datetime64_ns_dtype
from pandas.util import testing as tm
from pandas.util.testing import assert_series_equal, _skip_if_has_locale
from pandas import (isna, to_datetime, Timestamp, Series, DataFrame,
Index, DatetimeIndex, NaT, date_range, bdate_range,
compat)
class TestTimeConversionFormats(object):
    """Tests for ``to_datetime`` with an explicit ``format=`` argument."""

    def test_to_datetime_format(self):
        # The same strings parsed day-first vs month-first must yield the
        # matching expected set, across Index/Series/scalar inputs.
        values = ['1/1/2000', '1/2/2000', '1/3/2000']
        results1 = [Timestamp('20000101'), Timestamp('20000201'),
                    Timestamp('20000301')]
        results2 = [Timestamp('20000101'), Timestamp('20000102'),
                    Timestamp('20000103')]
        for vals, expecteds in [(values, (Index(results1), Index(results2))),
                                (Series(values),
                                 (Series(results1), Series(results2))),
                                (values[0], (results1[0], results2[0])),
                                (values[1], (results1[1], results2[1])),
                                (values[2], (results1[2], results2[2]))]:
            for i, fmt in enumerate(['%d/%m/%Y', '%m/%d/%Y']):
                result = to_datetime(vals, format=fmt)
                expected = expecteds[i]
                if isinstance(expected, Series):
                    assert_series_equal(result, Series(expected))
                elif isinstance(expected, Timestamp):
                    assert result == expected
                else:
                    tm.assert_index_equal(result, expected)

    def test_to_datetime_format_YYYYMMDD(self):
        # Integer and string inputs through format='%Y%m%d'.
        s = Series([19801222, 19801222] + [19810105] * 5)
        expected = Series([Timestamp(x) for x in s.apply(str)])
        result = to_datetime(s, format='%Y%m%d')
        assert_series_equal(result, expected)
        result = to_datetime(s.apply(str), format='%Y%m%d')
        assert_series_equal(result, expected)
        # with NaT
        expected = Series([Timestamp("19801222"), Timestamp("19801222")] +
                          [Timestamp("19810105")] * 5)
        expected[2] = np.nan
        s[2] = np.nan
        result = to_datetime(s, format='%Y%m%d')
        assert_series_equal(result, expected)
        # string with NaT
        s = s.apply(str)
        s[2] = 'nat'
        result = to_datetime(s, format='%Y%m%d')
        assert_series_equal(result, expected)
        # coercion
        # GH 7930
        s = Series([20121231, 20141231, 99991231])
        result = pd.to_datetime(s, format='%Y%m%d', errors='ignore')
        expected = Series([datetime(2012, 12, 31),
                           datetime(2014, 12, 31), datetime(9999, 12, 31)],
                          dtype=object)
        tm.assert_series_equal(result, expected)
        result = pd.to_datetime(s, format='%Y%m%d', errors='coerce')
        expected = Series(['20121231', '20141231', 'NaT'], dtype='M8[ns]')
        assert_series_equal(result, expected)

    # GH 10178
    def test_to_datetime_format_integer(self):
        # Year-only and year-month integers through format codes.
        s = Series([2000, 2001, 2002])
        expected = Series([Timestamp(x) for x in s.apply(str)])
        result = to_datetime(s, format='%Y')
        assert_series_equal(result, expected)
        s = Series([200001, 200105, 200206])
        expected = Series([Timestamp(x[:4] + '-' + x[4:]) for x in s.apply(str)
                           ])
        result = to_datetime(s, format='%Y%m')
        assert_series_equal(result, expected)

    def test_to_datetime_format_microsecond(self):
        # these are locale dependent
        lang, _ = locale.getlocale()
        month_abbr = calendar.month_abbr[4]
        val = '01-{}-2011 00:00:01.978'.format(month_abbr)
        format = '%d-%b-%Y %H:%M:%S.%f'
        result = to_datetime(val, format=format)
        exp = datetime.strptime(val, format)
        assert result == exp

    def test_to_datetime_format_time(self):
        # Explicit time formats, 24h and 12h clocks.
        data = [
            ['01/10/2010 15:20', '%m/%d/%Y %H:%M',
             Timestamp('2010-01-10 15:20')],
            ['01/10/2010 05:43', '%m/%d/%Y %I:%M',
             Timestamp('2010-01-10 05:43')],
            ['01/10/2010 13:56:01', '%m/%d/%Y %H:%M:%S',
             Timestamp('2010-01-10 13:56:01')]  # ,
            # ['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p',
            #  Timestamp('2010-01-10 20:14')],
            # ['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p',
            #  Timestamp('2010-01-10 07:40')],
            # ['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p',
            #  Timestamp('2010-01-10 09:12:56')]
        ]
        for s, format, dt in data:
            assert to_datetime(s, format=format) == dt

    def test_to_datetime_with_non_exact(self):
        # GH 10834
        tm._skip_if_has_locale()
        # 8904
        # exact kw
        if sys.version_info < (2, 7):
            pytest.skip('on python version < 2.7')
        s = Series(['19MAY11', 'foobar19MAY11', '19MAY11:00:00:00',
                    '19MAY11 00:00:00Z'])
        result = to_datetime(s, format='%d%b%y', exact=False)
        expected = to_datetime(s.str.extract(r'(\d+\w+\d+)', expand=False),
                               format='%d%b%y')
        assert_series_equal(result, expected)

    def test_parse_nanoseconds_with_formula(self):
        # GH8989
        # trunctaing the nanoseconds when a format was provided
        for v in ["2012-01-01 09:00:00.000000001",
                  "2012-01-01 09:00:00.000001",
                  "2012-01-01 09:00:00.001",
                  "2012-01-01 09:00:00.001000",
                  "2012-01-01 09:00:00.001000000", ]:
            expected = pd.to_datetime(v)
            result = pd.to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f")
            assert result == expected

    def test_to_datetime_format_weeks(self):
        # Week-number directives %W/%U with weekday %w.
        data = [
            ['2009324', '%Y%W%w', Timestamp('2009-08-13')],
            ['2013020', '%Y%U%w', Timestamp('2013-01-13')]
        ]
        for s, format, dt in data:
            assert to_datetime(s, format=format) == dt
class TestToDatetime(object):
    """General ``to_datetime`` behaviour: datetime64 inputs, timezones,
    ``utc=True`` and invalid input types."""

    def test_to_datetime_dt64s(self):
        in_bound_dts = [
            np.datetime64('2000-01-01'),
            np.datetime64('2000-01-02'),
        ]
        for dt in in_bound_dts:
            assert pd.to_datetime(dt) == Timestamp(dt)
        # Values outside the ns-resolution Timestamp range must raise or
        # coerce to NaT.
        oob_dts = [np.datetime64('1000-01-01'), np.datetime64('5000-01-02'), ]
        for dt in oob_dts:
            pytest.raises(ValueError, pd.to_datetime, dt, errors='raise')
            pytest.raises(ValueError, Timestamp, dt)
            assert pd.to_datetime(dt, errors='coerce') is NaT

    def test_to_datetime_array_of_dt64s(self):
        dts = [np.datetime64('2000-01-01'), np.datetime64('2000-01-02'), ]
        # Assuming all datetimes are in bounds, to_datetime() returns
        # an array that is equal to Timestamp() parsing
        tm.assert_numpy_array_equal(
            pd.to_datetime(dts, box=False),
            np.array([Timestamp(x).asm8 for x in dts])
        )
        # A list of datetimes where the last one is out of bounds
        dts_with_oob = dts + [np.datetime64('9999-01-01')]
        pytest.raises(ValueError, pd.to_datetime, dts_with_oob,
                      errors='raise')
        tm.assert_numpy_array_equal(
            pd.to_datetime(dts_with_oob, box=False, errors='coerce'),
            np.array(
                [
                    Timestamp(dts_with_oob[0]).asm8,
                    Timestamp(dts_with_oob[1]).asm8,
                    tslib.iNaT,
                ],
                dtype='M8'
            )
        )
        # With errors='ignore', out of bounds datetime64s
        # are converted to their .item(), which depending on the version of
        # numpy is either a python datetime.datetime or datetime.date
        tm.assert_numpy_array_equal(
            pd.to_datetime(dts_with_oob, box=False, errors='ignore'),
            np.array(
                [dt.item() for dt in dts_with_oob],
                dtype='O'
            )
        )

    def test_to_datetime_tz(self):
        # xref 8260
        # uniform returns a DatetimeIndex
        arr = [pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
               pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')]
        result = pd.to_datetime(arr)
        expected = DatetimeIndex(
            ['2013-01-01 13:00:00', '2013-01-02 14:00:00'], tz='US/Pacific')
        tm.assert_index_equal(result, expected)
        # mixed tzs will raise
        arr = [pd.Timestamp('2013-01-01 13:00:00', tz='US/Pacific'),
               pd.Timestamp('2013-01-02 14:00:00', tz='US/Eastern')]
        pytest.raises(ValueError, lambda: pd.to_datetime(arr))

    def test_to_datetime_tz_pytz(self):
        # see gh-8260
        us_eastern = pytz.timezone('US/Eastern')
        arr = np.array([us_eastern.localize(datetime(year=2000, month=1, day=1,
                                                     hour=3, minute=0)),
                        us_eastern.localize(datetime(year=2000, month=6, day=1,
                                                     hour=3, minute=0))],
                       dtype=object)
        result = pd.to_datetime(arr, utc=True)
        expected = DatetimeIndex(['2000-01-01 08:00:00+00:00',
                                  '2000-06-01 07:00:00+00:00'],
                                 dtype='datetime64[ns, UTC]', freq=None)
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("init_constructor, end_constructor, test_method",
                             [(Index, DatetimeIndex, tm.assert_index_equal),
                              (list, DatetimeIndex, tm.assert_index_equal),
                              (np.array, DatetimeIndex, tm.assert_index_equal),
                              (Series, Series, tm.assert_series_equal)])
    def test_to_datetime_utc_true(self,
                                  init_constructor,
                                  end_constructor,
                                  test_method):
        # See gh-11934 & gh-6415
        data = ['20100102 121314', '20100102 121315']
        expected_data = [pd.Timestamp('2010-01-02 12:13:14', tz='utc'),
                         pd.Timestamp('2010-01-02 12:13:15', tz='utc')]
        result = pd.to_datetime(init_constructor(data),
                                format='%Y%m%d %H%M%S',
                                utc=True)
        expected = end_constructor(expected_data)
        test_method(result, expected)
        # Test scalar case as well
        for scalar, expected in zip(data, expected_data):
            result = pd.to_datetime(scalar, format='%Y%m%d %H%M%S', utc=True)
            assert result == expected

    def test_to_datetime_utc_true_with_series_single_value(self):
        # GH 15760 UTC=True with Series
        ts = 1.5e18
        result = pd.to_datetime(pd.Series([ts]), utc=True)
        expected = pd.Series([pd.Timestamp(ts, tz='utc')])
        tm.assert_series_equal(result, expected)

    def test_to_datetime_utc_true_with_series_tzaware_string(self):
        ts = '2013-01-01 00:00:00-01:00'
        expected_ts = '2013-01-01 01:00:00'
        data = pd.Series([ts] * 3)
        result = pd.to_datetime(data, utc=True)
        expected = pd.Series([pd.Timestamp(expected_ts, tz='utc')] * 3)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize('date, dtype',
                             [('2013-01-01 01:00:00', 'datetime64[ns]'),
                              ('2013-01-01 01:00:00', 'datetime64[ns, UTC]')])
    def test_to_datetime_utc_true_with_series_datetime_ns(self, date, dtype):
        expected = pd.Series([pd.Timestamp('2013-01-01 01:00:00', tz='UTC')])
        result = pd.to_datetime(pd.Series([date], dtype=dtype), utc=True)
        tm.assert_series_equal(result, expected)

    def test_to_datetime_tz_psycopg2(self):
        # xref 8260
        try:
            import psycopg2
        except ImportError:
            pytest.skip("no psycopg2 installed")
        # misc cases
        tz1 = psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)
        tz2 = psycopg2.tz.FixedOffsetTimezone(offset=-240, name=None)
        arr = np.array([datetime(2000, 1, 1, 3, 0, tzinfo=tz1),
                        datetime(2000, 6, 1, 3, 0, tzinfo=tz2)],
                       dtype=object)
        result = pd.to_datetime(arr, errors='coerce', utc=True)
        expected = DatetimeIndex(['2000-01-01 08:00:00+00:00',
                                  '2000-06-01 07:00:00+00:00'],
                                 dtype='datetime64[ns, UTC]', freq=None)
        tm.assert_index_equal(result, expected)
        # dtype coercion
        i = pd.DatetimeIndex([
            '2000-01-01 08:00:00+00:00'
        ], tz=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None))
        assert is_datetime64_ns_dtype(i)
        # tz coerceion
        result = pd.to_datetime(i, errors='coerce')
        tm.assert_index_equal(result, i)
        result = pd.to_datetime(i, errors='coerce', utc=True)
        expected = pd.DatetimeIndex(['2000-01-01 13:00:00'],
                                    dtype='datetime64[ns, UTC]')
        tm.assert_index_equal(result, expected)

    def test_datetime_bool(self):
        # GH13176
        # Booleans are rejected (raise), coerced to NaT, or passed through
        # unchanged depending on the errors mode.
        with pytest.raises(TypeError):
            to_datetime(False)
        assert to_datetime(False, errors="coerce") is NaT
        assert to_datetime(False, errors="ignore") is False
        with pytest.raises(TypeError):
            to_datetime(True)
        assert to_datetime(True, errors="coerce") is NaT
        assert to_datetime(True, errors="ignore") is True
        with pytest.raises(TypeError):
            to_datetime([False, datetime.today()])
        with pytest.raises(TypeError):
            to_datetime(['20130101', True])
        tm.assert_index_equal(to_datetime([0, False, NaT, 0.0],
                                          errors="coerce"),
                              DatetimeIndex([to_datetime(0), NaT,
                                             NaT, to_datetime(0)]))

    def test_datetime_invalid_datatype(self):
        # GH13176
        with pytest.raises(TypeError):
            pd.to_datetime(bool)
        with pytest.raises(TypeError):
            pd.to_datetime(pd.to_datetime)
class TestToDatetimeUnit(object):
    """``to_datetime`` with a ``unit=`` argument and DataFrame/dict
    column-assembly inputs."""

    def test_unit(self):
        # GH 11758
        # test proper behavior with erros
        with pytest.raises(ValueError):
            to_datetime([1], unit='D', format='%Y%m%d')
        values = [11111111, 1, 1.0, tslib.iNaT, NaT, np.nan,
                  'NaT', '']
        result = to_datetime(values, unit='D', errors='ignore')
        expected = Index([11111111, Timestamp('1970-01-02'),
                          Timestamp('1970-01-02'), NaT,
                          NaT, NaT, NaT, NaT],
                         dtype=object)
        tm.assert_index_equal(result, expected)
        result = to_datetime(values, unit='D', errors='coerce')
        expected = DatetimeIndex(['NaT', '1970-01-02', '1970-01-02',
                                  'NaT', 'NaT', 'NaT', 'NaT', 'NaT'])
        tm.assert_index_equal(result, expected)
        with pytest.raises(tslib.OutOfBoundsDatetime):
            to_datetime(values, unit='D', errors='raise')
        values = [1420043460000, tslib.iNaT, NaT, np.nan, 'NaT']
        result = to_datetime(values, errors='ignore', unit='s')
        expected = Index([1420043460000, NaT, NaT,
                          NaT, NaT], dtype=object)
        tm.assert_index_equal(result, expected)
        result = to_datetime(values, errors='coerce', unit='s')
        expected = DatetimeIndex(['NaT', 'NaT', 'NaT', 'NaT', 'NaT'])
        tm.assert_index_equal(result, expected)
        with pytest.raises(tslib.OutOfBoundsDatetime):
            to_datetime(values, errors='raise', unit='s')
        # if we have a string, then we raise a ValueError
        # and NOT an OutOfBoundsDatetime
        for val in ['foo', Timestamp('20130101')]:
            try:
                to_datetime(val, errors='raise', unit='s')
            except tslib.OutOfBoundsDatetime:
                raise AssertionError("incorrect exception raised")
            except ValueError:
                pass

    def test_unit_consistency(self):
        # consistency of conversions
        expected = Timestamp('1970-05-09 14:25:11')
        result = pd.to_datetime(11111111, unit='s', errors='raise')
        assert result == expected
        assert isinstance(result, Timestamp)
        result = pd.to_datetime(11111111, unit='s', errors='coerce')
        assert result == expected
        assert isinstance(result, Timestamp)
        result = pd.to_datetime(11111111, unit='s', errors='ignore')
        assert result == expected
        assert isinstance(result, Timestamp)

    def test_unit_with_numeric(self):
        # GH 13180
        # coercions from floats/ints are ok
        expected = DatetimeIndex(['2015-06-19 05:33:20',
                                  '2015-05-27 22:33:20'])
        arr1 = [1.434692e+18, 1.432766e+18]
        arr2 = np.array(arr1).astype('int64')
        for errors in ['ignore', 'raise', 'coerce']:
            result = pd.to_datetime(arr1, errors=errors)
            tm.assert_index_equal(result, expected)
            result = pd.to_datetime(arr2, errors=errors)
            tm.assert_index_equal(result, expected)
        # but we want to make sure that we are coercing
        # if we have ints/strings
        expected = DatetimeIndex(['NaT',
                                  '2015-06-19 05:33:20',
                                  '2015-05-27 22:33:20'])
        arr = ['foo', 1.434692e+18, 1.432766e+18]
        result = pd.to_datetime(arr, errors='coerce')
        tm.assert_index_equal(result, expected)
        expected = DatetimeIndex(['2015-06-19 05:33:20',
                                  '2015-05-27 22:33:20',
                                  'NaT',
                                  'NaT'])
        arr = [1.434692e+18, 1.432766e+18, 'foo', 'NaT']
        result = pd.to_datetime(arr, errors='coerce')
        tm.assert_index_equal(result, expected)

    def test_unit_mixed(self):
        # mixed integers/datetimes
        expected = DatetimeIndex(['2013-01-01', 'NaT', 'NaT'])
        arr = [pd.Timestamp('20130101'), 1.434692e+18, 1.432766e+18]
        result = pd.to_datetime(arr, errors='coerce')
        tm.assert_index_equal(result, expected)
        with pytest.raises(ValueError):
            pd.to_datetime(arr, errors='raise')
        expected = DatetimeIndex(['NaT',
                                  'NaT',
                                  '2013-01-01'])
        arr = [1.434692e+18, 1.432766e+18, pd.Timestamp('20130101')]
        result = pd.to_datetime(arr, errors='coerce')
        tm.assert_index_equal(result, expected)
        with pytest.raises(ValueError):
            pd.to_datetime(arr, errors='raise')

    def test_dataframe(self):
        # Assembling datetimes from DataFrame columns / dicts of columns.
        df = DataFrame({'year': [2015, 2016],
                        'month': [2, 3],
                        'day': [4, 5],
                        'hour': [6, 7],
                        'minute': [58, 59],
                        'second': [10, 11],
                        'ms': [1, 1],
                        'us': [2, 2],
                        'ns': [3, 3]})
        result = to_datetime({'year': df['year'],
                              'month': df['month'],
                              'day': df['day']})
        expected = Series([Timestamp('20150204 00:00:00'),
                           Timestamp('20160305 00:0:00')])
        assert_series_equal(result, expected)
        # dict-like
        result = to_datetime(df[['year', 'month', 'day']].to_dict())
        assert_series_equal(result, expected)
        # dict but with constructable
        df2 = df[['year', 'month', 'day']].to_dict()
        df2['month'] = 2
        result = to_datetime(df2)
        expected2 = Series([Timestamp('20150204 00:00:00'),
                            Timestamp('20160205 00:0:00')])
        assert_series_equal(result, expected2)
        # unit mappings
        units = [{'year': 'years',
                  'month': 'months',
                  'day': 'days',
                  'hour': 'hours',
                  'minute': 'minutes',
                  'second': 'seconds'},
                 {'year': 'year',
                  'month': 'month',
                  'day': 'day',
                  'hour': 'hour',
                  'minute': 'minute',
                  'second': 'second'},
                 ]
        for d in units:
            result = to_datetime(df[list(d.keys())].rename(columns=d))
            expected = Series([Timestamp('20150204 06:58:10'),
                               Timestamp('20160305 07:59:11')])
            assert_series_equal(result, expected)
        d = {'year': 'year',
             'month': 'month',
             'day': 'day',
             'hour': 'hour',
             'minute': 'minute',
             'second': 'second',
             'ms': 'ms',
             'us': 'us',
             'ns': 'ns'}
        result = to_datetime(df.rename(columns=d))
        expected = Series([Timestamp('20150204 06:58:10.001002003'),
                           Timestamp('20160305 07:59:11.001002003')])
        assert_series_equal(result, expected)
        # coerce back to int
        result = to_datetime(df.astype(str))
        assert_series_equal(result, expected)
        # passing coerce
        df2 = DataFrame({'year': [2015, 2016],
                         'month': [2, 20],
                         'day': [4, 5]})
        msg = ("cannot assemble the datetimes: time data .+ does not "
               "match format '%Y%m%d' \(match\)")
        with tm.assert_raises_regex(ValueError, msg):
            to_datetime(df2)
        result = to_datetime(df2, errors='coerce')
        expected = Series([Timestamp('20150204 00:00:00'),
                           NaT])
        assert_series_equal(result, expected)
        # extra columns
        msg = ("extra keys have been passed to the datetime assemblage: "
               "\[foo\]")
        with tm.assert_raises_regex(ValueError, msg):
            df2 = df.copy()
            df2['foo'] = 1
            to_datetime(df2)
        # not enough
        msg = ('to assemble mappings requires at least that \[year, month, '
               'day\] be specified: \[.+\] is missing')
        for c in [['year'],
                  ['year', 'month'],
                  ['year', 'month', 'second'],
                  ['month', 'day'],
                  ['year', 'day', 'second']]:
            with tm.assert_raises_regex(ValueError, msg):
                to_datetime(df[c])
        # duplicates
        msg = 'cannot assemble with duplicate keys'
        df2 = DataFrame({'year': [2015, 2016],
                         'month': [2, 20],
                         'day': [4, 5]})
        df2.columns = ['year', 'year', 'day']
        with tm.assert_raises_regex(ValueError, msg):
            to_datetime(df2)
        df2 = DataFrame({'year': [2015, 2016],
                         'month': [2, 20],
                         'day': [4, 5],
                         'hour': [4, 5]})
        df2.columns = ['year', 'month', 'day', 'day']
        with tm.assert_raises_regex(ValueError, msg):
            to_datetime(df2)

    def test_dataframe_dtypes(self):
        # #13451
        df = DataFrame({'year': [2015, 2016],
                        'month': [2, 3],
                        'day': [4, 5]})
        # int16
        result = to_datetime(df.astype('int16'))
        expected = Series([Timestamp('20150204 00:00:00'),
                           Timestamp('20160305 00:00:00')])
        assert_series_equal(result, expected)
        # mixed dtypes
        df['month'] = df['month'].astype('int8')
        df['day'] = df['day'].astype('int8')
        result = to_datetime(df)
        expected = Series([Timestamp('20150204 00:00:00'),
                           Timestamp('20160305 00:00:00')])
        assert_series_equal(result, expected)
        # float
        df = DataFrame({'year': [2000, 2001],
                        'month': [1.5, 1],
                        'day': [1, 1]})
        with pytest.raises(ValueError):
            to_datetime(df)
class TestToDatetimeMisc(object):
    """Miscellaneous ``to_datetime`` behaviour: Index conversion, ISO 8601,
    NaN/NaT handling, dayfirst parsing and deprecated APIs."""

    def test_index_to_datetime(self):
        # Index.to_datetime is deprecated and must warn.
        idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = idx.to_datetime()
            expected = DatetimeIndex(pd.to_datetime(idx.values))
            tm.assert_index_equal(result, expected)
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            today = datetime.today()
            idx = Index([today], dtype=object)
            result = idx.to_datetime()
            expected = DatetimeIndex([today])
            tm.assert_index_equal(result, expected)

    def test_to_datetime_iso8601(self):
        result = to_datetime(["2012-01-01 00:00:00"])
        exp = Timestamp("2012-01-01 00:00:00")
        assert result[0] == exp
        result = to_datetime(['20121001'])  # bad iso 8601
        exp = Timestamp('2012-10-01')
        assert result[0] == exp

    def test_to_datetime_default(self):
        rs = to_datetime('2001')
        xp = datetime(2001, 1, 1)
        assert rs == xp
        # dayfirst is essentially broken
        # to_datetime('01-13-2012', dayfirst=True)
        # pytest.raises(ValueError, to_datetime('01-13-2012',
        #                                       dayfirst=True))

    def test_to_datetime_on_datetime64_series(self):
        # #2699
        s = Series(date_range('1/1/2000', periods=10))
        result = to_datetime(s)
        assert result[0] == s[0]

    def test_to_datetime_with_space_in_series(self):
        # GH 6428
        s = Series(['10/18/2006', '10/18/2008', ' '])
        pytest.raises(ValueError, lambda: to_datetime(s, errors='raise'))
        result_coerce = to_datetime(s, errors='coerce')
        expected_coerce = Series([datetime(2006, 10, 18),
                                  datetime(2008, 10, 18),
                                  NaT])
        tm.assert_series_equal(result_coerce, expected_coerce)
        result_ignore = to_datetime(s, errors='ignore')
        tm.assert_series_equal(result_ignore, s)

    def test_to_datetime_with_apply(self):
        # this is only locale tested with US/None locales
        tm._skip_if_has_locale()
        # GH 5195
        # with a format and coerce a single item to_datetime fails
        td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1, 2, 3])
        expected = pd.to_datetime(td, format='%b %y')
        result = td.apply(pd.to_datetime, format='%b %y')
        assert_series_equal(result, expected)
        td = pd.Series(['May 04', 'Jun 02', ''], index=[1, 2, 3])
        pytest.raises(ValueError,
                      lambda: pd.to_datetime(td, format='%b %y',
                                             errors='raise'))
        pytest.raises(ValueError,
                      lambda: td.apply(pd.to_datetime, format='%b %y',
                                       errors='raise'))
        expected = pd.to_datetime(td, format='%b %y', errors='coerce')
        result = td.apply(
            lambda x: pd.to_datetime(x, format='%b %y', errors='coerce'))
        assert_series_equal(result, expected)

    def test_to_datetime_types(self):
        # empty string
        result = to_datetime('')
        assert result is NaT
        result = to_datetime(['', ''])
        assert isna(result).all()
        # ints
        result = Timestamp(0)
        expected = to_datetime(0)
        assert result == expected
        # GH 3888 (strings)
        expected = to_datetime(['2012'])[0]
        result = to_datetime('2012')
        assert result == expected
        # array = ['2012','20120101','20120101 12:01:01']
        array = ['20120101', '20120101 12:01:01']
        expected = list(to_datetime(array))
        result = lmap(Timestamp, array)
        tm.assert_almost_equal(result, expected)
        # currently fails ###
        # result = Timestamp('2012')
        # expected = to_datetime('2012')
        # assert result == expected

    def test_to_datetime_unprocessable_input(self):
        # GH 4928
        tm.assert_numpy_array_equal(
            to_datetime([1, '1'], errors='ignore'),
            np.array([1, '1'], dtype='O')
        )
        pytest.raises(TypeError, to_datetime, [1, '1'], errors='raise')

    def test_to_datetime_other_datetime64_units(self):
        # 5/25/2012
        scalar = np.int64(1337904000000000).view('M8[us]')
        as_obj = scalar.astype('O')
        index = DatetimeIndex([scalar])
        assert index[0] == scalar.astype('O')
        value = Timestamp(scalar)
        assert value == as_obj

    def test_to_datetime_list_of_integers(self):
        rng = date_range('1/1/2000', periods=20)
        rng = DatetimeIndex(rng.values)
        ints = list(rng.asi8)
        result = DatetimeIndex(ints)
        tm.assert_index_equal(rng, result)

    def test_to_datetime_freq(self):
        # Round-tripping through to_datetime must keep freq and tz.
        xp = bdate_range('2000-1-1', periods=10, tz='UTC')
        rs = xp.to_datetime()
        assert xp.freq == rs.freq
        assert xp.tzinfo == rs.tzinfo

    def test_string_na_nat_conversion(self):
        # GH #999, #858
        from pandas.compat import parse_date
        strings = np.array(['1/1/2000', '1/2/2000', np.nan,
                            '1/4/2000, 12:34:56'], dtype=object)
        expected = np.empty(4, dtype='M8[ns]')
        for i, val in enumerate(strings):
            if isna(val):
                expected[i] = tslib.iNaT
            else:
                expected[i] = parse_date(val)
        result = tslib.array_to_datetime(strings)
        tm.assert_almost_equal(result, expected)
        result2 = to_datetime(strings)
        assert isinstance(result2, DatetimeIndex)
        tm.assert_numpy_array_equal(result, result2.values)
        malformed = np.array(['1/100/2000', np.nan], dtype=object)
        # GH 10636, default is now 'raise'
        pytest.raises(ValueError,
                      lambda: to_datetime(malformed, errors='raise'))
        result = to_datetime(malformed, errors='ignore')
        tm.assert_numpy_array_equal(result, malformed)
        pytest.raises(ValueError, to_datetime, malformed, errors='raise')
        idx = ['a', 'b', 'c', 'd', 'e']
        series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
                         '1/5/2000'], index=idx, name='foo')
        dseries = Series([to_datetime('1/1/2000'), np.nan,
                          to_datetime('1/3/2000'), np.nan,
                          to_datetime('1/5/2000')], index=idx, name='foo')
        result = to_datetime(series)
        dresult = to_datetime(dseries)
        expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
        for i in range(5):
            x = series[i]
            if isna(x):
                expected[i] = tslib.iNaT
            else:
                expected[i] = to_datetime(x)
        assert_series_equal(result, expected, check_names=False)
        assert result.name == 'foo'
        assert_series_equal(dresult, expected, check_names=False)
        assert dresult.name == 'foo'

    def test_dti_constructor_numpy_timeunits(self):
        # GH 9114
        base = pd.to_datetime(['2000-01-01T00:00', '2000-01-02T00:00', 'NaT'])
        for dtype in ['datetime64[h]', 'datetime64[m]', 'datetime64[s]',
                      'datetime64[ms]', 'datetime64[us]', 'datetime64[ns]']:
            values = base.values.astype(dtype)
            tm.assert_index_equal(DatetimeIndex(values), base)
            tm.assert_index_equal(to_datetime(values), base)

    def test_dayfirst(self):
        # GH 5917
        # dayfirst must be honored across list/array/Index/Series inputs.
        arr = ['10/02/2014', '11/02/2014', '12/02/2014']
        expected = DatetimeIndex([datetime(2014, 2, 10), datetime(2014, 2, 11),
                                  datetime(2014, 2, 12)])
        idx1 = DatetimeIndex(arr, dayfirst=True)
        idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
        idx3 = to_datetime(arr, dayfirst=True)
        idx4 = to_datetime(np.array(arr), dayfirst=True)
        idx5 = DatetimeIndex(Index(arr), dayfirst=True)
        idx6 = DatetimeIndex(Series(arr), dayfirst=True)
        tm.assert_index_equal(expected, idx1)
        tm.assert_index_equal(expected, idx2)
        tm.assert_index_equal(expected, idx3)
        tm.assert_index_equal(expected, idx4)
        tm.assert_index_equal(expected, idx5)
        tm.assert_index_equal(expected, idx6)
class TestGuessDatetimeFormat(object):
    """Tests for the private ``tools._guess_datetime_format`` helpers used
    by ``to_datetime(..., infer_datetime_format=True)``."""

    def test_guess_datetime_format_with_parseable_formats(self):
        tm._skip_if_not_us_locale()
        dt_string_to_format = (('20111230', '%Y%m%d'),
                               ('2011-12-30', '%Y-%m-%d'),
                               ('30-12-2011', '%d-%m-%Y'),
                               ('2011-12-30 00:00:00', '%Y-%m-%d %H:%M:%S'),
                               ('2011-12-30T00:00:00', '%Y-%m-%dT%H:%M:%S'),
                               ('2011-12-30 00:00:00.000000',
                                '%Y-%m-%d %H:%M:%S.%f'), )
        for dt_string, dt_format in dt_string_to_format:
            assert tools._guess_datetime_format(dt_string) == dt_format

    def test_guess_datetime_format_with_dayfirst(self):
        # An ambiguous date must be resolved by the dayfirst flag.
        ambiguous_string = '01/01/2011'
        assert tools._guess_datetime_format(
            ambiguous_string, dayfirst=True) == '%d/%m/%Y'
        assert tools._guess_datetime_format(
            ambiguous_string, dayfirst=False) == '%m/%d/%Y'

    def test_guess_datetime_format_with_locale_specific_formats(self):
        # The month names will vary depending on the locale, in which
        # case these wont be parsed properly (dateutil can't parse them)
        tm._skip_if_has_locale()
        dt_string_to_format = (('30/Dec/2011', '%d/%b/%Y'),
                               ('30/December/2011', '%d/%B/%Y'),
                               ('30/Dec/2011 00:00:00', '%d/%b/%Y %H:%M:%S'), )
        for dt_string, dt_format in dt_string_to_format:
            assert tools._guess_datetime_format(dt_string) == dt_format

    def test_guess_datetime_format_invalid_inputs(self):
        # A datetime string must include a year, month and a day for it
        # to be guessable, in addition to being a string that looks like
        # a datetime
        invalid_dts = [
            '2013',
            '01/2013',
            '12:00:00',
            '1/1/1/1',
            'this_is_not_a_datetime',
            '51a',
            9,
            datetime(2011, 1, 1),
        ]
        for invalid_dt in invalid_dts:
            assert tools._guess_datetime_format(invalid_dt) is None

    def test_guess_datetime_format_nopadding(self):
        # GH 11142
        dt_string_to_format = (('2011-1-1', '%Y-%m-%d'),
                               ('30-1-2011', '%d-%m-%Y'),
                               ('1/1/2011', '%m/%d/%Y'),
                               ('2011-1-1 00:00:00', '%Y-%m-%d %H:%M:%S'),
                               ('2011-1-1 0:0:0', '%Y-%m-%d %H:%M:%S'),
                               ('2011-1-3T00:00:0', '%Y-%m-%dT%H:%M:%S'))
        for dt_string, dt_format in dt_string_to_format:
            assert tools._guess_datetime_format(dt_string) == dt_format

    def test_guess_datetime_format_for_array(self):
        tm._skip_if_not_us_locale()
        # The format is guessed from the first non-null element.
        expected_format = '%Y-%m-%d %H:%M:%S.%f'
        dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format)
        test_arrays = [
            np.array([dt_string, dt_string, dt_string], dtype='O'),
            np.array([np.nan, np.nan, dt_string], dtype='O'),
            np.array([dt_string, 'random_string'], dtype='O'),
        ]
        for test_array in test_arrays:
            assert tools._guess_datetime_format_for_array(
                test_array) == expected_format
        format_for_string_of_nans = tools._guess_datetime_format_for_array(
            np.array(
                [np.nan, np.nan, np.nan], dtype='O'))
        assert format_for_string_of_nans is None
class TestToDatetimeInferFormat(object):
    """Tests for ``to_datetime(..., infer_datetime_format=True)``: format
    inference must never change the parsed result relative to explicit or
    default parsing."""

    def test_to_datetime_infer_datetime_format_consistent_format(self):
        # A uniformly-formatted series must parse identically whether the
        # format is passed explicitly, inferred, or not inferred at all.
        s = pd.Series(pd.date_range('20000101', periods=50, freq='H'))
        test_formats = ['%m-%d-%Y', '%m/%d/%Y %H:%M:%S.%f',
                        '%Y-%m-%dT%H:%M:%S.%f']
        for test_format in test_formats:
            s_as_dt_strings = s.apply(lambda x: x.strftime(test_format))
            with_format = pd.to_datetime(s_as_dt_strings, format=test_format)
            no_infer = pd.to_datetime(s_as_dt_strings,
                                      infer_datetime_format=False)
            yes_infer = pd.to_datetime(s_as_dt_strings,
                                       infer_datetime_format=True)
            # Whether the format is explicitly passed, it is inferred, or
            # it is not inferred, the results should all be the same
            tm.assert_series_equal(with_format, no_infer)
            tm.assert_series_equal(no_infer, yes_infer)

    def test_to_datetime_infer_datetime_format_inconsistent_format(self):
        s = pd.Series(np.array(['01/01/2011 00:00:00',
                                '01-02-2011 00:00:00',
                                '2011-01-03T00:00:00']))
        # When the format is inconsistent, infer_datetime_format should just
        # fallback to the default parsing
        tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False),
                               pd.to_datetime(s, infer_datetime_format=True))
        s = pd.Series(np.array(['Jan/01/2011', 'Feb/01/2011', 'Mar/01/2011']))
        tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False),
                               pd.to_datetime(s, infer_datetime_format=True))

    def test_to_datetime_infer_datetime_format_series_with_nans(self):
        # NaN entries must not disturb inference from the remaining strings.
        s = pd.Series(np.array(['01/01/2011 00:00:00', np.nan,
                                '01/03/2011 00:00:00', np.nan]))
        tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False),
                               pd.to_datetime(s, infer_datetime_format=True))

    def test_to_datetime_infer_datetime_format_series_starting_with_nans(self):
        # Leading NaNs: inference must skip them to find the first string.
        s = pd.Series(np.array([np.nan, np.nan, '01/01/2011 00:00:00',
                                '01/02/2011 00:00:00', '01/03/2011 00:00:00']))
        tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False),
                               pd.to_datetime(s, infer_datetime_format=True))

    def test_to_datetime_iso8601_noleading_0s(self):
        # GH 11871: ISO-like dates without zero padding must parse the same
        # with and without an explicit format.
        s = pd.Series(['2014-1-1', '2014-2-2', '2015-3-3'])
        expected = pd.Series([pd.Timestamp('2014-01-01'),
                              pd.Timestamp('2014-02-02'),
                              pd.Timestamp('2015-03-03')])
        tm.assert_series_equal(pd.to_datetime(s), expected)
        tm.assert_series_equal(pd.to_datetime(s, format='%Y-%m-%d'), expected)
class TestDaysInMonth(object):
    # tests for issue #10154
    """Behaviour of to_datetime for day-of-month values that do not exist,
    under each errors= policy: 'coerce' -> NaT, 'raise' -> ValueError,
    'ignore' -> the input string is returned unchanged."""

    def test_day_not_in_month_coerce(self):
        assert isna(to_datetime('2015-02-29', errors='coerce'))
        assert isna(to_datetime('2015-02-29', format="%Y-%m-%d",
                                errors='coerce'))
        assert isna(to_datetime('2015-02-32', format="%Y-%m-%d",
                                errors='coerce'))
        assert isna(to_datetime('2015-04-31', format="%Y-%m-%d",
                                errors='coerce'))

    def test_day_not_in_month_raise(self):
        pytest.raises(ValueError, to_datetime, '2015-02-29',
                      errors='raise')
        pytest.raises(ValueError, to_datetime, '2015-02-29',
                      errors='raise', format="%Y-%m-%d")
        pytest.raises(ValueError, to_datetime, '2015-02-32',
                      errors='raise', format="%Y-%m-%d")
        pytest.raises(ValueError, to_datetime, '2015-04-31',
                      errors='raise', format="%Y-%m-%d")

    def test_day_not_in_month_ignore(self):
        assert to_datetime('2015-02-29', errors='ignore') == '2015-02-29'
        assert to_datetime('2015-02-29', errors='ignore',
                           format="%Y-%m-%d") == '2015-02-29'
        assert to_datetime('2015-02-32', errors='ignore',
                           format="%Y-%m-%d") == '2015-02-32'
        assert to_datetime('2015-04-31', errors='ignore',
                           format="%Y-%m-%d") == '2015-04-31'
class TestDatetimeParsingWrappers(object):
    """Cross-checks pandas' datetime-string parsing entry points
    (parse_time_string, to_datetime, Timestamp, DatetimeIndex) against each
    other and against dateutil, across quarterly strings, day/year-first
    ambiguity, bare times and strict ISO-8601 parsing."""

    def test_does_not_convert_mixed_integer(self):
        # Strings that merely contain digits must not be mistaken for dates.
        bad_date_strings = ('-50000', '999', '123.1234', 'm', 'T')
        for bad_date_string in bad_date_strings:
            assert not tslib._does_string_look_like_datetime(bad_date_string)
        good_date_strings = ('2012-01-01',
                             '01/01/2012',
                             'Mon Sep 16, 2013',
                             '01012012',
                             '0101',
                             '1-1', )
        for good_date_string in good_date_strings:
            assert tslib._does_string_look_like_datetime(good_date_string)

    def test_parsers(self):
        """All parsing entry points must agree on a broad set of date-string
        shapes, including quarterly notation."""
        # https://github.com/dateutil/dateutil/issues/217
        import dateutil
        # dateutil >= 2.5.0 honours yearfirst consistently.
        yearfirst = dateutil.__version__ >= LooseVersion('2.5.0')
        cases = {'2011-01-01': datetime(2011, 1, 1),
                 '2Q2005': datetime(2005, 4, 1),
                 '2Q05': datetime(2005, 4, 1),
                 '2005Q1': datetime(2005, 1, 1),
                 '05Q1': datetime(2005, 1, 1),
                 '2011Q3': datetime(2011, 7, 1),
                 '11Q3': datetime(2011, 7, 1),
                 '3Q2011': datetime(2011, 7, 1),
                 '3Q11': datetime(2011, 7, 1),
                 # quarterly without space
                 '2000Q4': datetime(2000, 10, 1),
                 '00Q4': datetime(2000, 10, 1),
                 '4Q2000': datetime(2000, 10, 1),
                 '4Q00': datetime(2000, 10, 1),
                 '2000q4': datetime(2000, 10, 1),
                 '2000-Q4': datetime(2000, 10, 1),
                 '00-Q4': datetime(2000, 10, 1),
                 '4Q-2000': datetime(2000, 10, 1),
                 '4Q-00': datetime(2000, 10, 1),
                 '00q4': datetime(2000, 10, 1),
                 '2005': datetime(2005, 1, 1),
                 '2005-11': datetime(2005, 11, 1),
                 '2005 11': datetime(2005, 11, 1),
                 '11-2005': datetime(2005, 11, 1),
                 '11 2005': datetime(2005, 11, 1),
                 # NOTE: '200511' is expected to parse as 2020-05-11
                 # (YYMMDD), not 2005-11.
                 '200511': datetime(2020, 5, 11),
                 '20051109': datetime(2005, 11, 9),
                 '20051109 10:15': datetime(2005, 11, 9, 10, 15),
                 '20051109 08H': datetime(2005, 11, 9, 8, 0),
                 '2005-11-09 10:15': datetime(2005, 11, 9, 10, 15),
                 '2005-11-09 08H': datetime(2005, 11, 9, 8, 0),
                 '2005/11/09 10:15': datetime(2005, 11, 9, 10, 15),
                 '2005/11/09 08H': datetime(2005, 11, 9, 8, 0),
                 "Thu Sep 25 10:36:28 2003": datetime(2003, 9, 25, 10,
                                                      36, 28),
                 "Thu Sep 25 2003": datetime(2003, 9, 25),
                 "Sep 25 2003": datetime(2003, 9, 25),
                 "January 1 2014": datetime(2014, 1, 1),
                 # GH 10537
                 '2014-06': datetime(2014, 6, 1),
                 '06-2014': datetime(2014, 6, 1),
                 '2014-6': datetime(2014, 6, 1),
                 '6-2014': datetime(2014, 6, 1),
                 '20010101 12': datetime(2001, 1, 1, 12),
                 '20010101 1234': datetime(2001, 1, 1, 12, 34),
                 '20010101 123456': datetime(2001, 1, 1, 12, 34, 56),
                 }
        for date_str, expected in compat.iteritems(cases):
            result1, _, _ = tools.parse_time_string(date_str,
                                                    yearfirst=yearfirst)
            result2 = to_datetime(date_str, yearfirst=yearfirst)
            result3 = to_datetime([date_str], yearfirst=yearfirst)
            # result5 is used below
            result4 = to_datetime(np.array([date_str], dtype=object),
                                  yearfirst=yearfirst)
            result6 = DatetimeIndex([date_str], yearfirst=yearfirst)
            # result7 is used below
            result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst)
            result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst)
            # Scalar results compare directly against the datetime...
            for res in [result1, result2]:
                assert res == expected
            # ...array-like results compare as single-element indexes.
            for res in [result3, result4, result6, result8, result9]:
                exp = DatetimeIndex([pd.Timestamp(expected)])
                tm.assert_index_equal(res, exp)
            # these really need to have yearfirst, but we don't support
            if not yearfirst:
                result5 = Timestamp(date_str)
                assert result5 == expected
                result7 = date_range(date_str, freq='S', periods=1,
                                     yearfirst=yearfirst)
                assert result7 == expected
        # NaT: every entry point must return the NaT singleton.
        result1, _, _ = tools.parse_time_string('NaT')
        result2 = to_datetime('NaT')
        result3 = Timestamp('NaT')
        result4 = DatetimeIndex(['NaT'])[0]
        assert result1 is tslib.NaT
        assert result2 is tslib.NaT
        assert result3 is tslib.NaT
        assert result4 is tslib.NaT

    def test_parsers_quarter_invalid(self):
        # Malformed quarterly strings must raise.
        cases = ['2Q 2005', '2Q-200A', '2Q-200', '22Q2005', '6Q-20', '2Q200.']
        for case in cases:
            pytest.raises(ValueError, tools.parse_time_string, case)

    def test_parsers_dayfirst_yearfirst(self):
        """dayfirst/yearfirst combinations must match dateutil's behaviour
        (which changed across dateutil 2.5.x — see table below)."""
        # OK
        # 2.5.1 10-11-12   [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
        # 2.5.2 10-11-12   [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00
        # 2.5.3 10-11-12   [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
        # OK
        # 2.5.1 10-11-12   [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
        # 2.5.2 10-11-12   [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
        # 2.5.3 10-11-12   [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
        # bug fix in 2.5.2
        # 2.5.1 10-11-12   [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00
        # 2.5.2 10-11-12   [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
        # 2.5.3 10-11-12   [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
        # OK
        # 2.5.1 10-11-12   [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
        # 2.5.2 10-11-12   [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
        # 2.5.3 10-11-12   [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
        # OK
        # 2.5.1 20/12/21   [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
        # 2.5.2 20/12/21   [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
        # 2.5.3 20/12/21   [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
        # OK
        # 2.5.1 20/12/21   [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
        # 2.5.2 20/12/21   [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
        # 2.5.3 20/12/21   [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
        # revert of bug in 2.5.2
        # 2.5.1 20/12/21   [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
        # 2.5.2 20/12/21   [dayfirst=1, yearfirst=1] -> month must be in 1..12
        # 2.5.3 20/12/21   [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
        # OK
        # 2.5.1 20/12/21   [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
        # 2.5.2 20/12/21   [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
        # 2.5.3 20/12/21   [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
        is_lt_253 = dateutil.__version__ < LooseVersion('2.5.3')
        # str : dayfirst, yearfirst, expected
        cases = {'10-11-12': [(False, False,
                               datetime(2012, 10, 11)),
                              (True, False,
                               datetime(2012, 11, 10)),
                              (False, True,
                               datetime(2010, 11, 12)),
                              (True, True,
                               datetime(2010, 12, 11))],
                 '20/12/21': [(False, False,
                               datetime(2021, 12, 20)),
                              (True, False,
                               datetime(2021, 12, 20)),
                              (False, True,
                               datetime(2020, 12, 21)),
                              (True, True,
                               datetime(2020, 12, 21))]}
        for date_str, values in compat.iteritems(cases):
            for dayfirst, yearfirst, expected in values:
                # odd comparisons across version
                # let's just skip
                if dayfirst and yearfirst and is_lt_253:
                    continue
                # compare with dateutil result
                dateutil_result = parse(date_str, dayfirst=dayfirst,
                                        yearfirst=yearfirst)
                assert dateutil_result == expected
                result1, _, _ = tools.parse_time_string(date_str,
                                                        dayfirst=dayfirst,
                                                        yearfirst=yearfirst)
                # we don't support dayfirst/yearfirst here:
                if not dayfirst and not yearfirst:
                    result2 = Timestamp(date_str)
                    assert result2 == expected
                result3 = to_datetime(date_str, dayfirst=dayfirst,
                                      yearfirst=yearfirst)
                result4 = DatetimeIndex([date_str], dayfirst=dayfirst,
                                        yearfirst=yearfirst)[0]
                assert result1 == expected
                assert result3 == expected
                assert result4 == expected

    def test_parsers_timestring(self):
        # must be the same as dateutil result
        cases = {'10:15': (parse('10:15'), datetime(1, 1, 1, 10, 15)),
                 '9:05': (parse('9:05'), datetime(1, 1, 1, 9, 5))}
        for date_str, (exp_now, exp_def) in compat.iteritems(cases):
            result1, _, _ = tools.parse_time_string(date_str)
            result2 = to_datetime(date_str)
            result3 = to_datetime([date_str])
            result4 = Timestamp(date_str)
            result5 = DatetimeIndex([date_str])[0]
            # parse time string return time string based on default date
            # others are not, and can't be changed because it is used in
            # time series plot
            assert result1 == exp_def
            assert result2 == exp_now
            assert result3 == exp_now
            assert result4 == exp_now
            assert result5 == exp_now

    def test_parsers_time(self):
        """tools.to_time on scalars, lists, arrays and Series, under each
        errors= policy."""
        # GH11818
        _skip_if_has_locale()
        strings = ["14:15", "1415", "2:15pm", "0215pm", "14:15:00", "141500",
                   "2:15:00pm", "021500pm", time(14, 15)]
        expected = time(14, 15)
        for time_string in strings:
            assert tools.to_time(time_string) == expected
        # "14.15" needs an explicit format to parse.
        new_string = "14.15"
        pytest.raises(ValueError, tools.to_time, new_string)
        assert tools.to_time(new_string, format="%H.%M") == expected
        arg = ["14:15", "20:20"]
        expected_arr = [time(14, 15), time(20, 20)]
        assert tools.to_time(arg) == expected_arr
        assert tools.to_time(arg, format="%H:%M") == expected_arr
        assert tools.to_time(arg, infer_time_format=True) == expected_arr
        # Wrong format: coerce -> Nones, ignore -> original strings,
        # raise -> ValueError.
        assert tools.to_time(arg, format="%I:%M%p",
                             errors="coerce") == [None, None]
        res = tools.to_time(arg, format="%I:%M%p", errors="ignore")
        tm.assert_numpy_array_equal(res, np.array(arg, dtype=np.object_))
        with pytest.raises(ValueError):
            tools.to_time(arg, format="%I:%M%p", errors="raise")
        tm.assert_series_equal(tools.to_time(Series(arg, name="test")),
                               Series(expected_arr, name="test"))
        res = tools.to_time(np.array(arg))
        assert isinstance(res, list)
        assert res == expected_arr

    def test_parsers_monthfreq(self):
        # With freq='M', YYYYMM strings resolve to the first of the month.
        cases = {'201101': datetime(2011, 1, 1, 0, 0),
                 '200005': datetime(2000, 5, 1, 0, 0)}
        for date_str, expected in compat.iteritems(cases):
            result1, _, _ = tools.parse_time_string(date_str, freq='M')
            assert result1 == expected

    def test_parsers_quarterly_with_freq(self):
        msg = ('Incorrect quarterly string is given, quarter '
               'must be between 1 and 4: 2013Q5')
        with tm.assert_raises_regex(tslib.DateParseError, msg):
            tools.parse_time_string('2013Q5')
        # GH 5418
        msg = ('Unable to retrieve month information from given freq: '
               'INVLD-L-DEC-SAT')
        with tm.assert_raises_regex(tslib.DateParseError, msg):
            tools.parse_time_string('2013Q1', freq='INVLD-L-DEC-SAT')
        # Fiscal-year frequencies shift the quarter's starting month.
        cases = {('2013Q2', None): datetime(2013, 4, 1),
                 ('2013Q2', 'A-APR'): datetime(2012, 8, 1),
                 ('2013-Q2', 'A-DEC'): datetime(2013, 4, 1)}
        for (date_str, freq), exp in compat.iteritems(cases):
            result, _, _ = tools.parse_time_string(date_str, freq=freq)
            assert result == exp

    def test_parsers_timezone_minute_offsets_roundtrip(self):
        """Offsets with a minute component (+0545, +0530) must round-trip
        through tz_localize/tz_convert (GH 11708)."""
        # GH11708
        base = to_datetime("2013-01-01 00:00:00")
        dt_strings = [
            ('2013-01-01 05:45+0545',
             "Asia/Katmandu",
             "Timestamp('2013-01-01 05:45:00+0545', tz='Asia/Katmandu')"),
            ('2013-01-01 05:30+0530',
             "Asia/Kolkata",
             "Timestamp('2013-01-01 05:30:00+0530', tz='Asia/Kolkata')")
        ]
        for dt_string, tz, dt_string_repr in dt_strings:
            dt_time = to_datetime(dt_string)
            assert base == dt_time
            converted_time = dt_time.tz_localize('UTC').tz_convert(tz)
            assert dt_string_repr == repr(converted_time)

    def test_parsers_iso8601(self):
        # GH 12060
        # test only the iso parser - flexibility to different
        # separators and leadings 0s
        # Timestamp construction falls back to dateutil
        cases = {'2011-01-02': datetime(2011, 1, 2),
                 '2011-1-2': datetime(2011, 1, 2),
                 '2011-01': datetime(2011, 1, 1),
                 '2011-1': datetime(2011, 1, 1),
                 '2011 01 02': datetime(2011, 1, 2),
                 '2011.01.02': datetime(2011, 1, 2),
                 '2011/01/02': datetime(2011, 1, 2),
                 '2011\\01\\02': datetime(2011, 1, 2),
                 '2013-01-01 05:30:00': datetime(2013, 1, 1, 5, 30),
                 '2013-1-1 5:30:00': datetime(2013, 1, 1, 5, 30)}
        for date_str, exp in compat.iteritems(cases):
            actual = tslib._test_parse_iso8601(date_str)
            assert actual == exp
        # seperators must all match - YYYYMM not valid
        invalid_cases = ['2011-01/02', '2011^11^11',
                         '201401', '201111', '200101',
                         # mixed separated and unseparated
                         '2005-0101', '200501-01',
                         '20010101 12:3456', '20010101 1234:56',
                         # HHMMSS must have two digits in each component
                         # if unseparated
                         '20010101 1', '20010101 123', '20010101 12345',
                         '20010101 12345Z',
                         # wrong separator for HHMMSS
                         '2001-01-01 12-34-56']
        for date_str in invalid_cases:
            with pytest.raises(ValueError):
                tslib._test_parse_iso8601(date_str)
                # If no ValueError raised, let me know which case failed.
                raise Exception(date_str)
class TestArrayToDatetime(object):
    """Tests for the low-level tslib.array_to_datetime conversion routine on
    object-dtype numpy arrays."""

    def test_try_parse_dates(self):
        arr = np.array(['5/1/2000', '6/1/2000', '7/1/2000'], dtype=object)
        result = lib.try_parse_dates(arr, dayfirst=True)
        expected = [parse(d, dayfirst=True) for d in arr]
        assert np.array_equal(result, expected)

    def test_parsing_valid_dates(self):
        arr = np.array(['01-01-2013', '01-02-2013'], dtype=object)
        tm.assert_numpy_array_equal(
            tslib.array_to_datetime(arr),
            np_array_datetime64_compat(
                [
                    '2013-01-01T00:00:00.000000000-0000',
                    '2013-01-02T00:00:00.000000000-0000'
                ],
                dtype='M8[ns]'
            )
        )
        arr = np.array(['Mon Sep 16 2013', 'Tue Sep 17 2013'], dtype=object)
        tm.assert_numpy_array_equal(
            tslib.array_to_datetime(arr),
            np_array_datetime64_compat(
                [
                    '2013-09-16T00:00:00.000000000-0000',
                    '2013-09-17T00:00:00.000000000-0000'
                ],
                dtype='M8[ns]'
            )
        )

    def test_parsing_timezone_offsets(self):
        # All of these datetime strings with offsets are equivalent
        # to the same datetime after the timezone offset is added
        dt_strings = [
            '01-01-2013 08:00:00+08:00',
            '2013-01-01T08:00:00.000000000+0800',
            '2012-12-31T16:00:00.000000000-0800',
            '12-31-2012 23:00:00-01:00'
        ]
        expected_output = tslib.array_to_datetime(np.array(
            ['01-01-2013 00:00:00'], dtype=object))
        for dt_string in dt_strings:
            tm.assert_numpy_array_equal(
                tslib.array_to_datetime(
                    np.array([dt_string], dtype=object)
                ),
                expected_output
            )

    def test_number_looking_strings_not_into_datetime(self):
        # #4601
        # These strings don't look like datetimes so they shouldn't be
        # attempted to be converted
        arr = np.array(['-352.737091', '183.575577'], dtype=object)
        tm.assert_numpy_array_equal(
            tslib.array_to_datetime(arr, errors='ignore'), arr)
        arr = np.array(['1', '2', '3', '4', '5'], dtype=object)
        tm.assert_numpy_array_equal(
            tslib.array_to_datetime(arr, errors='ignore'), arr)

    def test_coercing_dates_outside_of_datetime64_ns_bounds(self):
        # Dates outside the datetime64[ns] representable range:
        # errors='raise' raises, errors='coerce' produces iNaT.
        invalid_dates = [
            date(1000, 1, 1),
            datetime(1000, 1, 1),
            '1000-01-01',
            'Jan 1, 1000',
            np.datetime64('1000-01-01'),
        ]
        for invalid_date in invalid_dates:
            pytest.raises(ValueError,
                          tslib.array_to_datetime,
                          np.array([invalid_date], dtype='object'),
                          errors='raise', )
            tm.assert_numpy_array_equal(
                tslib.array_to_datetime(
                    np.array([invalid_date], dtype='object'),
                    errors='coerce'),
                np.array([tslib.iNaT], dtype='M8[ns]')
            )
        # Mixed valid/out-of-bounds: only the bad entry becomes iNaT.
        arr = np.array(['1/1/1000', '1/1/2000'], dtype=object)
        tm.assert_numpy_array_equal(
            tslib.array_to_datetime(arr, errors='coerce'),
            np_array_datetime64_compat(
                [
                    tslib.iNaT,
                    '2000-01-01T00:00:00.000000000-0000'
                ],
                dtype='M8[ns]'
            )
        )

    def test_coerce_of_invalid_datetimes(self):
        arr = np.array(['01-01-2013', 'not_a_date', '1'], dtype=object)
        # Without coercing, the presence of any invalid dates prevents
        # any values from being converted
        tm.assert_numpy_array_equal(
            tslib.array_to_datetime(arr, errors='ignore'), arr)
        # With coercing, the invalid dates becomes iNaT
        tm.assert_numpy_array_equal(
            tslib.array_to_datetime(arr, errors='coerce'),
            np_array_datetime64_compat(
                [
                    '2013-01-01T00:00:00.000000000-0000',
                    tslib.iNaT,
                    tslib.iNaT
                ],
                dtype='M8[ns]'
            )
        )
def test_normalize_date():
    """normalize_date must drop any time-of-day component, mapping both a
    plain date and a midday datetime onto midnight of the same day."""
    midnight = datetime(2012, 9, 7)
    for value in (date(2012, 9, 7), datetime(2012, 9, 7, 12)):
        assert normalize_date(value) == midnight
@pytest.fixture(params=['D', 's', 'ms', 'us', 'ns'])
def units(request):
    """Epoch-offset units accepted by to_datetime's ``unit`` argument."""
    return request.param
@pytest.fixture
def epoch_1960():
    """Timestamp used as a custom ``origin`` in the TestOrigin tests."""
    # for origin as 1960-01-01
    return Timestamp('1960-01-01')
@pytest.fixture
def units_from_epochs():
    """Offsets 0..4 applied to the origin in TestOrigin.test_epoch."""
    return list(range(5))
# The 1960-01-01 origin expressed as Timestamp, datetime, datetime64 and
# str.  Note epoch_1960() is called directly here at collection time, not
# injected as a fixture.
@pytest.fixture(params=[epoch_1960(),
                        epoch_1960().to_pydatetime(),
                        epoch_1960().to_datetime64(),
                        str(epoch_1960())])
def epochs(request):
    return request.param
@pytest.fixture
def julian_dates():
    """Ten consecutive days from 2014-01-01 as Julian-date floats."""
    return pd.date_range('2014-1-1', periods=10).to_julian_date().values
class TestOrigin(object):
    """Tests for the ``origin`` argument of to_datetime (GH 11276/11745):
    numeric inputs interpreted as offsets from a chosen epoch, or as
    Julian dates."""

    def test_to_basic(self, julian_dates):
        # gh-11276, gh-11745
        # for origin as julian
        result = Series(pd.to_datetime(
            julian_dates, unit='D', origin='julian'))
        expected = Series(pd.to_datetime(
            julian_dates - pd.Timestamp(0).to_julian_date(), unit='D'))
        assert_series_equal(result, expected)
        # Explicit unix origin behaves like the default.
        result = Series(pd.to_datetime(
            [0, 1, 2], unit='D', origin='unix'))
        expected = Series([Timestamp('1970-01-01'),
                           Timestamp('1970-01-02'),
                           Timestamp('1970-01-03')])
        assert_series_equal(result, expected)
        # default
        result = Series(pd.to_datetime(
            [0, 1, 2], unit='D'))
        expected = Series([Timestamp('1970-01-01'),
                           Timestamp('1970-01-02'),
                           Timestamp('1970-01-03')])
        assert_series_equal(result, expected)

    def test_julian_round_trip(self):
        result = pd.to_datetime(2456658, origin='julian', unit='D')
        assert result.to_julian_date() == 2456658
        # out-of-bounds
        with pytest.raises(ValueError):
            pd.to_datetime(1, origin="julian", unit='D')

    def test_invalid_unit(self, units, julian_dates):
        # checking for invalid combination of origin='julian' and unit != D
        if units != 'D':
            with pytest.raises(ValueError):
                pd.to_datetime(julian_dates, unit=units, origin='julian')

    def test_invalid_origin(self):
        # need to have a numeric specified
        with pytest.raises(ValueError):
            pd.to_datetime("2005-01-01", origin="1960-01-01")
        with pytest.raises(ValueError):
            pd.to_datetime("2005-01-01", origin="1960-01-01", unit='D')

    def test_epoch(self, units, epochs, epoch_1960, units_from_epochs):
        # Each numeric offset, in any unit, measured from any representation
        # of the 1960 epoch, must land on epoch + Timedelta(offset, unit).
        expected = Series(
            [pd.Timedelta(x, unit=units) +
             epoch_1960 for x in units_from_epochs])
        result = Series(pd.to_datetime(
            units_from_epochs, unit=units, origin=epochs))
        assert_series_equal(result, expected)

    @pytest.mark.parametrize("origin, exc",
                             [('random_string', ValueError),
                              ('epoch', ValueError),
                              ('13-24-1990', ValueError),
                              (datetime(1, 1, 1), tslib.OutOfBoundsDatetime)])
    def test_invalid_origins(self, origin, exc, units, units_from_epochs):
        with pytest.raises(exc):
            pd.to_datetime(units_from_epochs, unit=units,
                           origin=origin)

    def test_invalid_origins_tzinfo(self):
        # GH16842: a timezone-aware origin is rejected.
        with pytest.raises(ValueError):
            pd.to_datetime(1, unit='D',
                           origin=datetime(2000, 1, 1, tzinfo=pytz.utc))

    def test_processing_order(self):
        # make sure we handle out-of-bounds *before*
        # constructing the dates
        result = pd.to_datetime(200 * 365, unit='D')
        expected = Timestamp('2169-11-13 00:00:00')
        assert result == expected
        result = pd.to_datetime(200 * 365, unit='D', origin='1870-01-01')
        expected = Timestamp('2069-11-13 00:00:00')
        assert result == expected
        result = pd.to_datetime(300 * 365, unit='D', origin='1870-01-01')
        expected = Timestamp('2169-10-20 00:00:00')
        assert result == expected
| bsd-3-clause |
vellamike/optimizer | optimizer/graphic.py | 1 | 97405 | import wx
import sys
from traceHandler import sizeError
try:
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.figure import Figure
except RuntimeError as re:
print re
sys.exit()
#from inspyred.ec import analysis
from inspyred.ec.analysis import generation_plot
import inspyred
#from wxPython._controls import wxTextCtrl
import os
from string import count, split, strip
from copy import copy
import Core
class boundarywindow(wx.Frame):
    """Window listing a min/max boundary field for every parameter selected
    for optimization, with Set / Save / Load actions.

    Fix over the original: the files opened in ``Save`` and ``Load`` were
    never closed (leaked handles; writes could stay unflushed) — both now
    use ``with`` blocks.  ``dlg.Destroy()`` is also called when the file
    dialog is cancelled.
    """

    def __init__(self, par):
        wx.Frame.__init__(self, par, wx.ID_PROPERTIES, "Boundaries", size=(600, 700))
        panel = wx.Panel(self)
        #self.Bind(wx.EVT_CLOSE, self.my_close)
        self.par = par
        # Layout constants (pixels).
        hstep = 400
        vstep = 35
        hoffset = 10
        voffset = 15
        self.min = []
        self.max = []
        for l in range(len(self.par.core.option_handler.GetObjTOOpt())):
            param = self.par.core.option_handler.GetObjTOOpt()[l].split()
            # Build a short display label from the parameter description.
            if len(param) == 4:
                label = param[0] + " " + param[1] + " " + param[3]
            else:
                if param[0] != param[-1]:
                    label = param[0] + " " + param[-1]
                else:
                    label = param[-1]
            #wx.StaticText(panel, label=self.par.core.option_handler.GetObjTOOpt()[l].split()[-1], pos=(hoffset, voffset + l * vstep))
            wx.StaticText(panel, label=label, pos=(hoffset, voffset + l * vstep))
            tmp_min = wx.TextCtrl(panel, id=l, pos=(hstep, voffset + l * vstep), size=(75, 30))
            self.min.append(tmp_min)
            tmp_max = wx.TextCtrl(panel, id=l + len(self.par.core.option_handler.GetOptParam()), pos=(hstep / 4 + hstep, voffset + l * vstep), size=(75, 30))
            self.max.append(tmp_max)
            # Pre-fill the fields if boundaries were already defined for
            # every parameter.
            if len(self.par.core.option_handler.boundaries[1]) == len(self.par.core.option_handler.GetObjTOOpt()):
                tmp_min.SetValue(str(self.par.core.option_handler.boundaries[0][l]))
                tmp_max.SetValue(str(self.par.core.option_handler.boundaries[1][l]))
        Setbutton = wx.Button(panel, label="Set", pos=(hstep, 650))
        Setbutton.Bind(wx.EVT_BUTTON, self.Set)
        Savebutton = wx.Button(panel, label="Save", pos=(100, 650))
        Savebutton.Bind(wx.EVT_BUTTON, self.Save)
        Loadbutton = wx.Button(panel, label="Load", pos=(300, 650))
        Loadbutton.Bind(wx.EVT_BUTTON, self.Load)
        self.Show()
        self.save_file_name = "boundaries.txt"

    def Set(self, e):
        """Validate the entered values, commit them to the option handler
        and close the window."""
        try:
            self.par.core.option_handler.boundaries[0] = [float(n.GetValue()) for n in self.min]
            self.par.core.option_handler.boundaries[1] = [float(n.GetValue()) for n in self.max]
        except ValueError as ve:
            wx.MessageBox(str(ve), "Invalid Value", wx.OK | wx.ICON_ERROR)
        else:
            for i in range(len(self.par.core.option_handler.boundaries[0])):
                if self.par.core.option_handler.boundaries[0][i] >= self.par.core.option_handler.boundaries[1][i]:
                    wx.MessageBox("Min boundary must be lower than max", "Invalid Values", wx.OK | wx.ICON_ERROR)
                    break
        #self.boundaries_window.Destroy()
        self.Close()

    def Save(self, e):
        """Write the current min/max pairs, tab-separated one per line, to a
        user-chosen file."""
        dlg = wx.FileDialog(self, "Type a filename", os.getcwd(), "", "*.*", style=wx.FD_SAVE)
        if dlg.ShowModal() == wx.ID_OK:
            self.save_file_name = dlg.GetFilename()
            # 'with' guarantees the file is flushed and closed even on error
            # (the original left the handle open).
            with open(self.save_file_name, "w") as f:
                for _min, _max in zip(self.min, self.max):
                    f.write(str(_min.GetValue()))
                    f.write("\t")
                    f.write(str(_max.GetValue()))
                    f.write("\n")
        dlg.Destroy()

    def Load(self, e):
        """Fill the min/max fields from a tab-separated boundaries file."""
        dlg = wx.FileDialog(self, "Select a file", os.getcwd(), "", "*.*", style=wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            load_file_name = dlg.GetDirectory() + "/" + dlg.GetFilename()
            try:
                # 'with' closes the file even if a line is malformed.
                with open(load_file_name, "r") as f:
                    for idx, l in enumerate(f):
                        bounds = l.split()
                        self.min[idx].SetValue(bounds[0])
                        self.max[idx].SetValue(bounds[1])
            except IOError:
                wx.MessageBox('Error reading the file', 'Error', wx.OK | wx.ICON_ERROR)
        dlg.Destroy()

    def my_close(self, e):
        # Hard-exits the whole application, not just this window.
        wx.Exit()
class gridwindow(wx.Frame):
    """Window for defining grid-search boundaries and resolution; on Set the
    values are pushed into the option handler and the parent redraws its
    grid via ``DisplayGrid``."""

    def __init__(self, par):
        wx.Frame.__init__(self, par, wx.ID_PROPERTIES, "Grid Boundaries", size=(400, 700))
        panel = wx.Panel(self)
        #self.Bind(wx.EVT_CLOSE, self.my_close)
        self.par = par
        # Layout constants (pixels).
        hstep = 200
        vstep = 35
        hoffset = 10
        voffset = 15
        self.min = []
        self.max = []
        for l in range(len(self.par.core.option_handler.GetObjTOOpt())):
            wx.StaticText(panel, label=self.par.core.option_handler.GetObjTOOpt()[l].split()[-1], pos=(hoffset, voffset + l * vstep))
            tmp_min = wx.TextCtrl(panel, id=l, pos=(hstep, voffset + l * vstep), size=(75, 30))
            self.min.append(tmp_min)
            tmp_max = wx.TextCtrl(panel, id=l + len(self.par.core.option_handler.GetOptParam()), pos=(hstep / 2 + hstep, voffset + l * vstep), size=(75, 30))
            self.max.append(tmp_max)
            # Pre-fill if boundaries already exist for every parameter.
            if len(self.par.core.option_handler.boundaries[1]) == len(self.par.core.option_handler.GetObjTOOpt()):
                tmp_min.SetValue(str(self.par.core.option_handler.boundaries[0][l]))
                tmp_max.SetValue(str(self.par.core.option_handler.boundaries[1][l]))
        wx.StaticText(panel, label="Resolution:", pos=(hoffset, 600))
        self.resolution_ctrl = wx.TextCtrl(panel, id=wx.ID_ANY, pos=(hstep, 600), size=(75, 30))
        self.resolution_ctrl.SetValue(str(self.par.resolution))
        Setbutton = wx.Button(panel, label="Set", pos=(hstep, 650))
        Setbutton.Bind(wx.EVT_BUTTON, self.Set)

    def Set(self, e):
        """Commit boundaries and resolution, then ask the parent to redraw
        the grid."""
        try:
            self.par.core.option_handler.boundaries[0] = [float(n.GetValue()) for n in self.min]
            self.par.core.option_handler.boundaries[1] = [float(n.GetValue()) for n in self.max]
            self.par.resolution = int(self.resolution_ctrl.GetValue())
        except ValueError as ve:
            wx.MessageBox(str(ve), "Invalid Value", wx.OK | wx.ICON_ERROR)
        #self.boundaries_window.Destroy()
        self.par.DisplayGrid()

    def my_close(self, e):
        # Hard-exits the whole application.
        wx.Exit()
class stimuliwindow2(wx.Frame):
    """Apparently unused stub that only initializes an empty ``container``.

    NOTE(review): ``__init__`` never calls ``wx.Frame.__init__`` and ignores
    ``par``, so instances are not usable as real frames — confirm intent
    before relying on this class.
    """
    def __init__(self, par):
        self.container=[]
class stimuliwindow(wx.Frame):
    """Dialog for entering stimulus amplitudes.  Builds up to ten amplitude
    fields and, on Accept, stores the entered values as floats in
    ``self.container``."""

    def __init__(self, par, core):
        self.core = core
        # The actual frame is held in an attribute rather than using this
        # object as the frame itself.
        self.stimuli_window = wx.Frame(par.panel, wx.ID_ANY, "Set Amplitude(s)", size=(400, 500))
        self.panel = wx.Panel(self.stimuli_window)
        self.par = par
        self.container = []
        wx.StaticText(self.panel, label="Number of stimuli:", pos=(10, 10))
        self.generate = wx.Button(self.panel, label="Create", pos=(250, 10))
        self.generate.Bind(wx.EVT_BUTTON, self.Set)
        #self.load_waveform = wx.Button(self.panel, label="Time Varying\nStimulus", pos=(250, 50))
        #self.load_waveform.Bind(wx.EVT_BUTTON, self.Load)
        self.number = wx.TextCtrl(self.panel, id=wx.ID_ANY, pos=(150, 10), size=(50, 30))
        self.accept = wx.Button(self.panel, label="Accept", pos=(200, 450))
        self.accept.Disable()
        self.accept.Bind(wx.EVT_BUTTON, self.Accept)
        # When optimizing on features, pre-fill the stimulus count from the
        # loaded feature data and build the fields immediately.
        if self.core.option_handler.type[-1]=="features":
            self.number.SetValue((str(len(self.core.data_handler.features_data["stim_amp"]))))
            self.Set(self)
        self.stimuli_window.Show()

    def Set(self, e):
        """Create one amplitude text field per stimulus (capped at ten)."""
        self.temp = []
        # Layout constants (pixels).
        hstep = 200
        vstep = 35
        hoffset = 10
        voffset = 50
        # Current injection is labelled in nA, voltage clamp in mV.
        unit="nA" if self.par.dd_type.GetSelection()==0 else "mV"
        for l in range(min(10, int(self.number.GetValue()))):
            wx.StaticText(self.panel, label="Amplitude" + str(l+1) + " ("+unit+"):", pos=(hoffset, voffset + l * vstep))
            tmp_obj = wx.TextCtrl(self.panel, id=l, pos=(hstep / 2+25, voffset + l * vstep), size=(75, 30))
            if self.core.option_handler.type[-1]=="features":
                tmp_obj.SetValue(str(self.core.data_handler.features_data["stim_amp"][l]))
            self.temp.append(tmp_obj)
        self.accept.Enable()
        self.stimuli_window.Show()

    def Accept(self, e):
        """Collect the entered amplitudes as floats and hide the dialog."""
        for n in range(len(self.temp)):
            self.container.append(float(self.temp[n].GetValue()))
        self.stimuli_window.Hide()

    #def Load(self, e):
    def my_close(self, e):
        # Hard-exits the whole application.
        wx.Exit()
class ErrorDialog(wx.Frame):
    """Window showing a per-component breakdown of the fitness error:
    function name, raw value, weight and weighted value, followed by a
    'Weighted Sum' row for each trace."""

    def __init__(self, par):
        wx.Frame.__init__(self, par, wx.ID_PROPERTIES, "Detailed Error", size=(700, 400))
        panel = wx.Panel(self)
        self.parent = par
        self.error_comp_table = wx.ListCtrl(panel, pos=(10, 10), size=(600, 300), style=wx.LC_REPORT | wx.BORDER_SUNKEN)
        self.error_comp_table.InsertColumn(0, 'Error Function', width=200)
        self.error_comp_table.InsertColumn(1, 'Value', width=200)
        self.error_comp_table.InsertColumn(2, 'Weight', width=200)
        self.error_comp_table.InsertColumn(3, 'Weighted Value', width=200)
        tmp_w_sum = 0
        c_idx = 0
        # error_comps: one list per trace of (weight, function, value)
        # tuples, as read below — presumably built by the Core; confirm.
        for t in self.parent.core.error_comps:
            for c in t:
                #tmp_str.append( "*".join([str(c[0]),c[1].__name__]))
                # Non-feature runs map the function object to a readable
                # name; feature runs already store the name as a string.
                if self.parent.core.option_handler.type[-1]!="features":
                    idx=self.error_comp_table.InsertStringItem(c_idx,self.parent.core.ffun_mapper[c[1].__name__])
                else:
                    idx=self.error_comp_table.InsertStringItem(c_idx,c[1])
                self.error_comp_table.SetStringItem(idx,1,str(c[2]))
                self.error_comp_table.SetStringItem(idx,2,str(c[0]))
                self.error_comp_table.SetStringItem(idx,3,str(c[0]*c[2]))
                c_idx+=1
                tmp_w_sum +=c[0]*c[2]
            c_idx+=1
            # Append a summary row holding the weighted sum for this trace,
            # then reset the accumulator for the next one.
            idx=self.error_comp_table.InsertStringItem(c_idx,"Weighted Sum")
            self.error_comp_table.SetStringItem(idx,1,"-")
            self.error_comp_table.SetStringItem(idx,2,"-")
            self.error_comp_table.SetStringItem(idx,3,str(tmp_w_sum))
            #print str(tmp_w_sum)
            tmp_w_sum=0
class MyDialog(wx.Dialog):
def __init__(self, parent, *args, **kw):
super(MyDialog, self).__init__(*args, **kw)
self.parent = parent
#panel=wx.Panel(self,size=(300,250))
#wx.StaticText(self,label="#Please define your function below!\n#The first uncommented line should contain\neither the word python or hoc.\n#This would tell the compiler \nwhich language do you use.",id=wx.ID_ANY,pos=(465,10),style=wx.TE_MULTILINE)
self.string = wx.TextCtrl(self, id=wx.ID_ANY, pos=(10, 10), size=(450, 400), style=wx.TE_MULTILINE | wx.TE_AUTO_URL | wx.TE_PROCESS_TAB)
self.string.SetValue("#Please define your function below in the template!\n"+
"#You may choose an arbitrary name for your function,\n"+
"#but the input parameters must be self and a vector!In the first line of the function specify the length of the vector in a comment!\n"+
"#In the next lines you may specify the names of the parameters in separate comments.\n"+
"def usr_fun(self,v):")
okButton = wx.Button(self, label='Ok', pos=(50, 420))
closeButton = wx.Button(self, label='Close', pos=(200, 420))
okButton.Bind(wx.EVT_BUTTON, self.OnOk)
closeButton.Bind(wx.EVT_BUTTON, self.OnClose)
loadButton = wx.Button(self, label="Load", pos=(470, 20))
loadButton.Bind(wx.EVT_BUTTON, self.OnLoad)
def OnOk(self, e):
try:
#print self.string.GetValue()
self.parent.core.option_handler.u_fun_string = str(self.string.GetValue())
self.parent.core.option_handler.adjusted_params=[]
self.parent.model.DeleteAllItems()
text = ""
text = map(strip, str(self.string.GetValue()).split("\n"))[4:-1]
#print text
variables = []
variables = map(strip, str(text[0][text[0].index("(") + 1:text[0].index(")")]).split(","))
#print variables
var_len = int(text[1].lstrip("#"))
#print var_len
i=0
var_names=[]
while text[i+2][0]=="#" and i<var_len:
var_names.append(text[i+2].lstrip("#"))
i+=1
if len(var_names)!=var_len and len(var_names)!=0:
raise SyntaxError("Number of parameter names must equal to number of parameters")
if var_names==[]:
var_names=None
for i in range(var_len):
self.parent.core.option_handler.SetOptParam(0.1)
if var_names != None:
self.parent.core.option_handler.SetObjTOOpt(var_names[i])
else:
self.parent.core.option_handler.SetObjTOOpt("Vector" + "[" + str(i) + "]")
#print variables, variables[0]
if variables[0] == '':
raise ValueError
compile(self.string.GetValue(), '<string>', 'exec')
self.parent.toolbar.EnableTool(888, True)
self.Destroy()
except ValueError as val_err:
print val_err
wx.MessageBox("Your function doesn't have any input parameters!", "Error", wx.OK | wx.ICON_ERROR)
except SyntaxError as syn_err:
wx.MessageBox(str(syn_err), "Syntax Error", wx.OK | wx.ICON_ERROR)
def OnClose(self, e):
    """Dismiss the user-function dialog without saving anything."""
    self.Destroy()
def OnLoad(self, e):
    """Load a function body from a user-chosen file into the text control,
    prefixed with the instruction template header.

    Does nothing when the file dialog is cancelled (the original fell
    through and raised NameError on the undefined path).
    """
    dlg = wx.FileDialog(self, "Choose a file", os.getcwd(), "", "*.*", style=wx.OPEN)
    try:
        if dlg.ShowModal() != wx.ID_OK:
            return
        fun_file_path = dlg.GetDirectory() + "/" + dlg.GetFilename()
    finally:
        # Destroy the dialog on every path, as the original did.
        dlg.Destroy()
    fun = ("#Please define your function below in the template!\n"+
           "#You may choose an arbitrary name for your function,\n"+
           "#but the input parameters must be self and a vector!In the first line of the function specify the length of the vector in a comment!\n"+
           "#In the second line you may specify the names of the parameters in a comment, separated by spaces.\n")
    # Read the whole file in one go (same result as the original per-line
    # concatenation loop) and close the handle deterministically — the
    # original leaked it.
    with open(fun_file_path, "r") as f:
        fun = fun + f.read()
    self.string.SetValue(fun)
class MyDialog2(wx.Dialog):
    """Modal "Starting Points" dialog.

    Shows one text field per parameter selected for optimization and
    collects a starting value for each.  ``args[0]`` is the number of
    parameters; ``args[1]`` is a caller-owned list that OnOk fills with
    the entered floats (or whole starting vectors in OnLoadPop).
    """
    def __init__(self,parent,*args,**kwargs):
        super(MyDialog2,self).__init__(parent,title="Starting Points")
        self.parent = parent
        self.Bind(wx.EVT_CLOSE,self.OnClose)
        # Number of parameters to show a field for.
        n_o_params=args[0]
        # Text controls, one per parameter, read back in OnOk.
        self.container=[]
        # Shared output list owned by the caller.
        self.vals=args[1]
        # Grow with the parameter count but cap the height at 600 px.
        self.SetSize((500,min(150*n_o_params+1,600)))
        _sizer=wx.GridSizer(n_o_params+1,2,20,200)
        # row_sizer=wx.BoxSizer(wx.HORIZONTAL)
        # col_sizer1=wx.BoxSizer(wx.VERTICAL)
        # col_sizer2=wx.BoxSizer(wx.VERTICAL)
        for n in range(n_o_params):
            # Build a short display label from the stored parameter
            # description ("section [segment] channel param" style tokens).
            param=self.parent.core.option_handler.GetObjTOOpt()[n].split()
            if len(param)==4:
                p_name=param[0] + " " + param[1] + " " + param[3]
            else:
                if param[0]!=param[-1]:
                    p_name=param[0] + " " + param[-1]
                else:
                    p_name=param[-1]
            #p_name=self.parent.core.option_handler.GetObjTOOpt()[n].split()[-1]
            p_name_txt=wx.StaticText(self,label=p_name)
            ctrl=wx.TextCtrl(self,wx.ID_ANY,size=(100,30))
            self.container.append(ctrl)
            #col_sizer1.Add(p_name_txt,flag=wx.UP,border=15)
            #col_sizer2.Add(ctrl,flag=wx.UP,border=15)
            _sizer.Add(p_name_txt,flag=wx.LEFT | wx.UP,border=5)
            _sizer.Add(ctrl,flag=wx.LEFT | wx.UP,border=5)
        b_ok=wx.Button(self,label="Ok")
        b_close=wx.Button(self,label="Cancel")
        b_ok.Bind(wx.EVT_BUTTON, self.OnOk)
        b_close.Bind(wx.EVT_BUTTON, self.OnClose)
        b_load=wx.Button(self,label="Load Point")
        b_load.Bind(wx.EVT_BUTTON, self.OnLoad)
        b_load_pop=wx.Button(self,label="Load Population")
        b_load_pop.Bind(wx.EVT_BUTTON, self.OnLoadPop)
        # col_sizer1.Add(b_ok,flag=wx.UP,border=15)
        # col_sizer2.Add(b_close,flag=wx.UP,border=15)
        # row_sizer.Add(col_sizer1,flag=wx.LEFT,border=20)
        # row_sizer.Add(col_sizer2,flag=wx.LEFT,border=50)
        # self.SetSizer(row_sizer)
        _sizer.Add(b_ok,flag=wx.LEFT | wx.UP,border=5)
        _sizer.Add(b_close,flag=wx.LEFT | wx.UP,border=5)
        _sizer.Add(b_load,flag=wx.LEFT | wx.UP,border=5)
        _sizer.Add(b_load_pop,flag=wx.LEFT | wx.UP,border=5)
        self.SetSizer(_sizer)
    def OnOk(self,e):
        """Collect one float per text field into self.vals and close.

        A non-numeric or empty field makes float() raise ValueError, in
        which case an error box is shown and the dialog stays open.
        """
        try:
            for n in self.container:
                self.vals.append(float(n.GetValue()))
            self.Destroy()
        except ValueError:
            wx.MessageBox("You must give every parameter an initial value!", "Error", wx.OK | wx.ICON_ERROR)
    def OnClose(self,e):
        """Cancel: signal 'no values' to the caller and close."""
        self.vals=None
        self.Destroy()
    def OnLoad(self, e):
        """Fill the text fields from a file containing one value per line.

        NOTE(review): if the dialog is cancelled this opens "" and raises;
        the file handle is also never closed, and a file with more lines
        than fields raises IndexError — confirm intended usage.
        """
        file_path = ""
        dlg = wx.FileDialog(self, "Choose a file", os.getcwd(), "", "*.*", style=wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            file_path = dlg.GetDirectory() + "/" + dlg.GetFilename()
        dlg.Destroy()
        f = open(file_path, "r")
        for idx, l in enumerate(f):
            self.container[idx].SetValue(str(l))
    def OnLoadPop(self, e):
        """Load whole starting *vectors* (a population) from a results file.

        Only meaningful for the inspyred algorithms.  Asks for the
        population size, then a file, reads that many lines from the END
        of the file and appends one parsed parameter vector per line to
        self.vals.
        """
        self.size_of_pop = 0
        file_path = ""
        dlg1 = wx.MessageDialog(self, "This function is only supported by the algorithms from inspyred!", style=wx.OK | wx.CANCEL)
        if dlg1.ShowModal() == wx.ID_OK:
            dlg2 = wx.TextEntryDialog(self, "Enter size of population", caption="Replace this with an arbitrary number", style=wx.OK | wx.CANCEL)
            if dlg2.ShowModal() == wx.ID_OK:
                self.size_of_pop = int(dlg2.GetValue())
                dlg3 = wx.FileDialog(self, "Choose a file", os.getcwd(), "", "*.*", style=wx.OPEN)
                if dlg3.ShowModal() == wx.ID_OK:
                    file_path = dlg3.GetDirectory() + "/" + dlg3.GetFilename()
                dlg3.Destroy()
                dlg2.Destroy()
                dlg1.Destroy()
            else:
                dlg2.Destroy()
                dlg1.Destroy()
        else:
            dlg1.Destroy()
        def lastlines(hugefile, n, bsize=2048):
            """Yield (at least) the last *n* lines of *hugefile*.

            Seeks backwards in bsize chunks counting line separators,
            then re-reads forward from the found position, skipping any
            surplus lines.  Python 2 only ('except IOError, e' syntax).
            """
            import errno
            # Probe the file once in universal-newline mode to learn the
            # line separator actually used.
            hfile = open(hugefile, 'rU')
            if not hfile.readline():
                return  # empty file -> nothing to yield
            sep = hfile.newlines
            hfile.close()
            hfile = open(hugefile, 'rb')
            hfile.seek(0, os.SEEK_END)
            linecount = 0
            pos = 0
            while linecount <= n:
                try:
                    # Step back one chunk, count its separators, step back
                    # again so the next iteration reads the previous chunk.
                    hfile.seek(-bsize, os.SEEK_CUR)
                    linecount += hfile.read(bsize).count(sep)
                    hfile.seek(-bsize, os.SEEK_CUR)
                except IOError, e:
                    if e.errno == errno.EINVAL:
                        # Attempted to seek past the start, can't go further
                        bsize = hfile.tell()
                        hfile.seek(0, os.SEEK_SET)
                        linecount += hfile.read(bsize).count(sep)
                # NOTE(review): indentation of this line reconstructed;
                # assumed to run once per loop iteration — confirm.
                pos = hfile.tell()
            hfile.close()
            hfile = open(hugefile, 'r')
            hfile.seek(pos, os.SEEK_SET)  # our file position from above
            for line in hfile:
                # We've located n lines *or more*, so skip if needed
                if linecount > n:
                    linecount -= 1
                    continue
                # The rest we yield
                yield line
        # NOTE(review): runs even when the dialogs were cancelled
        # (file_path == "" then makes open() fail) — confirm intended.
        for l in lastlines(file_path, self.size_of_pop, 1):
            s=l.strip()
            #print s
            # Each line looks like a bracketed, comma-separated record;
            # strip brackets, convert to float, keep the middle slice
            # (py2 integer division in the second slice).
            params = map(lambda x: float(x.lstrip("[").rstrip("]")), s.split(", "))[3:-1]
            params = params[0:len(params) / 2 + 1]
            self.vals.append(params)
        self.Destroy()
class inputLayer(wx.Frame):
    """First wizard page: choose the input trace file, its type, units and
    sampling settings, load it through the core, preview it in a plot and
    list the loaded inputs in a tree control."""
    def __init__(self, parent, ID, size, title, core, path):
        wx.Frame.__init__(self, parent, ID, title=title, size=size)
        self.Bind(wx.EVT_CLOSE, self.my_close)
        self.core = core
        #this will need to be wrapped in a try statement later:
        import optimizer
        # NOTE(review): the 'path' argument is immediately overwritten with
        # the optimizer package location (used to find toolbar icons).
        path = os.path.dirname(optimizer.__file__)
        self.path = path
        self.layer = None  # next wizard page, created lazily in Next()
        self.panel = wx.Panel(self)
        self.Center()
        self.ToolbarCreator()
        self.Design()
        self.Show(True)
    def ToolbarCreator(self):
        """Create the toolbar with the forward tool (id 887), disabled
        until a trace is successfully loaded."""
        self.toolbar = self.CreateToolBar()
        button_toolbar_fward = self.toolbar.AddLabelTool(887, 'NextLayer', wx.Bitmap(self.path + "/2rightarrow.png"))
        self.toolbar.Realize()
        self.Bind(wx.EVT_TOOL, self.Next, button_toolbar_fward)
        self.toolbar.EnableTool(button_toolbar_fward.GetId(), False)
    def Design(self):
        """Build every widget and sizer of the page."""
        self.horizontal_box1 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box2 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box3 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box4 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box5 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box6 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box7 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box8 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box9 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box10 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box11 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box12 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box13 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box14 = wx.BoxSizer(wx.HORIZONTAL)
        #self.horizontal_box15 = wx.BoxSizer(wx.HORIZONTAL)
        self.vertical_box1 = wx.BoxSizer(wx.VERTICAL)
        self.vertical_box2 = wx.BoxSizer(wx.VERTICAL)
        heading = wx.StaticText(self.panel, label='File Options')
        heading.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        self.horizontal_box1.Add(heading, flag=wx.BOTTOM, border=10)
        # wx.StaticLine(self.panel, pos=(1, 0), size=(self.Size[0],1))
        # wx.StaticLine(self.panel, pos=(1, 215), size=(self.Size[0],1))
        descr1 = wx.StaticText(self.panel, label='Input File')
        self.horizontal_box2.Add(descr1)
        self.input_file_controll = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(300, 30), name="Input Location")
        self.input_file_controll.SetHelpText("Location of input trace")
        self.horizontal_box3.Add(self.input_file_controll)
        browser1 = wx.Button(self.panel, label="Browse...")
        browser1.Bind(wx.EVT_BUTTON, self.BrowseFile)
        self.horizontal_box3.Add(browser1, flag=wx.LEFT, border=10)
        self.time_checker = wx.CheckBox(self.panel, wx.ID_ANY, label="Contains time")
        self.horizontal_box3.Add(self.time_checker, flag=wx.LEFT, border=10)
        self.type_selector = wx.Choice(self.panel, wx.ID_ANY)
        #enable this later
        #self.type_selector.AppendItems(["Voltage trace", "Current trace", "Spike times", "Other"])
        self.type_selector.AppendItems(["Voltage trace", "Current trace", "Features", "Other"])
        self.type_selector.SetSelection(0)
        self.type_selector.Bind(wx.EVT_CHOICE, self.typeChanged)
        self.horizontal_box3.Add(self.type_selector, flag=wx.LEFT, border=10)
        descr2 = wx.StaticText(self.panel, label='Base Directory')
        self.horizontal_box4.Add(descr2)
        self.input_file_controll.WriteText(os.getcwd())
        self.base_dir_controll = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(300, 30), name="Base Location")
        self.horizontal_box5.Add(self.base_dir_controll)
        self.base_dir_controll.WriteText(os.getcwd())
        browser2 = wx.Button(self.panel, label="Browse...")
        browser2.Bind(wx.EVT_BUTTON, self.BrowseDir)
        self.horizontal_box5.Add(browser2, flag=wx.LEFT, border=10)
        # Tree summarizing which input kinds are loaded.
        self.input_tree=wx.TreeCtrl(self.panel,wx.ID_ANY,pos=(425,155),size=(250,100),style=wx.TR_HAS_BUTTONS | wx.TR_EXTENDED)
        self.troot=self.input_tree.AddRoot("Input data")
        # Tree branch handles per input kind; None until that kind is loaded.
        self.tvoltage=None
        self.tcurrent=None
        self.tspike_t=None
        self.tother=None
        self.tfeatures=None
        #enable this later
        self.loaded_input_types=[self.tvoltage ,
                                 self.tcurrent ,
                                 # self.tspike_t ,
                                 # self.tother,
                                 self.tfeatures]
        descr3 = wx.StaticText(self.panel, label='Number of traces')
        self.horizontal_box6.Add(descr3, flag=wx.UP, border=30)
        descr6 = wx.StaticText(self.panel, label='Units')
        self.horizontal_box6.Add(descr6, flag=wx.UP | wx.LEFT, border=30)
        self.size_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, pos=(10, 245), size=(100, 30), name="NO traces")
        self.horizontal_box7.Add(self.size_ctrl)
        self.dropdown = wx.Choice(self.panel, wx.ID_ANY, (150, 245))
        self.dropdown.SetSize((100, 30))
        # Unit choices depend on the selected trace type (Core.scales maps
        # e.g. 'voltage' -> {unit: factor}).
        self.dropdown.AppendItems(Core.scales[str(self.type_selector.GetItems()[self.type_selector.GetCurrentSelection()]).split()[0].lower()].keys())
        self.dropdown.Select(1)
        self.horizontal_box7.Add(self.dropdown, flag=wx.LEFT, border=50)
        descr4 = wx.StaticText(self.panel, label='Length of traces (ms)')
        self.horizontal_box8.Add(descr4)
        self.length_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, pos=(10, 325), size=(100, 30), name="Length")
        self.horizontal_box9.Add(self.length_ctrl)
        descr5 = wx.StaticText(self.panel, label='Sampling frequency (Hz)')
        self.horizontal_box10.Add(descr5)
        self.freq_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, pos=(10, 405), size=(100, 30), name="Frequency")
        self.horizontal_box11.Add(self.freq_ctrl)
        # Disabled until a file has been chosen in BrowseFile().
        self.load = wx.Button(self.panel, label="Load trace", pos=(10, 445))
        self.load.Disable()
        self.load.Bind(wx.EVT_BUTTON, self.Load)
        self.horizontal_box12.Add(self.load)
        self.vertical_box1.Add(self.horizontal_box1, flag=wx.ALL, border=10)
        self.vertical_box1.Add(self.horizontal_box2, flag=wx.ALL, border=5)
        self.vertical_box1.Add(self.horizontal_box3, flag=wx.ALL, border=5)
        self.vertical_box1.Add(self.horizontal_box4, flag=wx.ALL, border=5)
        self.vertical_box1.Add(self.horizontal_box5, flag=wx.ALL, border=5)
        self.vertical_box1.Add(self.horizontal_box6, flag=wx.ALL, border=5)
        self.vertical_box1.Add(self.horizontal_box7, flag=wx.ALL, border=5)
        self.vertical_box1.Add(self.horizontal_box8, flag=wx.ALL, border=5)
        self.vertical_box1.Add(self.horizontal_box9, flag=wx.ALL, border=5)
        self.vertical_box1.Add(self.horizontal_box10, flag=wx.ALL, border=5)
        self.vertical_box1.Add(self.horizontal_box11, flag=wx.ALL, border=5)
        self.vertical_box1.Add(self.horizontal_box12, flag=wx.ALL, border=5)
        self.vertical_box1.Add(self.horizontal_box13, flag=wx.ALL, border=5)
        self.vertical_box2.Add(self.vertical_box1, flag=wx.ALL, border=10)
        self.panel.SetSizer(self.vertical_box2)
    #event functions
    def Next(self, e):
        """Advance to the model/parameter page, creating it on first use."""
        if self.core.option_handler.output_level=="1":
            self.core.Print()
        try:
            self.layer.Show()
        except AttributeError:
            # self.layer is still None: create the next page lazily.
            self.layer = modelLayer(self, 1, self.Size, "Model & Parameter Selection", self.core, self.path)
            self.layer.Show()
        self.Hide()
    def typeChanged(self,e):
        """Refresh the unit dropdown when the trace type selection changes."""
        self.dropdown.Clear()
        self.dropdown.AppendItems(Core.scales[str(self.type_selector.GetItems()[self.type_selector.GetCurrentSelection()]).split()[0].lower()].keys())
        self.dropdown.Select(1)
    def BrowseFile(self, e):
        """Pick the input file; also set the base dir and enable Load."""
        dlg = wx.FileDialog(self, "Choose a file", os.getcwd(), "", "*.*", style=wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            self.input_file_controll.Clear()
            self.input_file = dlg.GetDirectory() + "/" + dlg.GetFilename()
            self.input_file_controll.WriteText(self.input_file)
            self.base_dir_controll.Clear()
            self.base_dir_controll.WriteText(dlg.GetDirectory())
        dlg.Destroy()
        self.load.Enable()
    def BrowseDir(self, e):
        """Pick the base (working) directory."""
        dlg = wx.DirDialog(self, "Choose a directory", defaultPath=self.base_dir_controll.GetValue(), style=wx.DD_DEFAULT_STYLE)
        if dlg.ShowModal() == wx.ID_OK:
            self.base_dir_controll.Clear()
            self.base_dir = dlg.GetPath()
            self.base_dir_controll.WriteText(self.base_dir)
        dlg.Destroy()
    def my_close(self, e):
        """Closing the first page terminates the whole application."""
        wx.Exit()
    def Load(self, e):
        """Load the selected input through the core, plot trace-type inputs
        and record the loaded kind in the input tree.

        Builds the kwargs for core.FirstStep from the form fields; for
        'features' inputs the trace-specific fields are left as None.
        """
        self.toolbar.EnableTool(887, True)
        if (str(self.type_selector.GetItems()[self.type_selector.GetCurrentSelection()]).split()[0].lower() == 'features'):
            try:
                kwargs = {"file" : str(self.base_dir_controll.GetValue()),
                          "input" : [str(self.input_file_controll.GetValue()),
                                     None,
                                     str(self.dropdown.GetItems()[self.dropdown.GetCurrentSelection()]),
                                     None,
                                     None,
                                     None,
                                     str(self.type_selector.GetItems()[self.type_selector.GetCurrentSelection()]).split()[0].lower()]}
            except ValueError as ve:
                wx.MessageBox('The input file or the type is missing. Please give them', 'Error', wx.OK | wx.ICON_ERROR)
                print ve
        else:
            try:
                kwargs = {"file" : str(self.base_dir_controll.GetValue()),
                          "input" : [str(self.input_file_controll.GetValue()),
                                     int(self.size_ctrl.GetValue()),
                                     str(self.dropdown.GetItems()[self.dropdown.GetCurrentSelection()]),
                                     int(self.length_ctrl.GetValue()),
                                     int(self.freq_ctrl.GetValue()),
                                     self.time_checker.IsChecked(),
                                     str(self.type_selector.GetItems()[self.type_selector.GetCurrentSelection()]).split()[0].lower()]}
            except ValueError as ve:
                # NOTE(review): kwargs stays undefined here, so the
                # FirstStep call below raises NameError — confirm.
                wx.MessageBox('Some of the cells are empty. Please fill out all of them!', 'Error', wx.OK | wx.ICON_ERROR)
                print ve
        self.core.FirstStep(kwargs)
        # Preview plot for trace-like inputs (voltage/current/other).
        if self.type_selector.GetSelection()==0 or self.type_selector.GetSelection()==1 or self.type_selector.GetSelection()==3:
            canvas = wx.Panel(self.panel, pos=(300, 270), size=(400, self.GetSize()[1]))
            figure = Figure(figsize=(5, 3))
            axes = figure.add_axes([0.15, 0.15, 0.8, 0.8])
            FigureCanvas(canvas,-1, figure)
            #self.panel.Fit()
            self.Show()
            #this part is not working yet
            f = self.core.option_handler.input_freq
            t = self.core.option_handler.input_length
            no_traces=self.core.option_handler.input_size
            # Tick positions in samples, labels in ms, 5 divisions.
            axes.set_xticks([n for n in range(0, int((t*no_traces)/(1000.0/f)), int((t*no_traces)/(1000.0/f)/5.0)) ])
            axes.set_xticklabels([str(n) for n in range(0, t*no_traces, (t*no_traces)/5)])
            axes.set_xlabel("time [ms]")
            _type="voltage" if self.type_selector.GetSelection()==0 else "current" if self.type_selector.GetSelection()==1 else "unkown"
            #unit="V" if self.type_selector.GetSelection()==0 else "A" if self.type_selector.GetSelection()==1 else ""
            axes.set_ylabel(_type+" [" + self.core.option_handler.input_scale + "]")
            canvas.Fit()
            canvas.Show()
            # Concatenate all traces into one sequence for plotting.
            exp_data = []
            for k in range(self.core.data_handler.number_of_traces()):
                exp_data.extend(self.core.data_handler.data.GetTrace(k))
            axes.plot(range(0, len(exp_data)), exp_data)
        # Record the loaded kind in the tree; loading a trace replaces any
        # previously loaded non-feature branches (index 2 = features).
        if self.type_selector.GetSelection()==0:
            for n in filter(lambda x: x[1]!=None and x[0]!=2,enumerate(self.loaded_input_types)):
                self.input_tree.Delete(n[1])
                self.loaded_input_types[n[0]]=None
            self.tvoltage=self.input_tree.AppendItem(self.troot,"Voltage trace")
            self.loaded_input_types[0]=self.tvoltage
            self.input_tree.AppendItem(self.tvoltage,self.input_file_controll.GetValue().split("/")[-1])
        elif self.type_selector.GetSelection()==1:
            for n in filter(lambda x: x[1]!=None and x[0]!=2,enumerate(self.loaded_input_types)):
                self.input_tree.Delete(n[1])
                self.loaded_input_types[n[0]]=None
            self.tcurrent=self.input_tree.AppendItem(self.troot,"Current trace")
            self.loaded_input_types[1]=self.tcurrent
            self.input_tree.AppendItem(self.tcurrent,self.input_file_controll.GetValue().split("/")[-1])
            '''
            elif self.type_selector.GetSelection()==3:
                try:
                    self.input_tree.Delete(self.tspike_t)
                except ValueError:
                    pass
                self.tspike_t=self.input_tree.AppendItem(self.troot,"Spike times")
                self.input_tree.AppendItem(self.tspike_t,self.input_file_controll.GetValue().split("/")[-1])
            '''
        elif self.type_selector.GetSelection()==2:
            for n in filter(lambda x: x[1]!=None and x[0]!=2,enumerate(self.loaded_input_types)):
                self.input_tree.Delete(n[1])
                self.loaded_input_types[n[0]]=None
            self.tfeatures=self.input_tree.AppendItem(self.troot,"Features")
            self.loaded_input_types[2]=self.tfeatures
            features_file=self.input_tree.AppendItem(self.tfeatures,self.input_file_controll.GetValue().split("/")[-1])
            self.add_data_dict(self.core.data_handler.features_dict, features_file)
        else:
            pass
    def add_data_dict(self,data_dict, root):
        """Append the (possibly nested) features dict to the tree under
        *root*, iteratively via an explicit stack.

        NOTE(review): nested dict entries are flattened under the same
        *root* rather than under their parent key's node — confirm whether
        that is intended.  Uses py2-only iteritems().
        """
        stack = data_dict.items()
        while stack:
            key, value = stack.pop()
            if isinstance(value, dict):
                self.input_tree.AppendItem(root, "{0} : ".format(key))
                stack.extend(value.iteritems())
            else:
                self.input_tree.AppendItem(root, " {0} : {1}".format(key, value))
class modelLayer(wx.Frame):
    """Second wizard page: load the model (Neuron hoc file or external
    simulator command), list its sections/mechanisms/parameters in a table
    and let the user mark which parameters to optimize."""
    def __init__(self, parent, ID, size, title, core, path):
        wx.Frame.__init__(self, parent, ID, title=title, size=size)
        self.Bind(wx.EVT_CLOSE, self.my_close)
        self.panel = wx.Panel(self)
        self.parent = parent
        self.core = core
        self.layer = None  # next wizard page, created lazily in Next()
        #this will need to be wrapped in a try statement later:
        import optimizer
        #print optimizer.__file__
        # NOTE(review): the 'path' argument is overwritten with the
        # optimizer package location (used to find toolbar icons).
        path = os.path.dirname(optimizer.__file__)
        self.path = path
        #print "path",self.path
        self.Center()
        self.ToolbarCreator()
        self.Design()
        #self.is_loaded=False
    def ToolbarCreator(self):
        """Create back/forward toolbar tools; forward (id 888) starts
        disabled until a model is loaded or a parameter is set."""
        self.toolbar = self.CreateToolBar()
        button_toolbar_bward = self.toolbar.AddLabelTool(wx.ID_ANY, 'PrevLayer', wx.Bitmap(self.path + "/2leftarrow.png"))
        button_toolbar_fward = self.toolbar.AddLabelTool(888, 'NextLayer', wx.Bitmap(self.path + "/2rightarrow.png"))
        self.toolbar.Realize()
        self.Bind(wx.EVT_TOOL, self.Next, button_toolbar_fward)
        self.Bind(wx.EVT_TOOL, self.Prev, button_toolbar_bward)
        self.toolbar.EnableTool(button_toolbar_fward.GetId(), False)
    def Design(self):
        """Build every widget and sizer of the page."""
        self.horizontal_box1 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box2 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box3 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box4 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box5 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box6 = wx.BoxSizer(wx.HORIZONTAL)
        self.horizontal_box7 = wx.BoxSizer(wx.HORIZONTAL)
        self.vertical_box1 = wx.BoxSizer(wx.VERTICAL)
        self.vertical_box2 = wx.BoxSizer(wx.VERTICAL)
        self.vertical_box3 = wx.BoxSizer(wx.VERTICAL)
        heading = wx.StaticText(self.panel, label='Model Options')
        heading.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        self.horizontal_box1.Add(heading)
        # wx.StaticLine(self.panel, pos=(1, 0), size=(self.Size[0],1))
        # wx.StaticLine(self.panel, pos=(400, 215), size=(self.Size[0],1))
        # wx.StaticLine(self.panel, pos=(1, 175), size=(self.Size[0]/2,1))
        # wx.StaticLine(self.panel, pos=(400, 0), size=(1,215),style=wx.LI_VERTICAL)
        descr1 = wx.StaticText(self.panel, label='Model File')
        self.horizontal_box2.Add(descr1)
        self.model_file_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(300, 30), name="Model File")
        self.model_file_ctrl.WriteText(self.core.option_handler.base_dir)
        self.browser1 = wx.Button(self.panel, label="Browse...")
        self.browser1.Bind(wx.EVT_BUTTON, self.BrowseFile)
        self.dd_type = wx.Choice(self.panel, wx.ID_ANY, size=(150, 30))
        self.dd_type.AppendItems(["Neuron", "external"])
        self.dd_type.Select(0)
        self.dd_type.Bind(wx.EVT_CHOICE, self.selectType)
        self.load = wx.Button(self.panel, label="Load")
        self.load.Bind(wx.EVT_BUTTON, self.Load)
        self.horizontal_box3.Add(self.model_file_ctrl, flag=wx.RIGHT, border=50)
        self.horizontal_box3.Add(self.browser1, flag=wx.RIGHT, border=15)
        self.horizontal_box3.Add(self.load, flag=wx.RIGHT, border=15)
        self.horizontal_box3.Add(self.dd_type, flag=wx.RIGHT, border=15)
        descr2 = wx.StaticText(self.panel, label='Special File Location')
        self.horizontal_box4.Add(descr2)
        descr3 = wx.StaticText(self.panel, label='Command to external simulator')
        self.horizontal_box4.Add(descr3, flag=wx.LEFT, border=315)
        self.spec_file_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(300, 30), name="Special File Location")
        self.spec_file_ctrl.WriteText(self.core.option_handler.base_dir)
        self.browser2 = wx.Button(self.panel, label="Browse...")
        self.browser2.Bind(wx.EVT_BUTTON, self.BrowseDir)
        # Only enabled in 'external' mode (see selectType).
        self.sim_path = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(250, 30), name="External simulator path")
        self.sim_path.Disable()
        self.horizontal_box5.Add(self.spec_file_ctrl, flag=wx.RIGHT, border=50)
        self.horizontal_box5.Add(self.browser2, flag=wx.RIGHT, border=15)
        self.horizontal_box5.Add(self.sim_path, flag=wx.RIGHT, border=15)
        self.model_file = self.model_file_ctrl.GetValue()
        self.spec_file = self.spec_file_ctrl.GetValue()
        self.vertical_box1.Add(self.horizontal_box1, flag=wx.BOTTOM, border=15)
        self.vertical_box1.Add(self.horizontal_box2, flag=wx.BOTTOM, border=5)
        self.vertical_box1.Add(self.horizontal_box3, flag=wx.BOTTOM, border=5)
        self.vertical_box1.Add(self.horizontal_box4, flag=wx.BOTTOM, border=5)
        self.vertical_box1.Add(self.horizontal_box5, flag=wx.BOTTOM, border=15)
        self.vertical_box1.Add(self.horizontal_box6, flag=wx.BOTTOM, border=5)
        descr4 = wx.StaticText(self.panel, label='Model & Parameter adjustment', pos=(10, 185))
        descr4.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        self.horizontal_box6.Add(descr4)
        # Report-mode table of section / segment / mechanism / parameter.
        self.model = wx.ListCtrl(self.panel, pos=(20, 220), size=(600, 300), style=wx.LC_REPORT | wx.BORDER_SUNKEN)
        self.model.InsertColumn(0, 'Section', width=125)
        self.model.InsertColumn(1, 'Segment', width=100)
        self.model.InsertColumn(2, 'Mechanism', width=150)
        self.model.InsertColumn(3, 'Parameter', width=200)
        self.horizontal_box7.Add(self.model)
        self.user_function = wx.Button(self.panel, label="Define Function", pos=(175, 25))
        self.user_function.Bind(wx.EVT_BUTTON, self.UF)
        self.setter = wx.Button(self.panel, label="Set", pos=(650, 385))
        self.setter.Bind(wx.EVT_BUTTON, self.Set)
        self.remover = wx.Button(self.panel, label="Remove", pos=(650, 445))
        self.remover.Bind(wx.EVT_BUTTON, self.Remove)
        self.remover.Disable()
        self.vertical_box2.Add(self.user_function, flag=wx.BOTTOM, border=50)
        self.vertical_box2.Add(self.setter, flag=wx.BOTTOM, border=10)
        self.vertical_box2.Add(self.remover, flag=wx.BOTTOM, border=15)
        self.horizontal_box7.Add(self.vertical_box2, flag=wx.LEFT, border=25)
        self.vertical_box1.Add(self.horizontal_box7)
        self.vertical_box3.Add(self.vertical_box1, flag=wx.ALL, border=10)
        self.panel.SetSizer(self.vertical_box3)
    #event functions
    def Next(self, e):
        """Advance to the next page; on first use build the run parameters
        and create either the fitness-function page (external simulator)
        or the stimuli page (Neuron)."""
        try:
            #self.core.SecondStep({"stim" : [str(self.dd_type.GetItems()[self.dd_type.GetCurrentSelection()]),float(self.pos_ctrl.GetValue()),str(self.dd_sec1.GetItems()[self.dd_sec1.GetCurrentSelection()])],"stimparam" : [self.stim_window.container,float(self.del_ctrl.GetValue()),float(self.dur_ctrl.GetValue())]})
            self.Hide()
            self.layer.Show()
        except ValueError:
            wx.MessageBox('Some of the cells are empty. Please fill out all of them!', 'Error', wx.OK | wx.ICON_ERROR)
            self.Show()
            self.layer.Hide()
            #layer.Destroy()
        except AttributeError:
            # self.layer is still None: first pass through this page.
            # self.run_controll_tstop = options[0]
            # self.run_controll_dt = options[1]
            # self.run_controll_record = options[2]
            # self.run_controll_sec = options[3]
            # self.run_controll_pos = options[4]
            # self.run_controll_vrest = options[5]
            if self.core.option_handler.type[-1]!="features":
                self.kwargs={"runparam" : [self.core.data_handler.data.t_length,
                                           self.core.data_handler.data.step,
                                           "record",
                                           "sec",
                                           "pos",
                                           "vrest"]
                             }
            else:
                # Feature-based input: derive run length from the stimulus
                # timing plus a 100 ms margin, fixed 0.05 ms step.
                self.kwargs={"runparam" : [self.core.data_handler.features_data["stim_delay"] + self.core.data_handler.features_data["stim_duration"]+100,
                                           0.05,
                                           "record",
                                           "sec",
                                           "pos",
                                           "vrest"]
                             }
            if self.dd_type.GetSelection() == 1:
                self.layer = ffunctionLayer(self, 4, self.Size, "Select Fitness Function", self.core, self.path, self.kwargs)
            else:
                self.layer = stimuliLayer(self, 2, self.Size, "Stimuli & Recording Settings", self.core, self.path)
            self.Hide()
            self.layer.Show()
        if self.core.option_handler.output_level=="1":
            self.core.Print()
    def Prev(self, e):
        """Go back to the input page."""
        self.Hide()
        self.parent.Show()
    def BrowseFile(self, e):
        """Pick the hoc model file; the special-file dir follows it."""
        dlg = wx.FileDialog(self, "Choose a file", self.core.option_handler.base_dir, "", "*.hoc*", style=wx.OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            self.model_file_ctrl.Clear()
            self.model_file = dlg.GetDirectory() + "/" + dlg.GetFilename()
            self.model_file_ctrl.WriteText(self.model_file)
            self.spec_file = dlg.GetDirectory()
            self.spec_file_ctrl.Clear()
            self.spec_file_ctrl.WriteText(self.spec_file)
        dlg.Destroy()
    def BrowseDir(self, e):
        """Pick the special-files (compiled mechanisms) directory."""
        dlg = wx.DirDialog(self, "Choose a directory", self.core.option_handler.base_dir, style=wx.DD_DEFAULT_STYLE)
        if dlg.ShowModal() == wx.ID_OK:
            self.spec_file_ctrl.Clear()
            self.spec_file = dlg.GetPath()
            self.spec_file_ctrl.WriteText(self.spec_file)
        dlg.Destroy()
    def Set(self, e):
        """Mark the selected table row as a parameter to optimize: register
        it with the core and highlight every matching row in red."""
        item_selected = self.model.GetFirstSelected()
        if item_selected != -1:
            self.remover.Enable()
            #try to use the table for selection
            section = str(self.model.GetItem(item_selected, 0).GetText())
            #
            segment = str(self.model.GetItem(item_selected, 1).GetText())
            chan = str(self.model.GetItem(item_selected, 2).GetText())
            morph=""
            par = str(self.model.GetItem(item_selected, 3).GetText())
            # Morphology rows carry the value in the 'Parameter' column;
            # channel/param are the literal string "None" in that case.
            if chan == "morphology":
                chan = "None"
                par= "None"
                morph = str(self.model.GetItem(item_selected, 3).GetText())
            kwargs = {"section" : section,
                      "segment" : segment,
                      "channel" : chan,
                      "morph" : morph,
                      "params" : par,
                      "values" : 0}
            searchValue = [kwargs["section"], kwargs["segment"], kwargs["params"], kwargs["morph"]]
            # NOTE(review): 'if True:' makes the green/SetModel branch below
            # dead code — presumably an old toggle left in place; confirm.
            if True:
                for idx in range(self.model.GetItemCount()):
                    item = self.model.GetItem(idx, 3)
                    item1 = self.model.GetItem(idx, 1)
                    item2 = self.model.GetItem(idx, 2)
                    item0 = self.model.GetItem(idx, 0)
                    if (item0.GetText() == searchValue[0] and item1.GetText() == searchValue[1])and(item.GetText() == searchValue[2] or item2.GetText() == searchValue[3]):
                        self.model.SetItemBackgroundColour(idx, "red")
                self.core.SetModel2(kwargs)
            else:
                for idx in range(self.model.GetItemCount()):
                    item = self.model.GetItem(idx, 3)
                    item1 = self.model.GetItem(idx, 1)
                    item2 = self.model.GetItem(idx, 2)
                    item0 = self.model.GetItem(idx, 0)
                    if (item0.GetText() == searchValue[0] and item1.GetText() == searchValue[1])and(item.GetText() == searchValue[2] or item2.GetText() == searchValue[3]):
                        self.model.SetItemBackgroundColour(idx, "green")
                self.core.SetModel(kwargs)
            self.toolbar.EnableTool(888, True)
    def Remove(self, e):
        """Unregister the selected parameter from the optimization set and
        clear the highlight of every matching row."""
        item_selected = self.model.GetFirstSelected()
        if item_selected != -1:
            #try to use the table for selection
            section = str(self.model.GetItem(item_selected, 0).GetText())
            #
            segment = str(self.model.GetItem(item_selected, 1).GetText())
            chan = str(self.model.GetItem(item_selected, 2).GetText())
            morph=""
            par = str(self.model.GetItem(item_selected, 3).GetText())
            if chan == "morphology":
                chan= "None"
                par= "None"
                morph = str(self.model.GetItem(item_selected, 3).GetText())
            kwargs = {"section" : section,
                      "segment" : segment,
                      "channel" : chan,
                      "morph" : morph,
                      "params" : par}
            # Rebuild the description string the option handler stored so
            # the matching entries can be located and removed.
            if kwargs["channel"] == "None":
                temp = kwargs["section"] + " " + kwargs["morph"]
            else:
                temp = kwargs["section"] + " " + kwargs["segment"] + " " + kwargs["channel"] + " " + kwargs["params"]
            self.core.option_handler.param_vals.pop(self.core.option_handler.GetObjTOOpt().index(temp))
            self.core.option_handler.adjusted_params.remove(temp)
            if len(self.core.option_handler.GetObjTOOpt()) == 0:
                self.remover.Disable()
            searchValue = [kwargs["section"], kwargs["segment"], kwargs["params"], kwargs["morph"]]
            for idx in range(self.model.GetItemCount()):
                item = self.model.GetItem(idx, 3)
                item1 = self.model.GetItem(idx, 1)
                item2 = self.model.GetItem(idx, 2)
                item0 = self.model.GetItem(idx, 0)
                if (item0.GetText() == searchValue[0] and item1.GetText() == searchValue[1])and(item.GetText() == searchValue[2] or item2.GetText() == searchValue[3]):
                    self.model.SetItemBackgroundColour(idx, "white")
    def Load(self, e):
        """Load the model through the core and fill the parameter table.

        For a Neuron model the nested structure returned by
        GetParameters() — rows of (section, [(segment, mechanism,
        [params...]), ...]) as exercised below — is flattened into table
        rows and also dumped to 'model.txt'.  For an external simulator
        (temp is None) only the forward tool is enabled.
        """
        self.model.DeleteAllItems()
        try:
            self.core.LoadModel({"model" : [self.model_file, self.spec_file],
                                 "simulator" : self.dd_type.GetItems()[self.dd_type.GetSelection()],
                                 "sim_command" : self.sim_path.GetValue()})
            temp = self.core.model_handler.GetParameters()
            #print temp
            if temp!=None:
                # NOTE(review): debug dump; handle is never closed.
                out = open("model.txt", 'w')
                for i in temp:
                    out.write(str(i))
                    out.write("\n")
                index = 0
                for row in temp:
                    #self.model.InsertStringItem(index,row[0])
                    #print row[1]
                    for k in (row[1]):
                        if k != []:
                            #.model.InsertStringItem(index, row[0])
                            #self.model.SetStringItem(index, 2, k[0])
                            for s in (k[2]):
                                self.model.InsertStringItem(index, row[0])
                                self.model.SetStringItem(index, 1, str(k[0]))
                                self.model.SetStringItem(index, 2, k[1])
                                self.model.SetStringItem(index, 3, s)
                                index += 1
            else:
                self.toolbar.EnableTool(888, True)
        except OSError:
            wx.MessageBox('Path error! Please use absolute path!', 'Error', wx.OK | wx.ICON_ERROR)
    def selectType(self, e):
        """Toggle the widgets between Neuron mode (selection 0) and
        external-simulator mode (selection 1)."""
        #edit number according the number of options in the construction of dd_type
        if self.dd_type.GetSelection() == 1:
            self.spec_file_ctrl.Disable()
            self.model_file_ctrl.Disable()
            #self.load.Disable()
            self.browser1.Disable()
            self.browser2.Disable()
            self.user_function.Disable()
            self.setter.Disable()
            self.sim_path.Enable()
            self.load.SetLabel("Set")
        else:
            self.spec_file_ctrl.Enable()
            self.model_file_ctrl.Enable()
            #self.load.Enable()
            self.browser1.Enable()
            self.browser2.Enable()
            self.user_function.Enable()
            self.setter.Enable()
            self.sim_path.Disable()
            self.load.SetLabel("Load")
    def UF(self, e):
        """Open the user-defined-function dialog."""
        dlg = MyDialog(self, self.parent, size=(600, 450), title="User Defined Function")
        dlg.ShowModal()
    def my_close(self, e):
        """Closing this page terminates the whole application."""
        wx.Exit()
class stimuliLayer(wx.Frame):
def __init__(self, parent, ID, size, title, core, path):
wx.Frame.__init__(self, parent, ID, title=title, size=size)
self.Bind(wx.EVT_CLOSE, self.my_close)
self.core = core
self.panel = wx.Panel(self)
self.parent = parent
#this will need to be wrapped in a try statement later:
import optimizer
#print optimizer.__file__
path = os.path.dirname(optimizer.__file__)
self.path = path
#print "path",self.path
self.Center()
self.ToolbarCreator()
self.Design()
self.seed = None
self.toolbar.EnableTool(wx.ID_FORWARD, True)
self.layer = None
def ToolbarCreator(self):
self.toolbar = self.CreateToolBar()
button_toolbar_bward = self.toolbar.AddLabelTool(wx.ID_ANY, 'PrevLayer', wx.Bitmap(self.path + "/2leftarrow.png"))
button_toolbar_fward = self.toolbar.AddLabelTool(wx.ID_FORWARD, 'NextLayer', wx.Bitmap(self.path + "/2rightarrow.png"))
self.toolbar.Realize()
self.Bind(wx.EVT_TOOL, self.Next, button_toolbar_fward)
self.Bind(wx.EVT_TOOL, self.Prev, button_toolbar_bward)
self.toolbar.EnableTool(wx.ID_FORWARD, False)
    def Design(self):
        """Lay out the stimulation-settings page as two columns of controls.

        Column 1: stimulus definition (protocol, stimulus type, amplitude /
        waveform button, delay, duration, target section and position).
        Column 2: recording target (parameter, section, position) and the
        run-control fields (initial voltage, tstop, dt).
        """
        self.column1 = wx.BoxSizer(wx.VERTICAL)
        self.column2 = wx.BoxSizer(wx.VERTICAL)
        self.final_sizer = wx.BoxSizer(wx.HORIZONTAL)
        descr3 = wx.StaticText(self.panel, label='Stimulation Settings')
        descr3.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        self.column1.Add(descr3)
        descr5 = wx.StaticText(self.panel, label='Stimulation protocol')
        self.dd_type = wx.Choice(self.panel, wx.ID_ANY, size=(150, 30))
        self.dd_type.AppendItems(["IClamp", "VClamp"])
        self.dd_type.Select(0)
        self.dd_type.Bind(wx.EVT_CHOICE, self.protocolSelection)
        self.column1.Add(descr5, flag=wx.UP, border=15)
        self.column1.Add(self.dd_type, flag=wx.UP, border=5)
        self.stimuli_type=wx.Choice(self.panel,wx.ID_ANY,size=(150,30))
        self.stimuli_type.AppendItems(["Step Protocol", "Custom Waveform"])
        self.stimuli_type.Bind(wx.EVT_CHOICE, self.typeChange)
        self.stimuli_type.Select(0)
        descr7 = wx.StaticText(self.panel, label='Stimulus Type')
        self.column1.Add(descr7, flag=wx.UP, border=15)
        self.column1.Add(self.stimuli_type, flag=wx.UP, border=5)
        #remove this label
        #descr7 = wx.StaticText(self.panel, label='Amplitude')
        # The amplitude button and the (initially hidden) waveform-loader
        # button share one row; typeChange() swaps which one is visible.
        self.stimuli = wx.Button(self.panel, label="Amplitude(s)")
        self.stimuli.Bind(wx.EVT_BUTTON, self.Stimuli)
        tmp_sizer2=wx.BoxSizer(wx.HORIZONTAL)
        tmp_sizer2.Add(self.stimuli)
        self.stimuli2 = wx.Button(self.panel, label="Load Waveform")
        self.stimuli2.Bind(wx.EVT_BUTTON, self.Stimuli2)
        self.stimuli2.Disable()
        self.stimuli2.Hide()
        tmp_sizer2.Add(self.stimuli2)
        self.column1.Add(tmp_sizer2, flag=wx.UP, border=15)
        descr8 = wx.StaticText(self.panel, label='Delay (ms)')
        self.del_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(100, 30), name="Value")
        self.column1.Add(descr8, flag=wx.UP, border=15)
        self.column1.Add(self.del_ctrl, flag=wx.UP, border=5)
        # In "features" mode delay/duration are pre-filled from the data set.
        if self.core.option_handler.type[-1]=="features":
            self.del_ctrl.SetValue(str(self.core.data_handler.features_data["stim_delay"]))
        descr9 = wx.StaticText(self.panel, label='Duration (ms)')
        self.dur_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(100, 30), name="Value")
        self.column1.Add(descr9, flag=wx.UP, border=15)
        self.column1.Add(self.dur_ctrl, flag=wx.UP, border=5)
        if self.core.option_handler.type[-1]=="features":
            self.dur_ctrl.SetValue(str(self.core.data_handler.features_data["stim_duration"]))
        descr6 = wx.StaticText(self.panel, label='Section')
        self.dd_sec1 = wx.Choice(self.panel, wx.ID_ANY, size=(150, 30))
        self.dd_sec1.Bind(wx.EVT_CHOICE, self.secChange)
        tmp=self.core.ReturnSections()
        self.dd_sec1.AppendItems(tmp)
        # Default the stimulated section to the soma when one exists
        # (either capitalisation); otherwise fall back to the first entry.
        try:
            self.dd_sec1.Select(tmp.index("Soma"))
        except ValueError:
            try:
                self.dd_sec1.Select(tmp.index("soma"))
            except ValueError:
                self.dd_sec1.Select(0)
        self.column1.Add(descr6, flag=wx.UP, border=15)
        self.column1.Add(self.dd_sec1, flag=wx.UP, border=5)
        descr10 = wx.StaticText(self.panel, label='Position inside the section')
        self.pos_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(100, 30), name="Value")
        self.pos_ctrl.SetValue("0.5")
        self.pos_ctrl.Bind(wx.EVT_TEXT, self.posChange)
        self.column1.Add(descr10, flag=wx.UP, border=15)
        self.column1.Add(self.pos_ctrl, flag=wx.UP, border=5)
        self.final_sizer.Add(self.column1, flag=wx.LEFT, border=15)
        # --- column 2: recording target -------------------------------
        descr7 = wx.StaticText(self.panel, label='Parameter to record')
        self.dd_record = wx.Choice(self.panel, wx.ID_ANY, size=(100, 30))
        self.dd_record.AppendItems(["v", "i"])
        self.dd_record.Select(0)
        self.column2.Add(descr7, flag=wx.UP, border=35)
        self.column2.Add(self.dd_record, flag=wx.UP, border=5)
        descr6 = wx.StaticText(self.panel, label='Position')
        self.pos_ctrl2 = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(100, 30), name="pos")
        self.pos_ctrl2.SetValue("0.5")
        descr8 = wx.StaticText(self.panel, label='Section')
        self.dd_sec = wx.Choice(self.panel, wx.ID_ANY, size=(100, 30))
        self.dd_sec.AppendItems(tmp)
        try:
            self.dd_sec.Select(tmp.index("Soma"))
        except ValueError:
            try:
                self.dd_sec.Select(tmp.index("soma"))
            except ValueError:
                self.dd_sec.Select(0)
        self.column2.Add(descr8, flag=wx.UP, border=15)
        self.column2.Add(self.dd_sec, flag=wx.UP, border=5)
        self.column2.Add(descr6, flag=wx.UP, border=15)
        self.column2.Add(self.pos_ctrl2, flag=wx.UP, border=5)
        # --- column 2: run control ------------------------------------
        descr1 = wx.StaticText(self.panel, label='Run Control')
        descr1.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        self.column2.Add(descr1,flag=wx.UP, border=25)
        descr3 = wx.StaticText(self.panel, label='Initial Voltage (mV)')
        self.vrest_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(100, 30), name="vrest")
        self.vrest_ctrl.SetValue("-65")
        self.column2.Add(descr3, flag=wx.UP, border=15)
        self.column2.Add(self.vrest_ctrl, flag=wx.UP, border=5)
        descr4 = wx.StaticText(self.panel, label='tstop (ms)')
        self.tstop_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(100, 30), name="tstop")
        # Default tstop: trace length in trace mode; in features mode use
        # stim delay + duration plus a 100 ms tail.
        if self.core.option_handler.type[-1]!="features":
            self.tstop_ctrl.SetValue(str(self.core.data_handler.data.t_length))
        else:
            self.tstop_ctrl.SetValue(str(self.core.data_handler.features_data["stim_delay"] + self.core.data_handler.features_data["stim_duration"]+100))
        self.column2.Add(descr4, flag=wx.UP, border=15)
        self.column2.Add(self.tstop_ctrl, flag=wx.UP, border=5)
        descr5 = wx.StaticText(self.panel, label='dt')
        self.dt_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(100, 30), name="dt")
        self.dt_ctrl.SetValue(str(0.05))
        self.column2.Add(descr5, flag=wx.UP, border=15)
        self.column2.Add(self.dt_ctrl, flag=wx.UP, border=5)
        self.final_sizer.Add(self.column2, flag=wx.LEFT, border=75)
        self.panel.SetSizer(self.final_sizer)
def typeChange(self,e):
if self.stimuli_type.GetSelection()==0:#step prot
self.stimuli.Enable()
self.stimuli2.Disable()
self.del_ctrl.Enable()
self.dur_ctrl.Enable()
self.stimuli2.Hide()
self.stimuli.Show()
self.final_sizer.Layout()
#hide wave button
if self.stimuli_type.GetSelection()==1:#wave prot
self.stimuli2.Enable()
self.stimuli.Disable()
self.del_ctrl.Disable()
self.del_ctrl.SetValue("0")
self.dur_ctrl.Disable()
self.dur_ctrl.SetValue("1e9")
self.stimuli.Hide()
self.stimuli2.Show()
self.final_sizer.Layout()
#hide step button
def protocolSelection(self,e):
if self.dd_type.GetSelection()==1:
self.dd_sec.Disable()
self.pos_ctrl2.Disable()
self.dd_record.SetSelection(1)
else:
self.dd_sec.Enable()
self.pos_ctrl2.Enable()
def secChange(self,e):
if self.dd_type.GetSelection()==1:
self.dd_sec.SetSelection(self.dd_sec1.GetSelection())
def posChange(self,e):
if self.dd_type.GetSelection()==1:
self.pos_ctrl2.SetValue(self.pos_ctrl.GetValue())
    def Stimuli(self, e):
        # Open the amplitude-selection dialog; the chosen amplitudes end up
        # in self.stim_window.container, which Next() reads later.
        self.stim_window = stimuliwindow(self, self.core)
def Stimuli2(self,e):
self.stim_window = stimuliwindow2(self)
dlg = wx.FileDialog(self, "Choose a file", os.getcwd(), "", "*.*", style=wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
input_file = dlg.GetDirectory() + "/" + dlg.GetFilename()
dlg.Destroy()
self.stim_window.container.append(input_file)
# self.del_ctrl.SetValue("0")
# self.del_ctrl.Disable()
# self.dur_ctrl.SetValue("0")
# self.dur_ctrl.Disable()
    def Next(self, e):
        """Collect the stimulation and run settings, then advance to the
        fitness-function layer.

        Passes the stimulus description to ``core.SecondStep`` and stores
        the run parameters in ``self.kwargs`` for the following layers.
        AttributeError here means the amplitude dialog was never opened
        (no ``self.stim_window``); ValueError means a numeric field failed
        to parse.
        """
        try:
            self.core.SecondStep({"stim" : [str(self.dd_type.GetItems()[self.dd_type.GetCurrentSelection()]), float(self.pos_ctrl.GetValue()), str(self.dd_sec1.GetItems()[self.dd_sec1.GetCurrentSelection()])],
                                  "stimparam" : [self.stim_window.container, float(self.del_ctrl.GetValue()), float(self.dur_ctrl.GetValue())]})
            # runparam order: tstop, dt, recorded parameter, recording
            # section, recording position, initial voltage.
            self.kwargs = {"runparam":[float(self.tstop_ctrl.GetValue()),
                                       float(self.dt_ctrl.GetValue()),
                                       str(self.dd_record.GetItems()[self.dd_record.GetCurrentSelection()]),
                                       str(self.dd_sec.GetItems()[self.dd_sec.GetCurrentSelection()]),
                                       float(self.pos_ctrl2.GetValue()),
                                       float(self.vrest_ctrl.GetValue())]}
            if self.core.option_handler.output_level=="1":
                print {"stim" : [str(self.dd_type.GetItems()[self.dd_type.GetCurrentSelection()]), float(self.pos_ctrl.GetValue()), str(self.dd_sec1.GetItems()[self.dd_sec1.GetCurrentSelection()])],
                       "stimparam" : [self.stim_window.container, float(self.del_ctrl.GetValue()), float(self.dur_ctrl.GetValue())]}
                print self.kwargs
        except AttributeError:
            wx.MessageBox("No stimulus amplitude was selected!","Error", wx.OK | wx.ICON_ERROR)
        except ValueError:
            wx.MessageBox('Some of the cells are empty. Please fill out all of them!', 'Error', wx.OK | wx.ICON_ERROR)
        # Reuse the next layer if it already exists; otherwise create it.
        # AttributeError is raised when self.layer is still None.
        try:
            #self.layer.Design()
            self.layer.Show()
            self.layer.kwargs=self.kwargs
        except AttributeError:
            #self.layer = algorithmLayer(self, 4, self.Size, "Select Algorithm", self.core, self.path, self.kwargs)
            self.layer = ffunctionLayer(self, 4, self.Size, "Fitness Function Selection", self.core, self.path, self.kwargs)
            #self.layer.Design()
            self.layer.Show()
        self.Hide()
    def Prev(self, e):
        # Go back one wizard step: hide this layer and reveal its parent.
        self.Hide()
        self.parent.Show()
    def my_close(self, e):
        # Closing any wizard window terminates the whole application.
        wx.Exit()
#optimizer settings
#fitness function settings
#might need new interface
class ffunctionLayer(wx.Frame):
    """Wizard page for selecting fitness functions, their weights and
    per-function parameters.

    In "features" mode the candidate functions and weights come pre-filled
    from ``core.data_handler.features_data``; otherwise the user picks from
    ``core.ffun_calc_list`` and supplies weights manually.  Next() packs the
    selection into ``self.kwargs`` and opens the algorithm layer.
    """
    def __init__(self, parent, ID, size, title, core, path, kwargs):
        wx.Frame.__init__(self, parent, ID, title=title, size=size)
        self.Bind(wx.EVT_CLOSE, self.my_close)
        self.core = core
        self.panel = wx.Panel(self)
        self.parent = parent
        #this will need to be wrapped in a try statement later:
        import optimizer
        #print optimizer.__file__
        # Toolbar icons are loaded from the optimizer package directory;
        # the 'path' argument is deliberately overridden here.
        path = os.path.dirname(optimizer.__file__)
        self.path = path
        self.Center()
        self.ToolbarCreator()
        self.Design()
        self.seed = None
        self.kwargs = kwargs
        #print "ffun",kwargs
        self.layer = None
    def ToolbarCreator(self):
        """Build the back/forward toolbar; forward is enabled from the start."""
        self.toolbar = self.CreateToolBar()
        button_toolbar_bward = self.toolbar.AddLabelTool(wx.ID_ANY, 'PrevLayer', wx.Bitmap(self.path + "/2leftarrow.png"))
        button_toolbar_fward = self.toolbar.AddLabelTool(wx.ID_FORWARD, 'NextLayer', wx.Bitmap(self.path + "/2rightarrow.png"))
        self.toolbar.Realize()
        self.Bind(wx.EVT_TOOL, self.Next, button_toolbar_fward)
        self.Bind(wx.EVT_TOOL, self.Prev, button_toolbar_bward)
        self.toolbar.EnableTool(wx.ID_FORWARD, True)
    def Design(self):
        """Lay out the check-list of fitness functions (column 1) next to
        their weight fields and extra parameter fields (column 2)."""
        self.column1 = wx.BoxSizer(wx.VERTICAL)
        self.column2 = wx.BoxSizer(wx.VERTICAL)
        self.row0 = wx.BoxSizer(wx.HORIZONTAL)
        descr0 = wx.StaticText(self.panel, label='Fitness Functions')
        descr0.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        descr1 = wx.StaticText(self.panel, label='Weights')
        self.row0.Add(descr1)
        #descr2 = wx.StaticText(self.panel, label='Normalized Weights')
        #self.row0.Add(descr2, flag=wx.LEFT, border=10)
        descr3 = wx.StaticText(self.panel, label='Function Parameters')
        descr3.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        self.row0.Add(descr3, flag=wx.LEFT, border=10)
        self.column2.Add(self.row0, flag=wx.BOTTOM, border=8)
        if self.core.option_handler.type[-1]!="features":
            self.my_list = copy(self.core.ffun_calc_list)
            #self.my_list=["ffun1","ffun","ffun3"]
        else:
            # Python 2: dict.keys() is a list; the first three keys are
            # skipped (presumably stim metadata -- verify against the
            # features file layout).
            self.my_list=self.core.data_handler.features_data.keys()[3:]
        # NOTE(review): [[]] * n aliases one empty list across all slots;
        # harmless here because slots are only ever replaced, not mutated.
        self.param_list = [[]] * len(self.my_list)
        if self.core.option_handler.type[-1]!="features":
            self.param_list[2] = [("Spike Detection Thres. (mv)",0.0)]
            self.param_list[1] = [("Spike Detection Thres. (mv)",0.0), ("Spike Window (ms)",1.0)]
        else:
            self.param_list[0] = [("Spike Detection Thres. (mv)",0.0)]
        self.param_list_container = []
        self.weights = []
        #self.norm_weights = []
        tmp = []
        #for f in self.param_list:
        # One row per fitness function: weight field plus its parameter fields.
        for i, f in enumerate(self.param_list):
            self.row1 = wx.BoxSizer(wx.HORIZONTAL)
            tmp_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(50, 25))
            if self.core.option_handler.type[-1]=="features":
                tmp_ctrl.SetValue(str(self.core.data_handler.features_data[self.my_list[i]]["weight"]))
                tmp_ctrl.Disable()
            self.weights.append(tmp_ctrl)
            self.row1.Add(tmp_ctrl)
#            tmp_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(50, 25))
#            tmp_ctrl.Disable()
#            self.norm_weights.append(tmp_ctrl)
#            self.row1.Add(tmp_ctrl, flag=wx.LEFT, border=15)
            for p in f:
                tmp_ctrl = wx.TextCtrl(self.panel, id=wx.ID_ANY, size=(50, 25))
                tmp_ctrl.SetValue(str(p[1]))
                if self.core.option_handler.type[-1]!="features":
                    tmp_ctrl.Disable()
                tmp.append(tmp_ctrl)
                descr4 = wx.StaticText(self.panel, label=p[0])
                self.row1.Add(descr4, flag=wx.LEFT, border=20)
                self.row1.Add(tmp_ctrl, flag=wx.LEFT, border=2)
            self.param_list_container.append(tmp)
            self.column2.Add(self.row1, flag=wx.UP, border=1)
            tmp = []
        self.listbox = wx.CheckListBox(self.panel, wx.ID_ANY,size=(250,30*len(self.my_list)),choices=self.my_list)
        #lb_size=self.listbox.GetSize()
        #self.listbox.SetSize((200,lb_size[1]))
        self.listbox.SetFont(wx.Font(12,wx.FONTFAMILY_DEFAULT,wx.FONTSTYLE_NORMAL,wx.FONTWEIGHT_BOLD))
        self.listbox.Bind(wx.EVT_CHECKLISTBOX, self.FunSelect)
        self.listbox.GetChecked()
        self.column1.Add(descr0)
        self.column1.Add(self.listbox, flag=wx.ALL, border=10)
        self.normalize = wx.Button(self.panel, label="Normalize")
        self.normalize.Bind(wx.EVT_BUTTON, self.Normalize)
        self.row3 = wx.BoxSizer(wx.HORIZONTAL)
        self.row3.Add(self.normalize, flag=wx.LEFT, border=10)
        self.column2.Add(self.row3, flag=wx.UP, border=50)
        self.final_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.final_sizer.Add(self.column1, flag=wx.LEFT, border=10)
        self.final_sizer.Add(self.column2, flag=wx.LEFT, border=10)
        self.SetSizer(self.final_sizer)
    def Normalize(self, e):
        """Rescale the enabled weight fields in place so they sum to 1.

        Unparsable fields are skipped in both passes.  NOTE: relies on
        Python 2 ``filter`` returning a list (it is iterated twice).
        """
        is_enabled = filter(lambda x: x[1].IsEnabled(), enumerate(self.weights))
        tmp = []
        for n in is_enabled:
            try:
                tmp.append(float(n[1].GetValue()))
            except ValueError:
                continue
        sum_o_weights = sum(tmp)
        for n in is_enabled:
            try:
                self.weights[n[0]].SetValue(str(float(n[1].GetValue()) / float(sum_o_weights)))
            except ValueError:
                continue
    def FunSelect(self, e):
        """Enable the weight/parameter fields of checked functions and
        disable those of unchecked ones (trace mode only for parameters)."""
        for i, n in enumerate(self.listbox.GetItems()):
            if i in self.listbox.Checked:
                try:
                    if self.core.option_handler.type[-1]!="features":
                        for p in self.param_list_container[i]:
                            p.Enable()
                        self.weights[i].Enable()
                    #self.norm_weights[i].Enable()
                except IndexError:
                    break
            else:
                try:
                    if self.core.option_handler.type[-1]!="features":
                        for p in self.param_list_container[i]:
                            p.Disable()
                        self.weights[i].Disable()
                    #self.norm_weights[i].Disable()
                except IndexError:
                    break
    def Next(self, e):
        """Pack the selected functions, weights and parameters into
        self.kwargs ("feat" and "weights" keys) and open the algorithm layer.

        If the weights do not sum to ~1 the user is warned and may abort.
        """
        tmp_dict = {}
        for fun, fun_name in zip(self.param_list_container, self.param_list):
            for f, f_n in zip(fun, fun_name):
                if f.IsEnabled():
                    tmp_dict.update({f_n[0] : float(f.GetValue())})
        #print tmp_dict
        if self.core.option_handler.type[-1]!="features":
            self.kwargs.update({"feat":
                                [tmp_dict,
                                 [self.core.ffun_calc_list[fun[0]] for fun in filter(lambda x: x[1].IsEnabled(), enumerate(self.weights))]]
                                })
            self.kwargs.update({"weights" : [float(w.GetValue()) for w in filter(lambda x: x.IsEnabled(), self.weights)]})
        else:
            #self.my_list=self.core.data_handler.features_data.keys()[3:-1]
            self.kwargs.update({"feat":
                                [tmp_dict,
                                 [self.my_list[fun[0]] for fun in filter(lambda x: x[1].IsEnabled(), enumerate(self.weights))]]
                                })
            self.kwargs.update({"weights" : [float(w.GetValue()) for w in filter(lambda x: x.IsEnabled(), self.weights)]})
        # Warn (but allow override) when weights are not normalized.
        if not(0.99<sum(self.kwargs["weights"])<=1.01):
            dlg = wx.MessageDialog(self, "You did not normalize your weights!\nDo you want to continue?",'Warning', wx.YES_NO | wx.ICON_QUESTION)
            b_id=dlg.ShowModal()
            if b_id== wx.ID_YES:
                dlg.Destroy()
                #print "yes"
            if b_id == wx.ID_NO:
                #print "no"
                dlg.Destroy()
                return
        # Reuse the next layer if it exists (self.layer is None initially).
        try:
            self.layer.Show()
            self.layer.Design()
            self.layer.kwargs=self.kwargs
        except AttributeError:
            #self.layer = resultsLayer(self, 4, self.Size, "Results", self.core, self.path)
            self.layer = algorithmLayer(self, 4, self.Size, "Select Algorithm", self.core, self.path, self.kwargs)
            self.layer.Show()
            self.layer.Design()
        self.Hide()
    def Prev(self, e):
        # Return to the parent layer; this frame is destroyed, not cached.
#        self.Hide()
#        tmp=self.parent
#        grandparent=self.parent.parent
#        grandparent.layer=algorithmLayer(grandparent, 4, self.Size, "Select Algorithm", self.core, self.path, self.kwargs)
#        self.parent=grandparent.layer
#        self.parent.Design()
#        tmp.Destroy()
#        self.parent.Show()
        self.parent.Show()
        self.Hide()
        self.Destroy()
    def my_close(self, e):
        # Closing the window terminates the whole application.
        wx.Exit()
class algorithmLayer(wx.Frame):
def __init__(self,parent,ID,size,title,core,path,kwargs):
wx.Frame.__init__(self,parent,ID,title=title,size=size)
self.Bind(wx.EVT_CLOSE, self.my_close)
self.core=core
self.panel=wx.Panel(self)
#self.sub_panel=wx.Panel(self.panel,size=(300,300))
self.parent=parent
self.core=core
self.path=path
self.Center()
self.ToolbarCreator()
self.Design()
self.seed=None
self.num_of_ctrl=None
self.kwargs=kwargs
#print "algo",kwargs
self.layer=None
def ToolbarCreator(self):
self.toolbar=self.CreateToolBar()
button_toolbar_bward=self.toolbar.AddLabelTool(wx.ID_ANY,'PrevLayer',wx.Bitmap(self.path+"/2leftarrow.png"))
button_toolbar_fward=self.toolbar.AddLabelTool(wx.ID_FORWARD,'NextLayer',wx.Bitmap(self.path+"/2rightarrow.png"))
self.toolbar.Realize()
self.Bind(wx.EVT_TOOL, self.Next, button_toolbar_fward)
self.Bind(wx.EVT_TOOL, self.Prev, button_toolbar_bward)
self.toolbar.EnableTool(wx.ID_FORWARD,True)
def Design(self):
self.column1=wx.BoxSizer(wx.VERTICAL)
self.column2=wx.BoxSizer(wx.VERTICAL)
#self.column3=wx.BoxSizer(wx.VERTICAL)
self.sub_row=wx.BoxSizer(wx.HORIZONTAL)
self.sub_row2=wx.BoxSizer(wx.HORIZONTAL)
self.final_sizer=wx.BoxSizer(wx.HORIZONTAL)
descr2 = wx.StaticText(self.panel, label='Optimizer Settings')
descr2.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.column1.Add(descr2)
descr18 = wx.StaticText(self.panel, label='Random seed:')
self.seed_ctrl=wx.TextCtrl(self.panel,id=wx.ID_ANY,size=(100,30),name="seed")
self.seed_ctrl.SetValue("1234")
self.column1.Add(descr18,flag=wx.UP,border=15)
self.column1.Add(self.seed_ctrl,flag=wx.UP,border=5)
descr22 = wx.StaticText(self.panel, label='Algorithm:')
self.dd_evo=wx.Choice(self.panel,wx.ID_ANY,size=(175,30))
self.dd_evo.Append("Classical EO")
self.dd_evo.Append("Simulated Annealing")
self.dd_evo.Append("Particle Swarm")
self.dd_evo.Append("Basinhopping")
self.dd_evo.Append("Nelder-Mead")
self.dd_evo.Append("L-BFGS-B")
self.dd_evo.Append("Differential Evolution")
self.dd_evo.Append("Random Search")
#self.dd_evo.Select(0)
self.num_of_ctrl=3
self.dd_evo.Bind(wx.EVT_CHOICE, self.Algo_Select)
self.column1.Add(descr22,flag=wx.UP,border=15)
self.column1.Add(self.dd_evo,flag=wx.UP,border=5)
self.run = wx.Button(self.panel, label="Run")
self.run.Disable()
self.run.Bind(wx.EVT_BUTTON, self.Run)
self.sub_row2.Add(self.run)
self.boundaries=wx.Button(self.panel,label="Boundaries")
self.boundaries.Bind(wx.EVT_BUTTON, self.Boundaries)
self.sub_row.Add(self.boundaries)
self.starting_points=wx.Button(self.panel,label="Starting Points")
self.starting_points.Bind(wx.EVT_BUTTON, self.Seed)
self.sub_row.Add(self.starting_points,flag=wx.LEFT,border=15)
self.column1.Add(self.sub_row,flag=wx.UP,border=15)
self.column1.Add(self.sub_row2,flag=wx.UP,border=15)
descr24 = wx.StaticText(self.panel, label='Number of parameters to optimize:'+str(len(self.core.option_handler.GetObjTOOpt())))
self.column1.Add(descr24,flag=wx.UP,border=15)
#self.column2.Add(self.sub_panel,flag=wx.EXPAND)
self.final_sizer.Add(self.column1,flag=wx.LEFT,border=5)
self.SetSizer(self.final_sizer)
self.final_sizer.Layout()
def Algo_Select(self,e):
descr19 = ('Size of Population:',100)
descr20 = ('Number of Generations:',100)
descr21 = ('Mutation Rate:',0.25)
descr22 = ('Cooling Rate:',0.5)
descr23 = ('Mean of Gaussian:',0)
descr24 = ('Std. Deviation of Gaussian:',1)
descr26 = ('Initial Temperature:',1.2)
descr28 = ('Accuracy:',1e-06)
descr25 = ('Update Frequency:',50)
descr27 = ('Temperature:',0.1)
descr29 = ('Step Size:', 0.1)
descr32 = ('Number of Iterations:',100)
descr33 = ('Number of Repetition:',100)
descr30 = ('Error Tolerance for x:',0.0001)
descr31 = ('Error Tolerance for f:',0.0001)
descr34 = ('Inertia:', 0.5)
descr35 = ('Cognitive Rate:', 2.1)
descr36 = ("Social Rate:",2.1)
descr37 = ('Neighborhood Size:', 5)
descr38 = ('Topology:')
descr39 = ('Crossover Rate:',1)
while(self.num_of_ctrl>0):
self.column2.Hide(self.num_of_ctrl-1)
self.column2.Remove(self.num_of_ctrl-1)
self.num_of_ctrl-=1
self.SetSizer(self.final_sizer)
self.final_sizer.Layout()
self.final_sizer.Hide(1)
self.final_sizer.Remove(1)
self.SetSizer(self.final_sizer)
self.final_sizer.Layout()
self.column2=wx.BoxSizer(wx.VERTICAL)
selected_algo=self.dd_evo.GetItems()[self.dd_evo.GetSelection()]
if selected_algo=="Classical EO":
alg=[descr19,descr20,descr21]
elif selected_algo=="Simulated Annealing":
alg=[descr20,descr21,descr22,descr23,descr24,descr26]
elif selected_algo=="Particle Swarm":
alg=[descr19,descr20,descr34,descr35,descr36]
elif selected_algo=="Basinhopping":
alg=[descr32,descr33,descr25,descr27,descr29]
elif selected_algo=="Nelder-Mead":
alg=[descr20,descr30,descr31]
elif selected_algo=="L-BFGS-B":
alg=[descr20,descr28]
elif selected_algo=="Differential Evolution":
alg=[descr19,descr20,descr21,descr39]
elif selected_algo=="Random Search":
alg=[descr19]
self.algo_param=[]
for i in range(len(alg)):
if alg[i]==descr38:
tmp=wx.Choice(self.panel,wx.ID_ANY,size=(100,30),choices=["Ring","Star"])
else:
tmp=wx.TextCtrl(self.panel,id=wx.ID_ANY,size=(100,30))
tmp.SetValue(str(alg[i][1]))
self.algo_param.append((tmp,alg[i][0]))
self.column2.Add(wx.StaticText(self.panel,label=alg[i][0]),flag=wx.UP,border=15)
self.column2.Add(tmp,flag=wx.UP,border=5)
value=self.core.option_handler.GetOptimizerOptions().get(alg[i][0])
if value!=None:
tmp.SetValue(str(value))
elif self.kwargs.get("algo_options",{}).get(alg[i][0],None)!=None:
tmp.SetValue(str(self.kwargs.get("algo_options",{}).get(alg[i][0],None)))
self.final_sizer.Add(self.column2,flag=wx.LEFT,border=100)
self.SetSizer(self.final_sizer)
self.final_sizer.Layout()
self.run.Enable()
def Seed(self, e):
num_o_params=len(self.core.option_handler.GetObjTOOpt())
seeds = []
new_dialog_window=MyDialog2(self,num_o_params,seeds)
new_dialog_window.ShowModal()
# if dlg.ShowModal()==wx.ID_CANCEL:
# print "cancel"
# seeds=None
# dlg.Destroy()
#print len(seeds)
#print seeds
#if len(seeds)!=num_o_params or len(seeds[0])!=num_o_params:
# seeds=None
self.seed = seeds
#print self.seed
def Boundaries(self, e):
boundarywindow(self)
#self.run.Enable()
def Run(self, e):
try:
tmp = {"seed" : int(self.seed_ctrl.GetValue()),
"evo_strat" : str(self.dd_evo.GetItems()[self.dd_evo.GetCurrentSelection()])
}
for n in self.algo_param:
tmp.update({str(n[1]) : float(n[0].GetValue())})
tmp.update({
"num_params" : len(self.core.option_handler.GetObjTOOpt()),
"boundaries" : self.core.option_handler.boundaries ,
"starting_points" : self.seed
})
self.kwargs.update({"algo_options":tmp})
except AttributeError:
dlg = wx.MessageBox( "You forget to select an algorithm!",'Error', wx.OK | wx.ICON_ERROR)
if dlg.ShowModal() == wx.ID_OK:
dlg.Destroy()
if self.core.option_handler.output_level=="1":
self.core.Print()
#[map(float,map(wxTextCtrl.GetValue,fun)) for fun in self.param_list_container]
if self.core.option_handler.output_level=="1":
print self.kwargs
try:
self.core.ThirdStep(self.kwargs)
#wx.MessageBox('Optimization finished. Press the Next button for the results!', 'Done', wx.OK | wx.ICON_EXCLAMATION)
if self.core.option_handler.output_level=="1":
self.core.Print()
self.toolbar.EnableTool(wx.ID_FORWARD, True)
self.seed = None
self.Next(None)
except sizeError as sE:
wx.MessageBox("There was an error during the optimization: "+sE.m, 'Error', wx.OK | wx.ICON_EXCLAMATION)
#except ValueError:
#wx.MessageBox('Some of the cells are empty. Please fill out all of them!', 'Error', wx.OK|wx.ICON_ERROR)
def Next(self, e):
try:
self.layer.Show()
self.layer.kwargs=self.kwargs
self.layer.Design()
except AttributeError:
self.layer = resultsLayer(self, 4, self.Size, "Results", self.core, self.path,self.kwargs)
#self.layer = ffunctionLayer(self, 4, self.Size, "Fitness Function Selection", self.core, self.path, self.kwargs)
self.layer.Show()
#self.layer.Design()
self.Hide()
def Prev(self, e):
self.Hide()
self.parent.Show()
self.Destroy()
def my_close(self, e):
wx.Exit()
class resultsLayer(wx.Frame):
    """Wizard page displaying the optimization result.

    Shows the renormalized best parameter vector and fitness as text,
    plots target vs. model traces with matplotlib (saving PNG/EPS/SVG
    copies), and offers saving the parameters to a file.
    """
    def __init__(self, parent, ID, size, title, core, path,kwargs):
        wx.Frame.__init__(self, parent, ID, title=title, size=size)
        self.Bind(wx.EVT_CLOSE, self.my_close)
        self.core = core
        self.layer = None
        #this will need to be wrapped in a try statement later:
        import optimizer
        #print optimizer.__file__
        # Toolbar icons live in the optimizer package directory; the
        # 'path' argument is deliberately overridden.
        path = os.path.dirname(optimizer.__file__)
        self.path = path
        self.panel = wx.Panel(self)
        self.parent = parent
        self.kwargs=kwargs
#        try:
#            self.core.FourthStep()
#            self.Center()
#            self.ToolbarCreator()
#            self.Design()
#        except AttributeError:
#            wx.MessageBox("No optimization result to display!","Error",wx.OK | wx.ICON_ERROR)
#            self.Prev(None)
        self.core.FourthStep()
        self.Center()
        self.ToolbarCreator()
        self.Design()
    def Design(self):
        """Render the result text and the target/model trace plot."""
        heading = wx.StaticText(self.panel, label='Final Result', pos=(10, 15))
        heading.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        text = "Results:"
        # Pair each optimized parameter name with its renormalized value
        # from the best individual of the final population.
        for n, k in zip(self.core.option_handler.GetObjTOOpt(), self.core.optimizer.fit_obj.ReNormalize(self.core.optimizer.final_pop[0].candidate[0:len(self.core.option_handler.adjusted_params)])):
            if n.split()[0]==n.split()[-1]:
                param=[n.split()[0], n.split()[-1]]
                text += "\n" + param[0] + "\n" + "\t" + str(k)
            else:
                param=[n.split()[0], "segment: " + n.split()[1], n.split()[-1]]
                #print param
                if n.split()[1]!=n.split()[-1]:
                    text += "\n" + ": \n".join(param) + ":" + "\n" + "\t" + str(k)
                else:
                    text += "\n" + param[0] + ": " + param[-1] + "\n" + "\t" + str(k)
        text += "\n" + "fitness:\n" + "\t" + str(self.core.optimizer.final_pop[0].fitness)
        wx.StaticText(self.panel, label=text, pos=(10, 40))
        wx.StaticLine(self.panel, pos=(1, 0), size=(self.Size[0], 1))
        wx.StaticLine(self.panel, pos=(200, 0), size=(1, self.GetSize()[1]), style=wx.LI_VERTICAL)
        canvas = wx.Panel(self.panel, pos=(210, 10), size=(550, self.GetSize()[1]))
        figure = Figure(figsize=(7, 6))
        axes = figure.add_subplot(111)
        FigureCanvas(canvas, -1, figure)
        self.panel.Fit()
        self.Show()
        canvas.Show()
        exp_data = []
        model_data = []
        # Trace mode: concatenate all target and model traces end to end
        # and plot both; features mode has no target trace to overlay.
        if self.core.option_handler.type[-1]!="features":
            for n in range(self.core.data_handler.number_of_traces()):
                exp_data.extend(self.core.data_handler.data.GetTrace(n))
                model_data.extend(self.core.final_result[n])
            no_traces=self.core.data_handler.number_of_traces()
            t = self.core.option_handler.input_length
            step = self.core.option_handler.run_controll_dt
            # Five x-ticks across the concatenated time axis, labeled in ms
            # (Python 2 integer division in the label spacing).
            axes.set_xticks([n for n in range(0, int((t*no_traces)/(step)), int((t*no_traces)/(step)/5.0)) ])
            axes.set_xticklabels([str(n) for n in range(0, t*no_traces, (t*no_traces)/5)])
            axes.set_xlabel("time [ms]")
            _type=self.core.data_handler.data.type
            unit="mV" if _type=="voltage" else "nA" if _type=="current" else ""
            axes.set_ylabel(_type+" [" + unit + "]")
            axes.plot(range(0, len(exp_data)), exp_data)
            axes.plot(range(0, len(model_data)), model_data, 'r')
            axes.legend(["target", "model"])
            figure.savefig("result_trace.png", dpi=None, facecolor='w', edgecolor='w',
                           orientation='portrait', papertype=None, format=None,
                           transparent=False, bbox_inches=None, pad_inches=0.1)
            figure.savefig("result_trace.eps", dpi=None, facecolor='w', edgecolor='w')
            figure.savefig("result_trace.svg", dpi=None, facecolor='w', edgecolor='w')
            param_save=wx.Button(self.panel,id=wx.ID_ANY,label="Save\nParameters",pos=(105,5),size=(90,50))
            param_save.Bind(wx.EVT_BUTTON,self.SaveParam)
        else:
            for n in range(len(self.core.data_handler.features_data["stim_amp"])):
                model_data.extend(self.core.final_result[n])
            no_traces=len(self.core.data_handler.features_data["stim_amp"])
            t = int(self.core.option_handler.run_controll_tstop) # instead of input_length
            step = self.core.option_handler.run_controll_dt
            axes.set_xticks([n for n in range(0, int((t*no_traces)/(step)), int((t*no_traces)/(step)/5.0)) ])
            axes.set_xticklabels([str(n) for n in range(0, t*no_traces, (t*no_traces)/5)])
            axes.set_xlabel("time [ms]")
            _type=str(self.kwargs["runparam"][2]) #parameter to record
            _type_ = "Voltage" if _type =="v" else "Current" if _type=="c" else ""
            unit="mV" if _type=="v" else "nA" if _type=="c" else ""
            axes.set_ylabel(_type_+" [" + unit + "]")
            #axes.plot(range(0, len(exp_data)), exp_data)
            axes.plot(range(0, len(model_data)), model_data, 'r')
            axes.legend(["model"])
            figure.savefig("result_trace.png", dpi=None, facecolor='w', edgecolor='w',
                           orientation='portrait', papertype=None, format=None,
                           transparent=False, bbox_inches=None, pad_inches=0.1)
            figure.savefig("result_trace.eps", dpi=None, facecolor='w', edgecolor='w')
            figure.savefig("result_trace.svg", dpi=None, facecolor='w', edgecolor='w')
            param_save=wx.Button(self.panel,id=wx.ID_ANY,label="Save\nParameters",pos=(105,5),size=(90,50))
            param_save.Bind(wx.EVT_BUTTON,self.SaveParam)
    def ToolbarCreator(self):
        """Build the back/forward toolbar (both directions enabled)."""
        self.toolbar = self.CreateToolBar()
        button_toolbar_bward = self.toolbar.AddLabelTool(wx.ID_ANY, 'PrevLayer', wx.Bitmap(self.path + "/2leftarrow.png"))
        button_toolbar_fward = self.toolbar.AddLabelTool(wx.ID_FORWARD, 'NextLayer', wx.Bitmap(self.path + "/2rightarrow.png"))
        self.toolbar.Realize()
        self.Bind(wx.EVT_TOOL, self.Next, button_toolbar_fward)
        self.Bind(wx.EVT_TOOL, self.Prev, button_toolbar_bward)
        #self.toolbar.EnableTool(wx.ID_FORWARD,False)
    def Prev(self, e):
        """Go back by rebuilding a fresh algorithm layer under the
        grandparent and destroying the old one."""
        self.Hide()
        tmp=self.parent
        grandparent=self.parent.parent
        grandparent.layer=algorithmLayer(grandparent, 4, self.Size, "Select Algorithm", self.core, self.path, self.kwargs)
        self.parent=grandparent.layer
        #self.parent.Design()
        tmp.Destroy()
        self.parent.Show()
#        self.Destroy()
#        self.parent.Show()
    def Next(self, e):
        """Advance to the analysis layer, reusing it when it already exists."""
        self.Hide()
        try:
            self.layer.Design()
            self.layer.Show()
        except AttributeError:
            self.layer = analyzisLayer(self, 5, self.Size, "Analysis", self.core, self.path)
            self.layer.Design()
            self.layer.Show()
    def SaveParam(self, e):
        """Write the renormalized best parameters to a user-chosen file,
        one value per line."""
        dlg = wx.FileDialog(self, "Type a filename", os.getcwd(), "", "*.*", style=wx.FD_SAVE)
        if dlg.ShowModal() == wx.ID_OK:
            self.save_file_name=dlg.GetFilename()
            f=open(self.save_file_name,"w")
            params=self.core.optimizer.fit_obj.ReNormalize(self.core.optimizer.final_pop[0].candidate[0:len(self.core.option_handler.adjusted_params)])
            #params=self.core.optimizer.final_pop[0].candidate[0:len(self.core.option_handler.adjusted_params)]
            f.write("\n".join(map(str,params)))
        dlg.Destroy()
    def my_close(self, e):
        # Closing the window terminates the whole application.
        wx.Exit()
class analyzisLayer(wx.Frame):
def __init__(self, parent, ID, size, title, core, path):
wx.Frame.__init__(self, parent, ID, title=title, size=size)
self.Bind(wx.EVT_CLOSE, self.my_close)
self.panel = wx.Panel(self)
self.parent = parent
self.core = core
#this will need to be wrapped in a try statement later:
import optimizer
#print optimizer.__file__
path = os.path.dirname(optimizer.__file__)
self.path = path
self.Center()
self.ToolbarCreator()
self.Design()
#self.Text()
self.Buttons()
def Buttons(self):
gen_plot = wx.Button(self.panel, label="Generation Plot", pos=(200, 300))
gen_plot.Bind(wx.EVT_BUTTON, self.PlotGen)
grid_plot = wx.Button(self.panel, label="Grid Plot", pos=(200, 400))
grid_plot.Bind(wx.EVT_BUTTON, self.PlotGrid)
def Design(self):
heading = wx.StaticText(self.panel, label='Analysis', pos=(10, 15))
heading.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
text = "Results:"
for n, k in zip(self.core.option_handler.GetObjTOOpt(), self.core.optimizer.fit_obj.ReNormalize(self.core.optimizer.final_pop[0].candidate[0:len(self.core.option_handler.adjusted_params)])):
if n.split()[0]==n.split()[-1]:
param=[n.split()[0], n.split()[-1]]
text += "\n" + param[0] + "\n" + "\t" + str(k)
else:
param=[n.split()[0], "segment: " + n.split()[1], n.split()[-1]]
#print param
if n.split()[1]!=n.split()[-1]:
text += "\n" + ": \n".join(param) + ":" + "\n" + "\t" + str(k)
else:
text += "\n" + param[0] + ": " + param[-1] + "\n" + "\t" + str(k)
text += "\n" + "fitness:\n" + "\t" + str(self.core.optimizer.final_pop[0].fitness)
wx.StaticText(self.panel, label=text, pos=(10, 35))
wx.StaticText(self.panel, label='Fitness statistics', pos=(410, 35))
wx.StaticLine(self.panel, pos=(400, 0), size=(1, 600), style=wx.LI_VERTICAL)
try:
stats = inspyred.ec.analysis.fitness_statistics(self.core.optimizer.final_pop)
except AttributeError:
stats={'best' : "unkown",'worst' : "unkown",'mean' : "unkown",'median' : "unkown", 'std' : "unkown"}
string = "Best: " + str(stats['best']) + "\nWorst: " + str(stats['worst']) + "\nMean: " + str(stats['mean']) + "\nMedian: " + str(stats['median']) + "\nStd:" + str(stats['std'])
wx.StaticText(self.panel, label=string, pos=(410, 55))
#insert table with error components: 410,200
self.error_comp_table = wx.ListCtrl(self.panel, pos=(410, 200),size=(350,150),style=wx.LC_REPORT | wx.BORDER_SUNKEN)
self.error_comp_table.InsertColumn(0, 'Error Function', width=200)
self.error_comp_table.InsertColumn(1, 'Value', width=200)
self.error_comp_table.InsertColumn(2, 'Weight', width=200)
self.error_comp_table.InsertColumn(3, 'Weighted Value', width=200)
#tmp_list=[]
for c_idx,c in enumerate(zip(*self.core.error_comps)):
tmp=[0]*4
for t_idx in range(len(c)):
#print c[t_idx]
tmp[1]+=c[t_idx][2]
tmp[2]=c[t_idx][0]
tmp[3]+=c[t_idx][2]*c[t_idx][0]
if self.core.option_handler.type[-1]!='features':
tmp[0]=self.core.ffun_mapper[c[t_idx][1].__name__]
else:
tmp[0]=(c[t_idx][1])
tmp=map(str,tmp)
#tmp_list.append(tmp)
self.error_comp_table.InsertStringItem(c_idx,tmp[0])
self.error_comp_table.SetStringItem(c_idx,1,tmp[1])
self.error_comp_table.SetStringItem(c_idx,2,tmp[2])
self.error_comp_table.SetStringItem(c_idx,3,tmp[3])
self.error_button=wx.Button(self.panel,pos=(410,410),label="Error Details")
self.error_button.Bind(wx.EVT_BUTTON,self.ShowErrorDialog)
def ToolbarCreator(self):
self.toolbar = self.CreateToolBar()
button_toolbar_bward = self.toolbar.AddLabelTool(wx.ID_ANY, 'PrevLayer', wx.Bitmap(self.path + "/2leftarrow.png"))
self.toolbar.Realize()
self.Bind(wx.EVT_TOOL, self.Prev, button_toolbar_bward)
def PlotGen(self, e):
    """Display the per-generation statistics plot, if one is available.

    The plot is only meaningful when ``stat_file.txt`` was (re)written
    after the current optimization run started; otherwise an error
    dialog is shown instead.

    Parameters
    ----------
    e : wx.Event
        The triggering event (unused).
    """
    import os.path
    if os.path.getmtime("stat_file.txt") <= self.core.option_handler.start_time_stamp:
        wx.MessageBox('Generation plot is not available for this algorithm.', 'Error', wx.OK | wx.ICON_ERROR)
    else:
        try:
            generation_plot("stat_file.txt")
        except ValueError:
            # Some generation_plot variants expect an open file object
            # rather than a path; retry with a handle and make sure it
            # is closed afterwards (the original leaked it).
            with open("stat_file.txt", "rb") as stat_file:
                generation_plot(stat_file)
        self.Show()
def PlotGrid(self, e):
    """Open the grid-resolution dialog, remembering the current bounds.

    The saved bounds are later compared against the active ones to
    decide whether the grid must be recomputed.
    """
    self.prev_bounds = copy(self.core.option_handler.boundaries)
    self.resolution = 10
    self.bw = gridwindow(self)
    self.bw.Show()
def ShowErrorDialog(self,e):
    # Pop up a (modeless) dialog showing the detailed error breakdown.
    self.extra_error_dialog=ErrorDialog(self)
    self.extra_error_dialog.Show()
def DisplayGrid(self):
    """Compute (if necessary) and display the error-surface grid.

    Asks the user for confirmation first, since evaluating the grid can
    take several minutes.  One subplot per optimized parameter is shown
    (parameter value vs. error value); unused subplots of the square
    layout are hidden.
    """
    self.bw.Hide()
    from math import sqrt, ceil
    dlg = wx.MessageDialog(
        self,
        "Start calculating grid?\nThe calculation might take several minutes depending your machine's performance.",
        style=wx.OK | wx.CANCEL)
    if dlg.ShowModal() == wx.ID_OK:
        self.bw.Destroy()
        act_bounds = self.core.option_handler.boundaries
        # Recompute only when there is no cached result or the bounds
        # changed since the grid was last evaluated.  Identity check
        # against None (the original used `== None`).
        if self.core.grid_result is None or act_bounds != self.prev_bounds:
            self.core.callGrid(self.resolution)
        no_dims = int(ceil(sqrt(len(self.core.option_handler.GetObjTOOpt()))))
        import matplotlib.pyplot as plt
        f, axes = plt.subplots(no_dims, no_dims, squeeze=False)
        # Flatten the 2-D axes grid into a simple list.
        a = [ax for row in axes for ax in row]
        for i in range(len(self.core.option_handler.GetObjTOOpt())):
            for points, fitness in zip(self.core.grid_result[0][i],
                                       self.core.grid_result[1][i]):
                a[i].plot(points[i], fitness[0], marker='o', color='r', ls='')
            a[i].set_title(self.core.option_handler.GetObjTOOpt()[i].split()[-1])
            a[i].set_ylabel("Error Value")
            a[i].relim()
            a[i].autoscale(True, 'both', False)
        # Hide the unused subplots of the square layout.
        for i in range(len(self.core.option_handler.GetObjTOOpt()), no_dims ** 2):
            a[i].axis('off')
        # Use the local `plt` alias (the original referenced
        # matplotlib.pyplot.show(), relying on a module-level import).
        plt.show()
def Prev(self, e):
    # Navigate back: close this window and re-show the parent layer.
    self.Destroy()
    self.parent.Show()
def my_close(self, e):
    # Terminate the whole wx application.
    wx.Exit()
def main(param=None):
    """Start the optimizer GUI and run the wx main loop.

    Parameters
    ----------
    param : str, optional
        Command-line verbosity option of the form ``-v_level=<level>``;
        the part after ``=`` becomes the option handler's output level.
    """
    app = wx.App(False)
    core = Core.coreModul()
    if param is not None:
        # The original used param.lstrip("-v_level="), but str.lstrip
        # strips a *character set*, not a prefix -- it would also eat
        # leading characters of the value itself (e.g. "v" or "e").
        # Strip the literal prefix instead.
        prefix = "-v_level="
        if param.startswith(prefix):
            core.option_handler.output_level = param[len(prefix):]
        else:
            core.option_handler.output_level = param
    layer = inputLayer(None, 0, (800, 600), "Input Trace Selection", core, os.getcwd())
    app.MainLoop()
    try:
        # Placeholder for optional NEURON cleanup
        # (core.model_handler.hoc_obj.quit()).
        pass
    except AttributeError:
        pass
    sys.exit()
# Script entry point.
if __name__ == "__main__":
    main()
| lgpl-2.1 |
RayMick/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky

from sklearn.covariance import LedoitWolf, OAS

np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
# Cholesky factor used to color i.i.d. normal samples so that they have
# covariance `real_cov`.
coloring_matrix = cholesky(real_cov)

n_samples_range = np.arange(6, 31, 1)
repeat = 100
# One row per sample size, one column per repetition.
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
    for j in range(repeat):
        # Draw Gaussian samples with covariance `real_cov`.
        X = np.dot(
            np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)

        lw = LedoitWolf(store_precision=False, assume_centered=True)
        lw.fit(X)
        # error_norm with scaling=False is the (squared) Frobenius error
        # against the true covariance.
        lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
        lw_shrinkage[i, j] = lw.shrinkage_

        oa = OAS(store_precision=False, assume_centered=True)
        oa.fit(X)
        oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
        oa_shrinkage[i, j] = oa.shrinkage_

# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
             label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
             label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)

# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
             label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
             label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
# Leave 10% headroom above the data, capped relative to 1.
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)

plt.show()
| bsd-3-clause |
nrhine1/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture

n_samples = 300

# generate random sample, two components
np.random.seed(0)

# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])

# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)

# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])

# fit a Gaussian Mixture Model with two components
# NOTE(review): mixture.GMM is the pre-0.18 sklearn API (replaced by
# GaussianMixture in later releases) -- confirm against the installed
# scikit-learn version.
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)

# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
# Negative log-likelihood of every grid point, reshaped to the grid.
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)

CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
                 levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)

plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
LiaoPan/scikit-learn | sklearn/metrics/pairwise.py | 104 | 42995 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
    """Validate X and Y for pairwise-distance computations.

    When Y is None it is bound to X itself (no copy is made); otherwise
    both inputs are validated independently.  Both are converted to (at
    least 2-D) float arrays, and their second dimensions must agree.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        Validated version of X, guaranteed to be an array.

    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        Validated version of Y, or a pointer to safe_X when Y was None.
    """
    X, Y, dtype = _return_float_dtype(X, Y)

    if Y is None or Y is X:
        # Single validation pass; Y aliases X.
        X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
    else:
        X = check_array(X, accept_sparse='csr', dtype=dtype)
        Y = check_array(Y, accept_sparse='csr', dtype=dtype)

    if X.shape[1] != Y.shape[1]:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X.shape[1] == %d while Y.shape[1] == %d" % (
                             X.shape[1], Y.shape[1]))

    return X, Y
def check_paired_arrays(X, Y):
    """Validate X and Y for paired (row-wise) distance computations.

    Performs the same validation as :func:`check_pairwise_arrays` and
    additionally requires the two arrays to have identical shapes, since
    paired metrics match row i of X with row i of Y.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        Validated version of X, guaranteed to be an array.

    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        Validated version of Y, or a pointer to safe_X when Y was None.
    """
    X, Y = check_pairwise_arrays(X, Y)
    if X.shape != Y.shape:
        raise ValueError("X and Y should be of same shape. They were "
                         "respectively %r and %r long." % (X.shape, Y.shape))
    return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    distance matrix between each pair of vectors.

    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::

        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))

    This formulation has two advantages over other ways of computing distances.
    First, it is computationally efficient when dealing with sparse data.
    Second, if x varies but y remains unchanged, then the right-most dot
    product `dot(y, y)` can be pre-computed.

    However, this is not the most precise way of doing this computation, and
    the distance matrix returned by this function may not be exactly
    symmetric as required by, e.g., ``scipy.spatial.distance`` functions.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_1, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)

    Y_norm_squared : array-like, shape (n_samples_2, ), optional
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)

    squared : boolean, optional
        Return squared Euclidean distances.

    Returns
    -------
    distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)

    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[ 0.,  1.],
           [ 1.,  0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[ 1.        ],
           [ 1.41421356]])

    See also
    --------
    paired_distances : distances betweens pairs of elements of X and Y.
    """
    # should not need X_norm_squared because if you could precompute that as
    # well as Y, then you should just pre-compute the output and not even
    # call this function.
    X, Y = check_pairwise_arrays(X, Y)

    if Y_norm_squared is not None:
        YY = check_array(Y_norm_squared)
        if YY.shape != (1, Y.shape[0]):
            raise ValueError(
                "Incompatible dimensions for Y and Y_norm_squared")
    else:
        # Row norms ||y||^2 as a (1, n_samples_2) row vector for broadcasting.
        YY = row_norms(Y, squared=True)[np.newaxis, :]

    if X is Y:  # shortcut in the common case euclidean_distances(X, X)
        XX = YY.T
    else:
        XX = row_norms(X, squared=True)[:, np.newaxis]

    # ||x||^2 - 2 x.y + ||y||^2, accumulated in place; the order of the
    # in-place operations below is deliberate (reuses the dot-product
    # buffer without extra allocations).
    distances = safe_sparse_dot(X, Y.T, dense_output=True)
    distances *= -2
    distances += XX
    distances += YY
    # Rounding errors can produce tiny negative values; clamp them to 0
    # before the sqrt.
    np.maximum(distances, 0, out=distances)

    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        distances.flat[::distances.shape[0] + 1] = 0.0

    return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
                                  batch_size=500, metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.

    This function computes for each row in X, the index of the row of Y which
    is closest (according to the specified distance). The minimal distances are
    also returned.

    This is mostly equivalent to calling:

        (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
         pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))

    but uses much less memory, and is faster for large arrays.

    Parameters
    ----------
    X, Y : {array-like, sparse matrix}
        Arrays containing points. Respective shapes (n_samples1, n_features)
        and (n_samples2, n_features)

    batch_size : integer
        To reduce memory consumption over the naive solution, data are
        processed in batches, comprising batch_size rows of X and
        batch_size rows of Y. The default value is quite conservative, but
        can be changed for fine-tuning. The larger the number, the larger the
        memory usage.

    metric : string or callable, default 'euclidean'
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.  If metric is a callable
        function, it is called on each pair of instances (rows) and the
        resulting value recorded.  Distance matrices are not supported.

        Valid values for metric are:

        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']

        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
          'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
          'sqeuclidean', 'yule']

    metric_kwargs : dict, optional
        Keyword arguments to pass to specified metric function.

    axis : int, optional, default 1
        Axis along which the argmin and distances are to be computed.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    distances : numpy.ndarray
        distances[i] is the distance between the i-th row in X and the
        argmin[i]-th row in Y.

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin
    """
    dist_func = None
    if metric in PAIRWISE_DISTANCE_FUNCTIONS:
        dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif not callable(metric) and not isinstance(metric, str):
        raise ValueError("'metric' must be a string or a callable")

    X, Y = check_pairwise_arrays(X, Y)

    if metric_kwargs is None:
        metric_kwargs = {}

    if axis == 0:
        # Compute mins over X instead of Y by swapping the operands.
        X, Y = Y, X

    # Allocate output arrays; minima start at +inf so any real distance
    # replaces them.  (`np.infty` was a deprecated alias removed in
    # NumPy 2.0; use `np.inf`.)
    indices = np.empty(X.shape[0], dtype=np.intp)
    values = np.empty(X.shape[0])
    values.fill(np.inf)

    for chunk_x in gen_batches(X.shape[0], batch_size):
        X_chunk = X[chunk_x, :]
        for chunk_y in gen_batches(Y.shape[0], batch_size):
            Y_chunk = Y[chunk_y, :]

            if dist_func is not None:
                if metric == 'euclidean':  # special case, for speed
                    # ||x||^2 - 2 x.y + ||y||^2, accumulated in place.
                    d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
                                              dense_output=True)
                    d_chunk *= -2
                    d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
                    d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
                    np.maximum(d_chunk, 0, d_chunk)
                else:
                    d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
            else:
                d_chunk = pairwise_distances(X_chunk, Y_chunk,
                                             metric=metric, **metric_kwargs)

            # Update indices and minimum values using chunk
            min_indices = d_chunk.argmin(axis=1)
            min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
                                 min_indices]

            flags = values[chunk_x] > min_values
            indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
            values[chunk_x][flags] = min_values[flags]

    if metric == "euclidean" and not metric_kwargs.get("squared", False):
        # The fast euclidean path above computes *squared* distances.
        np.sqrt(values, values)
    return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
                              batch_size=500, metric_kwargs=None):
    """Compute, for each row of X, the index of the closest row of Y.

    This is mostly equivalent to calling
    ``pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)``
    but uses much less memory, and is faster for large arrays.  It is a
    thin wrapper around :func:`pairwise_distances_argmin_min` that
    discards the minimal distances.  This function works with dense 2D
    arrays only.

    Parameters
    ----------
    X : array-like, shape (n_samples1, n_features)

    Y : array-like, shape (n_samples2, n_features)

    batch_size : integer
        Number of rows of X / Y processed per chunk; larger values trade
        memory for speed.

    metric : string or callable
        Metric to use for distance computation; any metric from
        scikit-learn (['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
        'manhattan']) or scipy.spatial.distance, or a callable taking
        two arrays and returning a distance.  Distance matrices are not
        supported.

    metric_kwargs : dict
        keyword arguments to pass to specified metric function.

    axis : int, optional, default 1
        Axis along which the argmin and distances are to be computed.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin_min
    """
    if metric_kwargs is None:
        metric_kwargs = {}

    indices, _ = pairwise_distances_argmin_min(X, Y, axis, metric,
                                               batch_size, metric_kwargs)
    return indices
def manhattan_distances(X, Y=None, sum_over_features=True,
                        size_threshold=5e8):
    """ Compute the L1 distances between the vectors in X and Y.

    With sum_over_features equal to False it returns the componentwise
    distances.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like
        An array with shape (n_samples_X, n_features).

    Y : array_like, optional
        An array with shape (n_samples_Y, n_features).

    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.
        Not supported for sparse matrix inputs.

    size_threshold : int, default=5e8
        Unused parameter.

    Returns
    -------
    D : array
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (ie. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains
        the pairwise L1 distances.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
    array([[ 0.]])
    >>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances([[1, 2], [3, 4]],\
         [[1, 2], [0, 3]])#doctest:+ELLIPSIS
    array([[ 0.,  2.],
           [ 4.,  4.]])
    >>> import numpy as np
    >>> X = np.ones((1, 2))
    >>> y = 2 * np.ones((2, 2))
    >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
    array([[ 1.,  1.],
           [ 1.,  1.]]...)
    """
    X, Y = check_pairwise_arrays(X, Y)

    if issparse(X) or issparse(Y):
        if not sum_over_features:
            raise TypeError("sum_over_features=%r not supported"
                            " for sparse matrices" % sum_over_features)

        # Sparse path: delegate to the Cython helper, which fills the
        # preallocated dense output D in place.
        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        D = np.zeros((X.shape[0], Y.shape[0]))
        _sparse_manhattan(X.data, X.indices, X.indptr,
                          Y.data, Y.indices, Y.indptr,
                          X.shape[1], D)
        return D

    if sum_over_features:
        # Dense pairwise case is exactly scipy's cityblock metric.
        return distance.cdist(X, Y, 'cityblock')

    # Componentwise case: broadcast to (n_X, n_Y, n_features), take the
    # absolute difference in place, then flatten the first two axes.
    D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    D = np.abs(D, D)
    return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
    """Compute the cosine distance matrix between samples in X and Y.

    Cosine distance is defined as 1.0 minus the cosine similarity.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).

    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    distance matrix : array
        An array with shape (n_samples_X, n_samples_Y).

    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine (dense matrices only)
    """
    # Equivalent to 1.0 - cosine_similarity(X, Y), computed in place on
    # the similarity matrix to avoid an extra copy.
    D = cosine_similarity(X, Y)
    D *= -1
    D += 1
    return D
# Paired distances
def paired_euclidean_distances(X, Y):
    """Compute the row-wise euclidean distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    # Distance of each pair is the L2 norm of the row difference.
    diff = X - Y
    return row_norms(diff)
def paired_manhattan_distances(X, Y):
    """Compute the row-wise L1 distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    delta = X - Y
    if issparse(delta):
        # Take the absolute value of the stored entries, then sum each
        # row; the matrix-sum result is squeezed down to a 1-D array.
        delta.data = np.abs(delta.data)
        return np.squeeze(np.array(delta.sum(axis=1)))
    return np.abs(delta).sum(axis=-1)
def paired_cosine_distances(X, Y):
    """Compute the row-wise cosine distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray, shape (n_samples, )

    Notes
    ------
    The cosine distance is equivalent to the half the squared
    euclidean distance if each sample is normalized to unit norm
    """
    X, Y = check_paired_arrays(X, Y)
    # After L2-normalizing both inputs, cosine distance equals half the
    # squared euclidean distance of the row differences.
    normalized_diff = normalize(X) - normalize(Y)
    return 0.5 * row_norms(normalized_diff, squared=True)
# Mapping from metric name to the paired-distance implementation used
# by :func:`paired_distances`; 'l2'/'euclidean' and
# 'l1'/'manhattan'/'cityblock' are aliases.
PAIRED_DISTANCES = {
    'cosine': paired_cosine_distances,
    'euclidean': paired_euclidean_distances,
    'l2': paired_euclidean_distances,
    'l1': paired_manhattan_distances,
    'manhattan': paired_manhattan_distances,
    'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
    """
    Computes the paired distances between X and Y.

    Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : ndarray (n_samples, n_features)
        Array 1 for distance computation.

    Y : ndarray (n_samples, n_features)
        Array 2 for distance computation.

    metric : string or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        specified in PAIRED_DISTANCES, including "euclidean",
        "manhattan", or "cosine".
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.

    Returns
    -------
    distances : ndarray (n_samples, )

    Examples
    --------
    >>> from sklearn.metrics.pairwise import paired_distances
    >>> X = [[0, 1], [1, 1]]
    >>> Y = [[0, 1], [2, 1]]
    >>> paired_distances(X, Y)
    array([ 0.,  1.])

    See also
    --------
    pairwise_distances : pairwise distances.
    """
    if metric in PAIRED_DISTANCES:
        # Known metric name: dispatch to the dedicated implementation.
        return PAIRED_DISTANCES[metric](X, Y)
    if callable(metric):
        # Check the matrix first (it is usually done by the metric)
        X, Y = check_paired_arrays(X, Y)
        distances = np.zeros(len(X))
        for i, (x, y) in enumerate(zip(X, Y)):
            distances[i] = metric(x, y)
        return distances
    raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
    """Compute the linear kernel between X and Y.

    Read more in the :ref:`User Guide <linear_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)

    Y : array of shape (n_samples_2, n_features)

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    # The linear kernel is simply the dense Gram matrix X . Y^T.
    return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """
    Compute the polynomial kernel between X and Y::

        K(X, Y) = (gamma <X, Y> + coef0)^degree

    Read more in the :ref:`User Guide <polynomial_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)

    Y : ndarray of shape (n_samples_2, n_features)

    degree : int, default 3

    gamma : float, defaults to 1 / n_features

    coef0 : int, default 1

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    # (gamma * <X, Y> + coef0) ** degree, accumulated in place on the
    # dense Gram matrix.
    gram = safe_sparse_dot(X, Y.T, dense_output=True)
    gram *= gamma
    gram += coef0
    gram **= degree
    return gram
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """
    Compute the sigmoid kernel between X and Y::

        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Read more in the :ref:`User Guide <sigmoid_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)

    Y : ndarray of shape (n_samples_2, n_features)

    gamma : float, defaults to 1 / n_features

    coef0 : int, default 1

    Returns
    -------
    Gram matrix: array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    # tanh(gamma * <X, Y> + coef0), accumulated in place.
    gram = safe_sparse_dot(X, Y.T, dense_output=True)
    gram *= gamma
    gram += coef0
    return np.tanh(gram, gram)
def rbf_kernel(X, Y=None, gamma=None):
    """
    Compute the rbf (gaussian) kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||^2)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <rbf_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features)

    gamma : float, defaults to 1 / n_features

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    # exp(-gamma * ||x - y||^2), exponentiated in place on the squared
    # distance matrix.
    K = euclidean_distances(X, Y, squared=True)
    K *= -gamma
    return np.exp(K, K)
def cosine_similarity(X, Y=None, dense_output=True):
    """Compute cosine similarity between samples in X and Y.

    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:

        K(X, Y) = <X, Y> / (||X||*||Y||)

    On L2-normalized data, this function is equivalent to linear_kernel.

    Read more in the :ref:`User Guide <cosine_similarity>`.

    Parameters
    ----------
    X : ndarray or sparse array, shape: (n_samples_X, n_features)
        Input data.

    Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
        Input data. If ``None``, the output will be the pairwise
        similarities between all samples in ``X``.

    dense_output : boolean (optional), default True
        Whether to return dense output even when the input is sparse. If
        ``False``, the output is sparse if both input arrays are sparse.

    Returns
    -------
    kernel matrix : array
        An array with shape (n_samples_X, n_samples_Y).
    """
    # to avoid recursive import
    X, Y = check_pairwise_arrays(X, Y)

    # L2-normalize the rows; when Y aliases X the normalization is
    # reused instead of being computed twice.
    X_normalized = normalize(X, copy=True)
    Y_normalized = X_normalized if X is Y else normalize(Y, copy=True)

    return safe_sparse_dot(X_normalized, Y_normalized.T,
                           dense_output=dense_output)
def additive_chi2_kernel(X, Y=None):
    """Computes the additive chi-squared kernel between observations in X and Y

    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = -Sum [(x - y)^2 / (x + y)]

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features)

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.

    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.
    """
    # The Cython helper below only handles dense arrays.
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    if (X < 0).any():
        raise ValueError("X contains negative values.")
    # When Y aliases X the first check already covered it.
    if Y is not X and (Y < 0).any():
        raise ValueError("Y contains negative values.")

    # Preallocate the output and let the Cython helper fill it in place.
    result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    _chi2_kernel_fast(X, Y, result)
    return result
def chi2_kernel(X, Y=None, gamma=1.):
    """Compute the exponential chi-squared kernel between X and Y.

    The kernel is evaluated for every pair of rows of X and Y, which must be
    non-negative (it is most commonly applied to histograms)::

        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
        Non-negative feature array.
    Y : array of shape (n_samples_Y, n_features)
        Optional second non-negative feature array; defaults to ``X``.
    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    additive_chi2_kernel : The additive version of this kernel
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.
    """
    # additive_chi2_kernel already returns -Sum[(x - y)^2 / (x + y)], so
    # scaling by gamma and exponentiating yields exp(-gamma * Sum[...]).
    kernel = additive_chi2_kernel(X, Y)
    kernel *= gamma
    # Exponentiate in place to avoid allocating a second full matrix.
    return np.exp(kernel, out=kernel)
# Helper functions - distance
# Maps the metric strings accepted by pairwise_distances to the
# scikit-learn implementations defined above.  Note that 'l1'/'cityblock'
# and 'l2' are aliases for 'manhattan' and 'euclidean' respectively.
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'cityblock': manhattan_distances,
    'cosine': cosine_distances,
    'euclidean': euclidean_distances,
    'l2': euclidean_distances,
    'l1': manhattan_distances,
    'manhattan': manhattan_distances, }
def distance_metrics():
    """Return the mapping of valid pairwise_distances metric names.

    This is a thin accessor for the module-level dictionary of string
    metric names to distance functions; it exists so that the mapping can
    be described in one documented place.

    The valid distance metrics, and the function they map to, are:

    ============ ====================================
    metric       Function
    ============ ====================================
    'cityblock'  metrics.pairwise.manhattan_distances
    'cosine'     metrics.pairwise.cosine_distances
    'euclidean'  metrics.pairwise.euclidean_distances
    'l1'         metrics.pairwise.manhattan_distances
    'l2'         metrics.pairwise.euclidean_distances
    'manhattan'  metrics.pairwise.manhattan_distances
    ============ ====================================

    Read more in the :ref:`User Guide <metrics>`.
    """
    return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Split the pairwise matrix into n_jobs even column slices and
    compute them in parallel, re-assembling with hstack."""
    if Y is None:
        Y = X
    if n_jobs < 0:
        # Joblib convention: -1 means all CPUs, -2 all but one, etc.
        n_jobs = max(cpu_count() + 1 + n_jobs, 1)
    if n_jobs == 1:
        # Special case: call directly to avoid the parallel machinery and
        # its picklability checks.
        return func(X, Y, **kwds)
    # TODO: in some cases, backend='threading' may be appropriate
    delayed_func = delayed(func)
    column_blocks = Parallel(n_jobs=n_jobs, verbose=0)(
        delayed_func(X, Y[sl], **kwds)
        for sl in gen_even_slices(Y.shape[0], n_jobs))
    # Each job produced the columns for its slice of Y; glue them back.
    return np.hstack(column_blocks)
def _pairwise_callable(X, Y, metric, **kwds):
    """Evaluate a user-supplied callable metric for every pair of rows,
    handling the callable case for pairwise_{distances,kernels}."""
    X, Y = check_pairwise_arrays(X, Y)
    n_x, n_y = X.shape[0], Y.shape[0]
    if X is Y:
        # Exploit symmetry: evaluate the metric only on the strict upper
        # triangle, then mirror it.
        D = np.zeros((n_x, n_y), dtype='float')
        for i, j in itertools.combinations(range(n_x), 2):
            D[i, j] = metric(X[i], Y[j], **kwds)
        # NB: D += D.T would produce incorrect results (in-place aliasing)
        D = D + D.T
        # Fill the diagonal explicitly: nonzero diagonals are allowed for
        # both metrics and kernels.
        for i in range(n_x):
            row = X[i]
            D[i, i] = metric(row, row, **kwds)
    else:
        # No symmetry to exploit: evaluate every (i, j) cell.
        D = np.empty((n_x, n_y), dtype='float')
        for i in range(n_x):
            for j in range(n_y):
                D[i, j] = metric(X[i], Y[j], **kwds)
    return D
# String metrics accepted by pairwise_distances: the scikit-learn
# implementations (first line) plus the metrics delegated to
# scipy.spatial.distance.pdist/cdist.
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
                  'braycurtis', 'canberra', 'chebyshev', 'correlation',
                  'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
                  'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
                  'russellrao', 'seuclidean', 'sokalmichener',
                  'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
    """Compute the distance matrix from a vector array X and optional Y.

    This method takes either a vector array or a distance matrix, and returns
    a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distances matrix, it is returned instead.

    This provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array. If Y is given (default is None), then the returned matrix is the
    pairwise distance between the arrays from both X and Y.

    Valid values for metric are:

    - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
      'manhattan']. These metrics support sparse matrix inputs.

    - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
      'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
      'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
      'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
      These metrics do not support sparse matrix inputs; see the
      scipy.spatial.distance documentation for details.

    Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
    valid scipy.spatial.distance metrics), the scikit-learn implementation
    will be used, which is faster and has support for sparse matrices (except
    for 'cityblock').

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.
    Y : array [n_samples_b, n_features], optional
        An optional second feature array. Only allowed if
        metric != "precomputed".
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays from X as input and return a value
        indicating the distance between them.
    n_jobs : int
        The number of jobs to use for the computation, by breaking down the
        pairwise matrix into n_jobs even slices computed in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used: n_jobs = -2 means all CPUs but one.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.

    Returns
    -------
    D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then D_{i, j} is the distance between the ith array
        from X and the jth array from Y.
    """
    # Validate the metric before doing any work.
    metric_is_known = (metric == "precomputed"
                       or metric in _VALID_METRICS
                       or callable(metric))
    if not metric_is_known:
        raise ValueError("Unknown metric %s. "
                         "Valid metrics are %s, or 'precomputed', or a "
                         "callable" % (metric, _VALID_METRICS))

    if metric == "precomputed":
        # X is already a distance matrix; nothing to compute.
        return X

    if metric in PAIRWISE_DISTANCE_FUNCTIONS:
        # Fast scikit-learn implementation.
        func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif callable(metric):
        func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        # Delegate to scipy.spatial.distance, which is dense-only.
        if issparse(X) or issparse(Y):
            raise TypeError("scipy distance metrics do not"
                            " support sparse matrices.")
        X, Y = check_pairwise_arrays(X, Y)
        if n_jobs == 1 and X is Y:
            # pdist computes only the condensed upper triangle; expand it.
            return distance.squareform(distance.pdist(X, metric=metric,
                                                      **kwds))
        func = partial(distance.cdist, metric=metric, **kwds)

    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# Helper functions - kernels
# Maps the metric strings accepted by pairwise_kernels to the
# scikit-learn kernel implementations.
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
    'additive_chi2': additive_chi2_kernel,
    'chi2': chi2_kernel,
    'linear': linear_kernel,
    'polynomial': polynomial_kernel,
    'poly': polynomial_kernel,
    'rbf': rbf_kernel,
    'sigmoid': sigmoid_kernel,
    'cosine': cosine_similarity, }
def kernel_metrics():
    """Return the mapping of valid pairwise_kernels metric names.

    This is a thin accessor for the module-level dictionary of string
    kernel names to kernel functions; it exists to give a documented
    description of that mapping.

    The valid kernel metrics, and the function they map to, are:

    ===============   ========================================
    metric            Function
    ===============   ========================================
    'additive_chi2'   sklearn.pairwise.additive_chi2_kernel
    'chi2'            sklearn.pairwise.chi2_kernel
    'linear'          sklearn.pairwise.linear_kernel
    'poly'            sklearn.pairwise.polynomial_kernel
    'polynomial'      sklearn.pairwise.polynomial_kernel
    'rbf'             sklearn.pairwise.rbf_kernel
    'sigmoid'         sklearn.pairwise.sigmoid_kernel
    'cosine'          sklearn.pairwise.cosine_similarity
    ===============   ========================================

    Read more in the :ref:`User Guide <metrics>`.
    """
    return PAIRWISE_KERNEL_FUNCTIONS
# Keyword parameters accepted by each named kernel.  pairwise_kernels uses
# this table when filter_params=True to discard keyword arguments a kernel
# does not understand.
KERNEL_PARAMS = {
    "additive_chi2": (),
    # chi2_kernel takes a ``gamma`` scaling parameter; it must be listed
    # here, otherwise filter_params=True silently drops gamma.
    "chi2": frozenset(["gamma"]),
    "cosine": (),
    # NOTE(review): "exp_chi2" is not a key of PAIRWISE_KERNEL_FUNCTIONS and
    # looks like a leftover name for the chi2 entry; kept for backward
    # compatibility.
    "exp_chi2": frozenset(["gamma"]),
    "linear": (),
    "poly": frozenset(["gamma", "degree", "coef0"]),
    "polynomial": frozenset(["gamma", "degree", "coef0"]),
    "rbf": frozenset(["gamma"]),
    "sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
                     n_jobs=1, **kwds):
    """Compute the kernel between arrays X and optional array Y.

    This method takes either a vector array or a kernel matrix, and returns
    a kernel matrix. If the input is a vector array, the kernels are
    computed. If the input is a kernel matrix, it is returned instead.

    This provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array. If Y is given (default is None), then the returned matrix is the
    pairwise kernel between the arrays from both X and Y.

    Valid values for metric are::
        ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise kernels between samples, or a feature array.
    Y : array [n_samples_b, n_features]
        A second feature array only if X has shape [n_samples_a, n_features].
    metric : string, or callable
        The metric to use when calculating kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays from X as input and return a value
        indicating the distance between them.
    filter_params : boolean
        Whether to filter invalid parameters or not.
    n_jobs : int
        The number of jobs to use for the computation, by breaking down the
        pairwise matrix into n_jobs even slices computed in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used: n_jobs = -2 means all CPUs but one.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the kernel function.

    Returns
    -------
    K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A kernel matrix K such that K_{i, j} is the kernel between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then K_{i, j} is the kernel between the ith array
        from X and the jth array from Y.

    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    if metric == "precomputed":
        # X is already a kernel matrix; return it untouched (Y is ignored).
        return X
    if callable(metric):
        # User-supplied kernel: evaluate row pair by row pair.
        func = partial(_pairwise_callable, metric=metric, **kwds)
    elif metric in PAIRWISE_KERNEL_FUNCTIONS:
        if filter_params:
            # Drop any keyword arguments this kernel does not accept.
            accepted = KERNEL_PARAMS[metric]
            kwds = {name: value for name, value in kwds.items()
                    if name in accepted}
        func = PAIRWISE_KERNEL_FUNCTIONS[metric]
    else:
        raise ValueError("Unknown kernel %r" % metric)
    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.