repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
mtaufen/test-infra | mungegithub/issue-labeler/simple_app.py | 20 | 4662 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import simplejson
import logging
from logging.handlers import RotatingFileHandler
import numpy as np
from flask import Flask, request
from sklearn.feature_extraction import FeatureHasher
from sklearn.externals import joblib
from sklearn.linear_model import SGDClassifier
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
app = Flask(__name__)
#Parameters
# Paths where the trained scikit-learn models are persisted with joblib;
# /update_models rewrites these files and / reloads them per request.
team_fn= "./models/trained_teams_model.pkl"
component_fn= "./models/trained_components_model.pkl"
# Rotating log destination and size threshold (100 MiB) used by
# configure_logger().
logFile = "/tmp/issue-labeler.log"
logSize = 1024*1024*100
# Dimensionality of the hashed feature space (2**18).
numFeatures = 262144
# SGDClassifier hyperparameters shared by both models.
myLoss = 'hinge'
myAlpha = .1
myPenalty = 'l2'
# Hashing vectorizer: maps token lists to fixed-width sparse vectors.
myHasher = FeatureHasher(input_type="string", n_features= numFeatures, non_negative=True)
# Tokenization pipeline pieces: word tokenizer + Porter stemmer.
myStemmer = PorterStemmer()
tokenizer = RegexpTokenizer(r'\w+')
# Load the optional stopword list; tokens found here are dropped during
# tokenization.  The previous version probed an undefined `stopwords` name
# and relied on a bare `except:` catching the resulting NameError; now the
# only failure handled is the file being missing/unreadable.
stop_fn = "./stopwords.txt"
try:
    with open(stop_fn, 'r') as f:
        stopwords = set(word.strip() for word in f)
except IOError:
    # don't remove any stopwords
    stopwords = set()
@app.errorhandler(500)
def internal_error(exception):
    """Render an unhandled exception as plain text with HTTP status 500."""
    message = str(exception)
    return message, 500
@app.route("/", methods = ["POST"])
def get_labels():
    """Predict labels for one issue.

    Expects two form-urlencoded parameters:
      title -- title of the issue
      body  -- body of the issue
    Returns a team/<label> and a component/<label> joined by a comma.
    """
    issue_title = request.form.get('title', "")
    issue_body = request.form.get('body', "")
    tokens = tokenize_stem_stop(" ".join([issue_title, issue_body]))
    # Models are re-read from disk on every request so updates written by
    # /update_models take effect immediately.
    team_model = joblib.load(team_fn)
    component_model = joblib.load(component_fn)
    features = myHasher.transform([tokens])
    team_label = team_model.predict(features)[0]
    component_label = component_model.predict(features)[0]
    return ",".join([team_label, component_label])
def tokenize_stem_stop(inputString):
    """Lowercase, tokenize, stop-filter and stem free text.

    Returns a list (not a lazy ``map`` iterator) so callers may iterate the
    result more than once -- update_model() appends the same token sequence
    to two training sets, which silently produced an empty second copy under
    Python 3 with the old iterator return.  The old encode/decode UTF-8
    round-trip was a no-op and has been dropped.
    """
    words = tokenizer.tokenize(inputString.lower())
    return [myStemmer.stem(word) for word in words if word not in stopwords]
@app.route("/update_models", methods = ["PUT"])
def update_model():
    """Incrementally (re)train the persisted team and component models.

    The JSON request body should contain three fields:
      titles: list of titles
      bodies: list of bodies
      labels: list of list of labels
    Examples whose label list contains a 'team...' label train the team
    model; those with a 'component...' label train the component model.
    """
    data = request.json
    titles = data.get('titles')
    bodies = data.get('bodies')
    labels = data.get('labels')
    tTokens = []
    cTokens = []
    team_labels = []
    component_labels = []
    for (title, body, label_list) in zip(titles, bodies, labels):
        # List comprehensions (not filter()) so the emptiness checks below
        # work under Python 3, where a filter object is always truthy.
        tLabel = [x for x in label_list if x.startswith('team')]
        cLabel = [x for x in label_list if x.startswith('component')]
        tokens = tokenize_stem_stop(" ".join([title, body]))
        if tLabel:
            team_labels += tLabel
            tTokens += [tokens]
        if cLabel:
            component_labels += cLabel
            cTokens += [tokens]
    if team_labels:
        # Transform only when there is data; hashing an empty corpus is
        # pointless and the result was previously computed unconditionally.
        tVec = myHasher.transform(tTokens)
        if os.path.isfile(team_fn):
            team_model = joblib.load(team_fn)
            team_model.partial_fit(tVec, np.array(team_labels))
        else:
            # no team model stored so build a new one
            team_model = SGDClassifier(loss=myLoss, penalty=myPenalty, alpha=myAlpha)
            team_model.fit(tVec, np.array(team_labels))
        # Persist inside the branch: the old unconditional dump raised
        # NameError whenever a request carried no team labels.
        joblib.dump(team_model, team_fn)
    if component_labels:
        cVec = myHasher.transform(cTokens)
        if os.path.isfile(component_fn):
            component_model = joblib.load(component_fn)
            component_model.partial_fit(cVec, np.array(component_labels))
        else:
            # no comp model stored so build a new one
            component_model = SGDClassifier(loss=myLoss, penalty=myPenalty, alpha=myAlpha)
            component_model.fit(cVec, np.array(component_labels))
        joblib.dump(component_model, component_fn)
    return ""
def configure_logger():
    """Attach a size-rotating file handler to the Flask app logger."""
    log_format = '%(asctime)-20s %(levelname)-10s %(message)s'
    handler = RotatingFileHandler(logFile, maxBytes=logSize, backupCount=3)
    handler.setFormatter(logging.Formatter(log_format))
    app.logger.addHandler(handler)
if __name__ == "__main__":
    # Set up rotating file logging, then serve on all interfaces
    # (Flask default port 5000).
    configure_logger()
    app.run(host="0.0.0.0")
| apache-2.0 |
minireference/noBSLAnotebooks | aspynb/Testing.py | 1 | 1948 | def cells():
# NOTE(review): notebook-export format -- each '''...''' string pair below
# delimits a markdown/code cell boundary, and lines beginning with '!' or
# '%' are IPython shell/magic commands.  This body is only executable
# through the notebook tooling (ipynb conversion), not as plain Python.
'''
# Testing SymPy and plot helpers are working
'''
'''
'''
# setup SymPy
from sympy import *
x, y, z, t = symbols('x y z t')
init_printing()
# download plot_helpers.py for use in colab
if 'google.colab' in str(get_ipython()):
print('Downloading plot_helpers.py to util/ (only neded for colab')
!mkdir util; wget https://raw.githubusercontent.com/minireference/noBSLAnotebooks/master/util/plot_helpers.py -P util
# setup plotting
%matplotlib inline
import matplotlib.pyplot as mpl
from util.plot_helpers import plot_vec, plot_vecs, autoscale_arrows
'''
'''
# check if SymPy knows how to simplify trig expressions
simplify(sin(2*x)*cos(2*x))
'''
'''
# define a column vector a
a = Matrix([1,1,1])
a
'''
'''
# BAD define the floating point number approximation 1/3
1/3
'''
'''
# define the fraction 1/3 (an exact rational number)
S(1)/3
'''
'''
# The S()-stuff is necessary to avoid Python behaviour,
# which is to treat 1/3 as a floating point number:
type(1/3)
'''
'''
type(S(1)/3)
'''
'''
'''
'''
# obtain numeric approximation (as a float)
N(S(1)/3)
'''
'''
# N, .n(), and .evalf() are equivalent ways to obtain numeric approx:
N((S(1)/3)), (S(1)/3).n(), (S(1)/3).evalf()
'''
'''
# the .n() method allows for arbitrary level precisions
pi.n(100)
'''
'''
# Euler's constant
E
'''
'''
'''
'''
'''
'''
'''
## Plot helpers
'''
'''
'''
# vector defined as a Python list
u = [1,2]
plot_vec(u)
autoscale_arrows()
'''
'''
# vector defined as a SymPy 2x1 Matrix (a column vector)
v = Matrix([1,2])
plot_vec(v)
autoscale_arrows()
'''
'''
'''
'''
| mit |
Fkawala/gcloud-python | docs/conf.py | 4 | 9557 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 21 22:24:47 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from email import message_from_string
import os
from pkg_resources import get_distribution
import sys
import urllib
import sphinx_rtd_theme
# True when building on the Read the Docs service; RTD injects its own
# theme, so the local sphinx_rtd_theme setup further below is skipped there.
ON_READ_THE_DOCS = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'google-cloud'
copyright = u'2014, Google'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Release defaults to the installed package version; SPHINX_RELEASE can
# override it for e.g. building docs for an unreleased version.
distro = get_distribution('google-cloud')
release = os.getenv('SPHINX_RELEASE', distro.version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not ON_READ_THE_DOCS:
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/images/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_add_permalinks = '#'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'google-cloud-doc'
html_context = {}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Pull the author out of the installed distribution's PKG-INFO metadata so
# the LaTeX/man/texinfo outputs stay in sync with setup.py.
metadata = distro.get_metadata(distro.PKG_INFO)
author = message_from_string(metadata).get('Author')
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'google-cloud.tex', u'google-cloud Documentation',
     author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'google-cloud', u'google-cloud Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'google-cloud', u'google-cloud Documentation',
     author, 'google-cloud', 'Python API for Google Cloud.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# This pulls class descriptions from the class docstring,
# and parameter definitions from the __init__ docstring.
autoclass_content = 'both'
# Configuration for intersphinx:
# Refer to the Python standard library and the oauth2client and
# httplib2 libraries.
intersphinx_mapping = {
    'httplib2': ('http://httplib2.readthedocs.io/en/latest/', None),
    'oauth2client': ('http://oauth2client.readthedocs.io/en/latest', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
    'python': ('https://docs.python.org/2', None),
    'google-auth': ('https://google-auth.readthedocs.io/en/stable', None),
}
| apache-2.0 |
astocko/statsmodels | statsmodels/base/model.py | 25 | 76781 | from __future__ import print_function
from statsmodels.compat.python import iterkeys, lzip, range, reduce
import numpy as np
from scipy import stats
from statsmodels.base.data import handle_data
from statsmodels.tools.tools import recipr, nan_dot
from statsmodels.stats.contrast import ContrastResults, WaldTestResults
from statsmodels.tools.decorators import resettable_cache, cache_readonly
import statsmodels.base.wrapper as wrap
from statsmodels.tools.numdiff import approx_fprime
from statsmodels.formula import handle_formula_data
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.base.optimizer import Optimizer
# Reusable docstring fragments; Model subclasses interpolate these into
# their class docstrings (see Model.__doc__ below).  These are runtime
# string constants, so their content is left exactly as-is.
_model_params_doc = """
Parameters
----------
endog : array-like
1-d endogenous response variable. The dependent variable.
exog : array-like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user. See
:func:`statsmodels.tools.add_constant`."""
_missing_param_doc = """\
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no nan
checking is done. If 'drop', any observations with nans are dropped.
If 'raise', an error is raised. Default is 'none.'"""
_extra_param_doc = """
hasconst : None or bool
Indicates whether the RHS includes a user-supplied constant. If True,
a constant is not checked for and k_constant is set to 1 and all
result statistics are calculated as if a constant is present. If
False, a constant is not checked for and k_constant is set to 0.
"""
class Model(object):
    __doc__ = """
A (predictive) statistical model. Intended to be subclassed not used.
%(params_doc)s
%(extra_params_doc)s
Notes
-----
`endog` and `exog` are references to any data provided. So if the data is
already stored in numpy arrays and it is changed then `endog` and `exog`
will change as well.
""" % {'params_doc' : _model_params_doc,
       'extra_params_doc' : _missing_param_doc + _extra_param_doc}

    def __init__(self, endog, exog=None, **kwargs):
        # 'missing' and 'hasconst' are consumed here; the remaining kwargs
        # are forwarded to the data handler and re-attached in _handle_data.
        missing = kwargs.pop('missing', 'none')
        hasconst = kwargs.pop('hasconst', None)
        self.data = self._handle_data(endog, exog, missing, hasconst,
                                      **kwargs)
        self.k_constant = self.data.k_constant
        self.exog = self.data.exog
        self.endog = self.data.endog
        # Names of data attributes, used when pickling/recreating the model.
        self._data_attr = []
        self._data_attr.extend(['exog', 'endog', 'data.exog', 'data.endog'])
        if 'formula' not in kwargs:  # won't be able to unpickle without these
            self._data_attr.extend(['data.orig_endog', 'data.orig_exog'])
        # store keys for extras if we need to recreate model instance
        # we don't need 'missing', maybe we need 'hasconst'
        self._init_keys = list(kwargs.keys())
        if hasconst is not None:
            self._init_keys.append('hasconst')

    def _get_init_kwds(self):
        """return dictionary with extra keys used in model.__init__
        """
        # Missing attributes map to None rather than raising.
        kwds = dict(((key, getattr(self, key, None))
                     for key in self._init_keys))
        return kwds

    def _handle_data(self, endog, exog, missing, hasconst, **kwargs):
        # Normalize/validate the inputs (nan handling, constant detection)
        # and move any extra kwargs arrays from the data object onto self.
        data = handle_data(endog, exog, missing, hasconst, **kwargs)
        # kwargs arrays could have changed, easier to just attach here
        for key in kwargs:
            if key in ['design_info', 'formula']:  # leave attached to data
                continue
            # pop so we don't start keeping all these twice or references
            try:
                setattr(self, key, data.__dict__.pop(key))
            except KeyError:  # panel already pops keys in data handling
                pass
        return data

    @classmethod
    def from_formula(cls, formula, data, subset=None, *args, **kwargs):
        """
        Create a Model from a formula and dataframe.

        Parameters
        ----------
        formula : str or generic Formula object
            The formula specifying the model
        data : array-like
            The data for the model. See Notes.
        subset : array-like
            An array-like object of booleans, integers, or index values that
            indicate the subset of df to use in the model. Assumes df is a
            `pandas.DataFrame`
        args : extra arguments
            These are passed to the model
        kwargs : extra keyword arguments
            These are passed to the model with one exception. The
            ``eval_env`` keyword is passed to patsy. It can be either a
            :class:`patsy:patsy.EvalEnvironment` object or an integer
            indicating the depth of the namespace to use. For example, the
            default ``eval_env=0`` uses the calling namespace. If you wish
            to use a "clean" environment set ``eval_env=-1``.

        Returns
        -------
        model : Model instance

        Notes
        ------
        data must define __getitem__ with the keys in the formula terms
        args and kwargs are passed on to the model instantiation. E.g.,
        a numpy structured or rec array, a dictionary, or a pandas DataFrame.
        """
        # TODO: provide a docs template for args/kwargs from child models
        # TODO: subset could use syntax. issue #469.
        if subset is not None:
            data = data.ix[subset]
        # eval_env=0 (the user-facing default) means "the caller's
        # namespace"; the offsets below account for this extra stack frame.
        eval_env = kwargs.pop('eval_env', None)
        if eval_env is None:
            eval_env = 2
        elif eval_env == -1:
            from patsy import EvalEnvironment
            eval_env = EvalEnvironment({})
        else:
            eval_env += 1  # we're going down the stack again
        missing = kwargs.get('missing', 'drop')
        if missing == 'none':  # with patys it's drop or raise. let's raise.
            missing = 'raise'
        tmp = handle_formula_data(data, None, formula, depth=eval_env,
                                  missing=missing)
        ((endog, exog), missing_idx, design_info) = tmp
        kwargs.update({'missing_idx': missing_idx,
                       'missing': missing,
                       'formula': formula,  # attach formula for unpckling
                       'design_info': design_info})
        mod = cls(endog, exog, *args, **kwargs)
        mod.formula = formula
        # since we got a dataframe, attach the original
        mod.data.frame = data
        return mod

    @property
    def endog_names(self):
        # Name(s) of the endogenous (response) variable(s).
        return self.data.ynames

    @property
    def exog_names(self):
        # Names of the exogenous (design matrix) columns.
        return self.data.xnames

    def fit(self):
        """
        Fit a model to data.
        """
        raise NotImplementedError

    def predict(self, params, exog=None, *args, **kwargs):
        """
        After a model has been fit predict returns the fitted values.

        This is a placeholder intended to be overwritten by individual models.
        """
        raise NotImplementedError
class LikelihoodModel(Model):
    """
    Likelihood model is a subclass of Model.
    """

    def __init__(self, endog, exog=None, **kwargs):
        super(LikelihoodModel, self).__init__(endog, exog, **kwargs)
        # Give subclasses a hook to precompute anything fit() needs.
        self.initialize()

    def initialize(self):
        """
        Initialize (possibly re-initialize) a Model instance. For
        instance, the design matrix of a linear model may change
        and some things must be recomputed.
        """
        pass

    # TODO: if the intent is to re-initialize the model with new data then this
    # method needs to take inputs...
    # Abstract likelihood interface: subclasses must provide these.
    def loglike(self, params):
        """
        Log-likelihood of model.
        """
        raise NotImplementedError

    def score(self, params):
        """
        Score vector of model.

        The gradient of logL with respect to each parameter.
        """
        raise NotImplementedError

    def information(self, params):
        """
        Fisher information matrix of model

        Returns -Hessian of loglike evaluated at params.
        """
        raise NotImplementedError

    def hessian(self, params):
        """
        The Hessian matrix of the model
        """
        raise NotImplementedError
def fit(self, start_params=None, method='newton', maxiter=100,
full_output=True, disp=True, fargs=(), callback=None, retall=False,
skip_hessian=False, **kwargs):
"""
Fit method for likelihood based models
Parameters
----------
start_params : array-like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
method : str, optional
The `method` determines which solver from `scipy.optimize`
is used, and it can be chosen from among the following strings:
- 'newton' for Newton-Raphson, 'nm' for Nelder-Mead
- 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
- 'lbfgs' for limited-memory BFGS with optional box constraints
- 'powell' for modified Powell's method
- 'cg' for conjugate gradient
- 'ncg' for Newton-conjugate gradient
- 'basinhopping' for global basin-hopping solver
The explicit arguments in `fit` are passed to the solver,
with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int, optional
The maximum number of iterations to perform.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool, optional
Set to True to print convergence messages.
fargs : tuple, optional
Extra arguments passed to the likelihood function, i.e.,
loglike(x,*args)
callback : callable callback(xk), optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool, optional
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
skip_hessian : bool, optional
If False (default), then the negative inverse hessian is calculated
after the optimization. If True, then the hessian will not be
calculated. However, it will be available in methods that use the
hessian in the optimization (currently only with `"newton"`).
kwargs : keywords
All kwargs are passed to the chosen solver with one exception. The
following keyword controls what happens after the fit::
warn_convergence : bool, optional
If True, checks the model for the converged flag. If the
converged flag is False, a ConvergenceWarning is issued.
Notes
-----
The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
explicit arguments.
Optional arguments for solvers (see returned Results.mle_settings)::
'newton'
tol : float
Relative error in params acceptable for convergence.
'nm' -- Nelder Mead
xtol : float
Relative error in params acceptable for convergence
ftol : float
Relative error in loglike(params) acceptable for
convergence
maxfun : int
Maximum number of function evaluations to make.
'bfgs'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
'lbfgs'
m : int
This many terms are used for the Hessian approximation.
factr : float
A stop condition that is a variant of relative error.
pgtol : float
A stop condition that uses the projected gradient.
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
maxfun : int
Maximum number of function evaluations to make.
bounds : sequence
(min, max) pairs for each element in x,
defining the bounds on that parameter.
Use None for one of min or max when there is no bound
in that direction.
'cg'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon : float
If fprime is approximated, use this value for the step
size. Can be scalar or vector. Only relevant if
Likelihoodmodel.score is None.
'ncg'
fhess_p : callable f'(x,*args)
Function which computes the Hessian of f times an arbitrary
vector, p. Should only be supplied if
LikelihoodModel.hessian is None.
avextol : float
Stop when the average relative error in the minimizer
falls below this amount.
epsilon : float or ndarray
If fhess is approximated, use this value for the step size.
Only relevant if Likelihoodmodel.hessian is None.
'powell'
xtol : float
Line-search error tolerance
ftol : float
Relative error in loglike(params) for acceptable for
convergence.
maxfun : int
Maximum number of function evaluations to make.
start_direc : ndarray
Initial direction set.
'basinhopping'
niter : integer
The number of basin hopping iterations.
niter_success : integer
Stop the run if the global minimum candidate remains the
same for this number of iterations.
T : float
The "temperature" parameter for the accept or reject
criterion. Higher "temperatures" mean that larger jumps
in function value will be accepted. For best results
`T` should be comparable to the separation (in function
value) between local minima.
stepsize : float
Initial step size for use in the random displacement.
interval : integer
The interval for how often to update the `stepsize`.
minimizer : dict
Extra keyword arguments to be passed to the minimizer
`scipy.optimize.minimize()`, for example 'method' - the
minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
tolerance for termination. Other arguments are mapped from
explicit argument of `fit`:
- `args` <- `fargs`
- `jac` <- `score`
- `hess` <- `hess`
"""
Hinv = None # JP error if full_output=0, Hinv not defined
if start_params is None:
if hasattr(self, 'start_params'):
start_params = self.start_params
elif self.exog is not None:
# fails for shape (K,)?
start_params = [0] * self.exog.shape[1]
else:
raise ValueError("If exog is None, then start_params should "
"be specified")
# TODO: separate args from nonarg taking score and hessian, ie.,
# user-supplied and numerically evaluated estimate frprime doesn't take
# args in most (any?) of the optimize function
nobs = self.endog.shape[0]
f = lambda params, *args: -self.loglike(params, *args) / nobs
score = lambda params, *args: -self.score(params, *args) / nobs
try:
hess = lambda params, *args: -self.hessian(params, *args) / nobs
except:
hess = None
if method == 'newton':
score = lambda params, *args: self.score(params, *args) / nobs
hess = lambda params, *args: self.hessian(params, *args) / nobs
#TODO: why are score and hess positive?
warn_convergence = kwargs.pop('warn_convergence', True)
optimizer = Optimizer()
xopt, retvals, optim_settings = optimizer._fit(f, score, start_params,
fargs, kwargs,
hessian=hess,
method=method,
disp=disp,
maxiter=maxiter,
callback=callback,
retall=retall,
full_output=full_output)
#NOTE: this is for fit_regularized and should be generalized
cov_params_func = kwargs.setdefault('cov_params_func', None)
if cov_params_func:
Hinv = cov_params_func(self, xopt, retvals)
elif method == 'newton' and full_output:
Hinv = np.linalg.inv(-retvals['Hessian']) / nobs
elif not skip_hessian:
try:
Hinv = np.linalg.inv(-1 * self.hessian(xopt))
except:
#might want custom warning ResultsWarning? NumericalWarning?
from warnings import warn
warndoc = ('Inverting hessian failed, no bse or '
'cov_params available')
warn(warndoc, RuntimeWarning)
Hinv = None
if 'cov_type' in kwargs:
cov_kwds = kwargs.get('cov_kwds', {})
kwds = {'cov_type':kwargs['cov_type'], 'cov_kwds':cov_kwds}
else:
kwds = {}
if 'use_t' in kwargs:
kwds['use_t'] = kwargs['use_t']
#prints for debugging
#print('kwargs inLikelihoodModel.fit', kwargs)
#print('kwds inLikelihoodModel.fit', kwds)
#TODO: add Hessian approximation and change the above if needed
mlefit = LikelihoodModelResults(self, xopt, Hinv, scale=1., **kwds)
#TODO: hardcode scale?
if isinstance(retvals, dict):
mlefit.mle_retvals = retvals
if warn_convergence and not retvals['converged']:
from warnings import warn
from statsmodels.tools.sm_exceptions import ConvergenceWarning
warn("Maximum Likelihood optimization failed to converge. "
"Check mle_retvals", ConvergenceWarning)
mlefit.mle_settings = optim_settings
return mlefit
# TODO: the below is unfinished
class GenericLikelihoodModel(LikelihoodModel):
    """
    Allows the fitting of any likelihood function via maximum likelihood.

    A subclass needs to specify at least the log-likelihood.
    If the log-likelihood is specified for each observation, then results that
    require the Jacobian will be available. (The other case is not tested yet.)

    Notes
    -----
    Optimization methods that require only a likelihood function are 'nm' and
    'powell'

    Optimization methods that require a likelihood function and a
    score/gradient are 'bfgs', 'cg', and 'ncg'. A function to compute the
    Hessian is optional for 'ncg'.

    Optimization method that require a likelihood function, a score/gradient,
    and a Hessian is 'newton'

    If they are not overwritten by a subclass, then numerical gradient,
    Jacobian and Hessian of the log-likelihood are calculated by numerical
    forward differentiation. This might result in some cases in precision
    problems, and the Hessian might not be positive definite. Even if the
    Hessian is not positive definite the covariance matrix of the parameter
    estimates based on the outer product of the Jacobian might still be valid.

    Examples
    --------
    see also subclasses in directory miscmodels

    import statsmodels.api as sm
    data = sm.datasets.spector.load()
    data.exog = sm.add_constant(data.exog)
    # in this dir
    from model import GenericLikelihoodModel
    probit_mod = sm.Probit(data.endog, data.exog)
    probit_res = probit_mod.fit()
    loglike = probit_mod.loglike
    score = probit_mod.score
    mod = GenericLikelihoodModel(data.endog, data.exog, loglike, score)
    res = mod.fit(method="nm", maxiter = 500)
    import numpy as np
    np.allclose(res.params, probit_res.params)
    """
def __init__(self, endog, exog=None, loglike=None, score=None,
hessian=None, missing='none', extra_params_names=None,
**kwds):
# let them be none in case user wants to use inheritance
if not loglike is None:
self.loglike = loglike
if not score is None:
self.score = score
if not hessian is None:
self.hessian = hessian
self.__dict__.update(kwds)
# TODO: data structures?
#TODO temporary solution, force approx normal
#self.df_model = 9999
#somewhere: CacheWriteWarning: 'df_model' cannot be overwritten
super(GenericLikelihoodModel, self).__init__(endog, exog,
missing=missing)
# this won't work for ru2nmnl, maybe np.ndim of a dict?
if exog is not None:
#try:
self.nparams = (exog.shape[1] if np.ndim(exog) == 2 else 1)
if extra_params_names is not None:
self._set_extra_params_names(extra_params_names)
def _set_extra_params_names(self, extra_params_names):
# check param_names
if extra_params_names is not None:
if self.exog is not None:
self.exog_names.extend(extra_params_names)
else:
self.data.xnames = extra_params_names
self.nparams = len(self.exog_names)
#this is redundant and not used when subclassing
def initialize(self):
if not self.score: # right now score is not optional
self.score = approx_fprime
if not self.hessian:
pass
else: # can use approx_hess_p if we have a gradient
if not self.hessian:
pass
#Initialize is called by
#statsmodels.model.LikelihoodModel.__init__
#and should contain any preprocessing that needs to be done for a model
from statsmodels.tools import tools
if self.exog is not None:
# assume constant
self.df_model = float(np_matrix_rank(self.exog) - 1)
self.df_resid = (float(self.exog.shape[0] -
np_matrix_rank(self.exog)))
else:
self.df_model = np.nan
self.df_resid = np.nan
super(GenericLikelihoodModel, self).initialize()
def expandparams(self, params):
'''
expand to full parameter array when some parameters are fixed
Parameters
----------
params : array
reduced parameter array
Returns
-------
paramsfull : array
expanded parameter array where fixed parameters are included
Notes
-----
Calling this requires that self.fixed_params and self.fixed_paramsmask
are defined.
*developer notes:*
This can be used in the log-likelihood to ...
this could also be replaced by a more general parameter
transformation.
'''
paramsfull = self.fixed_params.copy()
paramsfull[self.fixed_paramsmask] = params
return paramsfull
def reduceparams(self, params):
return params[self.fixed_paramsmask]
def loglike(self, params):
return self.loglikeobs(params).sum(0)
def nloglike(self, params):
return -self.loglikeobs(params).sum(0)
def loglikeobs(self, params):
return -self.nloglikeobs(params)
def score(self, params):
'''
Gradient of log-likelihood evaluated at params
'''
kwds = {}
kwds.setdefault('centered', True)
return approx_fprime(params, self.loglike, **kwds).ravel()
def score_obs(self, params, **kwds):
'''
Jacobian/Gradient of log-likelihood evaluated at params for each
observation.
'''
#kwds.setdefault('epsilon', 1e-4)
kwds.setdefault('centered', True)
return approx_fprime(params, self.loglikeobs, **kwds)
jac = np.deprecate(score_obs, 'jac', 'score_obs', "Use score_obs method."
" jac will be removed in 0.7.")
def hessian(self, params):
'''
Hessian of log-likelihood evaluated at params
'''
from statsmodels.tools.numdiff import approx_hess
# need options for hess (epsilon)
return approx_hess(params, self.loglike)
def fit(self, start_params=None, method='nm', maxiter=500, full_output=1,
disp=1, callback=None, retall=0, **kwargs):
"""
Fit the model using maximum likelihood.
The rest of the docstring is from
statsmodels.LikelihoodModel.fit
"""
if start_params is None:
if hasattr(self, 'start_params'):
start_params = self.start_params
else:
start_params = 0.1 * np.ones(self.nparams)
fit_method = super(GenericLikelihoodModel, self).fit
mlefit = fit_method(start_params=start_params,
method=method, maxiter=maxiter,
full_output=full_output,
disp=disp, callback=callback, **kwargs)
genericmlefit = GenericLikelihoodModelResults(self, mlefit)
#amend param names
exog_names = [] if (self.exog_names is None) else self.exog_names
k_miss = len(exog_names) - len(mlefit.params)
if not k_miss == 0:
if k_miss < 0:
self._set_extra_params_names(
['par%d' % i for i in range(-k_miss)])
else:
# I don't want to raise after we have already fit()
import warnings
warnings.warn('more exog_names than parameters', UserWarning)
return genericmlefit
#fit.__doc__ += LikelihoodModel.fit.__doc__
class Results(object):
    """
    Base container for the output of a fitted model.

    Parameters
    ----------
    model : class instance
        the previously specified model instance
    params : array
        parameter estimates from the fit model
    """
    def __init__(self, model, params, **kwd):
        self.__dict__.update(kwd)
        self.initialize(model, params, **kwd)
        self._data_attr = []

    def initialize(self, model, params, **kwd):
        """Attach the estimates and a reference back to the model."""
        self.params = params
        self.model = model
        if hasattr(model, 'k_constant'):
            self.k_constant = model.k_constant

    def predict(self, exog=None, transform=True, *args, **kwargs):
        """
        Call self.model.predict with self.params as the first argument.

        Parameters
        ----------
        exog : array-like, optional
            The values for which you want to predict.
        transform : bool, optional
            If the model was fit via a formula, do you want to pass
            exog through the formula. Default is True. E.g., if you fit
            a model y ~ log(x1) + log(x2), and transform is True, then
            you can pass a data structure that contains x1 and x2 in
            their original form. Otherwise, you'd need to log the data
            first.
        args, kwargs :
            Some models can take additional arguments or keywords, see the
            predict method of the model for the details.

        Returns
        -------
        prediction : ndarray or pandas.Series
            See self.model.predict
        """
        model = self.model
        apply_formula = (transform and hasattr(model, 'formula')
                         and exog is not None)
        if apply_formula:
            from patsy import dmatrix
            exog = dmatrix(model.data.design_info.builder, exog)
        if exog is not None:
            exog = np.asarray(exog)
            if exog.ndim == 1 and (model.exog.ndim == 1 or
                                   model.exog.shape[1] == 1):
                # promote a 1d vector to a single-column 2d array
                exog = exog[:, None]
            exog = np.atleast_2d(exog)  # needed in count model shape[1]
        return model.predict(self.params, exog, *args, **kwargs)
    #TODO: public method?
class LikelihoodModelResults(Results):
"""
Class to contain results from likelihood models
Parameters
-----------
model : LikelihoodModel instance or subclass instance
LikelihoodModelResults holds a reference to the model that is fit.
params : 1d array_like
parameter estimates from estimated model
normalized_cov_params : 2d array
Normalized (before scaling) covariance of params. (dot(X.T,X))**-1
scale : float
For (some subset of models) scale will typically be the
mean square error from the estimated model (sigma^2)
Returns
-------
**Attributes**
mle_retvals : dict
Contains the values returned from the chosen optimization method if
full_output is True during the fit. Available only if the model
is fit by maximum likelihood. See notes below for the output from
the different methods.
mle_settings : dict
Contains the arguments passed to the chosen optimization method.
Available if the model is fit by maximum likelihood. See
LikelihoodModel.fit for more information.
model : model instance
LikelihoodResults contains a reference to the model that is fit.
params : ndarray
The parameters estimated for the model.
scale : float
The scaling factor of the model given during instantiation.
tvalues : array
The t-values of the standard errors.
Notes
-----
The covariance of params is given by scale times normalized_cov_params.
Return values by solver if full_output is True during fit:
'newton'
fopt : float
The value of the (negative) loglikelihood at its
minimum.
iterations : int
Number of iterations performed.
score : ndarray
The score vector at the optimum.
Hessian : ndarray
The Hessian at the optimum.
warnflag : int
1 if maxiter is exceeded. 0 if successful convergence.
converged : bool
True: converged. False: did not converge.
allvecs : list
List of solutions at each iteration.
'nm'
fopt : float
The value of the (negative) loglikelihood at its
minimum.
iterations : int
Number of iterations performed.
warnflag : int
1: Maximum number of function evaluations made.
2: Maximum number of iterations reached.
converged : bool
True: converged. False: did not converge.
allvecs : list
List of solutions at each iteration.
'bfgs'
fopt : float
Value of the (negative) loglikelihood at its minimum.
gopt : float
Value of gradient at minimum, which should be near 0.
Hinv : ndarray
value of the inverse Hessian matrix at minimum. Note
that this is just an approximation and will often be
different from the value of the analytic Hessian.
fcalls : int
Number of calls to loglike.
gcalls : int
Number of calls to gradient/score.
warnflag : int
1: Maximum number of iterations exceeded. 2: Gradient
and/or function calls are not changing.
converged : bool
True: converged. False: did not converge.
allvecs : list
Results at each iteration.
'lbfgs'
fopt : float
Value of the (negative) loglikelihood at its minimum.
gopt : float
Value of gradient at minimum, which should be near 0.
fcalls : int
Number of calls to loglike.
warnflag : int
Warning flag:
- 0 if converged
- 1 if too many function evaluations or too many iterations
- 2 if stopped for another reason
converged : bool
True: converged. False: did not converge.
'powell'
fopt : float
Value of the (negative) loglikelihood at its minimum.
direc : ndarray
Current direction set.
iterations : int
Number of iterations performed.
fcalls : int
Number of calls to loglike.
warnflag : int
1: Maximum number of function evaluations. 2: Maximum number
of iterations.
converged : bool
True : converged. False: did not converge.
allvecs : list
Results at each iteration.
'cg'
fopt : float
Value of the (negative) loglikelihood at its minimum.
fcalls : int
Number of calls to loglike.
gcalls : int
Number of calls to gradient/score.
warnflag : int
1: Maximum number of iterations exceeded. 2: Gradient and/
or function calls not changing.
converged : bool
True: converged. False: did not converge.
allvecs : list
Results at each iteration.
'ncg'
fopt : float
Value of the (negative) loglikelihood at its minimum.
fcalls : int
Number of calls to loglike.
gcalls : int
Number of calls to gradient/score.
hcalls : int
Number of calls to hessian.
warnflag : int
1: Maximum number of iterations exceeded.
converged : bool
True: converged. False: did not converge.
allvecs : list
Results at each iteration.
"""
# by default we use normal distribution
# can be overwritten by instances or subclasses
use_t = False
    def __init__(self, model, params, normalized_cov_params=None, scale=1.,
                 **kwargs):
        """Store estimates and optionally configure the covariance type.

        `use_t`, `cov_type` and `cov_kwds` are read from ``**kwargs`` so
        that subclasses can decide in `fit` whether to use this generic
        implementation.
        """
        super(LikelihoodModelResults, self).__init__(model, params)
        self.normalized_cov_params = normalized_cov_params
        self.scale = scale
        # robust covariance
        # We put cov_type in kwargs so subclasses can decide in fit whether to
        # use this generic implementation
        if 'use_t' in kwargs:
            # only override the class default when an explicit (non-None)
            # value was supplied
            use_t = kwargs['use_t']
            if use_t is not None:
                self.use_t = use_t
        if 'cov_type' in kwargs:
            cov_type = kwargs.get('cov_type', 'nonrobust')
            cov_kwds = kwargs.get('cov_kwds', {})
            if cov_type == 'nonrobust':
                self.cov_type = 'nonrobust'
                self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
                                 'covariance matrix of the errors is correctly ' +
                                 'specified.'}
            else:
                from statsmodels.base.covtype import get_robustcov_results
                if cov_kwds is None:
                    # guard against an explicit cov_kwds=None in kwargs
                    cov_kwds = {}
                use_t = self.use_t
                # TODO: we shouldn't need use_t in get_robustcov_results
                get_robustcov_results(self, cov_type=cov_type, use_self=True,
                                      use_t=use_t, **cov_kwds)
    def normalized_cov_params(self):
        """Covariance of params before scaling; subclasses must provide it.

        On instances this name is normally shadowed by the attribute set
        in ``__init__``; the method only raises when neither is supplied.
        """
        raise NotImplementedError
def _get_robustcov_results(self, cov_type='nonrobust', use_self=True,
use_t=None, **cov_kwds):
from statsmodels.base.covtype import get_robustcov_results
if cov_kwds is None:
cov_kwds = {}
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
'covariance matrix of the errors is correctly ' +
'specified.'}
else:
# TODO: we shouldn't need use_t in get_robustcov_results
get_robustcov_results(self, cov_type=cov_type, use_self=True,
use_t=use_t, **cov_kwds)
    @cache_readonly
    def llf(self):
        """Log-likelihood of the model evaluated at the fitted params."""
        return self.model.loglike(self.params)
    @cache_readonly
    def bse(self):
        """Standard errors: square root of the covariance diagonal."""
        return np.sqrt(np.diag(self.cov_params()))
    @cache_readonly
    def tvalues(self):
        """
        Return the t-statistic for a given parameter estimate.
        """
        # each estimate divided by its own standard error
        return self.params / self.bse
@cache_readonly
def pvalues(self):
if self.use_t:
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
return stats.t.sf(np.abs(self.tvalues), df_resid)*2
else:
return stats.norm.sf(np.abs(self.tvalues))*2
    def cov_params(self, r_matrix=None, column=None, scale=None, cov_p=None,
                   other=None):
        """
        Returns the variance/covariance matrix.

        The variance/covariance matrix can be of a linear contrast of the
        estimates of params or all params multiplied by scale which will
        usually be an estimate of sigma^2.  Scale is assumed to be a
        scalar.

        Parameters
        ----------
        r_matrix : array-like
            Can be 1d, or 2d.  Can be used alone or with other.
        column : array-like, optional
            Must be used on its own.  Can be 0d or 1d see below.
        scale : float, optional
            Can be specified or not.  Default is None, which means that
            the scale argument is taken from the model.
        cov_p : ndarray, optional
            An alternative covariance matrix of the parameter estimates;
            if None, ``scale * normalized_cov_params`` (or
            ``cov_params_default`` when present) is used.
        other : array-like, optional
            Can be used when r_matrix is specified.

        Returns
        -------
        cov : ndarray
            covariance matrix of the parameter estimates or of linear
            combination of parameter estimates. See Notes.

        Notes
        -----
        (The below are assumed to be in matrix notation.)

        If no argument is specified returns the covariance matrix of a
        model ``(scale)*(X.T X)^(-1)``

        If contrast is specified it pre and post-multiplies as follows
        ``(scale) * r_matrix (X.T X)^(-1) r_matrix.T``

        If contrast and other are specified returns
        ``(scale) * r_matrix (X.T X)^(-1) other.T``

        If column is specified returns
        ``(scale) * (X.T X)^(-1)[column,column]`` if column is 0d

        OR

        ``(scale) * (X.T X)^(-1)[column][:,column]`` if column is 1d
        """
        # an l1-penalized fit can contain exact nans in the covariance;
        # nan_dot multiplies with the convention nan * 0 = 0
        if (hasattr(self, 'mle_settings') and
                self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):
            dot_fun = nan_dot
        else:
            dot_fun = np.dot
        if (cov_p is None and self.normalized_cov_params is None and
                not hasattr(self, 'cov_params_default')):
            raise ValueError('need covariance of parameters for computing '
                             '(unnormalized) covariances')
        # column is mutually exclusive with r_matrix/other
        if column is not None and (r_matrix is not None or other is not None):
            raise ValueError('Column should be specified without other '
                             'arguments.')
        if other is not None and r_matrix is None:
            raise ValueError('other can only be specified with r_matrix')
        if cov_p is None:
            if hasattr(self, 'cov_params_default'):
                cov_p = self.cov_params_default
            else:
                if scale is None:
                    scale = self.scale
                cov_p = self.normalized_cov_params * scale
        if column is not None:
            column = np.asarray(column)
            if column.shape == ():
                # scalar index: return the single variance entry
                return cov_p[column, column]
            else:
                #return cov_p[column][:, column]
                return cov_p[column[:, None], column]
        elif r_matrix is not None:
            r_matrix = np.asarray(r_matrix)
            if r_matrix.shape == ():
                raise ValueError("r_matrix should be 1d or 2d")
            if other is None:
                other = r_matrix
            else:
                other = np.asarray(other)
            tmp = dot_fun(r_matrix, dot_fun(cov_p, np.transpose(other)))
            return tmp
        else:  # if r_matrix is None and column is None:
            return cov_p
#TODO: make sure this works as needed for GLMs
    def t_test(self, r_matrix, cov_p=None, scale=None,
               use_t=None):
        """
        Compute a t-test for each linear hypothesis of the form Rb = q

        Parameters
        ----------
        r_matrix : array-like, str, tuple
            - array : If an array is given, a p x k 2d array or length k
              1d array specifying the linear restrictions. It is assumed
              that the linear combination is equal to zero.
            - str : The full hypotheses to test can be given as a string.
              See the examples.
            - tuple : A tuple of arrays in the form (R, q). If q is given,
              can be either a scalar or a length p row vector.
        cov_p : array-like, optional
            An alternative estimate for the parameter covariance matrix.
            If None is given, self.normalized_cov_params is used.
        scale : float, optional
            An optional `scale` to use.  Default is the scale specified
            by the model fit.
        use_t : bool, optional
            If use_t is None, then the default of the model is used.
            If use_t is True, then the p-values are based on the t
            distribution.
            If use_t is False, then the p-values are based on the normal
            distribution.

        Returns
        -------
        res : ContrastResults instance
            The results for the test are attributes of this results
            instance. The available results have the same elements as the
            parameter table in `summary()`.

        Examples
        --------
        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> data = sm.datasets.longley.load()
        >>> data.exog = sm.add_constant(data.exog)
        >>> results = sm.OLS(data.endog, data.exog).fit()
        >>> r = np.zeros_like(results.params)
        >>> r[5:] = [1,-1]
        >>> print(r)
        [ 0.  0.  0.  0.  0.  1. -1.]

        r tests that the coefficients on the 5th and 6th independent
        variable are the same.

        >>> T_test = results.t_test(r)
        >>> print(T_test)
        <T contrast: effect=-1829.2025687192481, sd=455.39079425193762,
        t=-4.0167754636411717, p=0.0015163772380899498, df_denom=9>
        >>> T_test.effect
        -1829.2025687192481
        >>> T_test.sd
        455.39079425193762
        >>> T_test.tvalue
        -4.0167754636411717
        >>> T_test.pvalue
        0.0015163772380899498

        Alternatively, you can specify the hypothesis tests using a string

        >>> from statsmodels.formula.api import ols
        >>> dta = sm.datasets.longley.load_pandas().data
        >>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
        >>> results = ols(formula, dta).fit()
        >>> hypotheses = 'GNPDEFL = GNP, UNEMP = 2, YEAR/1829 = 1'
        >>> t_test = results.t_test(hypotheses)
        >>> print(t_test)

        See Also
        ---------
        tvalues : individual t statistics
        f_test : for F tests
        patsy.DesignInfo.linear_constraint
        """
        # parse string/tuple/array hypotheses into (R, q) form
        from patsy import DesignInfo
        names = self.model.data.param_names
        LC = DesignInfo(names).linear_constraint(r_matrix)
        r_matrix, q_matrix = LC.coefs, LC.constants
        num_ttests = r_matrix.shape[0]
        num_params = r_matrix.shape[1]
        if (cov_p is None and self.normalized_cov_params is None and
                not hasattr(self, 'cov_params_default')):
            raise ValueError('Need covariance of parameters for computing '
                             'T statistics')
        if num_params != self.params.shape[0]:
            raise ValueError('r_matrix and params are not aligned')
        if q_matrix is None:
            q_matrix = np.zeros(num_ttests)
        else:
            q_matrix = np.asarray(q_matrix)
            q_matrix = q_matrix.squeeze()
        if q_matrix.size > 1:
            if q_matrix.shape[0] != num_ttests:
                raise ValueError("r_matrix and q_matrix must have the same "
                                 "number of rows")
        if use_t is None:
            #switch to use_t false if undefined
            use_t = (hasattr(self, 'use_t') and self.use_t)
        _t = _sd = None
        # effect of each restriction: R b
        _effect = np.dot(r_matrix, self.params)
        # nan_dot multiplies with the convention nan * 0 = 0
        # Perform the test
        if num_ttests > 1:
            _sd = np.sqrt(np.diag(self.cov_params(
                r_matrix=r_matrix, cov_p=cov_p)))
        else:
            _sd = np.sqrt(self.cov_params(r_matrix=r_matrix, cov_p=cov_p))
        # recipr returns 0 for non-positive entries, avoiding div-by-zero
        _t = (_effect - q_matrix) * recipr(_sd)
        df_resid = getattr(self, 'df_resid_inference', self.df_resid)
        if use_t:
            return ContrastResults(effect=_effect, t=_t, sd=_sd,
                                   df_denom=df_resid)
        else:
            return ContrastResults(effect=_effect, statistic=_t, sd=_sd,
                                   df_denom=df_resid,
                                   distribution='norm')
def f_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None):
"""
Compute the F-test for a joint linear hypothesis.
This is a special case of `wald_test` that always uses the F
distribution.
Parameters
----------
r_matrix : array-like, str, or tuple
- array : An r x k array where r is the number of restrictions to
test and k is the number of regressors. It is assumed
that the linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q), ``q`` can be
either a scalar or a length k row vector.
cov_p : array-like, optional
An alternative estimate for the parameter covariance matrix.
If None is given, self.normalized_cov_params is used.
scale : float, optional
Default is 1.0 for no scaling.
invcov : array-like, optional
A q x q array to specify an inverse covariance matrix based on a
restrictions matrix.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> A = np.identity(len(results.params))
>>> A = A[1:,:]
This tests that each coefficient is jointly statistically
significantly different from zero.
>>> print(results.f_test(A))
<F contrast: F=330.28533923463488, p=4.98403052872e-10,
df_denom=9, df_num=6>
Compare this to
>>> results.fvalue
330.2853392346658
>>> results.f_pvalue
4.98403096572e-10
>>> B = np.array(([0,0,1,-1,0,0,0],[0,0,0,0,0,1,-1]))
This tests that the coefficient on the 2nd and 3rd regressors are
equal and jointly that the coefficient on the 5th and 6th regressors
are equal.
>>> print(results.f_test(B))
<F contrast: F=9.740461873303655, p=0.00560528853174, df_denom=9,
df_num=2>
Alternatively, you can specify the hypothesis tests using a string
>>> from statsmodels.datasets import longley
>>> from statsmodels.formula.api import ols
>>> dta = longley.load_pandas().data
>>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
>>> results = ols(formula, dta).fit()
>>> hypotheses = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
>>> f_test = results.f_test(hypotheses)
>>> print(f_test)
See Also
--------
statsmodels.stats.contrast.ContrastResults
wald_test
t_test
patsy.DesignInfo.linear_constraint
Notes
-----
The matrix `r_matrix` is assumed to be non-singular. More precisely,
r_matrix (pX pX.T) r_matrix.T
is assumed invertible. Here, pX is the generalized inverse of the
design matrix of the model. There can be problems in non-OLS models
where the rank of the covariance of the noise is not full.
"""
res = self.wald_test(r_matrix, cov_p=cov_p, scale=scale,
invcov=invcov, use_f=True)
return res
#TODO: untested for GLMs?
    def wald_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None,
                  use_f=None):
        """
        Compute a Wald-test for a joint linear hypothesis.

        Parameters
        ----------
        r_matrix : array-like, str, or tuple
            - array : An r x k array where r is the number of restrictions
              to test and k is the number of regressors. It is assumed
              that the linear combination is equal to zero.
            - str : The full hypotheses to test can be given as a string.
              See the examples.
            - tuple : A tuple of arrays in the form (R, q), ``q`` can be
              either a scalar or a length p row vector.
        cov_p : array-like, optional
            An alternative estimate for the parameter covariance matrix.
            If None is given, self.normalized_cov_params is used.
        scale : float, optional
            Default is 1.0 for no scaling.
        invcov : array-like, optional
            A q x q array to specify an inverse covariance matrix based on
            a restrictions matrix.
        use_f : bool
            If True, then the F-distribution is used. If False, then the
            asymptotic distribution, chisquare is used. If use_f is None,
            then the F distribution is used if the model specifies that
            use_t is True.  The test statistic is proportionally adjusted
            for the distribution by the number of constraints in the
            hypothesis.

        Returns
        -------
        res : ContrastResults instance
            The results for the test are attributes of this results
            instance.

        See also
        --------
        statsmodels.stats.contrast.ContrastResults
        f_test
        t_test
        patsy.DesignInfo.linear_constraint

        Notes
        -----
        The matrix `r_matrix` is assumed to be non-singular. More
        precisely, ``r_matrix (pX pX.T) r_matrix.T`` is assumed
        invertible. Here, pX is the generalized inverse of the design
        matrix of the model. There can be problems in non-OLS models
        where the rank of the covariance of the noise is not full.
        """
        if use_f is None:
            #switch to use_t false if undefined
            use_f = (hasattr(self, 'use_t') and self.use_t)
        # parse string/tuple/array hypotheses into (R, q) form
        from patsy import DesignInfo
        names = self.model.data.param_names
        LC = DesignInfo(names).linear_constraint(r_matrix)
        r_matrix, q_matrix = LC.coefs, LC.constants
        if (self.normalized_cov_params is None and cov_p is None and
                invcov is None and not hasattr(self, 'cov_params_default')):
            raise ValueError('need covariance of parameters for computing '
                             'F statistics')
        cparams = np.dot(r_matrix, self.params[:, None])
        J = float(r_matrix.shape[0])  # number of restrictions
        if q_matrix is None:
            q_matrix = np.zeros(J)
        else:
            q_matrix = np.asarray(q_matrix)
        if q_matrix.ndim == 1:
            q_matrix = q_matrix[:, None]
            if q_matrix.shape[0] != J:
                raise ValueError("r_matrix and q_matrix must have the same "
                                 "number of rows")
        # deviation of the restrictions from their hypothesized values
        Rbq = cparams - q_matrix
        if invcov is None:
            cov_p = self.cov_params(r_matrix=r_matrix, cov_p=cov_p)
            if np.isnan(cov_p).max():
                raise ValueError("r_matrix performs f_test for using "
                                 "dimensions that are asymptotically "
                                 "non-normal")
            invcov = np.linalg.inv(cov_p)
        # l1-penalized fits can carry nans; nan_dot uses nan * 0 = 0
        if (hasattr(self, 'mle_settings') and
                self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):
            F = nan_dot(nan_dot(Rbq.T, invcov), Rbq)
        else:
            F = np.dot(np.dot(Rbq.T, invcov), Rbq)
        df_resid = getattr(self, 'df_resid_inference', self.df_resid)
        if use_f:
            # F statistic is the quadratic form scaled by the number of
            # restrictions
            F /= J
            return ContrastResults(F=F, df_denom=df_resid,
                                   df_num=invcov.shape[0])
        else:
            return ContrastResults(chi2=F, df_denom=J, statistic=F,
                                   distribution='chi2', distargs=(J,))
    def wald_test_terms(self, skip_single=False, extra_constraints=None,
                        combine_terms=None):
        """
        Compute a sequence of Wald tests for terms over multiple columns

        This computes joined Wald tests for the hypothesis that all
        coefficients corresponding to a `term` are zero.
        `Terms` are defined by the underlying formula or by string
        matching.

        Parameters
        ----------
        skip_single : boolean
            If true, then terms that consist only of a single column and,
            therefore, refers only to a single parameter is skipped.
            If false, then all terms are included.
        extra_constraints : ndarray
            not tested yet
        combine_terms : None or list of strings
            Each string in this list is matched to the name of the terms
            or the name of the exogenous variables. All columns whose name
            includes that string are combined in one joint test.

        Returns
        -------
        test_result : result instance
            The result instance contains `table` which is a pandas
            DataFrame with the test results: test statistic, degrees of
            freedom and pvalues.

        Examples
        --------
        >>> res_ols = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
                          data).fit()
        >>> res_ols.wald_test_terms()
        <class 'statsmodels.stats.contrast.WaldTestResults'>
                                                  F                P>F  df constraint  df denom
        Intercept                        279.754525  2.37985521351e-22              1        51
        C(Duration, Sum)                   5.367071    0.0245738436636              1        51
        C(Weight, Sum)                    12.432445  3.99943118767e-05              2        51
        C(Duration, Sum):C(Weight, Sum)    0.176002      0.83912310946              2        51

        >>> res_poi = Poisson.from_formula("Days ~ C(Weight) * C(Duration)",
                                           data).fit(cov_type='HC0')
        >>> wt = res_poi.wald_test_terms(skip_single=False,
                                         combine_terms=['Duration', 'Weight'])
        >>> print(wt)
                                    chi2             P>chi2  df constraint
        Intercept              15.695625  7.43960374424e-05              1
        C(Weight)              16.132616  0.000313940174705              2
        C(Duration)             1.009147     0.315107378931              1
        C(Weight):C(Duration)   0.216694     0.897315972824              2
        Duration               11.187849     0.010752286833              3
        Weight                 30.263368  4.32586407145e-06              4
        """
        # lazy import
        from collections import defaultdict
        result = self
        if extra_constraints is None:
            extra_constraints = []
        if combine_terms is None:
            combine_terms = []
        design_info = getattr(result.model.data.orig_exog, 'design_info', None)
        # NOTE(review): extra_constraints was normalized to [] above, so
        # this `is None` check can never fire -- confirm intended guard
        if design_info is None and extra_constraints is None:
            raise ValueError('no constraints, nothing to do')
        identity = np.eye(len(result.params))
        constraints = []
        combined = defaultdict(list)
        if design_info is not None:
            # formula information available: one joint test per term
            for term in design_info.terms:
                cols = design_info.slice(term)
                name = term.name()
                constraint_matrix = identity[cols]
                # check if in combined
                for cname in combine_terms:
                    if cname in name:
                        combined[cname].append(constraint_matrix)
                k_constraint = constraint_matrix.shape[0]
                if skip_single:
                    if k_constraint == 1:
                        continue
                constraints.append((name, constraint_matrix))
            combined_constraints = []
            for cname in combine_terms:
                combined_constraints.append((cname, np.vstack(combined[cname])))
        else:
            # check by exog/params names if there is no formula info
            for col, name in enumerate(result.model.exog_names):
                constraint_matrix = identity[col]
                # check if in combined
                for cname in combine_terms:
                    if cname in name:
                        combined[cname].append(constraint_matrix)
                if skip_single:
                    continue
                constraints.append((name, constraint_matrix))
            combined_constraints = []
            for cname in combine_terms:
                combined_constraints.append((cname, np.vstack(combined[cname])))
        use_t = result.use_t
        distribution = ['chi2', 'F'][use_t]
        res_wald = []
        index = []
        for name, constraint in constraints + combined_constraints + extra_constraints:
            wt = result.wald_test(constraint)
            row = [wt.statistic.item(), wt.pvalue, constraint.shape[0]]
            if use_t:
                row.append(wt.df_denom)
            res_wald.append(row)
            index.append(name)
        # distribution-neutral names
        col_names = ['statistic', 'pvalue', 'df_constraint']
        if use_t:
            col_names.append('df_denom')
        # TODO: maybe move DataFrame creation to results class
        from pandas import DataFrame
        table = DataFrame(res_wald, index=index, columns=col_names)
        res = WaldTestResults(None, distribution, None, table=table)
        # TODO: remove temp again, added for testing
        res.temp = constraints + combined_constraints + extra_constraints
        return res
def conf_int(self, alpha=.05, cols=None, method='default'):
"""
Returns the confidence interval of the fitted parameters.
Parameters
----------
alpha : float, optional
The significance level for the confidence interval.
ie., The default `alpha` = .05 returns a 95% confidence interval.
cols : array-like, optional
`cols` specifies which confidence intervals to return
method : string
Not Implemented Yet
Method to estimate the confidence_interval.
"Default" : uses self.bse which is based on inverse Hessian for MLE
"hjjh" :
"jac" :
"boot-bse"
"boot_quant"
"profile"
Returns
--------
conf_int : array
Each row contains [lower, upper] limits of the confidence interval
for the corresponding parameter. The first column contains all
lower, the second column contains all upper limits.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> results = sm.OLS(data.endog, data.exog).fit()
>>> results.conf_int()
array([[-5496529.48322745, -1467987.78596704],
[ -177.02903529, 207.15277984],
[ -0.1115811 , 0.03994274],
[ -3.12506664, -0.91539297],
[ -1.5179487 , -0.54850503],
[ -0.56251721, 0.460309 ],
[ 798.7875153 , 2859.51541392]])
>>> results.conf_int(cols=(2,3))
array([[-0.1115811 , 0.03994274],
[-3.12506664, -0.91539297]])
Notes
-----
The confidence interval is based on the standard normal distribution.
Models wish to use a different distribution should overwrite this
method.
"""
bse = self.bse
if self.use_t:
dist = stats.t
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
q = dist.ppf(1 - alpha / 2, df_resid)
else:
dist = stats.norm
q = dist.ppf(1 - alpha / 2)
if cols is None:
lower = self.params - q * bse
upper = self.params + q * bse
else:
cols = np.asarray(cols)
lower = self.params[cols] - q * bse[cols]
upper = self.params[cols] + q * bse[cols]
return np.asarray(lzip(lower, upper))
    def save(self, fname, remove_data=False):
        '''
        save a pickle of this instance

        Parameters
        ----------
        fname : string or filehandle
            fname can be a string to a file path or filename, or a
            filehandle.
        remove_data : bool
            If False (default), then the instance is pickled without
            changes. If True, then all arrays with length nobs are set to
            None before pickling. See the remove_data method.
            In some cases not all arrays will be set to None.

        Notes
        -----
        If remove_data is true and the model result does not implement a
        remove_data method then this will raise an exception.
        '''
        from statsmodels.iolib.smpickle import save_pickle
        if remove_data:
            # shrink the instance before pickling
            self.remove_data()
        save_pickle(self, fname)
@classmethod
def load(cls, fname):
    """
    Load a pickled results instance (class method).

    Parameters
    ----------
    fname : string or filehandle
        File path or open filehandle to read the pickle from.

    Returns
    -------
    The unpickled instance.
    """
    from statsmodels.iolib.smpickle import load_pickle
    return load_pickle(fname)
def remove_data(self):
    '''remove data arrays, all nobs arrays from result and model

    This reduces the size of the instance, so it can be pickled with less
    memory. Currently tested for use with predict from an unpickled
    results and model instance.

    .. warning:: Since data and some intermediate results have been removed
       calculating new statistics that require them will raise exceptions.
       The exception will occur the first time an attribute is accessed
       that has been set to None.

    Not fully tested for time series models, tsa, and might delete too much
    for prediction or not all that would be possible.

    The list of arrays to delete is maintained as an attribute of the
    result and model instance, except for cached values. These lists could
    be changed before calling remove_data.
    '''
    def wipe(obj, att):
        # walk a dotted attribute path (e.g. 'model.exog') down to the
        # parent object, then null out the final attribute if present
        #get to last element in attribute path
        p = att.split('.')
        att_ = p.pop(-1)
        try:
            # NOTE(review): relies on builtin `reduce`; under Python 3 this
            # needs functools.reduce imported at module level -- confirm
            obj_ = reduce(getattr, [obj] + p)

            #print(repr(obj), repr(att))
            #print(hasattr(obj_, att_))
            if hasattr(obj_, att_):
                #print('removing3', att_)
                setattr(obj_, att_, None)
        except AttributeError:
            # an intermediate attribute is missing: nothing to wipe
            pass

    # data attributes live on both the results instance and the model
    model_attr = ['model.' + i for i in self.model._data_attr]
    for att in self._data_attr + model_attr:
        #print('removing', att)
        wipe(self, att)

    # also null out cached nobs-length results (fitted values, residuals)
    data_in_cache = getattr(self, 'data_in_cache', [])
    data_in_cache += ['fittedvalues', 'resid', 'wresid']
    for key in data_in_cache:
        try:
            self._cache[key] = None
        except (AttributeError, KeyError):
            pass
class LikelihoodResultsWrapper(wrap.ResultsWrapper):
    # Maps result attributes/methods to the axis metadata the wrapper uses
    # when re-attaching index information: 'columns' (parameter-length),
    # 'rows' (nobs-length) or 'cov' (parameter x parameter).
    _attrs = {
        'params': 'columns',
        'bse': 'columns',
        'pvalues': 'columns',
        'tvalues': 'columns',
        'resid': 'rows',
        'fittedvalues': 'rows',
        'normalized_cov_params': 'cov',
    }

    _wrap_attrs = _attrs
    _wrap_methods = {
        'cov_params': 'cov',
        'conf_int': 'columns'
    }

# generate the wrapping attributes/methods on the wrapper class
wrap.populate_wrapper(LikelihoodResultsWrapper,
                      LikelihoodModelResults)
class ResultMixin(object):
    # Mixin adding information criteria, score/hessian based covariance
    # estimators and a simple bootstrap to likelihood results classes.

    @cache_readonly
    def df_modelwc(self):
        # collect different ways of defining the number of parameters, used for
        # aic, bic
        if hasattr(self, 'df_model'):
            if hasattr(self, 'hasconst'):
                hasconst = self.hasconst
            else:
                # default assumption
                hasconst = 1
            return self.df_model + hasconst
        else:
            # fall back to the raw parameter count
            return self.params.size

    @cache_readonly
    def aic(self):
        # Akaike information criterion: -2 llf + 2 k
        return -2 * self.llf + 2 * (self.df_modelwc)

    @cache_readonly
    def bic(self):
        # Bayesian information criterion: -2 llf + log(nobs) k
        return -2 * self.llf + np.log(self.nobs) * (self.df_modelwc)

    @cache_readonly
    def score_obsv(self):
        '''cached Jacobian of log-likelihood
        '''
        return self.model.score_obs(self.params)

    # deprecated alias kept for backwards compatibility
    jacv = np.deprecate(score_obsv, 'jacv', 'score_obsv',
                        "Use score_obsv attribute."
                        " jacv will be removed in 0.7.")

    @cache_readonly
    def hessv(self):
        '''cached Hessian of log-likelihood
        '''
        return self.model.hessian(self.params)

    @cache_readonly
    def covjac(self):
        '''
        covariance of parameters based on outer product of jacobian of
        log-likelihood
        '''
        ##  if not hasattr(self, '_results'):
        ##      raise ValueError('need to call fit first')
        ##      #self.fit()
        ##  self.jacv = jacv = self.jac(self._results.params)
        jacv = self.score_obsv
        return np.linalg.inv(np.dot(jacv.T, jacv))

    @cache_readonly
    def covjhj(self):
        '''covariance of parameters based on HJJH

        dot product of Hessian, Jacobian, Jacobian, Hessian of likelihood

        name should be covhjh
        '''
        jacv = self.score_obsv
        hessv = self.hessv
        hessinv = np.linalg.inv(hessv)
        ##  self.hessinv = hessin = self.cov_params()
        return np.dot(hessinv, np.dot(np.dot(jacv.T, jacv), hessinv))

    @cache_readonly
    def bsejhj(self):
        '''standard deviation of parameter estimates based on covHJH
        '''
        return np.sqrt(np.diag(self.covjhj))

    @cache_readonly
    def bsejac(self):
        '''standard deviation of parameter estimates based on covjac
        '''
        return np.sqrt(np.diag(self.covjac))

    def bootstrap(self, nrep=100, method='nm', disp=0, store=1):
        """simple bootstrap to get mean and variance of estimator

        see notes

        Parameters
        ----------
        nrep : int
            number of bootstrap replications
        method : str
            optimization method to use
        disp : bool
            If true, then optimization prints results
        store : bool
            If true, then parameter estimates for all bootstrap iterations
            are attached in self.bootstrap_results

        Returns
        -------
        mean : array
            mean of parameter estimates over bootstrap replications
        std : array
            standard deviation of parameter estimates over bootstrap
            replications

        Notes
        -----
        This was mainly written to compare estimators of the standard errors of
        the parameter estimates. It uses independent random sampling from the
        original endog and exog, and therefore is only correct if observations
        are independently distributed.

        This will be moved to apply only to models with independently
        distributed observations.
        """
        results = []
        print(self.model.__class__)
        hascloneattr = True if hasattr(self, 'cloneattr') else False
        for i in range(nrep):
            # resample observation indices with replacement
            rvsind = np.random.randint(self.nobs, size=self.nobs)
            #this needs to set startparam and get other defining attributes
            #need a clone method on model
            fitmod = self.model.__class__(self.endog[rvsind],
                                          self.exog[rvsind, :])
            if hascloneattr:
                # copy over user-declared defining attributes of the model
                for attr in self.model.cloneattr:
                    setattr(fitmod, attr, getattr(self.model, attr))

            fitres = fitmod.fit(method=method, disp=disp)
            results.append(fitres.params)
        results = np.array(results)
        if store:
            self.bootstrap_results = results
        return results.mean(0), results.std(0), results

    def get_nlfun(self, fun):
        #I think this is supposed to get the delta method that is currently
        #in miscmodels count (as part of Poisson example)
        pass
class GenericLikelihoodModelResults(LikelihoodModelResults, ResultMixin):
    """
    A results class for the discrete dependent variable models.

    ..Warning :

    The following description has not been updated to this version/class.
    Where are AIC, BIC, ....? docstring looks like copy from discretemod

    Parameters
    ----------
    model : A DiscreteModel instance
    mlefit : instance of LikelihoodResults
        This contains the numerical optimization results as returned by
        LikelihoodModel.fit(), in a superclass of GenericLikelihoodModels

    Returns
    -------
    *Attributes*

    Warning most of these are not available yet

    aic : float
        Akaike information criterion. -2*(`llf` - p) where p is the number
        of regressors including the intercept.
    bic : float
        Bayesian information criterion. -2*`llf` + ln(`nobs`)*p where p is the
        number of regressors including the intercept.
    bse : array
        The standard errors of the coefficients.
    df_resid : float
        See model definition.
    df_model : float
        See model definition.
    fitted_values : array
        Linear predictor XB.
    llf : float
        Value of the loglikelihood
    llnull : float
        Value of the constant-only loglikelihood
    llr : float
        Likelihood ratio chi-squared statistic; -2*(`llnull` - `llf`)
    llr_pvalue : float
        The chi-squared probability of getting a log-likelihood ratio
        statistic greater than llr. llr has a chi-squared distribution
        with degrees of freedom `df_model`.
    prsquared : float
        McFadden's pseudo-R-squared. 1 - (`llf`/`llnull`)
    """

    def __init__(self, model, mlefit):
        self.model = model
        self.endog = model.endog
        self.exog = model.exog
        self.nobs = model.endog.shape[0]

        # TODO: possibly move to model.fit()
        #       and outsource together with patching names
        if hasattr(model, 'df_model'):
            self.df_model = model.df_model
        else:
            # fall back to the number of estimated parameters
            self.df_model = len(mlefit.params)
        # retrofitting the model, used in t_test TODO: check design
        self.model.df_model = self.df_model

        if hasattr(model, 'df_resid'):
            self.df_resid = model.df_resid
        else:
            self.df_resid = self.endog.shape[0] - self.df_model
        # retrofitting the model, used in t_test TODO: check design
        self.model.df_resid = self.df_resid

        self._cache = resettable_cache()
        # adopt all attributes of the fitted optimization result
        self.__dict__.update(mlefit.__dict__)

    def summary(self, yname=None, xname=None, title=None, alpha=.05):
        """Summarize the Regression Results

        Parameters
        -----------
        yname : string, optional
            Default is `y`
        xname : list of strings, optional
            Default is `var_##` for ## in p the number of regressors
        title : string, optional
            Title for the top table. If not None, then this replaces the
            default title
        alpha : float
            significance level for the confidence intervals

        Returns
        -------
        smry : Summary instance
            this holds the summary tables and text, which can be printed or
            converted to various output formats.

        See Also
        --------
        statsmodels.iolib.summary.Summary : class to hold summary
            results
        """
        top_left = [('Dep. Variable:', None),
                    ('Model:', None),
                    ('Method:', ['Maximum Likelihood']),
                    ('Date:', None),
                    ('Time:', None),
                    ('No. Observations:', None),
                    ('Df Residuals:', None),  # [self.df_resid]),
                    ('Df Model:', None),  # [self.df_model])
                    ]

        top_right = [  # ('R-squared:', ["%#8.3f" % self.rsquared]),
                     # ('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
                     # ('F-statistic:', ["%#8.4g" % self.fvalue] ),
                     # ('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
                     ('Log-Likelihood:', None),  # ["%#6.4g" % self.llf]),
                     ('AIC:', ["%#8.4g" % self.aic]),
                     ('BIC:', ["%#8.4g" % self.bic])
                     ]

        if title is None:
            title = self.model.__class__.__name__ + ' ' + "Results"

        #create summary table instance
        from statsmodels.iolib.summary import Summary
        smry = Summary()
        smry.add_table_2cols(self, gleft=top_left, gright=top_right,
                             yname=yname, xname=xname, title=title)
        smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
                              use_t=False)

        return smry
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/core/groupby.py | 9 | 140196 | import types
from functools import wraps
import numpy as np
import datetime
import collections
import warnings
import copy
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable, filter, map
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, CategoricalIndex, _ensure_index
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import (cache_readonly, Appender, make_signature,
deprecate_kwarg)
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object,
is_datetime_or_timedelta_dtype, is_bool,
is_bool_dtype, AbstractMethodError)
from pandas.core.config import option_context
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])

# NDFrame methods that GroupBy.__getattr__ may dispatch to the groups
_common_apply_whitelist = frozenset([
    'last', 'first',
    'head', 'tail', 'median',
    'mean', 'sum', 'min', 'max',
    'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
    'resample',
    'describe',
    'rank', 'quantile',
    'fillna',
    'mad',
    'any', 'all',
    'take',
    'idxmax', 'idxmin',
    'shift', 'tshift',
    'ffill', 'bfill',
    'pct_change', 'skew',
    'corr', 'cov', 'diff',
]) | _plotting_methods

# Series groupbys cannot boxplot, but additionally expose dtype/unique
_series_apply_whitelist = \
    (_common_apply_whitelist - set(['boxplot'])) | \
    frozenset(['dtype', 'unique'])

# DataFrame groupbys additionally expose dtypes/corrwith
_dataframe_apply_whitelist = \
    _common_apply_whitelist | frozenset(['dtypes', 'corrwith'])

# transforms that have a dedicated cython implementation
_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift'])
class GroupByError(Exception):
    # Base class for groupby-specific errors.
    pass


class DataError(GroupByError):
    # Groupby error subclass raised by aggregation paths.
    pass


class SpecificationError(GroupByError):
    # Raised when an aggregation specification is invalid (e.g. the cython
    # aggregation path signals an AssertionError; see _groupby_function).
    pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result._convert(datetime=True)
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
class Grouper(object):
    """
    A Grouper allows the user to specify a groupby instruction for a target
    object.

    This specification will select a column via the key parameter, or if the
    level and/or axis parameters are given, a level of the index of the
    target object.

    These are local specifications and will override 'global' settings, that
    is the parameters axis and level which are passed to the groupby itself.

    Parameters
    ----------
    key : string, defaults to None
        groupby key, which selects the grouping column of the target
    level : name/number, defaults to None
        the level for the target index
    freq : string / frequency object, defaults to None
        This will groupby the specified frequency if the target selection
        (via key or level) is a datetime-like object. For full specification
        of available frequencies, please see
        `here <http://pandas.pydata.org/pandas-docs/stable/timeseries.html>`_.
    axis : number/name of the axis, defaults to 0
    sort : boolean, default to False
        whether to sort the resulting labels

    additional kwargs to control time-like groupers (when freq is passed)

    closed : closed end of interval; left or right
    label : interval boundary to use for labeling; left or right
    convention : {'start', 'end', 'e', 's'}
        If grouper is PeriodIndex

    Returns
    -------
    A specification for a groupby instruction

    Examples
    --------
    Syntactic sugar for ``df.groupby('A')``

    >>> df.groupby(Grouper(key='A'))

    Specify a resample operation on the column 'date'

    >>> df.groupby(Grouper(key='date', freq='60s'))

    Specify a resample operation on the level 'date' on the columns axis
    with a frequency of 60s

    >>> df.groupby(Grouper(level='date', freq='60s', axis=1))
    """

    def __new__(cls, *args, **kwargs):
        # when a frequency is requested, transparently construct a
        # TimeGrouper instead of a plain Grouper
        if kwargs.get('freq') is not None:
            from pandas.tseries.resample import TimeGrouper
            cls = TimeGrouper
        return super(Grouper, cls).__new__(cls)

    def __init__(self, key=None, level=None, freq=None, axis=0, sort=False):
        self.key = key
        self.level = level
        self.freq = freq
        self.axis = axis
        self.sort = sort

        # filled in lazily by _set_grouper/_get_grouper
        self.grouper = None
        self.obj = None
        self.indexer = None
        self.binner = None

    @property
    def ax(self):
        # the Index being grouped on (set by _set_grouper)
        return self.grouper

    def _get_grouper(self, obj):
        """
        Parameters
        ----------
        obj : the subject object

        Returns
        -------
        a tuple of binner, grouper, obj (possibly sorted)
        """
        self._set_grouper(obj)
        self.grouper, exclusions, self.obj = _get_grouper(self.obj, [self.key],
                                                          axis=self.axis,
                                                          level=self.level,
                                                          sort=self.sort)
        return self.binner, self.grouper, self.obj

    def _set_grouper(self, obj, sort=False):
        """
        given an object and the specifications, setup the internal grouper
        for this particular specification

        Parameters
        ----------
        obj : the subject object
        """
        if self.key is not None and self.level is not None:
            raise ValueError("The Grouper cannot specify both a key and a level!")

        # the key must be a valid info item
        if self.key is not None:
            key = self.key
            if key not in obj._info_axis:
                raise KeyError("The grouper name {0} is not found".format(key))
            ax = Index(obj[key], name=key)
        else:
            ax = obj._get_axis(self.axis)
            if self.level is not None:
                level = self.level

                # if a level is given it must be a mi level or
                # equivalent to the axis name
                if isinstance(ax, MultiIndex):
                    level = ax._get_level_number(level)
                    ax = Index(ax.get_level_values(level), name=ax.names[level])
                else:
                    if level not in (0, ax.name):
                        raise ValueError("The level {0} is not valid".format(level))

        # possibly sort
        if (self.sort or sort) and not ax.is_monotonic:
            # remember the indexer so results can be mapped back later
            indexer = self.indexer = ax.argsort(kind='quicksort')
            ax = ax.take(indexer)
            obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)

        self.obj = obj
        self.grouper = ax
        return self.grouper

    def _get_binner_for_grouping(self, obj):
        """ default to the standard binner here """
        group_axis = obj._get_axis(self.axis)
        return Grouping(group_axis, None, obj=obj, name=self.key,
                        level=self.level, sort=self.sort, in_axis=False)

    @property
    def groups(self):
        # dict of {group name -> group labels}, delegated to the grouper
        return self.grouper.groups
class GroupByPlot(PandasObject):
    """
    Class implementing the .plot attribute for groupby objects
    """
    def __init__(self, groupby):
        self._groupby = groupby

    def __call__(self, *args, **kwargs):
        # groupby.plot(...) -> apply obj.plot(...) to each group
        def f(self):
            return self.plot(*args, **kwargs)
        f.__name__ = 'plot'
        return self._groupby.apply(f)

    def __getattr__(self, name):
        # groupby.plot.<name>(...) -> apply obj.plot.<name>(...) per group
        def attr(*args, **kwargs):
            def f(self):
                return getattr(self.plot, name)(*args, **kwargs)
            return self._groupby.apply(f)
        return attr
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
# methods that __getattr__ may dispatch straight to the grouped object
_apply_whitelist = _common_apply_whitelist
# attribute names resolved via object.__getattribute__ in __getattr__
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
# set lazily by _set_selection_from_grouper when non-level groupers exist
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
             grouper=None, exclusions=None, selection=None, as_index=True,
             sort=True, group_keys=True, squeeze=False):
    # See the GroupBy class docstring for parameter descriptions.
    self._selection = selection

    if isinstance(obj, NDFrame):
        obj._consolidate_inplace()

    self.level = level

    # as_index=False is only meaningful for DataFrame groupbys on axis 0
    if not as_index:
        if not isinstance(obj, DataFrame):
            raise TypeError('as_index=False only valid with DataFrame')
        if axis != 0:
            raise ValueError('as_index=False only valid for axis=0')

    self.as_index = as_index
    self.keys = keys
    self.sort = sort
    self.group_keys = group_keys
    self.squeeze = squeeze

    # build the internal grouper (maps rows -> groups) unless one was given
    if grouper is None:
        grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
                                                level=level, sort=sort)

    self.obj = obj
    self.axis = obj._get_axis_number(axis)
    self.grouper = grouper
    self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
    # number of groups
    return len(self.groups)
def __unicode__(self):
    # TODO: Better unicode/repr for GroupBy object
    return object.__repr__(self)
@property
def groups(self):
    """ dict {group name -> group labels} """
    return self.grouper.groups
@property
def ngroups(self):
    # number of distinct groups
    return self.grouper.ngroups
@property
def indices(self):
    """ dict {group name -> group indices} """
    return self.grouper.indices
def _get_indices(self, names):
    """ safe get multiple indices, translate keys for datelike to underlying repr """

    def get_converter(s):
        # possibly convert to the actual key types
        # in the indices, could be a Timestamp or a np.datetime64
        if isinstance(s, (Timestamp, datetime.datetime)):
            return lambda key: Timestamp(key)
        elif isinstance(s, np.datetime64):
            return lambda key: Timestamp(key).asm8
        else:
            return lambda key: key

    if len(names) == 0:
        return []

    # sample one existing index key to learn the key shape/type
    if len(self.indices) > 0:
        index_sample = next(iter(self.indices))
    else:
        index_sample = None     # Dummy sample

    name_sample = names[0]
    if isinstance(index_sample, tuple):
        if not isinstance(name_sample, tuple):
            msg = ("must supply a tuple to get_group with multiple"
                   " grouping keys")
            raise ValueError(msg)
        if not len(name_sample) == len(index_sample):
            try:
                # If the original grouper was a tuple
                return [self.indices[name] for name in names]
            except KeyError:
                # turns out it wasn't a tuple
                # NOTE(review): message has a duplicated word ("a a")
                msg = ("must supply a a same-length tuple to get_group"
                       " with multiple grouping keys")
                raise ValueError(msg)

        # element-wise key conversion for tuple keys
        converters = [get_converter(s) for s in index_sample]
        names = [tuple([f(n) for f, n in zip(converters, name)])
                 for name in names]
    else:
        converter = get_converter(index_sample)
        names = [converter(name) for name in names]

    # missing keys map to empty index lists
    return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
    """ safe get index, translate keys for datelike to underlying repr """
    return self._get_indices([name])[0]
@property
def name(self):
    """Name of the current selection; None when nothing is selected."""
    # (the if/else in the original collapsed to a plain return: when
    # _selection is None the result was None anyway)
    return self._selection
@property
def _selection_list(self):
    # normalize the selection to a list for uniform handling
    if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
        return [self._selection]
    return self._selection
@cache_readonly
def _selected_obj(self):
    # object the groupby operates on: honor an explicit selection first,
    # then any implicit group-column exclusion, else the full object
    if self._selection is None or isinstance(self.obj, Series):
        if self._group_selection is not None:
            return self.obj[self._group_selection]
        return self.obj
    else:
        return self.obj[self._selection]
def _set_selection_from_grouper(self):
    """ we may need create a selection if we have non-level groupers """
    grp = self.grouper
    if self.as_index and getattr(grp, 'groupings', None) is not None and self.obj.ndim > 1:
        ax = self.obj._info_axis
        # exclude in-axis grouping columns from the implicit selection
        groupers = [g.name for g in grp.groupings
                    if g.level is None and g.in_axis]

        if len(groupers):
            self._group_selection = ax.difference(Index(groupers)).tolist()
def _set_result_index_ordered(self, result):
    # set the result index on the passed values object
    # return the new object
    # related 8046

    # the values/counts are repeated according to the group index
    indices = self.indices

    # shortcut if we have an already ordered grouper
    if not self.grouper.is_monotonic:
        # re-sort the result back into the original row order
        index = Index(np.concatenate(self._get_indices(self.grouper.result_index)))
        result.index = index
        result = result.sort_index()

    result.index = self.obj.index
    return result
def _dir_additions(self):
    # expose whitelisted dispatch methods in dir() for tab-completion
    return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr):
    # internal names bypass the dispatch machinery entirely
    if attr in self._internal_names_set:
        return object.__getattribute__(self, attr)
    # column access: gb.colname is sugar for gb['colname']
    if attr in self.obj:
        return self[attr]
    # otherwise dispatch NDFrame methods to the groups (whitelist enforced
    # inside _make_wrapper)
    if hasattr(self.obj, attr):
        return self._make_wrapper(attr)

    raise AttributeError("%r object has no attribute %r" %
                         (type(self).__name__, attr))
def __getitem__(self, key):
    # subclasses implement column selection
    raise NotImplementedError('Not implemented: %s' % key)
plot = property(GroupByPlot)
def _make_wrapper(self, name):
    # Build a function that applies NDFrame method `name` group-wise.
    if name not in self._apply_whitelist:
        is_callable = callable(getattr(self._selected_obj, name, None))
        kind = ' callable ' if is_callable else ' '
        msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
               "using the 'apply' method".format(kind, name,
                                                 type(self).__name__))
        raise AttributeError(msg)

    # need to setup the selection
    # as are not passed directly but in the grouper
    self._set_selection_from_grouper()

    f = getattr(self._selected_obj, name)
    if not isinstance(f, types.MethodType):
        # plain attribute (not a bound method): just fetch it per group
        return self.apply(lambda self: getattr(self, name))

    # use the unbound method so it can be called on each group chunk
    f = getattr(type(self._selected_obj), name)

    def wrapper(*args, **kwargs):
        # a little trickery for aggregation functions that need an axis
        # argument
        kwargs_with_axis = kwargs.copy()
        if 'axis' not in kwargs_with_axis or kwargs_with_axis['axis'] == None:
            kwargs_with_axis['axis'] = self.axis

        def curried_with_axis(x):
            return f(x, *args, **kwargs_with_axis)

        def curried(x):
            return f(x, *args, **kwargs)

        # preserve the name so we can detect it when calling plot methods,
        # to avoid duplicates
        curried.__name__ = curried_with_axis.__name__ = name

        # special case otherwise extra plots are created when catching the
        # exception below
        if name in _plotting_methods:
            return self.apply(curried)

        try:
            return self.apply(curried_with_axis)
        except Exception:
            try:
                return self.apply(curried)
            except Exception:

                # related to : GH3688
                # try item-by-item
                # this can be called recursively, so need to raise ValueError if
                # we don't have this method to indicated to aggregate to
                # mark this column as an error
                try:
                    return self._aggregate_item_by_item(name, *args, **kwargs)
                except (AttributeError):
                    raise ValueError

    return wrapper
def get_group(self, name, obj=None):
    """
    Constructs NDFrame from group with provided name

    Parameters
    ----------
    name : object
        the name of the group to get as a DataFrame
    obj : NDFrame, default None
        the NDFrame to take the DataFrame out of. If
        it is None, the object groupby was called on will
        be used

    Returns
    -------
    group : type of obj
    """
    if obj is None:
        obj = self._selected_obj

    inds = self._get_index(name)
    if not len(inds):
        # unknown group name
        raise KeyError(name)
    return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
    """
    Groupby iterator

    Returns
    -------
    Generator yielding sequence of (name, subsetted object)
    for each group
    """
    return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
    """
    Apply function and combine results together in an intelligent way. The
    split-apply-combine combination rules attempt to be as common sense
    based as possible. For example:

    case 1:
    group DataFrame
    apply aggregation function (f(chunk) -> Series)
    yield DataFrame, with group axis having group labels

    case 2:
    group DataFrame
    apply transform function ((f(chunk) -> DataFrame with same indexes)
    yield DataFrame with resulting chunks glued together

    case 3:
    group Series
    apply function with f(chunk) -> DataFrame
    yield DataFrame with result of chunks glued together

    Parameters
    ----------
    func : function

    Notes
    -----
    See online documentation for full exposition on how to use apply.

    In the current implementation apply calls func twice on the
    first group to decide whether it can take a fast or slow code
    path. This can lead to unexpected behavior if func has
    side-effects, as they will take effect twice for the first
    group.

    See also
    --------
    aggregate, transform

    Returns
    -------
    applied : type depending on grouped object and function
    """
    func = _intercept_function(func)

    @wraps(func)
    def f(g):
        return func(g, *args, **kwargs)

    # ignore SettingWithCopy here in case the user mutates
    with option_context('mode.chained_assignment', None):
        return self._python_apply_general(f)
def _python_apply_general(self, f):
    # run f once per group, then stitch the pieces back together
    keys, values, mutated = self.grouper.apply(f, self._selected_obj,
                                               self.axis)

    return self._wrap_applied_output(keys, values,
                                     not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
    # abstract: implemented by subclasses
    raise AbstractMethodError(self)
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
    # alias for aggregate; shares its docstring via Appender
    return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
    # yield (name, selected object) pairs for aggregation
    yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
    # abstract: implemented by subclasses
    raise AbstractMethodError(self)
def irow(self, i):
    """
    DEPRECATED. Use ``.nth(i)`` instead
    """

    # 10177
    warnings.warn("irow(i) is deprecated. Please use .nth(i)",
                  FutureWarning, stacklevel=2)
    return self.nth(i)
def count(self):
    """ Compute count of group, excluding missing values """

    # defined here for API doc; subclasses provide the implementation
    raise NotImplementedError
def mean(self):
    """
    Compute mean of groups, excluding missing values

    For multiple groupings, the result index will be a MultiIndex
    """
    try:
        return self._cython_agg_general('mean')
    except GroupByError:
        # groupby-specific errors are deliberate; re-raise unchanged
        raise
    except Exception:  # pragma: no cover
        # generic fallback: python-level per-group mean
        self._set_selection_from_grouper()
        f = lambda x: x.mean(axis=self.axis)
        return self._python_agg_general(f)
def median(self):
    """
    Compute median of groups, excluding missing values

    For multiple groupings, the result index will be a MultiIndex
    """
    try:
        return self._cython_agg_general('median')
    except GroupByError:
        # groupby-specific errors are deliberate; re-raise unchanged
        raise
    except Exception:  # pragma: no cover
        self._set_selection_from_grouper()

        def f(x):
            # route ndarrays through Series to get NaN-aware median
            if isinstance(x, np.ndarray):
                x = Series(x)
            return x.median(axis=self.axis)
        return self._python_agg_general(f)
def std(self, ddof=1):
    """
    Compute standard deviation of groups, excluding missing values

    For multiple groupings, the result index will be a MultiIndex

    Parameters
    ----------
    ddof : int, default 1
        delta degrees of freedom passed through to var
    """

    # todo, implement at cython level?
    return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
    """
    Compute variance of groups, excluding missing values

    For multiple groupings, the result index will be a MultiIndex

    Parameters
    ----------
    ddof : int, default 1
        delta degrees of freedom
    """
    if ddof == 1:
        # default ddof has a fast cython implementation
        return self._cython_agg_general('var')
    else:
        self._set_selection_from_grouper()
        f = lambda x: x.var(ddof=ddof)
        return self._python_agg_general(f)
def sem(self, ddof=1):
    """
    Compute standard error of the mean of groups, excluding missing values

    For multiple groupings, the result index will be a MultiIndex
    """
    # sem = std / sqrt(group size)
    return self.std(ddof=ddof)/np.sqrt(self.count())
def size(self):
    """
    Compute group sizes
    """
    return self.grouper.size()
# aggregation methods generated by the _groupby_function factory: each tries
# the named cython aggregation first and falls back to the numpy function
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
                          numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
                         _convert=True)
def ohlc(self):
    """
    Compute open, high, low and close values of a group, excluding
    missing values

    For multiple groupings, the result index will be a MultiIndex
    """
    # (original docstring said "Compute sum of values" -- a copy/paste
    # error; this dispatches to the cython 'ohlc' aggregation per column)
    return self._apply_to_column_groupbys(
        lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
    """
    Take the nth row from each group if n is an int, or a subset of rows
    if n is a list of ints.

    If dropna, will take the nth non-null row, dropna is either
    Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
    to calling dropna(how=dropna) before the groupby.

    Parameters
    ----------
    n : int or list of ints
        a single nth value for the row or a list of nth values
    dropna : None or str, optional
        apply the specified dropna operation before counting which row is
        the nth row. Needs to be None, 'any' or 'all'

    Examples
    --------
    >>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
    >>> g = df.groupby('A')
    >>> g.nth(0)
       A   B
    0  1 NaN
    2  5   6
    >>> g.nth(1)
       A  B
    1  1  4
    >>> g.nth(-1)
       A  B
    1  1  4
    2  5  6
    >>> g.nth(0, dropna='any')
       B
    A
    1  4
    5  6
    >>> g.nth(1, dropna='any')  # NaNs denote group exhausted when using dropna
        B
    A
    1 NaN
    5 NaN
    """
    # normalize n to a list of distinct nth values
    if isinstance(n, int):
        nth_values = [n]
    elif isinstance(n, (set, list, tuple)):
        nth_values = list(set(n))
        if dropna is not None:
            raise ValueError("dropna option with a list of nth values is not supported")
    else:
        raise TypeError("n needs to be an int or a list/set/tuple of ints")

    m = self.grouper._max_groupsize
    # filter out values that are outside [-m, m)
    pos_nth_values = [i for i in nth_values if i >= 0 and i < m]
    neg_nth_values = [i for i in nth_values if i < 0 and i >= -m]
    self._set_selection_from_grouper()

    if not dropna:  # good choice
        if not pos_nth_values and not neg_nth_values:
            # no valid nth values
            return self._selected_obj.loc[[]]

        # build a boolean mask of rows whose within-group position matches
        rng = np.zeros(m, dtype=bool)
        for i in pos_nth_values:
            rng[i] = True
        is_nth = self._cumcount_array(rng)

        if neg_nth_values:
            # negative positions count from the end of each group
            rng = np.zeros(m, dtype=bool)
            for i in neg_nth_values:
                rng[- i - 1] = True
            is_nth |= self._cumcount_array(rng, ascending=False)

        result = self._selected_obj[is_nth]

        # the result index
        if self.as_index:
            ax = self.obj._info_axis
            names = self.grouper.names
            if self.obj.ndim == 1:
                # this is a pass-thru
                pass
            elif all([n in ax for n in names]):
                result.index = MultiIndex.from_arrays([self.obj[name][is_nth] for name in names]).set_names(names)
            elif self._group_selection is not None:
                result.index = self.obj._get_axis(self.axis)[is_nth]

            result = result.sort_index()

        return result

    if (isinstance(self._selected_obj, DataFrame)
            and dropna not in ['any', 'all']):
        # Note: when agg-ing picker doesn't raise this, just returns NaN
        raise ValueError("For a DataFrame groupby, dropna must be "
                         "either None, 'any' or 'all', "
                         "(was passed %s)." % (dropna),)

    # old behaviour, but with all and any support for DataFrames.
    # modified in GH 7559 to have better perf
    max_len = n if n >= 0 else - 1 - n
    dropped = self.obj.dropna(how=dropna, axis=self.axis)

    # get a new grouper for our dropped obj
    if self.keys is None and self.level is None:

        # we don't have the grouper info available (e.g. we have selected out
        # a column that is not in the current object)
        axis = self.grouper.axis
        grouper = axis[axis.isin(dropped.index)]
        keys = self.grouper.names
    else:

        # create a grouper with the original parameters, but on the dropped object
        grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
                                     level=self.level, sort=self.sort)

    sizes = dropped.groupby(grouper).size()
    result = dropped.groupby(grouper).nth(n)

    # groups too short to have an nth row become NaN
    mask = (sizes < max_len).values

    # set the results which don't meet the criteria
    if len(result) and mask.any():
        result.loc[mask] = np.nan

    # reset/reindex to the original groups
    if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
        result.index = self.grouper.result_index
    else:
        result = result.reindex(self.grouper.result_index)

    return result
def cumcount(self, ascending=True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def cumprod(self, axis=0):
"""
Cumulative product for each group
"""
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis))
return self._cython_transform('cumprod')
def cumsum(self, axis=0):
"""
Cumulative sum for each group
"""
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis))
return self._cython_transform('cumsum')
    def shift(self, periods=1, freq=None, axis=0):
        """
        Shift each group by periods observations

        Parameters
        ----------
        periods : int, default 1
            Number of positions to shift within each group.
        freq : frequency-like, optional
            When supplied, shifting is delegated to the generic apply path.
        axis : int, default 0
            Only axis=0 takes the fast Cython path.
        """
        if freq is not None or axis != 0:
            return self.apply(lambda x: x.shift(periods, freq, axis))
        labels, _, ngroups = self.grouper.group_info
        # filled in by Cython
        indexer = np.zeros_like(labels)
        _algos.group_shift_indexer(indexer, labels, ngroups, periods)
        output = {}
        for name, obj in self._iterate_slices():
            # NOTE(review): assumes the Cython kernel marks positions shifted
            # out of their group with -1 so take_nd fills them with NaN —
            # confirm against _algos.group_shift_indexer
            output[name] = com.take_nd(obj.values, indexer)
        return self._wrap_transformed_output(output)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Examples
--------
>>> df = DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').head(1)
A B
0 a 1
2 b 1
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, ascending=True):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
    def _index_with_as_index(self, b):
        """
        Take boolean mask of index to be returned from apply, if as_index=True

        Builds a MultiIndex whose leading levels are the group values
        (restricted by mask ``b``) and whose trailing levels are the
        original index levels, also restricted by ``b``.
        """
        # TODO perf, it feels like this should already be somewhere...
        from itertools import chain
        original = self._selected_obj.index
        gp = self.grouper
        # group levels first, then every level of the original index
        levels = chain((gp.levels[i][gp.labels[i][b]]
                        for i in range(len(gp.groupings))),
                       (original.get_level_values(i)[b]
                        for i in range(original.nlevels)))
        new = MultiIndex.from_arrays(list(levels))
        new.names = gp.names + original.names
        return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
we may have roundtripped thru object in the mean-time
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_transform(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.transform(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_transformed_output(output, names)
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
    def _python_agg_general(self, func, *args, **kwargs):
        # Aggregate each slice in pure Python; if nothing could be
        # aggregated, fall back to the generic apply machinery.
        func = _intercept_function(func)
        f = lambda x: func(x, *args, **kwargs)
        # iterate through "columns" ex exclusions to populate output dict
        output = {}
        for name, obj in self._iterate_slices():
            try:
                result, counts = self.grouper.agg_series(obj, f)
                output[name] = self._try_cast(result, obj)
            except TypeError:
                continue
        if len(output) == 0:
            return self._python_apply_general(f)
        if self.grouper._filter_empty_groups:
            # NOTE(review): ``counts`` is whatever the *last* successful
            # agg_series call produced; assumes all slices share the same
            # group counts — confirm
            mask = counts.ravel() > 0
            for name, result in compat.iteritems(output):
                # since we are masking, make sure that we have a float object
                values = result
                if is_numeric_dtype(values.dtype):
                    values = com.ensure_float(values)
                output[name] = self._try_cast(values[mask], result)
        return self._wrap_aggregated_output(output)
    def _wrap_applied_output(self, *args, **kwargs):
        # abstract hook: concrete GroupBy subclasses wrap the results of
        # ``apply`` themselves
        raise AbstractMethodError(self)
    def _concat_objects(self, keys, values, not_indexed_same=False):
        # Glue the per-group results in ``values`` back together.  When the
        # pieces still carry the caller's index, restore the original row
        # order; otherwise optionally prepend the group keys as index levels.
        from pandas.tools.merge import concat
        if not not_indexed_same:
            result = concat(values, axis=self.axis)
            ax = self._selected_obj._get_axis(self.axis)
            # reindex back to the original axis ordering
            if isinstance(result, Series):
                result = result.reindex(ax)
            else:
                result = result.reindex_axis(ax, axis=self.axis)
        elif self.group_keys:
            if self.as_index:
                # possible MI return case
                group_keys = keys
                group_levels = self.grouper.levels
                group_names = self.grouper.names
                result = concat(values, axis=self.axis, keys=group_keys,
                                levels=group_levels, names=group_names)
            else:
                # GH5610, returns a MI, with the first level being a
                # range index
                keys = list(range(len(values)))
                result = concat(values, axis=self.axis, keys=keys)
        else:
            result = concat(values, axis=self.axis)
        return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
    # Dispatch to the GroupBy subclass matching the container type.
    for obj_type, gb_klass in ((Series, SeriesGroupBy),
                               (DataFrame, DataFrameGroupBy)):
        if isinstance(obj, obj_type):
            return gb_klass(obj, by, **kwds)
    raise TypeError('invalid type: %s' % type(obj))  # pragma: no cover
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
    """
    This is an internal Grouper class, which actually holds the generated
    groups.  It wraps one or more Grouping objects and drives iteration,
    Cython aggregation/transform kernels, and pure-Python fallbacks.
    """
    def __init__(self, axis, groupings, sort=True, group_keys=True):
        # a single grouping needs no label compression and keeps empty groups
        self._filter_empty_groups = self.compressed = len(groupings) != 1
        self.axis, self.groupings, self.sort, self.group_keys = \
            axis, groupings, sort, group_keys

    @property
    def shape(self):
        # one entry per grouping: the number of distinct groups in each
        return tuple(ping.ngroups for ping in self.groupings)

    def __iter__(self):
        return iter(self.indices)

    @property
    def nkeys(self):
        return len(self.groupings)

    def get_iterator(self, data, axis=0):
        """
        Groupby iterator

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        splitter = self._get_splitter(data, axis=axis)
        keys = self._get_group_keys()
        for key, (i, group) in zip(keys, splitter):
            yield key, group

    def _get_splitter(self, data, axis=0):
        comp_ids, _, ngroups = self.group_info
        return get_splitter(data, comp_ids, ngroups, axis=axis)

    def _get_group_keys(self):
        if len(self.groupings) == 1:
            return self.levels[0]
        else:
            comp_ids, _, ngroups = self.group_info
            # provide "flattened" iterator for multi-group setting
            mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
            return [mapper.get_key(i) for i in range(ngroups)]

    def apply(self, f, data, axis=0):
        """
        Apply ``f`` to every group of ``data``; returns the group keys, the
        list of per-group results, and whether any group was mutated by ``f``.
        """
        mutated = False
        splitter = self._get_splitter(data, axis=axis)
        group_keys = self._get_group_keys()
        # oh boy
        f_name = com._get_callable_name(f)
        if (f_name not in _plotting_methods and
                hasattr(splitter, 'fast_apply') and axis == 0):
            try:
                values, mutated = splitter.fast_apply(f, group_keys)
                return group_keys, values, mutated
            except lib.InvalidApply:
                # we detect a mutation of some kind
                # so take slow path
                pass
            except Exception:
                # NOTE(review): this swallows the error and retries via the
                # slow path below, which will re-raise it when f runs again
                pass
        result_values = []
        for key, (i, group) in zip(group_keys, splitter):
            object.__setattr__(group, 'name', key)
            # group might be modified
            group_axes = _get_axes(group)
            res = f(group)
            if not _is_indexed_like(res, group_axes):
                mutated = True
            result_values.append(res)
        return group_keys, result_values, mutated

    @cache_readonly
    def indices(self):
        """ dict {group name -> group indices} """
        if len(self.groupings) == 1:
            return self.groupings[0].indices
        else:
            label_list = [ping.labels for ping in self.groupings]
            keys = [_values_from_object(ping.group_index)
                    for ping in self.groupings]
            return _get_indices_dict(label_list, keys)

    @property
    def labels(self):
        return [ping.labels for ping in self.groupings]

    @property
    def levels(self):
        return [ping.group_index for ping in self.groupings]

    @property
    def names(self):
        return [ping.name for ping in self.groupings]

    def size(self):
        """
        Compute group sizes
        """
        ids, _, ngroup = self.group_info
        ids = com._ensure_platform_int(ids)
        # ids of -1 mark NA rows and are excluded from the counts
        out = np.bincount(ids[ids != -1], minlength=ngroup)
        return Series(out, index=self.result_index, dtype='int64')

    @cache_readonly
    def _max_groupsize(self):
        """
        Compute size of largest group
        """
        # For many items in each group this is much faster than
        # self.size().max(), in worst case marginally slower
        if self.indices:
            return max(len(v) for v in self.indices.values())
        else:
            return 0

    @cache_readonly
    def groups(self):
        """ dict {group name -> group labels} """
        if len(self.groupings) == 1:
            return self.groupings[0].groups
        else:
            to_groupby = lzip(*(ping.grouper for ping in self.groupings))
            to_groupby = Index(to_groupby)
            return self.axis.groupby(to_groupby.values)

    @cache_readonly
    def is_monotonic(self):
        # return if my group orderings are monotonic
        return Index(self.group_info[0]).is_monotonic

    @cache_readonly
    def group_info(self):
        # (compressed per-row ids, observed group ids, number of groups)
        comp_ids, obs_group_ids = self._get_compressed_labels()
        ngroups = len(obs_group_ids)
        comp_ids = com._ensure_int64(comp_ids)
        return comp_ids, obs_group_ids, ngroups

    def _get_compressed_labels(self):
        all_labels = [ping.labels for ping in self.groupings]
        if len(all_labels) > 1:
            group_index = get_group_index(all_labels, self.shape,
                                          sort=True, xnull=True)
            return _compress_group_index(group_index, sort=self.sort)
        # single grouping: its labels are already compressed
        ping = self.groupings[0]
        return ping.labels, np.arange(len(ping.group_index))

    @cache_readonly
    def ngroups(self):
        return len(self.result_index)

    @property
    def recons_labels(self):
        comp_ids, obs_ids, _ = self.group_info
        labels = (ping.labels for ping in self.groupings)
        return decons_obs_group_ids(comp_ids,
                                    obs_ids, self.shape, labels, xnull=True)

    @cache_readonly
    def result_index(self):
        if not self.compressed and len(self.groupings) == 1:
            return self.groupings[0].group_index.rename(self.names[0])
        return MultiIndex(levels=[ping.group_index
                                  for ping in self.groupings],
                          labels=self.recons_labels,
                          verify_integrity=False,
                          names=self.names)

    def get_group_levels(self):
        if not self.compressed and len(self.groupings) == 1:
            return [self.groupings[0].group_index]
        name_list = []
        for ping, labels in zip(self.groupings, self.recons_labels):
            labels = com._ensure_platform_int(labels)
            levels = ping.group_index.take(labels)
            name_list.append(levels)
        return name_list

    #------------------------------------------------------------
    # Aggregation functions

    # mapping of user-facing op names to Cython kernel names; dict-valued
    # entries carry an extra currying wrapper ('f')
    _cython_functions = {
        'aggregate': {
            'add': 'group_add',
            'prod': 'group_prod',
            'min': 'group_min',
            'max': 'group_max',
            'mean': 'group_mean',
            'median': {
                'name': 'group_median'
            },
            'var': 'group_var',
            'first': {
                'name': 'group_nth',
                'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
            },
            'last': 'group_last',
        },
        'transform': {
            'cumprod': 'group_cumprod',
            'cumsum': 'group_cumsum',
        }
    }

    # number of output columns per input column for a kernel
    _cython_arity = {
        'ohlc': 4,  # OHLC
    }

    _name_functions = {}

    def _get_cython_function(self, kind, how, values, is_numeric):
        """
        Resolve the Cython kernel for (kind, how) given the input dtype,
        returning (callable, dtype name).  Raises NotImplementedError when
        no kernel exists for this dtype.
        """
        dtype_str = values.dtype.name

        def get_func(fname):
            # see if there is a fused-type version of function
            # only valid for numeric
            f = getattr(_algos, fname, None)
            if f is not None and is_numeric:
                return f
            # otherwise find dtype-specific version, falling back to object
            for dt in [dtype_str, 'object']:
                # BUG FIX: the lookup previously interpolated ``dtype_str``
                # instead of the loop variable ``dt``, so the 'object'
                # fallback was never actually tried
                f = getattr(_algos, "%s_%s" % (fname, dt), None)
                if f is not None:
                    return f

        ftype = self._cython_functions[kind][how]
        if isinstance(ftype, dict):
            func = afunc = get_func(ftype['name'])
            # a sub-function
            f = ftype.get('f')
            if f is not None:
                def wrapper(*args, **kwargs):
                    return f(afunc, *args, **kwargs)
                # need to curry our sub-function
                func = wrapper
        else:
            func = get_func(ftype)
        if func is None:
            raise NotImplementedError("function is not implemented for this"
                                      "dtype: [how->%s,dtype->%s]" %
                                      (how, dtype_str))
        return func, dtype_str

    def _cython_operation(self, kind, values, how, axis):
        """
        Run the Cython kernel ``how`` over ``values`` (``kind`` is either
        'aggregate' or 'transform'), returning (result, names).
        """
        assert kind in ['transform', 'aggregate']
        arity = self._cython_arity.get(how, 1)
        vdim = values.ndim
        swapped = False
        if vdim == 1:
            # kernels operate on 2-D input; add a trailing axis
            values = values[:, None]
            out_shape = (self.ngroups, arity)
        else:
            if axis > 0:
                swapped = True
                values = values.swapaxes(0, axis)
            if arity > 1:
                raise NotImplementedError("arity of more than 1 is not "
                                          "supported for the 'how' argument")
            out_shape = (self.ngroups,) + values.shape[1:]
        is_numeric = is_numeric_dtype(values.dtype)
        # normalize the input dtype for the kernels
        if is_datetime_or_timedelta_dtype(values.dtype):
            values = values.view('int64')
            is_numeric = True
        elif is_bool_dtype(values.dtype):
            values = _algos.ensure_float64(values)
        elif com.is_integer_dtype(values):
            values = values.astype('int64', copy=False)
        elif is_numeric:
            values = _algos.ensure_float64(values)
        else:
            values = values.astype(object)
        try:
            func, dtype_str = self._get_cython_function(kind, how, values,
                                                        is_numeric)
        except NotImplementedError:
            if is_numeric:
                # retry with float64 before giving up
                values = _algos.ensure_float64(values)
                func, dtype_str = self._get_cython_function(kind, how, values,
                                                            is_numeric)
            else:
                raise
        if is_numeric:
            out_dtype = '%s%d' % (values.dtype.kind, values.dtype.itemsize)
        else:
            out_dtype = 'object'
        labels, _, _ = self.group_info
        if kind == 'aggregate':
            result = np.empty(out_shape, dtype=out_dtype)
            result.fill(np.nan)
            counts = np.zeros(self.ngroups, dtype=np.int64)
            result = self._aggregate(result, counts, values, labels, func,
                                     is_numeric)
        elif kind == 'transform':
            result = np.empty_like(values, dtype=out_dtype)
            result.fill(np.nan)
            # temporary storage for running-total type transforms
            accum = np.empty(out_shape, dtype=out_dtype)
            result = self._transform(result, accum, values, labels, func,
                                     is_numeric)
        if com.is_integer_dtype(result):
            # integer results containing iNaT sentinels must become floats
            # so the sentinel can be represented as NaN
            if len(result[result == tslib.iNaT]) > 0:
                result = result.astype('float64')
                result[result == tslib.iNaT] = np.nan
        if kind == 'aggregate' and \
                self._filter_empty_groups and not counts.all():
            if result.ndim == 2:
                try:
                    result = lib.row_bool_subset(
                        result, (counts > 0).view(np.uint8))
                except ValueError:
                    result = lib.row_bool_subset_object(
                        com._ensure_object(result),
                        (counts > 0).view(np.uint8))
            else:
                result = result[counts > 0]
        if vdim == 1 and arity == 1:
            result = result[:, 0]
        if how in self._name_functions:
            # TODO
            names = self._name_functions[how]()
        else:
            names = None
        if swapped:
            result = result.swapaxes(0, axis)
        return result, names

    def aggregate(self, values, how, axis=0):
        return self._cython_operation('aggregate', values, how, axis)

    def transform(self, values, how, axis=0):
        return self._cython_operation('transform', values, how, axis)

    def _aggregate(self, result, counts, values, comp_ids, agg_func,
                   is_numeric):
        if values.ndim > 3:
            # punting for now
            raise NotImplementedError("number of dimensions is currently "
                                      "limited to 3")
        elif values.ndim > 2:
            # aggregate each 2-D chunk of the 3-D input independently
            for i, chunk in enumerate(values.transpose(2, 0, 1)):
                chunk = chunk.squeeze()
                agg_func(result[:, :, i], counts, chunk, comp_ids)
        else:
            agg_func(result, counts, values, comp_ids)
        return result

    def _transform(self, result, accum, values, comp_ids, transform_func,
                   is_numeric):
        comp_ids, _, ngroups = self.group_info
        if values.ndim > 3:
            # punting for now
            raise NotImplementedError("number of dimensions is currently "
                                      "limited to 3")
        elif values.ndim > 2:
            for i, chunk in enumerate(values.transpose(2, 0, 1)):
                chunk = chunk.squeeze()
                # BUG FIX: this previously called an undefined ``agg_func``
                # (NameError) on the whole ``values`` array; transform each
                # 2-D chunk with the supplied kernel instead, mirroring
                # _aggregate above
                transform_func(result[:, :, i], chunk, comp_ids, accum)
        else:
            transform_func(result, values, comp_ids, accum)
        return result

    def agg_series(self, obj, func):
        # try the Cython fast path; any failure falls back to pure Python
        try:
            return self._aggregate_series_fast(obj, func)
        except Exception:
            return self._aggregate_series_pure_python(obj, func)

    def _aggregate_series_fast(self, obj, func):
        func = _intercept_function(func)
        if obj.index._has_complex_internals:
            raise TypeError('Incompatible index for Cython grouper')
        group_index, _, ngroups = self.group_info
        # avoids object / Series creation overhead
        dummy = obj._get_values(slice(None, 0)).to_dense()
        indexer = _get_group_index_sorter(group_index, ngroups)
        obj = obj.take(indexer, convert=False)
        group_index = com.take_nd(group_index, indexer, allow_fill=False)
        grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
                                    dummy)
        result, counts = grouper.get_result()
        return result, counts

    def _aggregate_series_pure_python(self, obj, func):
        group_index, _, ngroups = self.group_info
        counts = np.zeros(ngroups, dtype=int)
        result = None
        splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
        for label, group in splitter:
            res = func(group)
            if result is None:
                # the first result determines whether func truly reduces;
                # array-like results mean it does not
                if (isinstance(res, (Series, Index, np.ndarray)) or
                        isinstance(res, list)):
                    raise ValueError('Function does not reduce')
                result = np.empty(ngroups, dtype='O')
            counts[label] = group.shape[0]
            result[label] = res
        result = lib.maybe_convert_objects(result, try_float=0)
        return result, counts
def generate_bins_generic(values, binner, closed):
    """
    Generate bin edge offsets and bin labels for one array using another array
    which has bin edge values. Both arrays must be sorted.

    Parameters
    ----------
    values : array of values
    binner : a comparable array of values representing bins into which to bin
        the first array. Note, 'values' end-points must fall within 'binner'
        end-points.
    closed : which end of bin is closed; left (default), right

    Returns
    -------
    bins : array of offsets (into 'values' argument) of bins.
        Zero and last edge are excluded in result, so for instance the first
        bin is values[0:bin[0]] and the last is values[bin[-1]:]
    """
    n_values = len(values)
    n_edges = len(binner)
    if n_values <= 0 or n_edges <= 0:
        raise ValueError("Invalid length for values or for binner")
    # check binner fits data
    if values[0] < binner[0]:
        raise ValueError("Values falls before first bin")
    if values[n_values - 1] > binner[n_edges - 1]:
        raise ValueError("Values falls after last bin")
    bins = np.empty(n_edges - 1, dtype=np.int64)
    pos = 0  # index into values
    # linear scan, presume nothing about values/binner except that it fits ok
    for k in range(n_edges - 1):
        right_edge = binner[k + 1]
        # advance past every value belonging to the current bin
        if closed == 'right':
            while pos < n_values and values[pos] <= right_edge:
                pos += 1
        else:
            while pos < n_values and values[pos] < right_edge:
                pos += 1
        bins[k] = pos
    return bins
class BinGrouper(BaseGrouper):
    # Grouper backed by pre-computed bin edges (e.g. from resampling):
    # ``bins[i]`` is the exclusive end offset of group i in the data,
    # labelled by ``binlabels[i]``.
    def __init__(self, bins, binlabels, filter_empty=False):
        self.bins = com._ensure_int64(bins)
        self.binlabels = _ensure_index(binlabels)
        self._filter_empty_groups = filter_empty
    @cache_readonly
    def groups(self):
        """ dict {group name -> group labels} """
        # this is mainly for compat
        # GH 3881
        result = {}
        for key, value in zip(self.binlabels, self.bins):
            if key is not tslib.NaT:
                result[key] = value
        return result
    @property
    def nkeys(self):
        # binning always represents a single grouping key
        return 1
    def get_iterator(self, data, axis=0):
        """
        Groupby iterator
        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        if isinstance(data, NDFrame):
            slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
            length = len(data.axes[axis])
        else:
            slicer = lambda start,edge: data[slice(start,edge)]
            length = len(data)
        start = 0
        for edge, label in zip(self.bins, self.binlabels):
            # NaT-labelled bins are skipped but still advance the cursor
            if label is not tslib.NaT:
                yield label, slicer(start,edge)
            start = edge
        # anything past the last recorded edge falls into the last bin
        if start < length:
            yield self.binlabels[-1], slicer(start,None)
    def apply(self, f, data, axis=0):
        # slow-path apply: call f on each binned group, tracking whether any
        # group was mutated (result no longer indexed like the input group)
        result_keys = []
        result_values = []
        mutated = False
        for key, group in self.get_iterator(data, axis=axis):
            object.__setattr__(group, 'name', key)
            # group might be modified
            group_axes = _get_axes(group)
            res = f(group)
            if not _is_indexed_like(res, group_axes):
                mutated = True
            result_keys.append(key)
            result_values.append(res)
        return result_keys, result_values, mutated
    @cache_readonly
    def indices(self):
        # dict {bin label -> list of positional indices in that bin}
        indices = collections.defaultdict(list)
        i = 0
        for label, bin in zip(self.binlabels, self.bins):
            if i < bin:
                if label is not tslib.NaT:
                    indices[label] = list(range(i, bin))
            i = bin
        return indices
    @cache_readonly
    def group_info(self):
        ngroups = self.ngroups
        obs_group_ids = np.arange(ngroups)
        # number of rows in each bin
        rep = np.diff(np.r_[0, self.bins])
        rep = com._ensure_platform_int(rep)
        if ngroups == len(self.bins):
            comp_ids = np.repeat(np.arange(ngroups), rep)
        else:
            # a leading null label was dropped from result_index; mark its
            # rows with the -1 sentinel
            comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
        return comp_ids.astype('int64', copy=False), \
            obs_group_ids.astype('int64', copy=False), ngroups
    @cache_readonly
    def ngroups(self):
        return len(self.result_index)
    @cache_readonly
    def result_index(self):
        # drop a leading null bin label, if present
        if len(self.binlabels) != 0 and isnull(self.binlabels[0]):
            return self.binlabels[1:]
        return self.binlabels
    @property
    def levels(self):
        return [self.binlabels]
    @property
    def names(self):
        return [self.binlabels.name]
    @property
    def groupings(self):
        # for compat
        return None
    #----------------------------------------------------------------------
    # cython aggregation
    # same kernel table as BaseGrouper, plus OHLC, minus median
    _cython_functions = copy.deepcopy(BaseGrouper._cython_functions)
    _cython_functions['aggregate']['ohlc'] = 'group_ohlc'
    _cython_functions['aggregate'].pop('median')
    _name_functions = {
        'ohlc': lambda *args: ['open', 'high', 'low', 'close']
    }
    def agg_series(self, obj, func):
        # NOTE(review): obj[:0] presumably yields an empty same-dtype dummy
        # for the Cython grouper — confirm against lib.SeriesBinGrouper
        dummy = obj[:0]
        grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
        return grouper.get_result()
class Grouping(object):
    """
    Holds the grouping information for a single key

    Parameters
    ----------
    index : Index
    grouper :
    obj :
    name :
    level :
    in_axis : if the Grouping is a column in self.obj and hence among
        Groupby.exclusions list

    Returns
    -------
    **Attributes**:
    * indices : dict of {group -> index_list}
    * labels : ndarray, group labels
    * ids : mapping of label -> group
    * counts : array of group counts
    * group_index : unique groups
    * groups : dict of {group -> label_list}
    """
    def __init__(self, index, grouper=None, obj=None, name=None, level=None,
                 sort=True, in_axis=False):
        self.name = name
        self.level = level
        self.grouper = _convert_grouper(index, grouper)
        self.index = index
        self.sort = sort
        self.obj = obj
        self.in_axis = in_axis
        # right place for this?
        if isinstance(grouper, (Series, Index)) and name is None:
            self.name = grouper.name
        if isinstance(grouper, MultiIndex):
            self.grouper = grouper.values
        # pre-computed
        self._should_compress = True
        # we have a single grouper which may be a myriad of things, some of which are
        # dependent on the passing in level
        #
        if level is not None:
            if not isinstance(level, int):
                # resolve a level name into its positional index
                if level not in index.names:
                    raise AssertionError('Level %s not in index' % str(level))
                level = index.names.index(level)
            inds = index.labels[level]
            level_index = index.levels[level]
            if self.name is None:
                self.name = index.names[level]
            # XXX complete hack
            if grouper is not None:
                # map the caller's grouper over the materialized level values
                level_values = index.levels[level].take(inds)
                self.grouper = level_values.map(self.grouper)
            else:
                # all levels may not be observed
                labels, uniques = algos.factorize(inds, sort=True)
                if len(uniques) > 0 and uniques[0] == -1:
                    # handle NAs
                    mask = inds != -1
                    ok_labels, uniques = algos.factorize(inds[mask], sort=True)
                    labels = np.empty(len(inds), dtype=inds.dtype)
                    labels[mask] = ok_labels
                    labels[~mask] = -1
                if len(uniques) < len(level_index):
                    # restrict the level to the values actually observed
                    level_index = level_index.take(uniques)
                self._labels = labels
                self._group_index = level_index
                self.grouper = level_index.take(labels)
        else:
            if isinstance(self.grouper, (list, tuple)):
                self.grouper = com._asarray_tuplesafe(self.grouper)
            # a passed Categorical
            elif is_categorical_dtype(self.grouper):
                # must have an ordered categorical
                if self.sort:
                    if not self.grouper.ordered:
                        # technically we cannot group on an unordered Categorical
                        # but this a user convenience to do so; the ordering
                        # is preserved and if it's a reduction it doesn't make any difference
                        pass
                # fix bug #GH8868 sort=False being ignored in categorical groupby
                else:
                    cat = self.grouper.unique()
                    self.grouper = self.grouper.reorder_categories(cat.categories)
                # we make a CategoricalIndex out of the cat grouper
                # preserving the categories / ordered attributes
                self._labels = self.grouper.codes
                c = self.grouper.categories
                self._group_index = CategoricalIndex(Categorical.from_codes(np.arange(len(c)),
                                                    categories=c,
                                                    ordered=self.grouper.ordered))
            # a passed Grouper like
            elif isinstance(self.grouper, Grouper):
                # get the new grouper
                grouper = self.grouper._get_binner_for_grouping(self.obj)
                self.obj = self.grouper.obj
                self.grouper = grouper
                if self.name is None:
                    self.name = grouper.name
            # we are done
            if isinstance(self.grouper, Grouping):
                self.grouper = self.grouper.grouper
            # no level passed
            elif not isinstance(self.grouper, (Series, Index, Categorical, np.ndarray)):
                if getattr(self.grouper, 'ndim', 1) != 1:
                    t = self.name or str(type(self.grouper))
                    raise ValueError("Grouper for '%s' not 1-dimensional" % t)
                # treat the grouper as a mapping over the index
                # (e.g. a function or a dict's .get)
                self.grouper = self.index.map(self.grouper)
                if not (hasattr(self.grouper, "__len__") and
                        len(self.grouper) == len(self.index)):
                    errmsg = ('Grouper result violates len(labels) == '
                              'len(data)\nresult: %s' %
                              com.pprint_thing(self.grouper))
                    self.grouper = None  # Try for sanity
                    raise AssertionError(errmsg)
        # if we have a date/time-like grouper, make sure that we have Timestamps like
        if getattr(self.grouper,'dtype',None) is not None:
            if is_datetime64_dtype(self.grouper):
                from pandas import to_datetime
                self.grouper = to_datetime(self.grouper)
            elif is_timedelta64_dtype(self.grouper):
                from pandas import to_timedelta
                self.grouper = to_timedelta(self.grouper)
    def __repr__(self):
        return 'Grouping(%s)' % self.name
    def __iter__(self):
        return iter(self.indices)
    # lazily-built caches, populated by _make_labels
    _labels = None
    _group_index = None
    @property
    def ngroups(self):
        return len(self.group_index)
    @cache_readonly
    def indices(self):
        return _groupby_indices(self.grouper)
    @property
    def labels(self):
        # integer codes (one per row) into ``group_index``
        if self._labels is None:
            self._make_labels()
        return self._labels
    @property
    def group_index(self):
        # Index of the unique group values
        if self._group_index is None:
            self._make_labels()
        return self._group_index
    def _make_labels(self):
        if self._labels is None or self._group_index is None:
            labels, uniques = algos.factorize(self.grouper, sort=self.sort)
            uniques = Index(uniques, name=self.name)
            self._labels = labels
            self._group_index = uniques
    @cache_readonly
    def groups(self):
        return self.index.groupby(self.grouper)
def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
    """
    create and return a BaseGrouper, which is an internal
    mapping of how to create the grouper indexers.
    This may be composed of multiple Grouping objects, indicating
    multiple groupers

    Groupers are ultimately index mappings. They can originate as:
    index mappings, keys to columns, functions, or Groupers

    Groupers enable local references to axis,level,sort, while
    the passed in axis, level, and sort are 'global'.

    This routine tries to figure out what the passing in references
    are and then creates a Grouping for each one, combined into
    a BaseGrouper.
    """
    group_axis = obj._get_axis(axis)
    # validate that the passed level is compatible with the passed
    # axis of the object
    if level is not None:
        if not isinstance(group_axis, MultiIndex):
            if isinstance(level, compat.string_types):
                if obj.index.name != level:
                    raise ValueError('level name %s is not the name of the '
                                     'index' % level)
            elif level > 0:
                raise ValueError('level > 0 only valid with MultiIndex')
            # on a flat index, the (sole) level IS the axis itself
            level = None
            key = group_axis
    # a passed-in Grouper, directly convert
    if isinstance(key, Grouper):
        binner, grouper, obj = key._get_grouper(obj)
        if key.key is None:
            return grouper, [], obj
        else:
            return grouper, set([key.key]), obj
    # already have a BaseGrouper, just return it
    elif isinstance(key, BaseGrouper):
        return key, [], obj
    if not isinstance(key, (tuple, list)):
        keys = [key]
    else:
        keys = key
    # what are we after, exactly?
    match_axis_length = len(keys) == len(group_axis)
    any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
    any_groupers = any(isinstance(g, Grouper) for g in keys)
    any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
                        for g in keys)
    try:
        if isinstance(obj, DataFrame):
            all_in_columns = all(g in obj.columns for g in keys)
        else:
            all_in_columns = False
    except Exception:
        all_in_columns = False
    # heuristic: a plain list of scalars whose length matches the axis is
    # treated as a single array-like grouper, not a list of column keys
    if (not any_callable and not all_in_columns
        and not any_arraylike and not any_groupers
            and match_axis_length
            and level is None):
        keys = [com._asarray_tuplesafe(keys)]
    if isinstance(level, (tuple, list)):
        if key is None:
            keys = [None] * len(level)
        levels = level
    else:
        levels = [level] * len(keys)
    groupings = []
    exclusions = []
    # if the actual grouper should be obj[key]
    def is_in_axis(key):
        if not _is_label_like(key):
            try:
                obj._data.items.get_loc(key)
            except Exception:
                return False
        return True
    # if the the grouper is obj[name]
    def is_in_obj(gpr):
        try:
            return id(gpr) == id(obj[gpr.name])
        except Exception:
            return False
    for i, (gpr, level) in enumerate(zip(keys, levels)):
        if is_in_obj(gpr): # df.groupby(df['name'])
            in_axis, name = True, gpr.name
            exclusions.append(name)
        elif is_in_axis(gpr): # df.groupby('name')
            in_axis, name, gpr = True, gpr, obj[gpr]
            exclusions.append(name)
        else:
            in_axis, name = False, None
        if is_categorical_dtype(gpr) and len(gpr) != len(obj):
            raise ValueError("Categorical dtype grouper must have len(grouper) == len(data)")
        ping = Grouping(group_axis, gpr, obj=obj, name=name,
                        level=level, sort=sort, in_axis=in_axis)
        groupings.append(ping)
    if len(groupings) == 0:
        raise ValueError('No group keys passed!')
    # create the internals grouper
    grouper = BaseGrouper(group_axis, groupings, sort=sort)
    return grouper, exclusions, obj
def _is_label_like(val):
    # strings and scalars can name an axis label (e.g. a column key)
    return isinstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper._values
else:
return grouper.reindex(axis)._values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
def _whitelist_method_generator(klass, whitelist) :
    """
    Yields all GroupBy member defs for DataFrame/Series names in _whitelist.
    Parameters
    ----------
    klass - class where members are defined. Should be Series or DataFrame
    whitelist - list of names of klass methods to be constructed
    Returns
    -------
    The generator yields a sequence of strings, each suitable for exec'ing,
    that define implementations of the named methods for DataFrameGroupBy
    or SeriesGroupBy.
    Since we don't want to override methods explicitly defined in the
    base class, any such name is skipped.
    """
    # Template for plain methods: delegate through __getattr__ so the
    # GroupBy dispatch machinery (_make_wrapper) is exercised.
    method_wrapper_template = \
    """def %(name)s(%(sig)s) :
    \"""
    %(doc)s
    \"""
    f = %(self)s.__getattr__('%(name)s')
    return f(%(args)s)"""
    # Template for properties: same delegation, no call arguments.
    property_wrapper_template = \
    """@property
def %(name)s(self) :
    \"""
    %(doc)s
    \"""
    return self.__getattr__('%(name)s')"""
    for name in whitelist :
        # don't override anything that was explicitly defined
        # in the base class
        if hasattr(GroupBy,name) :
            continue
        # ugly, but we need the name string itself in the method.
        f = getattr(klass,name)
        doc = f.__doc__
        # guard against members whose __doc__ is None (or non-str)
        doc = doc if type(doc)==str else ''
        if type(f) == types.MethodType :
            wrapper_template = method_wrapper_template
            decl, args = make_signature(f)
            # pass args by name to f because otherwise
            # GroupBy._make_wrapper won't know whether
            # we passed in an axis parameter.
            args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]
            params = {'name':name,
                      'doc':doc,
                      'sig':','.join(decl),
                      'self':args[0],
                      'args':','.join(args_by_name)}
        else :
            wrapper_template = property_wrapper_template
            params = {'name':name, 'doc':doc}
        yield wrapper_template % params
class SeriesGroupBy(GroupBy):
    #
    # Make class defs of attributes on SeriesGroupBy whitelist
    _apply_whitelist = _series_apply_whitelist
    # exec the generated delegating method/property defs into the class body
    for _def_str in _whitelist_method_generator(Series,_series_apply_whitelist) :
        exec(_def_str)
    def aggregate(self, func_or_funcs, *args, **kwargs):
        """
        Apply aggregation function or functions to groups, yielding most likely
        Series but in some cases DataFrame depending on the output of the
        aggregation function
        Parameters
        ----------
        func_or_funcs : function or list / dict of functions
            List/dict of functions will produce DataFrame with column names
            determined by the function names themselves (list) or the keys in
            the dict
        Notes
        -----
        agg is an alias for aggregate. Use it.
        Examples
        --------
        >>> series
        bar    1.0
        baz    2.0
        qot    3.0
        qux    4.0
        >>> mapper = lambda x: x[0] # first letter
        >>> grouped = series.groupby(mapper)
        >>> grouped.aggregate(np.sum)
        b 3.0
        q 7.0
        >>> grouped.aggregate([np.sum, np.mean, np.std])
           mean  std  sum
        b  1.5   0.5  3
        q  3.5   0.5  7
        >>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
        ...              'total' : np.sum})
           result  total
        b  2.121   3
        q  4.95    7
        See also
        --------
        apply, transform
        Returns
        -------
        Series or DataFrame
        """
        # string: dispatch to the named method (e.g. 'mean')
        if isinstance(func_or_funcs, compat.string_types):
            return getattr(self, func_or_funcs)(*args, **kwargs)
        # iterable of funcs: produce a DataFrame with one column per func
        if hasattr(func_or_funcs, '__iter__'):
            ret = self._aggregate_multiple_funcs(func_or_funcs)
        else:
            # prefer a cythonized implementation when one exists and no
            # extra arguments were passed
            cyfunc = _intercept_cython(func_or_funcs)
            if cyfunc and not args and not kwargs:
                return getattr(self, cyfunc)()
            if self.grouper.nkeys > 1:
                return self._python_agg_general(func_or_funcs, *args, **kwargs)
            try:
                return self._python_agg_general(func_or_funcs, *args, **kwargs)
            except Exception:
                # fall back to applying the func group-by-group
                result = self._aggregate_named(func_or_funcs, *args, **kwargs)
                index = Index(sorted(result), name=self.grouper.names[0])
                ret = Series(result, index=index)
        if not self.as_index:  # pragma: no cover
            print('Warning, ignoring as_index=True')
        return ret
    def _aggregate_multiple_funcs(self, arg):
        """Aggregate with several functions, returning a DataFrame with one
        column per function (named by dict key, tuple label, or func name)."""
        if isinstance(arg, dict):
            columns = list(arg.keys())
            arg = list(arg.items())
        elif any(isinstance(x, (tuple, list)) for x in arg):
            # normalize to (label, func) pairs
            arg = [(x, x) if not isinstance(x, (tuple, list)) else x
                   for x in arg]
            # indicated column order
            columns = lzip(*arg)[0]
        else:
            # list of functions / function names
            columns = []
            for f in arg:
                if isinstance(f, compat.string_types):
                    columns.append(f)
                else:
                    # protect against callables without names
                    columns.append(com._get_callable_name(f))
            arg = lzip(columns, arg)
        results = {}
        for name, func in arg:
            if name in results:
                raise SpecificationError('Function names must be unique, '
                                         'found multiple named %s' % name)
            results[name] = self.aggregate(func)
        return DataFrame(results, columns=columns)
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
output = output[self.name]
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.obj.index,
names=names)
    def _wrap_applied_output(self, keys, values, not_indexed_same=False):
        """Wrap the per-group results of an apply into a Series or DataFrame,
        depending on what the applied function produced."""
        if len(keys) == 0:
            # GH #6265
            return Series([], name=self.name)
        def _get_index():
            # build the result index from the group keys
            if self.grouper.nkeys > 1:
                index = MultiIndex.from_tuples(keys, names=self.grouper.names)
            else:
                index = Index(keys, name=self.grouper.names[0])
            return index
        if isinstance(values[0], dict):
            # GH #823
            index = _get_index()
            return DataFrame(values, index=index).stack()
        # NOTE(review): the dict member of the tuple below is unreachable —
        # dicts are already handled by the branch above
        if isinstance(values[0], (Series, dict)):
            return self._concat_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
        elif isinstance(values[0], DataFrame):
            # possible that Series -> DataFrame by applied function
            return self._concat_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
        else:
            # GH #6265
            return Series(values, index=_get_index(), name=self.name)
    def _aggregate_named(self, func, *args, **kwargs):
        """Apply ``func`` group-by-group, collecting scalar results keyed by
        group name. Rejects array-like outputs: an aggregation must reduce
        each group to a single value."""
        result = {}
        for name, group in self:
            # expose the group key to the function via the .name attribute
            group.name = name
            output = func(group, *args, **kwargs)
            if isinstance(output, (Series, Index, np.ndarray)):
                raise Exception('Must produce aggregated value')
            result[name] = self._try_cast(output, group)
        return result
    def transform(self, func, *args, **kwargs):
        """
        Call function producing a like-indexed Series on each group and return
        a Series with the transformed values
        Parameters
        ----------
        func : function
            To apply to each group. Should return a Series with the same index
        Examples
        --------
        >>> grouped.transform(lambda x: (x - x.mean()) / x.std())
        Returns
        -------
        transformed : Series
        """
        func = _intercept_cython(func) or func
        # if string function
        if isinstance(func, compat.string_types):
            if func in _cython_transforms:
                # cythonized transform
                return getattr(self, func)(*args, **kwargs)
            else:
                # cythonized aggregation and merge
                return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
        # reg transform: apply group-by-group and scatter results back into
        # a copy of the selected values
        dtype = self._selected_obj.dtype
        result = self._selected_obj.values.copy()
        wrapper = lambda x: func(x, *args, **kwargs)
        for i, (name, group) in enumerate(self):
            # set .name without triggering pandas __setattr__ machinery
            object.__setattr__(group, 'name', name)
            res = wrapper(group)
            if hasattr(res, 'values'):
                res = res.values
            # may need to astype
            try:
                common_type = np.common_type(np.array(res), result)
                if common_type != result.dtype:
                    result = result.astype(common_type)
            except:
                # best-effort upcast; keep the existing dtype on failure
                pass
            indexer = self._get_index(name)
            result[indexer] = res
        # downcast back toward the original dtype where possible
        result = _possibly_downcast_to_dtype(result, dtype)
        return self._selected_obj.__class__(result,
                                            index=self._selected_obj.index,
                                            name=self._selected_obj.name)
    def _transform_fast(self, func):
        """
        fast version of transform, only applicable to builtin/cythonizable functions
        """
        if isinstance(func, compat.string_types):
            func = getattr(self,func)
        ids, _, ngroup = self.grouper.group_info
        # ids == -1 marks rows whose group key was null
        mask = ids != -1
        # broadcast the per-group aggregate back to row positions
        out = func().values[ids]
        if not mask.all():
            out = np.where(mask, out, np.nan)
        # track which groups were actually observed
        obs = np.zeros(ngroup, dtype='bool')
        obs[ids[mask]] = True
        if not obs.all():
            out = self._try_cast(out, self._selected_obj)
        return Series(out, index=self.obj.index)
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) for name, group in self
if true_and_notnull(group)]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
    def nunique(self, dropna=True):
        """Count the number of unique values in each group.

        dropna : if True (default), null values do not count as observations.
        """
        ids, _, _ = self.grouper.group_info
        val = self.obj.get_values()
        try:
            sorter = np.lexsort((val, ids))
        except TypeError:  # catches object dtypes
            assert val.dtype == object, \
                'val.dtype must be object, got %s' % val.dtype
            # factorize object values so lexsort can handle them
            val, _ = algos.factorize(val, sort=False)
            sorter = np.lexsort((val, ids))
            # after factorization, nulls are coded as -1
            isnull = lambda a: a == -1
        else:
            isnull = com.isnull
        ids, val = ids[sorter], val[sorter]
        # group boundaries are where group ids change
        # unique observations are where sorted values change
        idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
        inc = np.r_[1, val[1:] != val[:-1]]
        # 1st item of each group is a new unique observation
        mask = isnull(val)
        if dropna:
            inc[idx] = 1
            inc[mask] = 0
        else:
            inc[mask & np.r_[False, mask[:-1]]] = 0
            inc[idx] = 1
        # per-group sums of the "new unique value" indicator
        out = np.add.reduceat(inc, idx).astype('int64', copy=False)
        return Series(out if ids[0] != -1 else out[1:],
                      index=self.grouper.result_index,
                      name=self.name)
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
@Appender(Series.nlargest.__doc__)
def nlargest(self, n=5, keep='first'):
# ToDo: When we remove deprecate_kwargs, we can remote these methods
# and include nlargest and nsmallest to _series_apply_whitelist
return self.apply(lambda x: x.nlargest(n=n, keep=keep))
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
@Appender(Series.nsmallest.__doc__)
def nsmallest(self, n=5, keep='first'):
return self.apply(lambda x: x.nsmallest(n=n, keep=keep))
    def value_counts(self, normalize=False, sort=True, ascending=False,
                     bins=None, dropna=True):
        """Per-group value counts, returned as a Series with a MultiIndex of
        (group keys..., value). Mirrors Series.value_counts semantics."""
        from functools import partial
        from pandas.tools.tile import cut
        from pandas.tools.merge import _get_join_indexers
        if bins is not None and not np.iterable(bins):
            # scalar bins cannot be done at top level
            # in a backward compatible way
            return self.apply(Series.value_counts,
                              normalize=normalize,
                              sort=sort,
                              ascending=ascending,
                              bins=bins)
        ids, _, _ = self.grouper.group_info
        val = self.obj.get_values()
        # groupby removes null keys from groupings
        mask = ids != -1
        ids, val = ids[mask], val[mask]
        if bins is None:
            lab, lev = algos.factorize(val, sort=True)
        else:
            cat, bins = cut(val, bins, retbins=True)
            # bins[:-1] for backward compat;
            # o.w. cat.categories could be better
            lab, lev, dropna = cat.codes, bins[:-1], False
        # sort by (group id, value label) so counting reduces to run lengths
        sorter = np.lexsort((lab, ids))
        ids, lab = ids[sorter], lab[sorter]
        # group boundaries are where group ids change
        idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
        # new values are where sorted labels change
        inc = np.r_[True, lab[1:] != lab[:-1]]
        inc[idx] = True  # group boundaries are also new values
        out = np.diff(np.nonzero(np.r_[inc, True])[0])  # value counts
        # num. of times each group should be repeated
        rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
        # multi-index components
        labels = list(map(rep, self.grouper.recons_labels)) + [lab[inc]]
        levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
        names = self.grouper.names + [self.name]
        if dropna:
            mask = labels[-1] != -1
            if mask.all():
                dropna = False
            else:
                out, labels = out[mask], [label[mask] for label in labels]
        if normalize:
            out = out.astype('float')
            # per-group totals, repeated to align with each (group, value) row
            acc = rep(np.diff(np.r_[idx, len(ids)]))
            out /= acc[mask] if dropna else acc
        if sort and bins is None:
            cat = ids[inc][mask] if dropna else ids[inc]
            sorter = np.lexsort((out if ascending else -out, cat))
            out, labels[-1] = out[sorter], labels[-1][sorter]
        if bins is None:
            mi = MultiIndex(levels=levels, labels=labels, names=names,
                            verify_integrity=False)
            if com.is_integer_dtype(out):
                out = com._ensure_int64(out)
            return Series(out, index=mi)
        # for compat. with algos.value_counts need to ensure every
        # bin is present at every index level, null filled with zeros
        diff = np.zeros(len(out), dtype='bool')
        for lab in labels[:-1]:
            diff |= np.r_[True, lab[1:] != lab[:-1]]
        ncat, nbin = diff.sum(), len(levels[-1])
        # full cartesian product of (group, bin) to left-join the counts onto
        left = [np.repeat(np.arange(ncat), nbin),
                np.tile(np.arange(nbin), ncat)]
        right = [diff.cumsum() - 1, labels[-1]]
        _, idx = _get_join_indexers(left, right, sort=False, how='left')
        out = np.where(idx != -1, out[idx], 0)
        if sort:
            sorter = np.lexsort((out if ascending else -out, left[0]))
            out, left[-1] = out[sorter], left[-1][sorter]
        # build the multi-index w/ full levels
        labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
        labels.append(left[-1])
        mi = MultiIndex(levels=levels, labels=labels, names=names,
                        verify_integrity=False)
        if com.is_integer_dtype(out):
            out = com._ensure_int64(out)
        return Series(out, index=mi)
def count(self):
""" Compute count of group, excluding missing values """
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isnull(val)
ids = com._ensure_platform_int(ids)
out = np.bincount(ids[mask], minlength=ngroups) if ngroups != 0 else []
return Series(out, index=self.grouper.result_index, name=self.name, dtype='int64')
    def _apply_to_column_groupbys(self, func):
        """ return a pass thru """
        # a Series is its own single "column", so just apply func to self
        return func(self)
class NDFrameGroupBy(GroupBy):
    def _iterate_slices(self):
        """Yield (label, 1-d slice) pairs over the non-excluded slices of the
        grouped object, along the grouping axis."""
        if self.axis == 0:
            # kludge
            if self._selection is None:
                slice_axis = self.obj.columns
            else:
                slice_axis = self._selection_list
            slicer = lambda x: self.obj[x]
        else:
            slice_axis = self.obj.index
            slicer = self.obj.xs
        for val in slice_axis:
            # skip labels that are part of the grouping itself
            if val in self.exclusions:
                continue
            yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
    def _wrap_agged_blocks(self, items, blocks):
        """Assemble aggregated blocks into a new NDFrame of the same type as
        the (exclusion-filtered) grouped object."""
        obj = self._obj_with_exclusions
        new_axes = list(obj._data.axes)
        # more kludge
        if self.axis == 0:
            # items move to axis 1; groups become axis 0's replacement below
            new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
        else:
            new_axes[self.axis] = self.grouper.result_index
        # Make sure block manager integrity check passes.
        assert new_axes[0].equals(items)
        new_axes[0] = items
        mgr = BlockManager(blocks, new_axes)
        new_obj = type(obj)(mgr)
        # undo any axis swap applied when extracting the data to aggregate
        return self._post_process_cython_aggregate(new_obj)
    # axis along which block-level aggregation operates for this class
    _block_agg_axis = 0
    def _cython_agg_blocks(self, how, numeric_only=True):
        """Run the cython aggregation ``how`` block-by-block, returning
        (items, aggregated blocks). Raises DataError if nothing numeric
        remains to aggregate."""
        data, agg_axis = self._get_data_to_aggregate()
        new_blocks = []
        if numeric_only:
            data = data.get_numeric_data(copy=False)
        for block in data.blocks:
            result, _ = self.grouper.aggregate(block.values, how, axis=agg_axis)
            # see if we can cast the block back to the original dtype
            result = block._try_coerce_and_cast_result(result)
            newb = make_block(result, placement=block.mgr_locs)
            new_blocks.append(newb)
        if len(new_blocks) == 0:
            raise DataError('No numeric types to aggregate')
        return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
    @Appender(_agg_doc)
    def aggregate(self, arg, *args, **kwargs):
        # string: dispatch to the named method (e.g. 'mean')
        if isinstance(arg, compat.string_types):
            return getattr(self, arg)(*args, **kwargs)
        result = OrderedDict()
        if isinstance(arg, dict):
            # dict: per-column (or per-name) aggregation specs
            if self.axis != 0:  # pragma: no cover
                raise ValueError('Can only pass dict with axis=0')
            obj = self._selected_obj
            # normalize scalar spec values to one-element lists when any
            # value is already list-like, so results align structurally
            if any(isinstance(x, (list, tuple, dict)) for x in arg.values()):
                new_arg = OrderedDict()
                for k, v in compat.iteritems(arg):
                    if not isinstance(v, (tuple, list, dict)):
                        new_arg[k] = [v]
                    else:
                        new_arg[k] = v
                arg = new_arg
            keys = []
            if self._selection is not None:
                subset = obj
                if isinstance(subset, DataFrame):
                    raise NotImplementedError("Aggregating on a DataFrame is "
                                              "not supported")
                # dict keys name the output columns; each func applies to the
                # single selected column
                for fname, agg_how in compat.iteritems(arg):
                    colg = SeriesGroupBy(subset, selection=self._selection,
                                         grouper=self.grouper)
                    result[fname] = colg.aggregate(agg_how)
                    keys.append(fname)
            else:
                # dict keys are column labels; aggregate each separately
                for col, agg_how in compat.iteritems(arg):
                    colg = SeriesGroupBy(obj[col], selection=col,
                                         grouper=self.grouper)
                    result[col] = colg.aggregate(agg_how)
                    keys.append(col)
            if isinstance(list(result.values())[0], DataFrame):
                from pandas.tools.merge import concat
                result = concat([result[k] for k in keys], keys=keys, axis=1)
            else:
                result = DataFrame(result)
        elif isinstance(arg, list):
            return self._aggregate_multiple_funcs(arg)
        else:
            # single callable: prefer a cythonized implementation
            cyfunc = _intercept_cython(arg)
            if cyfunc and not args and not kwargs:
                return getattr(self, cyfunc)()
            if self.grouper.nkeys > 1:
                return self._python_agg_general(arg, *args, **kwargs)
            else:
                # try to treat as if we are passing a list
                try:
                    assert not args and not kwargs
                    result = self._aggregate_multiple_funcs([arg])
                    result.columns = Index(result.columns.levels[0],
                                           name=self._selected_obj.columns.name)
                except:
                    # deliberate broad fallback to the generic path
                    result = self._aggregate_generic(arg, *args, **kwargs)
        if not self.as_index:
            self._insert_inaxis_grouper_inplace(result)
            result.index = np.arange(len(result))
        return result._convert(datetime=True)
def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
if self.axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
obj = self._obj_with_exclusions
results = []
keys = []
for col in obj:
try:
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
result = concat(results, keys=keys, axis=1)
return result
    def _aggregate_generic(self, func, *args, **kwargs):
        """Apply ``func`` to each whole group; fall back to item-by-item
        (or row/column-wise apply) when whole-group application fails."""
        if self.grouper.nkeys != 1:
            raise AssertionError('Number of keys must be 1')
        axis = self.axis
        obj = self._obj_with_exclusions
        result = {}
        if axis != obj._info_axis_number:
            try:
                for name, data in self:
                    result[name] = self._try_cast(func(data, *args, **kwargs),
                                                  data)
            except Exception:
                # any failure: retry column-by-column
                return self._aggregate_item_by_item(func, *args, **kwargs)
        else:
            for name in self.indices:
                try:
                    data = self.get_group(name, obj=obj)
                    result[name] = self._try_cast(func(data, *args, **kwargs),
                                                  data)
                except Exception:
                    # per-group failure: apply func element-wise instead
                    wrapper = lambda x: func(x, *args, **kwargs)
                    result[name] = data.apply(wrapper, axis=axis)
        return self._wrap_generic_output(result, obj)
    def _wrap_aggregated_output(self, output, names=None):
        # abstract on this base class; concrete subclasses must override
        raise AbstractMethodError(self)
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors=None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors=e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
    def _wrap_applied_output(self, keys, values, not_indexed_same=False):
        """Wrap the per-group results of an apply into a DataFrame or Series,
        depending on what the applied function produced for each group."""
        from pandas.core.index import _all_indexes_same
        if len(keys) == 0:
            # XXX
            return DataFrame({})
        key_names = self.grouper.names
        if isinstance(values[0], DataFrame):
            return self._concat_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
        elif self.grouper.groupings is not None:
            if len(self.grouper.groupings) > 1:
                key_index = MultiIndex.from_tuples(keys, names=key_names)
            else:
                ping = self.grouper.groupings[0]
                if len(keys) == ping.ngroups:
                    key_index = ping.group_index
                    key_index.name = key_names[0]
                    key_lookup = Index(keys)
                    indexer = key_lookup.get_indexer(key_index)
                    # reorder the values
                    values = [values[i] for i in indexer]
                else:
                    key_index = Index(keys, name=key_names[0])
                # don't use the key indexer
                if not self.as_index:
                    key_index = None
            # make Nones an empty object
            if com._count_not_none(*values) != len(values):
                try:
                    # first non-None value, used as the template
                    v = next(v for v in values if v is not None)
                except StopIteration:
                    # If all values are None, then this will throw an error.
                    # We'd prefer it return an empty dataframe.
                    return DataFrame()
                if v is None:
                    return DataFrame()
                elif isinstance(v, NDFrame):
                    # replace each None with an empty object of the same shape
                    values = [
                        x if x is not None else
                        v._constructor(**v._construct_axes_dict())
                        for x in values
                    ]
            v = values[0]
            if isinstance(v, (np.ndarray, Index, Series)):
                if isinstance(v, Series):
                    applied_index = self._selected_obj._get_axis(self.axis)
                    all_indexed_same = _all_indexes_same([
                        x.index for x in values
                    ])
                    singular_series = (len(values) == 1 and
                                       applied_index.nlevels == 1)
                    # GH3596
                    # provide a reduction (Frame -> Series) if groups are
                    # unique
                    if self.squeeze:
                        # assign the name to this series
                        if singular_series:
                            values[0].name = keys[0]
                            # GH2893
                            # we have series in the values array, we want to
                            # produce a series:
                            # if any of the sub-series are not indexed the same
                            # OR we don't have a multi-index and we have only a
                            # single values
                            return self._concat_objects(
                                keys, values, not_indexed_same=not_indexed_same
                            )
                        # still a series
                        # path added as of GH 5545
                        elif all_indexed_same:
                            from pandas.tools.merge import concat
                            return concat(values)
                    if not all_indexed_same:
                        # GH 8467
                        return self._concat_objects(
                            keys, values, not_indexed_same=True,
                        )
                try:
                    if self.axis == 0:
                        # GH6124 if the list of Series have a consistent name,
                        # then propagate that name to the result.
                        index = v.index.copy()
                        if index.name is None:
                            # Only propagate the series name to the result
                            # if all series have a consistent name.  If the
                            # series do not have a consistent name, do
                            # nothing.
                            names = set(v.name for v in values)
                            if len(names) == 1:
                                index.name = list(names)[0]
                        # normally use vstack as its faster than concat
                        # and if we have mi-columns
                        if isinstance(v.index, MultiIndex) or key_index is None:
                            stacked_values = np.vstack(map(np.asarray, values))
                            result = DataFrame(stacked_values, index=key_index,
                                               columns=index)
                        else:
                            # GH5788 instead of stacking; concat gets the dtypes correct
                            from pandas.tools.merge import concat
                            result = concat(values, keys=key_index,
                                            names=key_index.names,
                                            axis=self.axis).unstack()
                            result.columns = index
                    else:
                        stacked_values = np.vstack(map(np.asarray, values))
                        result = DataFrame(stacked_values.T, index=v.index,
                                           columns=key_index)
                except (ValueError, AttributeError):
                    # GH1738: values is list of arrays of unequal lengths fall
                    # through to the outer else caluse
                    return Series(values, index=key_index)
                # if we have date/time like in the original, then coerce dates
                # as we are stacking can easily have object dtypes here
                if (self._selected_obj.ndim == 2 and
                        self._selected_obj.dtypes.isin(_DATELIKE_DTYPES).any()):
                    result = result._convert(numeric=True)
                    date_cols = self._selected_obj.select_dtypes(
                        include=list(_DATELIKE_DTYPES)).columns
                    date_cols = date_cols.intersection(result.columns)
                    result[date_cols] = (result[date_cols]
                                         ._convert(datetime=True,
                                                   coerce=True))
                else:
                    result = result._convert(datetime=True)
                return self._reindex_output(result)
            else:
                # only coerce dates if we find at least 1 datetime
                coerce = True if any([ isinstance(v,Timestamp) for v in values ]) else False
                return (Series(values, index=key_index)
                        ._convert(datetime=True,
                                  coerce=coerce))
        else:
            # Handle cases like BinGrouper
            return self._concat_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
    def _transform_general(self, func, *args, **kwargs):
        """General (non-cython) transform: apply ``func`` group-by-group,
        choosing between a fast whole-group path and a slow apply path, then
        concatenate the results back into the original order."""
        from pandas.tools.merge import concat
        applied = []
        obj = self._obj_with_exclusions
        gen = self.grouper.get_iterator(obj, axis=self.axis)
        fast_path, slow_path = self._define_paths(func, *args, **kwargs)
        path = None
        for name, group in gen:
            # set .name without triggering pandas __setattr__ machinery
            object.__setattr__(group, 'name', name)
            if path is None:
                # Try slow path and fast path.
                try:
                    path, res = self._choose_path(fast_path, slow_path, group)
                except TypeError:
                    return self._transform_item_by_item(obj, fast_path)
                except Exception:  # pragma: no cover
                    res = fast_path(group)
                    path = fast_path
            else:
                res = path(group)
            # broadcasting
            if isinstance(res, Series):
                # broadcast a Series result across the group's rows/columns
                if res.index.is_(obj.index):
                    group.T.values[:] = res
                else:
                    group.values[:] = res
                applied.append(group)
            else:
                applied.append(res)
        concat_index = obj.columns if self.axis == 0 else obj.index
        concatenated = concat(applied, join_axes=[concat_index],
                              axis=self.axis, verify_integrity=False)
        return self._set_result_index_ordered(concatenated)
    def transform(self, func, *args, **kwargs):
        """
        Call function producing a like-indexed DataFrame on each group and
        return a DataFrame having the same indexes as the original object
        filled with the transformed values
        Parameters
        ----------
        f : function
            Function to apply to each subframe
        Notes
        -----
        Each subframe is endowed the attribute 'name' in case you need to know
        which group you are working on.
        Examples
        --------
        >>> grouped = df.groupby(lambda x: mapping[x])
        >>> grouped.transform(lambda x: (x - x.mean()) / x.std())
        """
        # optimized transforms
        func = _intercept_cython(func) or func
        if isinstance(func, compat.string_types):
            if func in _cython_transforms:
                # cythonized transform
                return getattr(self, func)(*args, **kwargs)
            else:
                # cythonized aggregation and merge
                result = getattr(self, func)(*args, **kwargs)
        else:
            return self._transform_general(func, *args, **kwargs)
        # a reduction transform
        if not isinstance(result, DataFrame):
            return self._transform_general(func, *args, **kwargs)
        obj = self._obj_with_exclusions
        # nuisance columns
        if not result.columns.equals(obj.columns):
            return self._transform_general(func, *args, **kwargs)
        # broadcast each group's aggregated row back over the group's rows
        results = np.empty_like(obj.values, result.values.dtype)
        indices = self.indices
        for (name, group), (i, row) in zip(self, result.iterrows()):
            indexer = self._get_index(name)
            if len(indexer) > 0:
                results[indexer] = np.tile(row.values,len(indexer)).reshape(len(indexer),-1)
        counts = self.size().fillna(0).values
        if any(counts == 0):
            # empty groups: attempt to cast back toward the original dtype
            results = self._try_cast(results, obj[result.columns])
        return (DataFrame(results,columns=result.columns,index=obj.index)
                ._convert(datetime=True))
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
    def _choose_path(self, fast_path, slow_path, group):
        """Evaluate the slow path on ``group``; promote to the fast path only
        if it runs and produces the same non-null values."""
        path = slow_path
        res = slow_path(group)
        # if we make it here, test if we can use the fast path
        try:
            res_fast = fast_path(group)
            # compare that we get the same results
            if res.shape == res_fast.shape:
                res_r = res.values.ravel()
                res_fast_r = res_fast.values.ravel()
                mask = notnull(res_r)
                if (res_r[mask] == res_fast_r[mask]).all():
                    path = fast_path
        except:
            # deliberately broad: any fast-path failure keeps the slow path
            pass
        return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return DataFrame(output, index=obj.index, columns=columns)
    def filter(self, func, dropna=True, *args, **kwargs):
        """
        Return a copy of a DataFrame excluding elements from groups that
        do not satisfy the boolean criterion specified by func.
        Parameters
        ----------
        f : function
            Function to apply to each subframe. Should return True or False.
        dropna : Drop groups that do not pass the filter. True by default;
            if False, groups that evaluate False are filled with NaNs.
        Notes
        -----
        Each subframe is endowed the attribute 'name' in case you need to know
        which group you are working on.
        Examples
        --------
        >>> grouped = df.groupby(lambda x: mapping[x])
        >>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
        """
        indices = []
        obj = self._selected_obj
        gen = self.grouper.get_iterator(obj, axis=self.axis)
        for name, group in gen:
            # set .name without triggering pandas __setattr__ machinery
            object.__setattr__(group, 'name', name)
            res = func(group, *args, **kwargs)
            try:
                # reduce single-element results to scalars
                res = res.squeeze()
            except AttributeError:  # allow e.g., scalars and frames to pass
                pass
            # interpret the result of the filter
            if is_bool(res) or (lib.isscalar(res) and isnull(res)):
                # null results count as False (group is dropped)
                if res and notnull(res):
                    indices.append(self._get_index(name))
            else:
                # non scalars aren't allowed
                raise TypeError("filter function returned a %s, "
                                "but expected a scalar bool" %
                                type(res).__name__)
        return self._apply_filter(indices, dropna)
class DataFrameGroupBy(NDFrameGroupBy):
    _apply_whitelist = _dataframe_apply_whitelist
    #
    # Make class defs of attributes on DataFrameGroupBy whitelist.
    for _def_str in _whitelist_method_generator(DataFrame,_apply_whitelist) :
        exec(_def_str)
    # frames aggregate block-wise along axis 1
    _block_agg_axis = 1
    def __getitem__(self, key):
        """Select column(s) from the grouped frame: a list-like key yields a
        DataFrameGroupBy over those columns, a single label yields a
        SeriesGroupBy (or DataFrameGroupBy when as_index=False)."""
        if self._selection is not None:
            raise Exception('Column(s) %s already selected' % self._selection)
        if isinstance(key, (list, tuple, Series, Index, np.ndarray)):
            if len(self.obj.columns.intersection(key)) != len(key):
                bad_keys = list(set(key).difference(self.obj.columns))
                raise KeyError("Columns not found: %s"
                               % str(bad_keys)[1:-1])
            return DataFrameGroupBy(self.obj, self.grouper, selection=key,
                                    grouper=self.grouper,
                                    exclusions=self.exclusions,
                                    as_index=self.as_index)
        elif not self.as_index:
            if key not in self.obj.columns:
                raise KeyError("Column not found: %s" % key)
            return DataFrameGroupBy(self.obj, self.grouper, selection=key,
                                    grouper=self.grouper,
                                    exclusions=self.exclusions,
                                    as_index=self.as_index)
        else:
            if key not in self.obj:
                raise KeyError("Column not found: %s" % key)
            # kind of a kludge
            return SeriesGroupBy(self.obj[key], selection=key,
                                 grouper=self.grouper,
                                 exclusions=self.exclusions)
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if result:
if self.axis == 0:
result = DataFrame(result, index=obj.columns,
columns=result_index).T
else:
result = DataFrame(result, index=obj.index,
columns=result_index)
else:
result = DataFrame(result)
return result
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
    def _insert_inaxis_grouper_inplace(self, result):
        """Insert the in-axis grouping columns back into ``result`` in place
        (used for as_index=False output)."""
        # zip in reverse so we can always insert at loc 0
        izip = zip(* map(reversed, (
            self.grouper.names,
            self.grouper.get_group_levels(),
            [grp.in_axis for grp in self.grouper.groupings])))
        for name, lev, in_axis in izip:
            if in_axis:
                result.insert(0, name, lev)
    def _wrap_aggregated_output(self, output, names=None):
        """Wrap an aggregation output dict into a DataFrame, restoring
        grouping columns when as_index=False."""
        agg_axis = 0 if self.axis == 1 else 1
        agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
        output_keys = self._decide_output_index(output, agg_labels)
        if not self.as_index:
            result = DataFrame(output, columns=output_keys)
            self._insert_inaxis_grouper_inplace(result)
            result = result.consolidate()
        else:
            index = self.grouper.result_index
            result = DataFrame(output, index=index, columns=output_keys)
        if self.axis == 1:
            result = result.T
        # re-expand categorical groupers and coerce datetimes
        return self._reindex_output(result)._convert(datetime=True)
    def _wrap_transformed_output(self, output, names=None):
        # transform preserves the original row index of the grouped object
        return DataFrame(output, index=self.obj.index)
    def _wrap_agged_blocks(self, items, blocks):
        # Reassemble block-wise aggregation results into a DataFrame.
        if not self.as_index:
            # positional index; group keys re-inserted as leading columns
            index = np.arange(blocks[0].values.shape[1])
            mgr = BlockManager(blocks, [items, index])
            result = DataFrame(mgr)

            self._insert_inaxis_grouper_inplace(result)
            result = result.consolidate()
        else:
            index = self.grouper.result_index
            mgr = BlockManager(blocks, [items, index])
            result = DataFrame(mgr)

        if self.axis == 1:
            result = result.T

        return self._reindex_output(result)._convert(datetime=True)
    def _reindex_output(self, result):
        """
        If we have categorical groupers, then we want to make sure that
        we have a fully reindexed output to the levels.  These may have not
        participated in the groupings (e.g. may have all been nan groups).
        This can re-expand the output space.
        """
        groupings = self.grouper.groupings
        if groupings is None:
            return result
        elif len(groupings) == 1:
            # a single grouper already carries its full level set
            return result
        elif not any([isinstance(ping.grouper, (Categorical, CategoricalIndex))
                      for ping in groupings]):
            # no categorical groupers -> no unobserved levels to re-add
            return result

        # cartesian product of all grouper levels, observed or not
        levels_list = [ ping.group_index for ping in groupings ]
        index = MultiIndex.from_product(levels_list, names=self.grouper.names)
        d = { self.obj._get_axis_name(self.axis) : index, 'copy' : False }
        return result.reindex(**d).sortlevel(axis=self.axis)
    def _iterate_column_groupbys(self):
        # Yield (column_name, SeriesGroupBy) per column, sharing this
        # frame's grouper and exclusions.
        for i, colname in enumerate(self._selected_obj.columns):
            yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
                                         selection=colname,
                                         grouper=self.grouper,
                                         exclusions=self.exclusions)
    def _apply_to_column_groupbys(self, func):
        from pandas.tools.merge import concat
        # apply ``func`` column-by-column and glue the results side by side
        return concat(
            (func(col_groupby) for _, col_groupby
             in self._iterate_column_groupbys()),
            keys=self._selected_obj.columns, axis=1)
    def count(self):
        """ Compute count of group, excluding missing values """
        from functools import partial
        from pandas.lib import count_level_2d
        from pandas.core.common import _isnull_ndarraylike as isnull

        data, _ = self._get_data_to_aggregate()
        ids, _, ngroups = self.grouper.group_info
        # -1 ids mark rows that belong to no group (e.g. NaN group keys)
        mask = ids != -1

        # per-block boolean arrays: in-a-group AND not-null
        val = ((mask & ~isnull(blk.get_values())) for blk in data.blocks)
        loc = (blk.mgr_locs for blk in data.blocks)

        counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1)
        blk = map(make_block, map(counter, val), loc)

        return self._wrap_agged_blocks(data.items, list(blk))
# attach the groupby boxplot helper as a DataFrameGroupBy method
from pandas.tools.plotting import boxplot_frame_groupby
DataFrameGroupBy.boxplot = boxplot_frame_groupby
class PanelGroupBy(NDFrameGroupBy):
    # GroupBy specialization for 3-dimensional Panel objects.

    def _iterate_slices(self):
        # Yield (item_name, DataFrame) pairs over the panel's items axis.
        if self.axis == 0:
            # kludge
            if self._selection is None:
                slice_axis = self._selected_obj.items
            else:
                slice_axis = self._selection_list
            slicer = lambda x: self._selected_obj[x]
        else:
            raise NotImplementedError("axis other than 0 is not supported")

        for val in slice_axis:
            if val in self.exclusions:
                continue

            yield val, slicer(val)

    def aggregate(self, arg, *args, **kwargs):
        """
        Aggregate using input function or dict of {column -> function}

        Parameters
        ----------
        arg : function or dict
            Function to use for aggregating groups. If a function, must either
            work when passed a Panel or when passed to Panel.apply. If
            pass a dict, the keys must be DataFrame column names

        Returns
        -------
        aggregated : Panel
        """
        # a string names one of our own reduction methods (e.g. 'mean')
        if isinstance(arg, compat.string_types):
            return getattr(self, arg)(*args, **kwargs)

        return self._aggregate_generic(arg, *args, **kwargs)

    def _wrap_generic_output(self, result, obj):
        # Rebuild a Panel from per-group results, then swap axes so the
        # output orientation matches the axis that was grouped on.
        if self.axis == 0:
            new_axes = list(obj.axes)
            new_axes[0] = self.grouper.result_index
        elif self.axis == 1:
            x, y, z = obj.axes
            new_axes = [self.grouper.result_index, z, x]
        else:
            x, y, z = obj.axes
            new_axes = [self.grouper.result_index, y, x]

        result = Panel._from_axes(result, new_axes)

        if self.axis == 1:
            result = result.swapaxes(0, 1).swapaxes(0, 2)
        elif self.axis == 2:
            result = result.swapaxes(0, 2)

        return result

    def _aggregate_item_by_item(self, func, *args, **kwargs):
        # Aggregate each item (a DataFrame) of the panel independently.
        obj = self._obj_with_exclusions
        result = {}

        if self.axis > 0:
            for item in obj:
                try:
                    itemg = DataFrameGroupBy(obj[item],
                                             axis=self.axis - 1,
                                             grouper=self.grouper)
                    result[item] = itemg.aggregate(func, *args, **kwargs)
                except (ValueError, TypeError):
                    # nothing useful to recover here; propagate to caller
                    raise
            new_axes = list(obj.axes)
            new_axes[self.axis] = self.grouper.result_index
            return Panel._from_axes(result, new_axes)
        else:
            raise ValueError("axis value must be greater than 0")

    def _wrap_aggregated_output(self, output, names=None):
        # not implemented for Panel groupbys
        raise AbstractMethodError(self)
class NDArrayGroupBy(GroupBy):
    # placeholder for grouping over raw ndarrays; no specialization yet
    pass
#----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
    """
    Split ``data`` into per-group chunks given integer group labels and the
    number of groups.  Iterating yields ``(group_number, chunk)`` pairs in
    sorted group order.
    """

    def __init__(self, data, labels, ngroups, axis=0):
        self.data = data
        self.labels = com._ensure_int64(labels)
        self.ngroups = ngroups

        self.axis = axis

    @cache_readonly
    def slabels(self):
        # Sorted labels
        return com.take_nd(self.labels, self.sort_idx, allow_fill=False)

    @cache_readonly
    def sort_idx(self):
        # Counting sort indexer
        return _get_group_index_sorter(self.labels, self.ngroups)

    def __iter__(self):
        sdata = self._get_sorted_data()

        if self.ngroups == 0:
            # A bare ``return`` ends the generator.  ``raise StopIteration``
            # here is forbidden by PEP 479 and turns into a RuntimeError on
            # Python 3.7+; ``return`` behaves identically on all versions.
            return

        starts, ends = lib.generate_slices(self.slabels, self.ngroups)

        for i, (start, end) in enumerate(zip(starts, ends)):
            # Since group ids are compressed, empty slices cannot occur:
            # every observed id corresponds to at least one row.
            yield i, self._chop(sdata, slice(start, end))

    def _get_sorted_data(self):
        return self.data.take(self.sort_idx, axis=self.axis, convert=False)

    def _chop(self, sdata, slice_obj):
        return sdata.iloc[slice_obj]

    def apply(self, f):
        raise AbstractMethodError(self)
class ArraySplitter(DataSplitter):
    # inherits the generic DataSplitter behavior unchanged
    pass
class SeriesSplitter(DataSplitter):
    def _chop(self, sdata, slice_obj):
        # densify so every group chunk is a regular (non-sparse) Series
        return sdata._get_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
    """DataSplitter specialized for DataFrame, with a cython fast path."""

    def __init__(self, data, labels, ngroups, axis=0):
        super(FrameSplitter, self).__init__(data, labels, ngroups, axis=axis)

    def fast_apply(self, f, names):
        # must return keys::list, values::list, mutated::bool
        try:
            starts, ends = lib.generate_slices(self.slabels, self.ngroups)
        except Exception:
            # Slice generation fails when all labels are -1 (all-NA keys);
            # signal the caller to fall back to the slow path.  A bare
            # ``except:`` would also trap KeyboardInterrupt/SystemExit,
            # so catch Exception instead and let those propagate.
            return [], True

        sdata = self._get_sorted_data()
        results, mutated = lib.apply_frame_axis0(sdata, f, names, starts, ends)

        return results, mutated

    def _chop(self, sdata, slice_obj):
        if self.axis == 0:
            return sdata.iloc[slice_obj]
        else:
            return sdata._slice(slice_obj, axis=1)  # ix[:, slice_obj]
class NDFrameSplitter(DataSplitter):
    def __init__(self, data, labels, ngroups, axis=0):
        super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)

        # constructor used to rebuild the original NDFrame type per group
        self.factory = data._constructor

    def _get_sorted_data(self):
        # this is the BlockManager
        data = self.data._data

        # this is sort of wasteful but...
        sorted_axis = data.axes[self.axis].take(self.sort_idx)
        sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)

        return sorted_data

    def _chop(self, sdata, slice_obj):
        # wrap the sliced BlockManager back into the original NDFrame type
        return self.factory(sdata.get_slice(slice_obj, axis=self.axis))
def get_splitter(data, *args, **kwargs):
    """Return the DataSplitter instance appropriate for ``data``'s type."""
    for data_type, splitter_cls in ((Series, SeriesSplitter),
                                    (DataFrame, FrameSplitter)):
        if isinstance(data, data_type):
            return splitter_cls(data, *args, **kwargs)

    # panels / other NDFrames fall through to the generic splitter
    return NDFrameSplitter(data, *args, **kwargs)
#----------------------------------------------------------------------
# Misc utilities
def get_group_index(labels, shape, sort, xnull):
    """
    For the particular label_list, gets the offsets into the hypothetical list
    representing the totally ordered cartesian product of all possible label
    combinations, *as long as* this space fits within int64 bounds;
    otherwise, though group indices identify unique combinations of
    labels, they cannot be deconstructed.
    - If `sort`, rank of returned ids preserve lexical ranks of labels.
      i.e. returned id's can be used to do lexical sort on labels;
    - If `xnull` nulls (-1 labels) are passed through.

    Parameters
    ----------
    labels: sequence of arrays
        Integers identifying levels at each location
    shape: sequence of ints same length as labels
        Number of unique levels at each location
    sort: boolean
        If the ranks of returned ids should match lexical ranks of labels
    xnull: boolean
        If true nulls are excluded. i.e. -1 values in the labels are
        passed through
    Returns
    -------
    An array of type int64 where two elements are equal if their corresponding
    labels are equal at all location.
    """
    def _int64_cut_off(shape):
        # number of leading levels whose cumulative product fits in int64
        acc = long(1)
        for i, mul in enumerate(shape):
            acc *= long(mul)
            if not acc < _INT64_MAX:
                return i
        return len(shape)

    def loop(labels, shape):
        # how many levels can be done without overflow:
        nlev = _int64_cut_off(shape)

        # compute flat ids for the first `nlev` levels
        stride = np.prod(shape[1:nlev], dtype='i8')
        out = stride * labels[0].astype('i8', subok=False, copy=False)

        for i in range(1, nlev):
            stride //= shape[i]
            out += labels[i] * stride

        if xnull:  # exclude nulls
            mask = labels[0] == -1
            for lab in labels[1:nlev]:
                mask |= lab == -1
            out[mask] = -1

        if nlev == len(shape):  # all levels done!
            return out

        # compress what has been done so far in order to avoid overflow
        # to retain lexical ranks, obs_ids should be sorted
        comp_ids, obs_ids = _compress_group_index(out, sort=sort)

        # recurse with the compressed ids standing in for the done levels
        labels = [comp_ids] + labels[nlev:]
        shape = [len(obs_ids)] + shape[nlev:]

        return loop(labels, shape)

    def maybe_lift(lab, size):  # promote nan values (shift codes by +1)
        return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)

    labels = map(com._ensure_int64, labels)
    if not xnull:
        labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))

    return loop(list(labels), list(shape))
_INT64_MAX = np.iinfo(np.int64).max  # overflow threshold for flat group ids
def _int64_overflow_possible(shape):
    """
    Return True if the cartesian product of ``shape`` may not fit in int64.

    Python ints have arbitrary precision (and auto-promote on Python 2),
    so the running product itself cannot overflow here; the py2-only
    ``long()`` calls were unnecessary and broke on Python 3.
    """
    the_prod = 1
    for x in shape:
        the_prod *= int(x)

    return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
    # reconstruct labels
    if _int64_overflow_possible(shape):
        # at some point group indices are factorized,
        # and may not be deconstructed here! wrong path!
        raise ValueError('cannot deconstruct factorized group indices!')

    label_list = []
    factor = 1
    y = 0
    x = comp_labels
    for i in reversed(range(len(shape))):
        # peel off the i-th level by modular arithmetic on the flat id
        labels = (x - y) % (factor * shape[i]) // factor
        # -1 flat ids mark null groups; keep them -1 at every level
        np.putmask(labels, comp_labels < 0, -1)
        label_list.append(labels)
        y = labels * factor
        factor *= shape[i]
    return label_list[::-1]
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull):
    """
    reconstruct labels from observed group ids

    Parameters
    ----------
    xnull: boolean,
        if nulls are excluded; i.e. -1 labels are passed through
    """
    from pandas.hashtable import unique_label_indices

    if not xnull:
        # null codes were lifted (+1) when the flat ids were built; account
        # for that here and undo it after deconstruction
        lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8')
        shape = np.asarray(shape, dtype='i8') + lift

    if not _int64_overflow_possible(shape):
        # obs ids are deconstructable! take the fast route!
        out = decons_group_index(obs_ids, shape)
        return out if xnull or not lift.any() \
            else [x - y for x, y in zip(out, lift)]

    # slow path: pick one representative row per observed group id and
    # read its label at each level
    i = unique_label_indices(comp_ids)
    i8copy = lambda a: a.astype('i8', subok=False, copy=True)
    return [i8copy(lab[i]) for lab in labels]
def _indexer_from_factorized(labels, shape, compress=True):
    # Build a sorting indexer from already-factorized label arrays.
    ids = get_group_index(labels, shape, sort=True, xnull=False)

    if not compress:
        # ``(size and max)`` guards the empty-input case (max of empty fails)
        ngroups = (ids.size and ids.max()) + 1
    else:
        ids, obs = _compress_group_index(ids, sort=True)
        ngroups = len(obs)

    return _get_group_index_sorter(ids, ngroups)
def _lexsort_indexer(keys, orders=None, na_position='last'):
    """
    Compute an indexer that lexically sorts by each of ``keys`` in turn.

    Parameters
    ----------
    keys : sequence of array-likes
    orders : bool, sequence of bool, or None
        Ascending (True) / descending (False) per key; a scalar applies to
        all keys, None means all ascending.
    na_position : {'last', 'first'}
        Where NA (-1 coded) values should sort.
    """
    # ``na_position`` is loop-invariant: validate it once up front instead
    # of re-checking on every key (which also skipped validation entirely
    # for empty ``keys``).
    if na_position not in ['last', 'first']:
        raise ValueError('invalid na_position: {!r}'.format(na_position))

    labels = []
    shape = []
    if isinstance(orders, bool):
        orders = [orders] * len(keys)
    elif orders is None:
        orders = [True] * len(keys)

    for key, order in zip(keys, orders):
        # factorize through Categorical; reuse an existing Categorical as-is
        if is_categorical_dtype(key):
            c = key
        else:
            c = Categorical(key, ordered=True)

        n = len(c.categories)
        codes = c.codes.copy()

        mask = (c.codes == -1)
        if order:  # ascending
            if na_position == 'last':
                codes = np.where(mask, n, codes)
            elif na_position == 'first':
                codes += 1
        else:  # not order means descending
            if na_position == 'last':
                codes = np.where(mask, n, n - codes - 1)
            elif na_position == 'first':
                codes = np.where(mask, 0, n - codes)
        if mask.any():
            # NAs occupy one extra slot in this level's code space
            n += 1

        shape.append(n)
        labels.append(codes)

    return _indexer_from_factorized(labels, shape)
def _nargsort(items, kind='quicksort', ascending=True, na_position='last'):
    """
    This is intended to be a drop-in replacement for np.argsort which handles NaNs
    It adds ascending and na_position parameters.
    GH #6399, #5231
    """
    # specially handle Categorical
    if is_categorical_dtype(items):
        # NOTE(review): this path ignores ``kind`` and ``na_position``
        return items.argsort(ascending=ascending)

    items = np.asanyarray(items)
    idx = np.arange(len(items))
    mask = isnull(items)
    non_nans = items[~mask]
    non_nan_idx = idx[~mask]
    nan_idx = np.nonzero(mask)[0]
    if not ascending:
        # reverse values and their positions before sorting, then flip the
        # result below: the double reversal keeps tie order well-behaved
        non_nans = non_nans[::-1]
        non_nan_idx = non_nan_idx[::-1]
    indexer = non_nan_idx[non_nans.argsort(kind=kind)]
    if not ascending:
        indexer = indexer[::-1]
    # Finally, place the NaNs at the end or the beginning according to na_position
    if na_position == 'last':
        indexer = np.concatenate([indexer, nan_idx])
    elif na_position == 'first':
        indexer = np.concatenate([nan_idx, indexer])
    else:
        raise ValueError('invalid na_position: {!r}'.format(na_position))
    return indexer
class _KeyMapper(object):
    """
    Ease my suffering. Map compressed group id -> key tuple
    """

    def __init__(self, comp_ids, ngroups, labels, levels):
        self.levels = levels
        self.labels = labels
        self.comp_ids = comp_ids.astype(np.int64)

        self.k = len(labels)
        # one hash table per level: comp_id -> label code at that level
        self.tables = [_hash.Int64HashTable(ngroups) for _ in range(self.k)]

        self._populate_tables()

    def _populate_tables(self):
        for labs, table in zip(self.labels, self.tables):
            table.map(self.comp_ids, labs.astype(np.int64))

    def get_key(self, comp_id):
        # translate each level's code back into the actual level value
        return tuple(level[table.get_item(comp_id)]
                     for table, level in zip(self.tables, self.levels))
def _get_indices_dict(label_list, keys):
    # Map each observed key (tuple) -> integer positions where it occurs.
    shape = list(map(len, keys))
    group_index = get_group_index(label_list, shape, sort=True, xnull=True)

    # dense group count when the cartesian space overflows int64, otherwise
    # the full product of level sizes
    ngroups = ((group_index.size and group_index.max()) + 1) \
        if _int64_overflow_possible(shape) \
        else np.prod(shape, dtype='i8')

    sorter = _get_group_index_sorter(group_index, ngroups)

    sorted_labels = [lab.take(sorter) for lab in label_list]
    group_index = group_index.take(sorter)

    return lib.indices_fast(sorter, group_index, keys, sorted_labels)
#----------------------------------------------------------------------
# sorting levels...cleverly?
def _get_group_index_sorter(group_index, ngroups):
    """
    _algos.groupsort_indexer implements `counting sort` and it is at least
    O(ngroups), where
        ngroups = prod(shape)
        shape = map(len, keys)
    that is, linear in the number of combinations (cartesian product) of unique
    values of groupby keys. This can be huge when doing multi-key groupby.
    np.argsort(kind='mergesort') is O(count x log(count)) where count is the
    length of the data-frame;
    Both algorithms are `stable` sort and that is necessary for correctness of
    groupby operations. e.g. consider:
        df.groupby(key)[col].transform('first')
    """
    count = len(group_index)
    alpha = 0.0  # taking complexities literally; there may be
    beta = 1.0   # some room for fine-tuning these parameters
    # pick whichever sort's asymptotic cost estimate is smaller
    if alpha + beta * ngroups < count * np.log(count):
        sorter, _ = _algos.groupsort_indexer(com._ensure_int64(group_index),
                                             ngroups)
        return com._ensure_platform_int(sorter)
    else:
        return group_index.argsort(kind='mergesort')
def _compress_group_index(group_index, sort=True):
    """
    Group_index is offsets into cartesian product of all possible labels. This
    space can be huge, so this function compresses it, by computing offsets
    (comp_ids) into the list of unique labels (obs_group_ids).
    """
    size_hint = min(len(group_index), _hash._SIZE_HINT_LIMIT)
    table = _hash.Int64HashTable(size_hint)

    group_index = com._ensure_int64(group_index)

    # note, group labels come out ascending (ie, 1,2,3 etc)
    comp_ids, obs_group_ids = table.get_labels_groupby(group_index)

    if sort and len(obs_group_ids) > 0:
        # reorder so obs_group_ids are ascending and comp_ids follow suit
        obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)

    return comp_ids, obs_group_ids
def _reorder_by_uniques(uniques, labels):
    # Sort ``uniques`` ascending and remap ``labels`` accordingly,
    # preserving -1 (null) entries.

    # sorter is index where elements ought to go
    sorter = uniques.argsort()

    # reverse_indexer is where elements came from
    reverse_indexer = np.empty(len(sorter), dtype=np.int64)
    reverse_indexer.put(sorter, np.arange(len(sorter)))

    mask = labels < 0

    # move labels to right locations (ie, unsort ascending labels)
    labels = com.take_nd(reverse_indexer, labels, allow_fill=False)
    np.putmask(labels, mask, -1)

    # sort observed ids
    uniques = com.take_nd(uniques, sorter, allow_fill=False)

    return uniques, labels
# map Python builtin aggregators to their numpy equivalents
_func_table = {
    builtins.sum: np.sum,
    builtins.max: np.max,
    builtins.min: np.min
}

# map common callables to the name of their cython-optimized groupby op
_cython_table = {
    builtins.sum: 'sum',
    builtins.max: 'max',
    builtins.min: 'min',
    np.sum: 'sum',
    np.mean: 'mean',
    np.prod: 'prod',
    np.std: 'std',
    np.var: 'var',
    np.median: 'median',
    np.max: 'max',
    np.min: 'min',
    np.cumprod: 'cumprod',
    np.cumsum: 'cumsum'
}
def _intercept_function(func):
    """Swap a Python builtin aggregator (sum/max/min) for its numpy twin."""
    try:
        return _func_table[func]
    except KeyError:
        return func
def _intercept_cython(func):
    """Return the cython groupby op name for ``func``, or None if absent."""
    if func in _cython_table:
        return _cython_table[func]
    return None
def _groupby_indices(values):
    # map each distinct value -> integer positions at which it occurs
    return _algos.groupby_indices(_values_from_object(com._ensure_object(values)))
def numpy_groupby(data, labels, axis=0):
    """Sum ``data`` within each group defined by ``labels`` (pure numpy)."""
    order = np.argsort(labels)
    _, inverse = np.unique(labels, return_inverse=True)
    sorted_ids = inverse.take(order)
    # positions where a new group starts within the sorted order
    boundaries = np.where(sorted_ids != np.concatenate(([-1], sorted_ids[:-1])))[0]
    sorted_data = data.take(order, axis=axis)
    return np.add.reduceat(sorted_data, boundaries, axis=axis)
| gpl-2.0 |
vigilv/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    title : string
        Title for the chart.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum yvalues plotted.
    cv : integer, cross-validation generator, optional
        If an integer is passed, it is the number of folds (defaults to 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    # mean/std across CV folds, one value per training-set size
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()

    # shaded bands show +/- one standard deviation around each mean curve
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")

    plt.legend(loc="best")
    return plt
digits = load_digits()
X, y = digits.data, digits.target

title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
                                   test_size=0.2, random_state=0)

estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)

# Raw string: "\g" is not a valid escape sequence, so the plain literal only
# worked because unknown escapes pass through; the raw string is identical
# in value and avoids invalid-escape warnings on newer Pythons.
title = r"Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
                                   test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)

plt.show()
| bsd-3-clause |
techtonik/numpy | doc/example.py | 81 | 3581 | """This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi'):
    # PEP 8: no space before the colon in the signature above.
    r"""A one-line summary that does not use variable names or the
    function name.
    Several sentences providing an extended description. Refer to
    variables using back-ticks, e.g. `var`.
    Parameters
    ----------
    var1 : array_like
        Array_like means all those objects -- lists, nested lists, etc. --
        that can be converted to an array.  We can also refer to
        variables like `var1`.
    var2 : int
        The type above can either refer to an actual Python type
        (e.g. ``int``), or describe the type of the variable in more
        detail, e.g. ``(N,) ndarray`` or ``array_like``.
    Long_variable_name : {'hi', 'ho'}, optional
        Choices in brackets, default first when optional.
    Returns
    -------
    type
        Explanation of anonymous return value of type ``type``.
    describe : type
        Explanation of return value named `describe`.
    out : type
        Explanation of `out`.
    Other Parameters
    ----------------
    only_seldom_used_keywords : type
        Explanation
    common_parameters_listed_above : type
        Explanation
    Raises
    ------
    BadException
        Because you shouldn't have done that.
    See Also
    --------
    otherfunc : relationship (optional)
    newfunc : Relationship (optional), which could be fairly long, in which
              case the line wraps here.
    thirdfunc, fourthfunc, fifthfunc
    Notes
    -----
    Notes about the implementation algorithm (if needed).
    This can have multiple paragraphs.
    You may include some math:
    .. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
    And even use a greek symbol like :math:`omega` inline.
    References
    ----------
    Cite the relevant literature, e.g. [1]_.  You may also cite these
    references in the notes section above.
    .. [1] O. McNoleg, "The integration of GIS, remote sensing,
       expert systems and adaptive co-kriging for environmental habitat
       modelling of the Highland Haggis using object-oriented, fuzzy-logic
       and neural-network techniques," Computers & Geosciences, vol. 22,
       pp. 585-588, 1996.
    Examples
    --------
    These are written in doctest format, and should illustrate how to
    use the function.
    >>> a=[1,2,3]
    >>> print [x + 3 for x in a]
    [4, 5, 6]
    >>> print "a\n\nb"
    a
    b
    """
    pass
| bsd-3-clause |
cycleuser/GeoPython | geopytool/setup.py | 2 | 1631 | #!/usr/bin/env python
#coding:utf-8
import os
from distutils.core import setup
from geopytool.ImportDependence import *
from geopytool.CustomClass import *
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, 'README.md')).read()
except:
README = 'https://github.com/GeoPyTool/GeoPyTool/blob/master/README.md'
setup(name='geopytool',
version=version,
description='a tool for daily geology related task. visit geopytool.com for further information',
longdescription=README,
author='cycleuser',
author_email='cycleuser@cycleuser.org',
url='https://github.com/GeoPyTool/GeoPyTool',
packages=['geopytool'],
package_data={
'geopytool': ['*.py','*.png','*.qm','*.ttf','*.ini','*.md'],
},
include_package_data=True,
#py_modules=['CIPW','Cluster','geopytool/CustomClass','Harker','IMP','Magic','MudStone','MultiDimension','OldCustomClass','Pearce','PlotModel','QAPF','QFL','QmFLt','REE','Rose','Stereo','TAS','TableViewer','Temp','Test','Trace','XY','XYZ','ZirconCe','cli'],
#py_modules=['geopytool.CustomClass'],
install_requires=[
'cython',
'numpy',
'pandas',
'scipy',
'xlrd',
'openpyxl',
'matplotlib',
'BeautifulSoup4',
'requests',
'PyQt5',
'scikit-image',
'scikit-learn',
],
) | gpl-3.0 |
ZenDevelopmentSystems/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17

# generate the data
###################

# y = Xw
# |x|_0 = n_nonzero_coefs

y, X, w = make_sparse_coded_signal(n_samples=1,
                                   n_components=n_components,
                                   n_features=n_features,
                                   n_nonzero_coefs=n_nonzero_coefs,
                                   random_state=0)

# indices of the truly non-zero coefficients of the ground-truth signal
idx, = w.nonzero()

# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))

# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])

# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])

# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])

# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])

plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
             fontsize=16)
plt.show()
| bsd-3-clause |
zorojean/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()

X, y = iris.data, iris.target

# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)

# Maybe some original features were good, too?
selection = SelectKBest(k=1)

# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])

# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)

svm = SVC(kernel="linear")

# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])

param_grid = dict(features__pca__n_components=[1, 2, 3],
                  features__univ_select__k=[1, 2],
                  svm__C=[0.1, 1, 10])

grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
inesc-tec-robotics/robot_localization_tools | scripts/path_velocity_and_acceleration_plotter.py | 2 | 12044 | #!/usr/bin/env python
# coding=UTF-8
# Requires https://github.com/moble/quaternion
import argparse
import ntpath
import sys
import math
import numpy as np
import numpy.linalg
import matplotlib.pyplot as plt
import matplotlib.ticker as tk
import quaternion
def str2bool(v):
    """Return True when ``v`` is a common truthy string (case-insensitive)."""
    truthy_values = {"yes", "true", "t", "1"}
    return v.lower() in truthy_values
def euclidean_distance(point1, point2):
    """Euclidean (L2) distance between two numpy coordinate vectors."""
    difference = point2 - point1
    return numpy.sqrt((difference ** 2).sum())
def angular_difference_degrees(quaternion1, quaternion2):
    # Angle in degrees of the rotation taking quaternion2 to quaternion1.
    diff_quaternion = (quaternion1 * quaternion2.conjugate()).normalized()
    if (diff_quaternion.w < 0.0): # shortest path angle
        # q and -q encode the same rotation; flip so acos yields the small angle
        diff_quaternion = -diff_quaternion
    return math.degrees(2.0 * math.acos(diff_quaternion.w))
##########################################################################
# Velocity and acceleration Savitzky-Golay filtering computation (http://dsp.stackexchange.com/questions/9498/have-position-want-to-calculate-velocity-and-acceleration)
def sg_filter(time_values, polynomial_order, derivative=0):
    """
    Build one Savitzky-Golay filter row by least squares (pseudo-inverse).
    time_values = Vector of sample times
    polynomial_order = Order of the smoothing polynomial
    derivative = Which derivative
    """
    # NOTE(review): Python 2 module — integer division below and
    # list-returning map() are relied upon throughout this file.
    mid = len(time_values) / 2
    # center times around the middle sample
    a = time_values - time_values[mid]
    # Vandermonde-style design matrix: one column per power 0..polynomial_order
    expa = lambda time_values: map(lambda i: i**time_values, a)
    A = np.r_[map(expa, range(0,polynomial_order+1))].transpose()
    Ai = np.linalg.pinv(A)
    # the requested row of the pseudo-inverse yields the derivative filter
    return Ai[derivative]
def smooth(x, y, window_size=5, polynomial_order=2, derivative=0):
    """Savitzky-Golay smooth/differentiate y sampled at (possibly uneven) x.

    A least-squares polynomial of the given order is fit over a sliding
    window of 2*window_size + 1 samples and the requested derivative is
    evaluated at each window center.  The window_size samples at each edge
    are left at zero.
    """
    if derivative > polynomial_order:
        # Call syntax is valid on both Python 2 and 3; the original
        # 'raise Exception, "..."' form is a SyntaxError under Python 3.
        raise Exception("derivative must be <= polynomial_order")
    data_length = len(x)
    result = np.zeros(data_length)
    # range() instead of xrange() keeps this portable to Python 3.
    for i in range(window_size, data_length - window_size):
        start, end = i - window_size, i + window_size + 1
        f = sg_filter(x[start:end], polynomial_order, derivative)
        result[i] = np.dot(f, y[start:end])
    # The fitted coefficient of a**derivative equals f^(d)(0) / d!, so scale
    # back up.  factorial(1) == 1, hence only needed for derivative >= 2.
    if derivative > 1:
        result *= math.factorial(derivative)
    return result
if __name__ == "__main__":
##########################################################################
# args
parser = argparse.ArgumentParser(description='PLots line graphs from CSV file')
parser.register('type', 'bool', str2bool)
parser.add_argument('-i', metavar='INPUT_FILE', type=str, required=True, help='CSV input file name')
parser.add_argument('-f', metavar='DERIVATIVE_ORDER', type=int, required=False, default=1, help='Order of derivative (0 -> original data, 1 -> velocity, 2 -> acceleration)')
parser.add_argument('-r', metavar='NUMBER_POINTS_FOR_SMOOTHING', type=int, required=False, default=7, help='Number of data points that will be used to perform Savitzky-Golay filtering (0 disables smoothing, and result is the max of -r and data_points_nr * --smooth)')
parser.add_argument('--smooth', metavar='NUMBER_PERCENTAGE_OF_POINTS_FOR_SMOOTHING', type=float, required=False, default=0.05, help='Percentage of data points that will be used to perform Savitzky-Golay filtering')
parser.add_argument('-g', metavar='POLYNOMIAL_ORDER_FOR_SMOOTHING', type=int, required=False, default=2, help='Polynomial order that will be used to perform Savitzky-Golay filtering')
parser.add_argument('-o', metavar='OUTPUT_FILE_NAME', type=str, required=False, default='results', help='Output file name (exports in svg, eps and pdf)')
parser.add_argument('-p', metavar='INPUT_FILE_WITH_POSITIONS', type='bool', required=False, default=True, help='Whether the file has position or quaternion orientations (computes either the linear or angular velocity)')
parser.add_argument('-x', metavar='FILES_TIME_COLUNM', type=str, required=False, default=0, help='CSV data column with the x data for each file split by -')
parser.add_argument('--sort', metavar='SORT_TIME_COLUNM', type='bool', required=False, default=1, help='Sort all data by their time stamp')
parser.add_argument('-y', metavar='FILES_POSE_START_COLUNMS', type=str, required=False, default=1, help='CSV data columns with the y data separated with + within file and split by - for each file')
parser.add_argument('-z', metavar='FILE_VALUE_DELIMITER', type=str, required=False, default=',', help='Value delimiter in each line')
parser.add_argument('-e', metavar='FILE_N_SKIP_ROWS', type=int, required=False, default=1, help='Number of rows to skip when reading files')
parser.add_argument('-w', metavar='PLOT_LINE_WIDTH', type=float, required=False, default=0.25, help='Plot line width')
parser.add_argument('-u', metavar='PLOT_LINE_STYLE', type=str, required=False, default='-', help='Plot line style')
parser.add_argument('-a', metavar='PLOT_LINE_STYLE_ALPHA', type=float, required=False, default=0.75, help='Plot line alpha')
parser.add_argument('-j', metavar='PLOT_LINE_MARKER', type=str, required=False, default='.', help='Plot line marker')
parser.add_argument('-k', metavar='PLOT_LINE_MARKER_SIZE_WIDTH_MULTIPLIER', type=float, required=False, default=0.75, help='Plot line marker size will be PLOT_LINE_WIDTH * PLOT_LINE_MARKER_SIZE_WIDTH_MULTIPLIER')
parser.add_argument('-m', metavar='X_AXIS_SCALE', type=float, required=False, default=0.000000001, help='X axis scale')
parser.add_argument('-n', metavar='Y_AXIS_SCALE', type=float, required=False, default=1, help='Y axis scale')
parser.add_argument('-b', metavar='X_AXIS_LABEL', type=str, required=False, default='X', help='X axis label')
parser.add_argument('-v', metavar='Y_AXIS_LABEL', type=str, required=False, default='Y', help='Y axis label')
parser.add_argument('-l', metavar='Y_LINES_LABELS', type=str, required=False, default='Data', help='Legend for each y plot line, separated by +')
parser.add_argument('-c', metavar='Y_AXIS_COLORS', type=str, required=False, default='g', help='Y axis colors, separated by + in hex format #rrggbb')
parser.add_argument('-t', metavar='GRAPH_TITLE', type=str, required=False, default='Paths', help='Graph title')
parser.add_argument('--reset', metavar='RESET_X_VALUES', type='bool', required=False, default=False, help='Reset the x values so that they are in range [0..(max-min)]')
parser.add_argument('--grid', metavar='DISPLAY_GRID', type='bool', required=False, default=True, help='Show graph grid')
parser.add_argument('-s', metavar='SAVE_GRAPH', type='bool', required=False, default=True, help='Save graphs to files using the name prefix specified with -o')
parser.add_argument('-q', metavar='ADD_FILE_EXTENSION_TO_PATH', type='bool', required=False, default=False, help='Prepend to path the extension of the output file')
parser.add_argument('-d', metavar='DISPLAY_GRAPH', type='bool', required=False, default=False, help='Show graph')
args = parser.parse_args()
##########################################################################
# graph setup
fig, ax = plt.subplots(figsize=(19.2, 10.8), dpi=100)
plt.xlabel(args.b)
plt.ylabel(args.v)
graph_title = plt.title(args.t, fontsize=16)
graph_title.set_y(1.01)
plt.minorticks_on()
if args.grid:
plt.grid(b=True, which='major', color='k', linestyle='--', linewidth=0.30, alpha=0.5)
plt.grid(b=True, which='minor', color='k', linestyle=':', linewidth=0.01, alpha=0.2)
x_min = sys.maxint
x_max = -sys.maxint
y_min = sys.maxint
y_max = -sys.maxint
##########################################################################
# graph plotting
file_names = args.i.split('+')
time_columns = args.x.split('-')
pose_start_columns_per_file = args.y.split('-')
y_colors = args.c.split('+')
y_labels = args.l.split('+')
for idx_file, file in enumerate(file_names):
time_values = np.loadtxt(file, dtype=float, delimiter=args.z, skiprows=args.e, usecols=(int(time_columns[idx_file]),))
if (args.sort):
time_values_sorted_indexs = np.argsort(time_values)
time_values = time_values[time_values_sorted_indexs]
if args.m != 1:
time_values *= args.m
if args.reset:
time_values -= np.min(time_values)
pose_x = np.loadtxt(file, dtype=float, delimiter=args.z, skiprows=args.e, usecols=(int(pose_start_columns_per_file[idx_file]),))
pose_y = np.loadtxt(file, dtype=float, delimiter=args.z, skiprows=args.e, usecols=(int(pose_start_columns_per_file[idx_file]) + 1,))
pose_z = np.loadtxt(file, dtype=float, delimiter=args.z, skiprows=args.e, usecols=(int(pose_start_columns_per_file[idx_file]) + 2,))
if (args.sort):
pose_x = pose_x[time_values_sorted_indexs]
pose_y = pose_y[time_values_sorted_indexs]
pose_z = pose_z[time_values_sorted_indexs]
values_count = np.min([pose_x.size, pose_y.size, pose_z.size])
y_values = np.zeros(values_count)
if args.p:
for i in xrange(1, values_count):
y_values[i] = euclidean_distance(np.array([ pose_x[i-1], pose_y[i-1], pose_z[i-1] ]), np.array([ pose_x[i], pose_y[i], pose_z[i] ]))
else:
pose_w = np.loadtxt(file, dtype=float, delimiter=args.z, skiprows=args.e, usecols=(int(pose_start_columns_per_file[idx_file]) + 3,))
if (args.sort):
pose_w = pose_w[time_values_sorted_indexs]
for i in range(1, values_count):
y_values[i] = angular_difference_degrees( np.quaternion(pose_w[i-1], pose_x[i-1], pose_y[i-1], pose_z[i-1]), np.quaternion(pose_w[i], pose_x[i], pose_y[i], pose_z[i]) )
if args.n != 1:
y_values *= args.n
if args.f >= 0:
if args.r == 0:
for i in xrange(0, values_count-1):
time_diff = time_values[i+1] - time_values[i]
y_values[i] = y_values[i] / (time_diff ** args.f)
else:
y_values = np.cumsum(y_values)
if args.f >= 1:
number_smooth_points = int(np.max([args.r, values_count * args.smooth]))
y_values = smooth(time_values, y_values, number_smooth_points, args.g, args.f)
x_min = np.min([np.min(time_values), x_min])
x_max = np.max([np.max(time_values), x_max])
y_min = np.min([np.min(y_values), y_min])
y_max = np.max([np.max(y_values), y_max])
plt.plot(time_values, y_values, y_colors[idx_file], linewidth=args.w, label=y_labels[idx_file], alpha=args.a, linestyle=args.u, marker=args.j, markersize=args.w * args.k)
plt.axis('tight')
axlim = list(plt.axis())
diff_x = abs(x_max - x_min)
diff_y = abs(y_max - y_min)
axlim[0] = x_min - diff_x * 0.01
axlim[1] = x_max + diff_x * 0.01
axlim[2] = y_min - diff_y * 0.02
axlim[3] = y_max + diff_y * (0.042 * len(y_labels))
if axlim[0] == axlim[1]:
axlim[0] -= 1
axlim[1] += 1
if axlim[2] == axlim[3]:
axlim[2] -= 1
axlim[3] += 1
plt.axis(axlim)
graph_legend = plt.legend(fancybox=True, prop={'size':12})
graph_legend.get_frame().set_alpha(0.75)
plt.draw()
##########################################################################
# output
if args.s:
if args.q:
output_path = ntpath.dirname(args.o)
output_file_name=ntpath.basename(args.o)
plt.savefig('%s/svg/%s.svgz' % (output_path, output_file_name), bbox_inches='tight')
plt.savefig('%s/eps/%s.eps' % (output_path, output_file_name), bbox_inches='tight')
plt.savefig('%s/pdf/%s.pdf' % (output_path, output_file_name), bbox_inches='tight')
# plt.savefig('%s/png/%s.png' % (output_path, output_file_name), dpi=300, bbox_inches='tight')
else:
plt.savefig('%s.svgz' % args.o, bbox_inches='tight')
plt.savefig('%s.eps' % args.o, bbox_inches='tight')
plt.savefig('%s.pdf' % args.o, bbox_inches='tight')
# plt.savefig('%s.png' % args.o, dpi=300, bbox_inches='tight')
if args.d:
plt.show()
exit(0)
| bsd-3-clause |
IGITUGraz/spore-nest-module | examples/center_out_showcase/python/snn_utils/plotter/backends/mpl.py | 3 | 1721 | import logging
import matplotlib.pyplot as plt
import snn_utils.plotter as plotter
logger = logging.getLogger(__name__)
def configure_matplotlib():
    """Put matplotlib into interactive mode with a white theme on TkAgg."""
    plt.ion()  # interactive mode
    plt.rcParams.update({
        'figure.facecolor': 'white',
        'axes.facecolor': 'white',
    })
    plt.switch_backend('TkAgg')
class MatplotlibWindow(plotter.PlotWindow):
    """matplotlib-backed plot window.

    Wraps plotter.PlotWindow with an interactive canvas: a right mouse
    click toggles drawing on/off, and window resizes schedule a
    tight_layout pass before the next draw.
    """

    def __init__(self, plot_builder, data_source, max_time_window=None, enabled=True):
        plotter.PlotWindow.__init__(self, plot_builder, data_source, max_time_window)
        self._enabled = enabled          # when False, periodic drawing is suspended
        self._layout_on_update = True    # request a tight_layout on the next draw()
        self._fig.canvas.mpl_connect('resize_event', self._on_resize)
        self._fig.canvas.mpl_connect('button_press_event', self._on_click)
        self._update_window_title()

    def _create_figure(self):
        """PlotWindow hook: create the backing matplotlib figure."""
        return plt.figure()

    def _draw(self):
        """Render the canvas immediately."""
        self._fig.canvas.draw()

    def _update_window_title(self):
        """Reflect the enabled/disabled drawing state in the window title."""
        self._fig.canvas.set_window_title("Plotter [{}]".format(["disabled", "enabled"][self._enabled]))

    def _on_resize(self, resize_event):
        # Defer the (potentially expensive) tight_layout to the next draw().
        self._layout_on_update = True

    def _on_click(self, mouse_event):
        if mouse_event.button == 3:
            # right mouse button: toggle drawing on/off
            self._enabled = not self._enabled
            self._update_window_title()
            logger.info("Plotter: drawing {}".format(["disabled", "enabled"][self._enabled]))

    def draw(self):
        """Flush pending GUI events, re-layout if requested, then redraw."""
        self.get_figure().canvas.flush_events()
        if self._layout_on_update:
            self._layout_on_update = False
            self._fig.tight_layout()
            if not self._enabled:
                # Render once so the re-layout becomes visible even while
                # plotting is disabled.
                self._draw()
        if self._enabled:
            plotter.PlotWindow.draw(self)
| gpl-2.0 |
cgarrard/osgeopy-code | Chapter13/chapter13.py | 1 | 7575 | import os
import numpy as np
from osgeo import gdal, ogr
import matplotlib.pyplot as plt
import mapnik
# Set this variable to your osgeopy-data directory so that the following
# examples will work without editing. We'll use the os.path.join() function
# to combine this directory and the filenames to make a complete path. Of
# course, you can type the full path to the file for each example if you'd
# prefer.
data_dir = r'D:\osgeopy-data'
# data_dir =
############################## 13.1 Matplotlib ###########################
import matplotlib.pyplot as plt
# Turn interactive mode on if you want.
#plt.ion()
######################### 13.1.1 Plotting vector data ####################
# Plot a line.
x = range(10)
y = [i * i for i in x]
plt.plot(x, y)
plt.show()
# Plot dots.
plt.plot(x, y, 'ro', markersize=10)
plt.show()
# Make a polygon.
x = list(range(10))
y = [i * i for i in x]
x.append(0)
y.append(0)
plt.plot(x, y, lw=5)
plt.show()
# Draw polygons as patches instead.
from matplotlib.path import Path
import matplotlib.patches as patches
coords = [(0, 0), (0.5, 1), (1, 0), (0, 0)]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO]
path = Path(coords, codes)
patch = patches.PathPatch(path, facecolor='red')
plt.axes().add_patch(patch)
plt.show()
# This one has a hole in it. The inner ring must go in the
# opposite direction of the outer ring. In this example,
# outer_coords are clockwise and inner_coords are
# counter-clockwise.
outer_coords = [(0, 0), (0.5, 1), (1, 0), (0, 0)]
outer_codes = [Path.MOVETO, Path.LINETO,
Path.LINETO, Path.LINETO]
inner_coords = [(0.4, 0.4), (0.5, 0.2),
(0.6, 0.4), (0.4, 0.4)]
inner_codes = [Path.MOVETO, Path.LINETO,
Path.LINETO, Path.LINETO]
coords = np.concatenate((outer_coords, inner_coords))
codes = np.concatenate((outer_codes, inner_codes))
path = Path(coords, codes)
patch = patches.PathPatch(path, facecolor='red')
plt.axes().add_patch(patch)
plt.show()
################################## Animation ############################
# Animate the albatross GPS locations from chapter 7.
# First set things up.
ds = ogr.Open(os.path.join(data_dir, 'Galapagos'))
gps_lyr = ds.GetLayerByName('albatross_lambert')
extent = gps_lyr.GetExtent()
fig = plt.figure()
plt.axis('equal')
plt.xlim(extent[0] - 1000, extent[1] + 1000)
plt.ylim(extent[2] - 1000, extent[3] + 1000)
plt.gca().get_xaxis().set_ticks([])
plt.gca().get_yaxis().set_ticks([])
# Plot the background continents.
import ch13funcs
land_lyr = ds.GetLayerByName('land_lambert')
row = next(land_lyr)
geom = row.geometry()
for i in range(geom.GetGeometryCount()):
ch13funcs.plot_polygon(geom.GetGeometryRef(i))
# Get the timestamps for one of the birds.
timestamps, coordinates = [], []
gps_lyr.SetAttributeFilter("tag_id = '2131-2131'")
for row in gps_lyr:
timestamps.append(row.GetField('timestamp'))
coordinates.append((row.geometry().GetX(), row.geometry().GetY()))
# Initialize the points and annotation.
point = plt.plot(None, None, 'o')[0]
label = plt.gca().annotate('', (0.25, 0.95), xycoords='axes fraction')
label.set_animated(True)
# Write a function that tells matplotlib which items will change.
def init():
    """Animation init callback: blank the point and return the blit artists.

    Relies on the module-level `point` and `label` artists created above.
    """
    point.set_data(None, None)
    return point, label
# Write a function to update the point location and annotation.
def update(i, point, label, timestamps, coordinates):
    """Animation frame callback: show timestamp i and move the point there.

    Returns the artists changed this frame (required for blitting).
    """
    label.set_text(timestamps[i])
    point.set_data(coordinates[i][0], coordinates[i][1])
    return point, label
# Finally run the animation.
import matplotlib.animation as animation
a = animation.FuncAnimation(
fig, update, frames=len(timestamps), init_func=init,
fargs=(point, label, timestamps, coordinates),
interval=25, blit=True, repeat=False)
plt.show()
# Write a function that rounds timestamps.
from datetime import datetime, timedelta
def round_timestamp(ts, minutes=60):
    """Round a datetime to the nearest *minutes* boundary (ties round up)."""
    shifted = ts + timedelta(minutes=minutes / 2.0)
    remainder = timedelta(
        minutes=shifted.minute % minutes,
        seconds=shifted.second,
        microseconds=shifted.microsecond)
    return shifted - remainder
# Initialize the timestamp and coordinates lists with the first set of values.
gps_lyr.SetAttributeFilter("tag_id = '2131-2131'")
time_format = '%Y-%m-%d %H:%M:%S.%f'
row = next(gps_lyr)
timestamp = datetime.strptime(row.GetField('timestamp'), time_format)
timestamp = round_timestamp(timestamp)
timestamps = [timestamp]
coordinates = [(row.geometry().GetX(), row.geometry().GetY())]
# Now get timestamps and coordinates, but fill in empty time slots with
# filler data.
hour = timedelta(hours=1)
for row in gps_lyr:
timestamp = datetime.strptime(row.GetField('timestamp'), time_format)
timestamp = round_timestamp(timestamp)
while timestamps[-1] < timestamp:
timestamps.append(timestamps[-1] + hour)
coordinates.append((None, None))
coordinates[-1] = (row.geometry().GetX(), row.geometry().GetY())
# Change the update function so it only updates coordinates if there are
# some for the current timestamp.
def update(i, point, label, timestamps, coordinates):
    """Animation frame callback that tolerates filler frames.

    The timestamp label always advances; the point only moves when frame i
    has real coordinates (filler frames carry (None, None)), so the marker
    stays at its last known position during data gaps.
    """
    label.set_text(timestamps[i])
    if coordinates[i][0] is not None:
        point.set_data(coordinates[i][0], coordinates[i][1])
    return point, label
# Run the animation again, but now it has constant time intervals.
a = animation.FuncAnimation(
fig, update, frames=len(timestamps), init_func=init,
fargs=(point, label, timestamps, coordinates),
interval=25, blit=True, repeat=False)
plt.show()
######################### 13.1.2 Plotting raster data ####################
ds = gdal.Open(r'D:\osgeopy-data\Washington\dem\sthelens_utm.tif')
data = ds.GetRasterBand(1).ReadAsArray()
# Default color ramp
plt.imshow(data)
plt.show()
# Grayscale
plt.imshow(data, cmap='gray')
plt.show()
# Use the function from listing 13.5 to get overview data and plot it.
from listing13_5 import get_overview_data
fn = r'D:\osgeopy-data\Landsat\Washington\p047r027_7t20000730_z10_nn10.tif'
data = get_overview_data(fn)
data = np.ma.masked_equal(data, 0)
plt.imshow(data, cmap='gray')
plt.show()
# Plot it using stretched data.
mean = np.mean(data)
std_range = np.std(data) * 2
plt.imshow(data, cmap='gray', vmin=mean-std_range, vmax=mean+std_range)
plt.show()
# Try plotting 3 bands.
os.chdir(r'D:\osgeopy-data\Landsat\Washington')
red_fn = 'p047r027_7t20000730_z10_nn30.tif'
green_fn = 'p047r027_7t20000730_z10_nn20.tif'
blue_fn = 'p047r027_7t20000730_z10_nn10.tif'
red_data = get_overview_data(red_fn)
green_data = get_overview_data(green_fn)
blue_data = get_overview_data(blue_fn)
data = np.dstack((red_data, green_data, blue_data))
plt.imshow(data)
plt.show()
# Plot 3 stretched bands.
from listing13_6 import stretch_data
red_data = stretch_data(get_overview_data(red_fn), 2)
green_data = stretch_data(get_overview_data(green_fn), 2)
blue_data = stretch_data(get_overview_data(blue_fn), 2)
alpha = np.where(red_data + green_data + blue_data > 0, 1, 0)
data = np.dstack((red_data, green_data, blue_data, alpha))
plt.imshow(data)
plt.show()
######################### 13.1.3 Plotting 3D data ####################
# See listing 13.7.
##################### 13.2.2 Storing mapnik data as xml ###############
# Run listing 13.10 in order to save the xml file you need with the
# correct paths for your machine.
# Load the xml file and create an image from it.
m = mapnik.Map(400, 300)
m.zoom_to_box(mapnik.Box2d(-90.3, 29.7, -89.5, 30.3))
mapnik.load_map(m, r'd:\temp\nola_map.xml')
mapnik.render_to_file(m, r'd:\temp\nola4.png')
# See listing 13.9-edited to see the hydrography xml in action.
| mit |
luyaozou/PySpec | PySpec.py | 1 | 24448 | #! /usr/bin/env python
#-*- coding: utf-8 -*-
"""
Integrated Python GUI for spectral analysis.
Designed functionalities:
> Plot spectra (x,y) file
> Fit spectral lines to common lineshape functions (Gaussian/Lorentzian/Voigt)
> Baseline removal
> Peak selection
> Catalog simulation (JPL/CDMS)
Package Requirments:
> numpy 1.8+
> scipy 0.16+
> PyQt5
> matplotlib
Written by Luyao Zou @ https://github.com/luyaozou
"""
__author__ = 'Luyao Zou, zouluyao@hotmail.com'
__version__ = 'Beta 0.1'
__date__ = 'Date: 07/22/2016'
from PyQt5 import QtWidgets, QtCore, QtGui
import sys
import os
import numpy as np
import matplotlib as mpl
mpl.use('Qt5Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
# custom module
import sflib
class FitParameter:
    ''' Store Fit Parameters '''

    def __init__(self, peak):
        self.ftype = 0          # lineshape: 0 = Gaussian, 1 = Lorentzian
        self.der = 0            # derivative order (up to 4)
        self.peak = peak        # number of peaks to fit
        self.par_per_peak = 3   # lineshape parameters per peak
        self.boxwin = 1         # boxcar smoothing window
        self.rescale = 1        # y-intensity rescaling factor
        self.deg = 0            # polynomial baseline degree
        self.par_name = self.get_par_name(self.ftype)   # parameter name list
        self.par = np.empty(self.par_per_peak * peak)   # flat parameter vector
        self.smooth_edge = False

    def get_par_name(self, ftype):
        '''Return the per-peak parameter names for a lineshape type.'''
        if ftype == 1:      # Lorentzian
            return ['mu', 'gamma', 'A']
        elif not ftype:     # Gaussian
            return ['mu', 'sigma', 'A']
        # any other type is unknown -> implicitly returns None (as before)

    def get_function(self):
        '''Build the sflib fit function for the current settings.'''
        return sflib.Function(self.ftype, self.der, self.peak)
class FitStatus:
    ''' Store Fit Status '''

    def __init__(self):
        self.stat = 2               # current status code (starts as "file not found")
        self.input_valid = True     # tracks whether the user input passed validation
        # status code -> human-readable description
        self.stat_dict = {
            0: 'Fit successful',
            1: 'Fit failed',
            2: 'File not found',
            3: 'Unsupported file format',
            4: 'Baseline removal failed',
            5: 'Input Invalid',
        }
        self.file_idx = 0           # index of the file currently being processed

    def print_stat(self):
        '''Return the human-readable description of the current status.'''
        return self.stat_dict[self.stat]
class FitMainGui(QtWidgets.QMainWindow):
# define main GUI window of the simulator
    def __init__(self, parent=None): # initialize GUI
        """Build the main fitter window: state, menu bar, status bar, layout.

        NOTE(review): `parent` is accepted but not forwarded to
        super().__init__() -- confirm this is intentional.
        """
        super().__init__()
        # initialize fit parameter & fit status instance
        self.fit_par = FitParameter(1)
        self.fit_stat = FitStatus()
        # initialize input validity tracker
        self.fit_stat.input_valid = True
        # get log directory (local directory of the script)
        self.log_dir = os.getcwd()
        # get file directory (read last directory from .cfg file)
        self.current_dir = self.get_file_dir()
        # add aborted and successful file list
        self.list_aborted_file = []
        self.list_success_file = []
        # add menubar
        openAction = QtWidgets.QAction('Open', self)
        openAction.setShortcut('Ctrl+O')
        openAction.setStatusTip('Open Spectra')
        openAction.triggered.connect(self.open_file)
        self.menu = self.menuBar()
        self.menu.setNativeMenuBar(False)
        self.menu.addAction(openAction)
        # add status bar
        self.statusbar = self.statusBar()
        # set GUI layout
        self.set_main_grid()
        self.widget_main = QtWidgets.QWidget()
        self.widget_main.setLayout(self.layout_main)
        self.setCentralWidget(self.widget_main)
        # set program title
        self.setWindowTitle('Fit Spectra!')
        # show program window
        self.show()
    def set_main_grid(self):
        """Assemble the main window layout.

        Top-left to bottom-right: current-file label, matplotlib toolbar and
        canvas, the fit-settings column (lineshape, derivative, smoothing,
        baseline, peak count, scrollable initial-guess grid) and the
        Fit/Quit buttons.
        """
        self.layout_main = QtWidgets.QGridLayout()
        self.layout_main.setSpacing(6)
        # add current_file label
        self.label_current_file = QtWidgets.QLabel()
        self.layout_main.addWidget(QtWidgets.QLabel('Current File:'), 0, 0)
        self.layout_main.addWidget(self.label_current_file, 0, 1, 1, 2)
        # add matplotlib canvas
        self.fig = plt.figure()
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setFocus()
        self.mpl_toolbar = NavigationToolbar(self.canvas, self)
        self.click_counter = 0 # initialize click counter
        # connect the canvas to matplotlib standard key press events
        # (mpl_key_press / mpl_click are presumably defined further down in
        # this file -- not visible here)
        self.canvas.mpl_connect('key_press_event', self.mpl_key_press)
        # connect the canvas to mouse click events
        self.canvas.mpl_connect('button_press_event', self.mpl_click)
        self.layout_main.addWidget(self.mpl_toolbar, 1, 0, 1, 3)
        self.layout_main.addWidget(self.canvas, 2, 0, 1, 3)
        # add fit option layout
        self.layout_setting = QtWidgets.QGridLayout()
        # select lineshape
        self.combo_ftype = QtWidgets.QComboBox()
        self.combo_ftype.addItems(['Gaussian', 'Lorentzian'])
        # select number of derivatives
        self.combo_der = QtWidgets.QComboBox()
        self.combo_der.addItems(['0', '1', '2', '3', '4'])
        self.check_boxcar = QtWidgets.QCheckBox('Boxcar Smooth?')
        self.check_rescale = QtWidgets.QCheckBox('Rescale Intensity?')
        self.edit_boxcar = QtWidgets.QLineEdit('1')
        self.edit_rescale = QtWidgets.QLineEdit('1')
        self.edit_deg = QtWidgets.QLineEdit('0')
        self.edit_num_peak = QtWidgets.QLineEdit('1')
        self.layout_setting.addWidget(QtWidgets.QLabel('Lineshape Function'), 0, 0)
        self.layout_setting.addWidget(self.combo_ftype, 1, 0)
        self.layout_setting.addWidget(QtWidgets.QLabel('Derivative'), 0, 1)
        self.layout_setting.addWidget(self.combo_der, 1, 1)
        self.layout_setting.addWidget(self.check_boxcar, 2, 0)
        self.layout_setting.addWidget(self.edit_boxcar, 2, 1)
        self.layout_setting.addWidget(self.check_rescale, 3, 0)
        self.layout_setting.addWidget(self.edit_rescale, 3, 1)
        self.layout_setting.addWidget(QtWidgets.QLabel('PolyBaseline Degree'), 4, 0)
        self.layout_setting.addWidget(self.edit_deg, 4, 1)
        self.layout_setting.addWidget(QtWidgets.QLabel('Number of Peaks'), 5, 0)
        self.layout_setting.addWidget(self.edit_num_peak, 5, 1)
        self.layout_setting.addWidget(QtWidgets.QLabel('<<< Initial Guess >>>'), 6, 0, 2, 2)
        # connect signals
        # select combo box items
        self.combo_ftype.currentIndexChanged.connect(self.get_ftype)
        self.combo_der.currentIndexChanged.connect(self.get_der)
        # display/hide checked edit box
        self.edit_boxcar.hide()
        self.edit_rescale.hide()
        self.check_boxcar.stateChanged.connect(self.show_boxcar)
        self.check_rescale.stateChanged.connect(self.show_rescale)
        # check input validity
        self.edit_boxcar.textChanged.connect(self.check_int_validity)
        self.edit_rescale.textChanged.connect(self.check_double_validity)
        self.edit_deg.textChanged.connect(self.check_int_validity)
        self.edit_num_peak.textChanged.connect(self.set_par_layout)
        # add fit parameter layout for initial guess
        self.widget_par = QtWidgets.QWidget()
        self.layout_par = QtWidgets.QGridLayout()
        self.edit_par = []
        self.set_par_layout()
        self.widget_par.setLayout(self.layout_par)
        self.scroll_par = QtWidgets.QScrollArea()
        self.scroll_par.setWidget(self.widget_par)
        self.scroll_par.setWidgetResizable(True)
        self.scroll_par.setMaximumHeight(600)
        self.layout_setting.addWidget(self.scroll_par, 8, 0, 1, 2)
        self.layout_main.addLayout(self.layout_setting, 2, 3)
        # add fit & Quit button
        btn_fit = QtWidgets.QPushButton('Fit Spectrum', self)
        btn_quit = QtWidgets.QPushButton('Quit', self)
        btn_quit.setShortcut('Ctrl+Q')
        self.layout_main.addWidget(btn_fit, 0, 3)
        self.layout_main.addWidget(btn_quit, 3, 3)
        btn_fit.clicked.connect(self.fit_routine)
        btn_quit.clicked.connect(self.close)
def set_par_layout(self):
text = self.edit_num_peak.text()
try:
self.fit_par.peak = abs(int(text))
green = '#00A352'
self.edit_num_peak.setStyleSheet('border: 3px solid %s' % green)
except ValueError:
red = '#D63333'
self.edit_num_peak.setStyleSheet('border: 3px solid %s' % red)
self.fit_par.peak = 0
# set initial guess layout
# clear previous parameters
self.fit_par.par = np.zeros(self.fit_par.peak * self.fit_par.par_per_peak)
self.edit_par = [] # clear previous widgets
self.clear_item(self.layout_par) # clear layout
self.click_counter = 0 # reset click counter
# add widgets
for i in range(self.fit_par.par_per_peak * self.fit_par.peak):
peak_index = i // self.fit_par.par_per_peak + 1
par_index = i % self.fit_par.par_per_peak
self.edit_par.append(QtWidgets.QLineEdit())
if par_index: # starting of a new peak
self.layout_par.addWidget(QtWidgets.QLabel(
'--- Peak {:d} ---'.format(peak_index)),
4*(peak_index-1), 0, 1, 2)
self.layout_par.addWidget(QtWidgets.QLabel(
self.fit_par.par_name[par_index]), i+peak_index, 0)
self.layout_par.addWidget(self.edit_par[i], i+peak_index, 1)
self.edit_par[i].setText('0.5') # set default value
self.edit_par[i].textChanged.connect(self.check_double_validity)
# --------- get all fitting options ---------
    def get_ftype(self, ftype):
        """Slot for combo_ftype: store the lineshape index and its parameter names."""
        self.fit_par.ftype = ftype
        self.fit_par.par_name = self.fit_par.get_par_name(ftype)
        # refresh parameter layout (names shown next to each edit box changed)
        self.set_par_layout()
    def get_der(self, der):
        """Slot for combo_der: store the requested derivative order."""
        self.fit_par.der = der
    def get_par(self):
        """Read initial-guess parameters and fit options from the edit boxes.

        Values are stored on self.fit_par; on invalid input the fit status
        is set to 5 ('Input Invalid') instead.
        """
        # if input is valid
        # NOTE(review): input_valid is initialized to True but compared to 2
        # here -- presumably a counter/sentinel maintained by the
        # check_*_validity slots defined elsewhere in this file; confirm.
        if self.fit_stat.input_valid == 2:
            for i in range(self.fit_par.par_per_peak * self.fit_par.peak):
                self.fit_par.par[i] = float(self.edit_par[i].text())
            self.fit_par.boxwin = abs(int(self.edit_boxcar.text()))
            self.fit_par.rescale = abs(float(self.edit_rescale.text()))
            self.fit_par.deg = abs(int(self.edit_deg.text()))
        else:
            self.fit_stat.stat = 5
# --------- fit routine ---------
    def fit_routine(self):
        """Top-level fit workflow.

        Runs one fit attempt, then asks the user to save / retry / abort via
        message boxes.  'Retry' leaves the current file loaded for another
        attempt; 'Abort' records it as aborted and moves on.
        """
        # if data loaded successfully
        if not self.fit_stat.stat:
            data_table, popt, uncertainty, ppoly = self.fit_try()
            # if fit failed, print information
            if self.fit_stat.stat:
                failure = QtWidgets.QMessageBox.information(self, 'Failure',
                    self.fit_stat.print_stat(), QtWidgets.QMessageBox.Retry |
                    QtWidgets.QMessageBox.Abort, QtWidgets.QMessageBox.Retry)
                # choose retry or ignore
                if failure == QtWidgets.QMessageBox.Retry:
                    pass
                elif failure == QtWidgets.QMessageBox.Abort:
                    self.pass_file()
            # if fit successful, ask user for save|retry option
            else:
                success = QtWidgets.QMessageBox.question(self, 'Save?',
                    'Save the fit if it looks good. \n ' +
                    'Otherwise retry a fit or abort this file ',
                    QtWidgets.QMessageBox.Save | QtWidgets.QMessageBox.Retry |
                    QtWidgets.QMessageBox.Abort, QtWidgets.QMessageBox.Save)
                if success == QtWidgets.QMessageBox.Save:
                    # save file
                    self.save_file(data_table, popt, uncertainty, ppoly)
                    # go to next spectrum
                    self.next_file()
                elif success == QtWidgets.QMessageBox.Retry:
                    pass
                elif success == QtWidgets.QMessageBox.Abort:
                    self.pass_file()
    def fit_try(self):
        """Run one fit attempt with the current settings.

        Returns (data_table, popt, uncertainty, ppoly); all four are None
        when parameter validation or data loading fails.  Updates
        self.fit_stat.stat as a side effect and plots the result on success.
        """
        # get fitting parameters
        self.get_par()
        if not self.fit_stat.stat:
            if self.fit_par.peak:
                # get fit function
                f = self.fit_par.get_function()
                # re-load data with boxcar win and rescale
                xdata, ydata = self.load_data()
                popt, uncertainty, noise, ppoly, self.fit_stat.stat = sflib.fit_spectrum(f,
                    xdata, ydata, self.fit_par.par, self.fit_par.deg, self.fit_par.smooth_edge)
            else: # if no peak, fit baseline
                xdata, ydata = self.load_data()
                popt, uncertainty, noise, ppoly, self.fit_stat.stat = sflib.fit_baseline(xdata, ydata, self.fit_par.deg)
        # if fit successful, plot fit
        if not self.fit_stat.stat and self.fit_par.peak:
            # Make plot for successful fit
            fit = f.get_func()(xdata, *popt)
            baseline = np.polyval(ppoly, xdata - np.median(xdata))
            residual = ydata - fit - baseline   # NOTE(review): computed but unused
            self.statusbar.showMessage('Noise {:.4f}'.format(noise))
            self.plot_spect(xdata, ydata, fit, baseline)
            # concatenate data table
            data_table = np.column_stack((xdata, ydata, fit, baseline))
            return data_table, popt, uncertainty, ppoly
        elif not self.fit_stat.stat:
            # baseline-only fit: report a flat zero "fit" curve
            baseline = np.polyval(ppoly, xdata - np.median(xdata))
            residual = ydata - baseline         # NOTE(review): computed but unused
            fit = np.zeros_like(ydata)
            self.statusbar.showMessage('Noise {:.4f}'.format(noise))
            self.plot_spect(xdata, ydata, fit, baseline)
            data_table = np.column_stack((xdata, ydata, fit, baseline))
            return data_table, popt, uncertainty, ppoly
        else:
            return None, None, None, None
    def load_data(self): # load data
        """Load the current spectrum file via sflib and plot the raw data.

        Returns (xdata, ydata) on success, (None, None) on failure.  If no
        file has been selected yet, offers to open the file dialog instead.
        """
        # check if there is a file name
        try:
            filename = '/'.join([self.current_dir, self.current_file])
        except AttributeError:
            # current_file not set yet -- prompt the user to pick one
            select_file = QtWidgets.QMessageBox.warning(self, 'No File!',
                'No spectrum file has been selected. Do you want to select now?',
                QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
                QtWidgets.QMessageBox.Yes)
            if select_file == QtWidgets.QMessageBox.Yes:
                # open_file() triggers load_data() again for the chosen file
                self.open_file()
            else:
                self.fit_stat.stat = 2 # exception: file not found
            return None, None
        # try load data
        xdata, ydata, self.fit_stat.stat = sflib.read_file(filename,
            self.fit_par.boxwin, self.fit_par.rescale)
        # if file is readable, plot raw data and return xy data
        if not self.fit_stat.stat:
            self.plot_data(xdata, ydata)
            return xdata, ydata
        else:
            return None, None
    def plot_data(self, xdata, ydata): # plot raw data file before fit
        """Plot the raw spectrum on the embedded canvas prior to fitting."""
        self.fig.clear()
        ax = self.fig.add_subplot(111)
        # NOTE(review): Axes.hold() was deprecated in matplotlib 2.x and
        # removed in 3.x; this call fails on modern matplotlib -- confirm
        # the targeted matplotlib version.
        ax.hold(False)
        ax.plot(xdata, ydata, 'k-')
        ax.set_xlabel('Frequency (MHz)')
        ax.set_ylabel('Intensity (a.u.)')
        self.canvas.draw()
def plot_spect(self, xdata, ydata, fit, baseline): # plot fitted spectra
self.fig.clear()
ax = self.fig.add_subplot(111)
ax.plot(xdata, ydata, 'k-', xdata, fit+baseline, 'r-',
xdata, baseline, 'b--')
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('Intensity (a.u.)')
self.canvas.draw()
# --------- file handling ---------
    def open_file(self):
        """Let the user pick one or more spectrum files and load the first.

        The selection is sorted by name; directories and file names are kept
        in parallel lists so next_file() can walk through them.
        """
        # get all file names
        QFileHandle = QtWidgets.QFileDialog()
        QFileHandle.setDirectory(self.current_dir)
        filelist = QFileHandle.getOpenFileNames(self, 'Open Spectra')[0]
        # if list is not empty, proceed
        if filelist:
            # sort file name
            filelist.sort()
            # seperate directory name and file name
            self.list_dir, self.list_file = sflib.separate_dir(filelist)
            # get the first directory and file name
            self.current_dir = self.list_dir[0]
            self.current_file = self.list_file[0]
            self.fit_stat.file_idx = 0
            # update label
            self.label_current_file.setText(self.current_file)
            # launch fit routine
            self.load_data()
        else:
            # dialog cancelled: flag "file not found"
            self.fit_stat.stat = 2
def pass_file(self):
try:
self.list_aborted_file.append('/'.join([self.current_dir, self.current_file]))
except AttributeError:
pass
self.next_file()
    def save_file(self, data_table, popt, uncertainty, ppoly):
        """Ask for output names and save the fitted spectrum and its log.

        Args:
            data_table: fitted spectrum table written to the CSV file.
            popt: optimized fit parameters.
            uncertainty: parameter uncertainties written to the log file.
            ppoly: baseline polynomial coefficients.

        Each save is silently skipped when its dialog is cancelled; the file
        is then recorded in self.list_success_file regardless.
        """
        default_fitname = sflib.out_name_gen(self.current_file) + '.csv'
        default_logname = sflib.out_name_gen(self.current_file) + '.log'
        # getSaveFileName returns (path, selected_filter); empty path = cancel
        fitname = QtWidgets.QFileDialog.getSaveFileName(self,
                'Save Current Fit Spectrum', '/'.join([self.current_dir, default_fitname]))[0]
        if fitname:
            sflib.save_fit(fitname, data_table, popt,
                    self.fit_par.ftype, self.fit_par.der, self.fit_par.peak)
        logname = QtWidgets.QFileDialog.getSaveFileName(self,
                'Save Current Fit Log', '/'.join([self.current_dir, default_logname]))[0]
        if logname:
            sflib.save_log(logname, popt, uncertainty, ppoly,
                    self.fit_par.ftype, self.fit_par.der,
                    self.fit_par.peak, self.fit_par.par_name)
        self.list_success_file.append('/'.join([self.current_dir, self.current_file]))
    def next_file(self):
        """Advance to the next spectrum in the list and restart the fit.

        When the list is exhausted (or was never created) an 'End of File'
        dialog offers to select new files or close the window.
        """
        # refresh current file index, fit status and click counter
        self.fit_stat.file_idx += 1
        self.fit_stat.stat = 0
        self.click_counter = 0
        try:
            self.current_file = self.list_file[self.fit_stat.file_idx]
            self.current_dir = self.list_dir[self.fit_stat.file_idx]
            # update label text
            self.label_current_file.setText(self.current_file)
            # repeat fit routine
            self.load_data()
        except (IndexError, AttributeError):
            # ran past the end of the list (IndexError) or no list was ever
            # loaded (AttributeError)
            eof = QtWidgets.QMessageBox.information(self, 'End of File',
                    'No more files to fit. Do you want to select new files?',
                    QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Close,
                    QtWidgets.QMessageBox.Yes)
            if eof == QtWidgets.QMessageBox.Yes:
                self.open_file()
            else:
                self.close()
def get_file_dir(self):
try:
f = open('.prev_dir.log', 'r')
last_dir = f.readline()
f.close()
return last_dir
except FileNotFoundError:
return self.log_dir
def save_log(self):
log = QtWidgets.QFileDialog()
log.setNameFilter('Log files (*.log)')
logname = log.getSaveFileName(self, 'Save Fit Log',
'/'.join([self.current_dir, 'FitJob.log']))[0]
# if name is not empty
if logname:
pass
else:
logname = '/'.join([self.log_dir, 'FitJob.log'])
with open(logname, 'w') as a_log:
for file_name in self.list_success_file:
a_log.write('Successful --- {0:s}\n'.format(file_name))
for file_name in self.list_aborted_file:
a_log.write('Aborted --- {0:s}\n'.format(file_name))
def save_last_dir(self):
with open('.prev_dir.log', 'w') as a_file:
a_file.write(self.current_dir)
# --------- some useful little tools -----------
def show_boxcar(self, state):
if state == QtCore.Qt.Checked:
self.edit_boxcar.show()
else:
# make sure no boxcar (in case)
self.edit_boxcar.setText('1')
self.edit_boxcar.hide()
def show_rescale(self, state):
if state == QtCore.Qt.Checked:
self.edit_rescale.show()
else:
# no rescale
self.edit_rescale.setText('1')
self.edit_rescale.hide()
    def check_double_validity(self, *args):
        """Validate the sending line-edit as a float and colour its border.

        Green = valid, yellow = empty, red = invalid; the same result is
        mirrored into self.fit_stat.input_valid as 2/1/0 respectively.
        """
        sender = self.sender()
        validator = QtGui.QDoubleValidator()
        # validate() wants a cursor position; 0 is fine since only the
        # returned state (first tuple element) is used
        state = validator.validate(sender.text(), 0)[0]
        if state == QtGui.QValidator.Acceptable and sender.text():
            color = '#00A352' # green
            self.fit_stat.input_valid = 2 # valid entry
        elif not sender.text():
            color = '#FF9933' # yellow
            self.fit_stat.input_valid = 1 # empty entry
        else:
            color = '#D63333' # red
            self.fit_stat.input_valid = 0 # invalid entry
        sender.setStyleSheet('border: 3px solid %s' % color)
    def check_int_validity(self, *args):
        """Validate the sending line-edit as an integer and colour its border.

        Green = valid, yellow = empty, red = invalid; the same result is
        mirrored into self.fit_stat.input_valid as 2/1/0 respectively.
        """
        sender = self.sender()
        validator = QtGui.QIntValidator()
        # validate() wants a cursor position; 0 is fine since only the
        # returned state (first tuple element) is used
        state = validator.validate(sender.text(), 0)[0]
        if state == QtGui.QValidator.Acceptable and sender.text():
            color = '#00A352' # green
            self.fit_stat.input_valid = 2 # valid entry
        elif not sender.text():
            color = '#FF9933' # yellow
            self.fit_stat.input_valid = 1 # empty entry
        else:
            color = '#D63333' # red
            self.fit_stat.input_valid = 0 # invalid entry
        sender.setStyleSheet('border: 3px solid %s' % color)
    def clear_item(self, layout): # clears all elements in the layout
        """Recursively remove and delete every widget and sub-layout held
        by *layout*.  Safe to call with None."""
        if layout is not None:
            while layout.count():
                # takeAt(0) pops items one at a time; the layout reindexes
                # itself, so always take index 0
                item = layout.takeAt(0)
                w = item.widget()
                if w is not None:
                    # schedule widget deletion on the Qt event loop
                    w.deleteLater()
                else:
                    # item is a nested layout -- clear it recursively
                    self.clear_item(item.layout())
    def mpl_key_press(self, event):
        """Forward key presses on the canvas to matplotlib's standard
        toolbar shortcut handler."""
        # matplotlib standard key press event
        key_press_handler(event, self.canvas, self.mpl_toolbar)
    def mpl_click(self, event):
        """Record peak starting guesses from mouse clicks on the canvas.

        Each click (up to self.fit_par.peak of them) fills the centre and
        intensity entry boxes for one peak.  Clicking beyond that offers a
        reset; answering 'No' disables click-picking for this session by
        setting click_counter to -1.
        """
        # if can still pick peak position
        if (self.click_counter < self.fit_par.peak) and (self.click_counter >= 0):
            # update counter
            self.click_counter += 1
            # retrieve coordinate upon mouse click
            mu = event.xdata # peak center
            a = event.ydata*0.1 # peak intensity (scaled-down starting guess)
            # locate parameter index in the parameter list
            mu_idx = self.fit_par.par_per_peak * (self.click_counter-1)
            a_idx = mu_idx + self.fit_par.par_per_peak - 1
            self.edit_par[mu_idx].setText(str(mu))
            self.edit_par[a_idx].setText(str(a))
        elif self.click_counter >= self.fit_par.peak:
            # click number overloads. Ask user to reset
            reset = QtWidgets.QMessageBox.question(self, 'Reset?',
                    'Do you want to reset clicks to override peak selection?',
                    QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
                    QtWidgets.QMessageBox.No)
            if reset == QtWidgets.QMessageBox.Yes:
                self.click_counter = 0
            elif reset == QtWidgets.QMessageBox.No:
                self.click_counter = -1 # no longer bother in this session
        else:
            event.ignore()
    def closeEvent(self, event): # exit warning
        """Confirm before closing; on confirmation persist the fit-job log
        and the last-used directory, then accept the close event."""
        quit_confirm = QtWidgets.QMessageBox.question(self, 'Quit?',
                'Are you sure to quit?', QtWidgets.QMessageBox.Yes |
                QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.Yes)
        if quit_confirm == QtWidgets.QMessageBox.Yes:
            #try:
            # save fit job log file
            self.save_log()
            self.save_last_dir()
            #except:
            #    pass
            event.accept()
        else:
            event.ignore()
def keyPressEvent(self, event): # press ESC to exit
if event.key() == QtCore.Qt.Key_Escape:
self.close()
# ------ run script ------
if __name__ == '__main__':
    # forward the command-line arguments to Qt (Qt consumes the ones it knows)
    args = sys.argv
    # get around the gtk error on linux systems (sacrifice the gui appearance)
    #if sys.platform == 'linux':
    #    args.append('-style')
    #    args.append('Cleanlooks')
    app = QtWidgets.QApplication(args)
    # keep a reference to the main window so it is not garbage collected
    launch = FitMainGui()
    # hand control to the Qt event loop; exit with its return code
    sys.exit(app.exec_())
| gpl-3.0 |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_cont_supersolar_5/MoreLines.py | 3 | 7227 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
# Locate the Cloudy output files in the working directory.  A single pass
# over the listing replaces the two separate os.listdir() loops; the *last*
# matching file still wins, exactly as before.  The loop variable is also
# renamed so it no longer shadows the Python 2 built-in `file`.
for fname in os.listdir('.'):
    if fname.endswith(".grd"):
        inputfile = fname       # grid file (phi / hdens model parameters)
    if fname.endswith(".txt"):
        inputfile2 = fname      # emission-line intensity file
# ------------------------------------------------------------------------------------------------------
#Patches data
# Polygons outlining regions from the literature, overplotted for comparison.
# The (0., 0.) vertex is required by CLOSEPOLY but its value is ignored.
#for the Kewley and Levesque data
verts = [
    (1., 7.97712125471966000000), # left, bottom
    (1., 9.57712125471966000000), # left, top
    (2., 10.57712125471970000000), # right, top
    (2., 8.97712125471966000000), # right, bottom
    (0., 0.), # ignored
    ]
codes = [Path.MOVETO,
         Path.LINETO,
         Path.LINETO,
         Path.LINETO,
         Path.CLOSEPOLY,
         ]
# NOTE: the original rebuilt `path = Path(verts, codes)` three times with
# identical arguments; it is now built exactly once.
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
    (2.4, 9.243038049), # left, bottom
    (2.4, 11.0211893), # left, top
    (2.6, 11.0211893), # right, top
    (2.6, 9.243038049), # right, bottom
    (0, 0.), # ignored
    ]
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
    (1., 6.86712125471966000000), # left, bottom
    (1., 10.18712125471970000000), # left, top
    (3., 12.18712125471970000000), # right, top
    (3., 8.86712125471966000000), # right, bottom
    (0., 0.), # ignored
    ]
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
    """Overlay the three literature comparison regions onto *ax*.

    Bug fix: the patches were previously added to the module-level ``ax1``
    no matter which axes was passed in; they are now added to the ``ax``
    argument.  Existing callers pass ``ax1``, so behaviour is unchanged
    for them.
    """
    patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)  # Moy et al.
    patch2 = patches.PathPatch(path2, facecolor='green', lw=0)   # Kewley 01
    patch = patches.PathPatch(path, facecolor='red', lw=0)       # Kewley & Levesque
    ax.add_patch(patch3)
    ax.add_patch(patch2)
    ax.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
    """Draw contour panel *sub_num* of the 4x4 emission-line figure.

    Interpolates the line ratio for line[sub_num-1] onto the (xi, yi) grid
    with a radial basis function, draws dashed cyan and solid black contour
    sets, marks the maximum with a star, and hides tick labels on interior
    panels so the shared axes read cleanly.  Relies on the module-level
    x, y, z, xi, yi, levels, levels2, line, max_values and headers.
    """
    numplots = 16
    plt.subplot(numplots/4.,4,sub_num)
    rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
    zi = rbf(xi, yi)
    contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
    contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
    # star at the (hdens, phi) model with the largest ratio for this line
    plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
    plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
    plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
    if sub_num == numplots / 2.:
        print "half the plots are complete"
    #axis limits
    yt_min = 8
    yt_max = 23
    xt_min = 0
    xt_max = 12
    plt.ylim(yt_min,yt_max)
    plt.xlim(xt_min,xt_max)
    plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
    plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
    # interior columns: hide y tick labels; left column gets the y label
    if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
        plt.tick_params(labelleft = 'off')
    else:
        plt.tick_params(labelleft = 'on')
        plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
    # all but the bottom row: hide x tick labels; bottom row gets the x label
    if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
        plt.tick_params(labelbottom = 'off')
    else:
        plt.tick_params(labelbottom = 'on')
        plt.xlabel('Log($n _{\mathrm{H}} $)')
    # corner panels use slightly shifted tick ranges so labels do not collide
    if sub_num == 1:
        plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
    if sub_num == 13:
        plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
        plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
    if sub_num == 16 :
        plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
# columns 6 and 7 of the .grd file hold the model parameters used as plot
# axes (named phi and hdens by this script); rows are aligned with the
# emission-line file
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
# concatenated_data: log10 scaled line ratios, one column per line;
# max_values rows: (rounded max ratio, argmax row, hdens, phi)
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
# column 57 holds the 4860 A reference line used for normalisation below
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        # log10 of the line intensity scaled by the 4860 A reference column
        log_ratio = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        if log_ratio > 0:
            concatenated_data[i,j] = log_ratio
        else:
            # bug fix: this branch previously read `concatenated_data[i,j] == 0`,
            # a no-op comparison -- the assignment was clearly intended.  The
            # array is zero-initialised, so numerical results are unchanged.
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
# column indices (into headers / concatenated_data) of the 16 lines that
# appear in this figure
line = [96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111]
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
# 10x10 regular grid spanning the model (hdens, phi) ranges
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("More Lines", fontsize=14)
# ---------------------------------------------------
# NOTE(review): add_sub_plot is called with 0..15 but builds subplot
# indices internally -- confirm plt.subplot accepts sub_num == 0 here
# (subplot numbering is 1-based in matplotlib).
for i in range(16):
    add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('MoreLines1.pdf')
plt.clf()
| gpl-2.0 |
elijah513/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features)  # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest (the rest stay exactly zero)
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
    w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
# compute_score=True keeps the marginal log-likelihood at each iteration
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
# Histogram of the estimated weight values, relevant features marked in red
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
         'ro', label="Relevant features")
# NOTE(review): these axis labels look swapped for a histogram (y is a
# count, x is the weight value) -- confirm against the upstream example.
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
# Convergence of the iterative evidence maximization
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
toastedcornflakes/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
# Load all ten digit classes; rows of X are the flattened 8x8 images
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
# fix the RNG so the random image shifts below are reproducible
np.random.seed(0)
def nudge_images(X, y):
    """Double the dataset by appending a randomly shifted copy of each image.

    Having a larger dataset shows more clearly the behavior of the
    methods, but we multiply the size of the dataset only by 2, as the
    cost of the hierarchical clustering methods are strongly
    super-linear in n_samples.
    """
    def random_shift(flat_image):
        # displace the 8x8 image by a small random offset, then flatten back
        offset = .3 * np.random.normal(size=2)
        shifted = ndimage.shift(flat_image.reshape((8, 8)), offset,
                                mode='constant')
        return shifted.ravel()

    augmented_X = np.concatenate([X, np.apply_along_axis(random_shift, 1, X)])
    augmented_y = np.concatenate([y, y], axis=0)
    return augmented_X, augmented_y
# Double the dataset in place with jittered copies (see nudge_images above)
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
    """Scatter the 2D embedding, drawing each point as its digit character
    coloured by its cluster label.

    Args:
        X_red: (n_samples, 2) embedding coordinates.
        X: original data (unused here; kept for interface compatibility).
        labels: integer cluster assignment per sample.
        title: optional figure title.
    """
    x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
    X_red = (X_red - x_min) / (x_max - x_min)   # rescale to the unit square
    plt.figure(figsize=(6, 4))
    for i in range(X_red.shape[0]):
        plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
                 # fix: the 'spectral' colormap alias was removed in
                 # matplotlib 3.2; its registered name is nipy_spectral and
                 # the colours are identical.
                 color=plt.cm.nipy_spectral(labels[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})
    plt.xticks([])
    plt.yticks([])
    if title is not None:
        plt.title(title, size=17)
    plt.axis('off')
    plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
# Run and time each linkage strategy on the 2D embedding, plotting each
for linkage in ('ward', 'average', 'complete'):
    clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
    t0 = time()
    clustering.fit(X_red)
    print("%s : %.2fs" % (linkage, time() - t0))
    plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
PythonCharmers/bokeh | bokeh/charts/_data_adapter.py | 43 | 8802 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the ChartObject class, a minimal prototype class to build more chart
types on top of it. It provides the mechanisms to support the shared chained
methods.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
from collections import OrderedDict
from ..properties import bokeh_integer_types, Datetime
try:
import numpy as np
except ImportError:
np = None
try:
import pandas as pd
except ImportError:
pd = None
try:
import blaze
except ImportError:
blaze=None
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
# Single-character aliases used to label rows when the input carries no index
# of its own.  NOTE(review): the alphabet string skips 'w', 'x' and 'y'
# ("...uvz") -- confirm whether that is intentional upstream.
DEFAULT_INDEX_ALIASES = list('abcdefghijklmnopqrstuvz1234567890')
# extend with doubled aliases as ('a', 'a'), ('b', 'b'), ... tuples
DEFAULT_INDEX_ALIASES += list(zip(DEFAULT_INDEX_ALIASES, DEFAULT_INDEX_ALIASES))
class DataAdapter(object):
    """
    Adapter object used to normalize Charts inputs to a common interface.
    Supported inputs are dict, list, tuple, np.ndarray and pd.DataFrame.
    """
    def __init__(self, data, index=None, columns=None, force_alias=True):
        """Wrap *data*, deriving column aliases and a row index as needed.

        Args:
            data: dict / OrderedDict / list / tuple / ndarray / DataFrame.
            index: explicit row index (aliases) to use, if any.
            columns: explicit column labels; synthesized when None and
                force_alias is True.
            force_alias: when True, fall back to default column/index
                aliases for inputs that do not carry their own.
        """
        self.__values = data
        self._values = self.validate_values(data)
        self.convert_index_to_int = False
        self._columns_map = {}
        self.convert_items_to_dict = False
        if columns is None and force_alias:
            # no column 'labels' defined for data... in this case we use
            # default names
            keys = getattr(self._values, 'keys', None)
            if callable(keys):
                columns = list(keys())
            elif keys is None:
                # positional container: use stringified positions
                columns = list(map(str, range(len(data))))
            else:
                columns = list(keys)
        if columns:
            self._columns = columns
            # define a mapping between the real keys to access data and the aliases
            # we have defined using 'columns'
            self._columns_map = dict(zip(columns, self.keys()))
        if index is not None:
            self._index = index
            self.convert_items_to_dict = True
        elif force_alias:
            _index = getattr(self._values, 'index', None)
            # check because if it is a callable self._values is not a
            # dataframe (probably a list)
            if _index is None:
                indexes = self.index
                if isinstance(indexes[0], int):
                    self._index = DEFAULT_INDEX_ALIASES[:][:len(self.values()[0])]
                    self.convert_items_to_dict = True
            elif not callable(_index):
                self._index = list(_index)
                self.convert_items_to_dict = True
            else:
                self._index = DEFAULT_INDEX_ALIASES[:][:len(self.values()[0])]
                self.convert_items_to_dict = True

    @staticmethod
    def is_number(value):
        """Return True when *value* is a float or Bokeh-recognized integer."""
        numbers = (float, ) + bokeh_integer_types
        return isinstance(value, numbers)

    @staticmethod
    def is_datetime(value):
        """Return True when *value* is accepted by the Datetime property."""
        try:
            dt = Datetime(value)
            dt # shut up pyflakes
            return True
        except ValueError:
            return False

    @staticmethod
    def validate_values(values):
        """Coerce *values* into a supported container or raise TypeError."""
        if np and isinstance(values, np.ndarray):
            if len(values.shape) == 1:
                # promote a 1-D array to a single-row 2-D array
                return np.array([values])
            else:
                return values
        elif pd and isinstance(values, pd.DataFrame):
            return values
        elif isinstance(values, (dict, OrderedDict)):
            if all(DataAdapter.is_number(x) for x in values.values()):
                return values
            return values
        elif isinstance(values, (list, tuple)):
            if all(DataAdapter.is_number(x) for x in values):
                # a flat sequence of numbers is treated as a single series
                return [values]
            return values
        elif hasattr(values, '__array__'):
            values = pd.DataFrame(np.asarray(values))
            return values
        # TODO: Improve this error message..
        raise TypeError("Input type not supported! %s" % values)

    def index_converter(self, x):
        """Map a column alias back to the underlying key (as int for
        positional containers)."""
        key = self._columns_map.get(x, x)
        if self.convert_index_to_int:
            key = int(key)
        return key

    def keys(self):
        """Return the real keys of the underlying container (stringified
        positions for list-like inputs)."""
        # assuming it's a dict or dataframe
        keys = getattr(self._values, "keys", None)
        if callable(keys):
            return list(keys())
        elif keys is None:
            # positional container: remember that string aliases must be
            # converted back to integer positions on access
            self.convert_index_to_int = True
            indexes = range(len(self._values))
            return list(map(str, indexes))
        else:
            return list(keys)

    def __len__(self):
        return len(self.values())

    def __iter__(self):
        # iterating a DataAdapter yields its column keys, dict-style
        for k in self.keys():
            yield k

    def __getitem__(self, key):
        val = self._values[self.index_converter(key)]
        # if we have "index aliases" we need to remap the values...
        if self.convert_items_to_dict:
            val = dict(zip(self._index, val))
        return val

    def values(self):
        """Return the data series of the underlying container."""
        return self.normalize_values(self._values)

    @staticmethod
    def normalize_values(values):
        """Extract raw series from dicts / DataFrames / plain sequences."""
        _values = getattr(values, "values", None)
        if callable(_values):
            return list(_values())
        elif _values is None:
            return values
        else:
            # assuming it's a dataframe, in that case it returns transposed
            # values compared to it's dict equivalent..
            return list(_values.T)

    def items(self):
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        return iter(self)

    def itervalues(self):
        for k in self:
            yield self[k]

    def iteritems(self):
        for k in self:
            yield (k, self[k])

    @property
    def columns(self):
        """Column aliases; falls back to the real keys when none were set."""
        try:
            return self._columns
        except AttributeError:
            return list(self.keys())

    @property
    def index(self):
        """Row index aliases, computed lazily from the data when absent."""
        try:
            return self._index
        except AttributeError:
            index = getattr(self._values, "index", None)
            if not callable(index) and index is not None:
                # guess it's a pandas dataframe..
                return index
            # no, it's not. So it's probably a list so let's get the
            # values and check
            values = self.values()
            if isinstance(values, dict):
                return list(values.keys())
            else:
                first_el = self.values()[0]
                if isinstance(first_el, dict):
                    indexes = list(first_el.keys())
                else:
                    indexes = range(0, len(self.values()[0]))
                self._index = indexes
                return indexes

    #-----------------------------------------------------------------------------
    # Convenience methods
    #-----------------------------------------------------------------------------
    @staticmethod
    def get_index_and_data(values, index=None):
        """Parse values (that must be one of the DataAdapter supported
        input types) and create an separate/create index and data
        depending on values type and index.
        Args:
            values (iterable): container that holds data to be plotted using
                on the Chart classes
        Returns:
            A tuple of (index, values), where: ``index`` is an iterable that
            represents the data index and ``values`` is an iterable containing
            the values to be plotted.
        """
        _values = DataAdapter(values, force_alias=False)
        if hasattr(values, 'keys'):
            # mapping-like input: prefer an explicit index, then the
            # adapter's computed index, then the raw object's own index
            if index is not None:
                if isinstance(index, string_types):
                    xs = _values[index]
                else:
                    xs = index
            else:
                try:
                    xs = _values.index
                except AttributeError:
                    xs = values.index
        else:
            if index is None:
                xs = _values.index
            elif isinstance(index, string_types):
                # a string index names the column holding the x values
                xs = _values[index]
            else:
                xs = index
        return xs, _values
| bsd-3-clause |
adrienpacifico/openfisca-france-data | openfisca_france_data/input_data_builders/build_eipp_survey_data/eipp_utils.py | 3 | 8464 | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from numpy import array
from pandas import ExcelFile
# Path of the IPP <-> OpenFisca correspondence workbook, resolved relative
# to this module so it works regardless of the working directory.
current_dir = os.path.dirname(os.path.realpath(__file__))
variables_corresp = os.path.join(current_dir, 'correspondances_eipp_OF.xlsx')
print variables_corresp
def build_ipp2of_variables():
    '''
    Build the dictionaries whose keys are the names of the IPP (TAXIPP)
    variables and whose values are the matching OpenFisca variable names.

    Only rows whose 'equivalence' code is 1, 5 or 8 in the Excel
    correspondence sheets are kept.

    Returns:
        tuple: (ipp2of_input_variables, ipp2of_output_variables)
    '''
    def _dic_corresp(onglet):
        # 'onglet' is the worksheet name ('input' or 'output')
        names = ExcelFile(variables_corresp).parse(onglet)
        return dict(array(names.loc[names['equivalence'].isin([1, 5, 8]), ['Var_TAXIPP', 'Var_OF']]))
    ipp2of_input_variables = _dic_corresp('input')
    ipp2of_output_variables = _dic_corresp('output')
    return ipp2of_input_variables, ipp2of_output_variables
def build_input_OF(data, ipp2of_input_variables, tax_benefit_system):
    """Convert a TAXIPP/EIPP dataframe into OpenFisca input format.

    Renames columns via *ipp2of_input_variables*, builds the entity role
    columns (quifoy/quimen/quifam), rebases the entity ids to start at 0,
    derives the housing-occupation status ('so'), workstate, PPE and
    family-benefit helper variables.  The final column drop against
    *tax_benefit_system* is currently commented out.

    Args:
        data: pandas DataFrame carrying TAXIPP variable names.
        ipp2of_input_variables: dict mapping TAXIPP names to OpenFisca names.
        tax_benefit_system: OpenFisca tax-benefit system (its column_by_name
            registry identifies the known input variables).

    Returns:
        The transformed DataFrame.
    """
    print 'On commence buid_input'
    def _qui(data, entity):
        # Role within the entity: 0 = declarant, 1 = partner, 2 = others.
        qui = "qui" + entity
        print 'qui', qui
        #idi = "id" + entity
        data[qui] = 2 # TODO: number the tax unit's dependants / family children / other household members incrementally
        data.loc[data['decl'] == 1, qui] = 0
        data.loc[data['conj'] == 1, qui] = 1
        if entity == "men":
            # cohabiting partners also get role 1 at the household level
            data.loc[data['concu'] == 1, qui] = 1
        #j = 2
        # while any(data.duplicated([qui, idi])):
        #     data.loc[data.duplicated([qui, idi]), qui] = j + 1
        #     j += 1
        return data[qui]
    def _so(data): # TODO: use the EE variable directly instead of going through TAXIPP
        # Housing occupation status: 1 = owner with a loan, 2 = outright
        # owner, 4 = tenant, 6 = housed for free.
        data["so"] = 0
        data.loc[data['proprio_empr'] == 1, 'so'] = 1
        data.loc[data['proprio'] == 1, 'so'] = 2
        data.loc[data['locat'] == 1, 'so'] = 4
        data.loc[data['loge'] == 1, 'so'] = 6
        return data['so']
    def _compl(var):
        # complement of a 0/1 indicator, returned as int
        var = 1 - var
        var = var.astype(int)
        return var
    # TODO: redo this function with the new propagation of variables between the members of an entity
    # (cf. work at Etalab)
    # def _count_by_entity(data, var, entity, bornes):
    #     ''' Compte le nombre de 'var compris entre les 'bornes' au sein de l''entity' '''
    #     id = 'id' + entity
    #     qui = 'qui' + entity
    #     data.index = data[id]
    #     cond = (bornes[0] <= data[var])*(data[var] <= bornes[1])*(data[qui] > 1)
    #     print cond
    #     col = DataFrame(data.loc[cond, :].groupby(id).size(), index = data.index).fillna(0)
    #     col.reset_index()
    #     return col
    #
    # def _count_enf(data):
    #     data["f7ea"] = _count_by_entity(data, 'age', 'foy', [11, 14]) # nb enfants ff au collège (11-14)
    #     data["f7ec"] = _count_by_entity(data, 'age', 'foy', [15, 17]) # #nb enfants ff au lycée 15-17
    #     data["f7ef"] = _count_by_entity(data, 'age', 'foy', [18, 99]) # nb enfants ff enseignement sup > 17
    #     data = data.drop(["nenf1113", "nenf1415", "nenf1617", "nenfmaj1819", "nenfmaj20", "nenfmaj21plus", "nenfnaiss",
    #         "nenf02", "nenf35", "nenf610"], axis = 1)
    #     data.index = range(len(data))
    #     return data
    def _workstate(data):
        # Derive employment-status columns expected by OpenFisca.
        # TODO: titc should be filled in to deal with civil servant
        data['chpub'] = 0
        data.loc[data['public'] == 1, 'chpub'] = 1
        data.loc[data['public'] == 0, 'chpub'] = 6
        # Activité : [0'Actif occupé', 1'Chômeur', 2'Étudiant, élève', 3'Retraité', 4'Autre inactif']), default = 4)
        # act5 : [0"Salarié",1"Indépendant",2"Chômeur",3"Retraité",4"Inactif"] => pas utilisé ?
        data.loc[data['fi'] == 1, 'activite'] = 0
        data.loc[data['fi'] == 2, 'activite'] = 1
        data.loc[data['fi'] == 3, 'activite'] = 2
        data.loc[data['fi'] == 4, 'activite'] = 0 # TODO: the military
        data.loc[data['fi'] == 5, 'activite'] = 3 # TODO: cf. IPP's EE homogenisation handling (retirees seem to be included in the inactive category)
        data.loc[data['fi'] == 6, 'activite'] = 4
        data['statut'] = 8
        data.loc[data['public'] == 1, 'statut'] = 11
        # [0"Non renseigné/non pertinent",1"Exonéré",2"Taux réduit",3"Taux plein"]
        data['taux_csg_remplacement'] = 3
        data.loc[data['csg_exo'] == 1, 'taux_csg_remplacement'] = 1
        data.loc[data['csg_part'] == 1, 'taux_csg_remplacement'] = 2
        data = data.drop(['csg_tout', 'csg_exo', 'csg_part'], axis = 1)
        # data['ebic_impv'] = 20000
        data['exposition_accident'] = 0
        return data
    data['nbj'] = 0 # TODO: add the number of unmarried adult children without children
    data['nbh'] = 4 # TODO: add the number of dependent children in alternating residence, unmarried, under 18 on Jan 1 of year n-1, born in n-1, or disabled whatever their age
    data['stat_prof'] = 0 # TODO: add the professional status to eipp
    def _var_to_ppe(data):
        # Earned-income tax-credit (PPE) work-duration variables: hours for
        # employees (stat_prof == 0), days for the self-employed (== 1),
        # plus full-time indicator flags.
        data['ppe_du_sa'] = 0
        data.loc[data['stat_prof'] == 0, 'ppe_du_sa'] = data.loc[data['stat_prof'] == 0, 'nbh']
        data['ppe_du_ns'] = 0
        data.loc[data['stat_prof'] == 1, 'ppe_du_ns'] = data.loc[data['stat_prof'] == 1, 'nbj']
        data['ppe_tp_sa'] = 0
        data.loc[(data['stat_prof'] == 0) & (data['nbh'] >= 151.67 * 12), 'ppe_tp_sa'] = 1
        data['ppe_tp_ns'] = 0
        data.loc[(data['stat_prof'] == 1) & (data['nbj'] >= 360), 'ppe_tp_ns'] = 1
        return data
    data['inactif'] = 0 #TODO:
    def _var_to_pfam(data):
        # Family-benefit helper flags: inactivity and part-time brackets
        # derived from monthly hours (nbh / 12).
        data.loc[(data['activite'].isin([3, 4, 5, 6])), 'inactif'] = 1
        data.loc[(data['activite'] == 1) & (data['chom_irpp'] == 0), 'inactif'] = 1
        data.loc[(data['activite'] == 0) & (data['sal_irpp'] == 0), 'inactif'] = 1
        data['partiel1'] = 0
        data.loc[(data['nbh'] / 12 <= 77) & (data['nbh'] / 12 > 0), 'partiel1'] = 1
        data['partiel2'] = 0
        data.loc[(data['nbh'] / 12 <= 151) & (data['nbh'] / 12 > 77), 'partiel2'] = 1
        return data
    print 'avant', list(data.columns.values)
    # switch from TAXIPP to OpenFisca column names
    data.rename(columns = ipp2of_input_variables, inplace = True)
    print 'après', list(data.columns.values)
    print 'On a fait rename'
    data['quifoy'] = _qui(data, 'foy')
    print 'test'
    # rebase entity ids so they start at 0
    min_idfoy = data["idfoy"].min()
    if min_idfoy > 0:
        data["idfoy"] -= min_idfoy
    data['quimen'] = _qui(data, 'men')
    min_idmen = data["idmen"].min()
    if min_idmen > 0:
        data["idmen"] -= min_idmen
    min_idfam = data["idfam"].min()
    if min_idfam > 0:
        data["idfam"] -= min_idfam
    data["idfam"] = data["idmen"] # TODO: add families to TAXIPP
    data["quifam"] = data['quimen']
    # print data[['idfoy','idmen', 'quimen','quifoy', 'decl', 'conj', 'con2']].to_string()
    data['so'] = _so(data)
    # data = _count_enf(data)
    data = _workstate(data)
    #data["caseN"] = _compl(data["caseN"]) #TODO: write a function to handle the imputation of this checkbox
    data = _var_to_ppe(data)
    data = _var_to_pfam(data)
    data['invalide'] = 0
    print 'On va dropper'
    # columns unknown to the tax-benefit system (drop is commented out below)
    variables_to_drop = [
        variable
        for variable in data.columns
        if variable not in tax_benefit_system.column_by_name
        ]
    print 'data.columns', data.columns
    print 'tax_benefit_system.column_by_name', tax_benefit_system.column_by_name
    print 'variables_to_drop', variables_to_drop
    #print data.iloc[44:]
    # data.drop(variables_to_drop, axis = 1, inplace = True)
    print 'youpi on a droppé'
    # data.rename(columns = {"id_conj" : "conj"}, inplace = True)
    data['age_en_mois'] = data['age'] * 12
    return data
| agpl-3.0 |
shusenl/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts it number of state automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are less examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components: one sheared Gaussian (via C) and
# one isotropic Gaussian shifted to (-6, 3).
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
# NOTE(review): mixture.GMM / mixture.DPGMM are the pre-0.18 scikit-learn
# APIs (replaced by GaussianMixture / BayesianGaussianMixture) — this
# example only runs on old scikit-learn versions; confirm before reuse.
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
# One subplot per model; plot points coloured by predicted component plus a
# covariance ellipse for every component that actually claimed points.
for i, (clf, title) in enumerate([(gmm, 'GMM'),
                                  (dpgmm, 'Dirichlet Process GMM')]):
    splot = plt.subplot(2, 1, 1 + i)
    Y_ = clf.predict(X)
    # NOTE: the inner loop reuses the name `i`, shadowing the subplot index.
    for i, (mean, covar, color) in enumerate(zip(
            clf.means_, clf._get_covars(), color_iter)):
        v, w = linalg.eigh(covar)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180 * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)
    plt.xlim(-10, 10)
    plt.ylim(-3, 6)
    plt.xticks(())
    plt.yticks(())
    plt.title(title)
plt.show()
| bsd-3-clause |
MariaRigaki/kaggle | africa/predict.py | 1 | 1802 | __author__ = 'marik0'
#!/usr/bin/env python
# coding: utf-8
"""
prediction code for regression
"""
import sys
import numpy as np
import pandas as pd
from pylearn2.utils import serial
from theano import tensor as T
from theano import function
# NOTE: Python 2 module (print statements, `except Exception, e` syntax).
if __name__ == "__main__":
    # CLI: model pickle, test CSV, output CSV.
    try:
        model_path = sys.argv[1]
        test_path = sys.argv[2]
        out_path = sys.argv[3]
    except IndexError:
        print "Usage: predict.py <model file> <test file> <output file>"
        print "       predict.py saved_model.pkl test_x.csv predictions.csv\n"
        quit(-1)
    print "loading model..."
    try:
        model = serial.load(model_path)
    except Exception, e:
        print "error loading {}:".format(model_path)
        print e
        quit(-1)
    print "setting up symbolic expressions..."
    # Compile a Theano function mapping the model's input batch to fprop output.
    X = model.get_input_space().make_theano_batch()
    Y = model.fprop(X)
    # Get both the probability and the class
    #Y = T.max_and_argmax(Y, axis=1)
    f = function([X], Y)
    print "loading data and predicting..."
    # Use pandas to read the CSV
    df = pd.read_csv(test_path)
    df.drop('PIDN', axis=1, inplace=True)
    # Spectral columns excluded from the features — presumably the CO2
    # absorption bands the competition recommends dropping; confirm.
    co2_bands = ['m2379.76', 'm2377.83', 'm2375.9', 'm2373.97', 'm2372.04', 'm2370.11',
                 'm2368.18', 'm2366.26', 'm2364.33', 'm2362.4', 'm2360.47', 'm2358.54',
                 'm2356.61', 'm2354.68', 'm2352.76']
    df.drop(co2_bands, axis=1, inplace=True)
    # Keep the first 3578 columns and feed them to the network as float32.
    x = np.array(df)[:, :3578]
    y = f(x.astype(dtype=np.float32))
    # Make sure we get the correct number of outputs
    print len(y)
    print "writing predictions..."
    # Write predictions into the sample-submission template (Sand target only;
    # the other four targets are left commented out).
    res_df = pd.read_csv('data/sample_submission.csv')
    #res_df['Ca'] = y
    #res_df['P'] = y
    #res_df['pH'] = y
    #res_df['SOC'] = y
    res_df['Sand'] = y
    res_df.to_csv(out_path, index=False)
| mit |
yutiansut/QUANTAXIS | QUANTAXIS/QAApplication/OldBacktest.py | 2 | 3257 | # @Hakase
import QUANTAXIS as QA
import numpy as np
import pandas as pd
import datetime
import sys
import random
class backtest():
    """Models a futures backtest scenario driven by QUANTAXIS market data.

    Subclasses implement the ``before_backtest``, ``model``, ``load_strategy``
    and ``run`` hooks; ``main`` then replays minute bars through ``run``.
    """

    def __init__(self, start_time='2015-01-01', end_time='2018-09-24', init_cash=500000, code='RBL8', frequence=QA.FREQUENCE.FIFTEEN_MIN):
        self.start_time = start_time
        self.end_time = end_time
        self.frequence = frequence
        self.code = code
        # BUGFIX: was `self.init_cach` (typo); main() reads `self.init_cash`,
        # which previously raised AttributeError.
        self.init_cash = init_cash
        self.time_ = None
        self.market_data_ = None
        self.res = False

    @property
    def position(self):
        """Currently sellable position for the traded code (0 if none)."""
        return self.account.sell_available.get(self.code, 0)

    @property
    def time(self):
        """Timestamp of the bar currently being replayed."""
        return self.time_

    @property
    def market_data(self):
        """Market data (one bar) currently being replayed."""
        return self.market_data_

    # User-defined helpers -----------------------------------------------------

    @property
    def hold_judge(self):
        """Position guard: False once cash falls below 30% of initial cash."""
        if self.account.cash / self.account.init_cash < 0.3:
            return False
        else:
            return True

    def before_backtest(self):
        """Hook run before data loading; subclasses must override."""
        raise NotImplementedError

    def before(self, *args, **kwargs):
        """Run the user hook, then fetch market data and create the broker."""
        self.before_backtest()
        self.data_min = QA.QA_fetch_future_min_adv(
            self.code, self.start_time, self.end_time, frequence=self.frequence)
        self.data_day = QA.QA_fetch_future_day_adv(
            self.code, self.start_time, self.end_time)
        self.Broker = QA.QA_BacktestBroker()

    def model(self, *arg, **kwargs):
        """Strategy model hook; subclasses must override."""
        raise NotImplementedError

    def load_strategy(self, *arg, **kwargs):
        """Strategy loading hook; subclasses must override."""
        # self.load_model(func1)
        raise NotImplementedError

    def run(self, *arg, **kwargs):
        """Per-bar strategy logic; subclasses must override."""
        raise NotImplementedError

    def buy(self, pos, towards):
        """Record a buy deal at the current bar's open price."""
        self.account.receive_simpledeal(code=self.code,
                                        trade_price=self.market_data.open, trade_amount=pos,
                                        trade_towards=towards, trade_time=self.time,
                                        message=towards)

    def sell(self, pos, towards):
        """Record a sell deal at the current bar's open price."""
        self.account.receive_simpledeal(code=self.code,
                                        trade_price=self.market_data.open, trade_amount=pos,
                                        trade_towards=towards, trade_time=self.time,
                                        message=towards)

    def main(self, *arg, **kwargs):
        """Create the backtest account and replay every bar through run()."""
        print(vars(self))
        self.identity_code = '_'.join([str(x) for x in list(kwargs.values())])
        self.backtest_cookie = 'future_{}_{}'.format(
            datetime.datetime.now().time().__str__()[:8], self.identity_code)
        self.account = QA.QA_Account(allow_sellopen=True, allow_t0=True, account_cookie=self.backtest_cookie,
                                     market_type=QA.MARKET_TYPE.FUTURE_CN, frequence=self.frequence, init_cash=self.init_cash)
        self.gen = self.data_min.reindex(
            self.res) if self.res else self.data_min
        # BUGFIX: iterrows is a method — it must be called; iterating the
        # bound method object raised TypeError.
        for ind, item in self.gen.iterrows():
            self.time_ = ind[0]     # (datetime, code) MultiIndex
            self.code = ind[1]
            self.market_data_ = item
            self.run()
| mit |
cl4rke/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"   # one colour per iris class (blue/red/yellow)
plot_step = 0.02      # grid resolution for the decision-surface mesh
# Load data
iris = load_iris()
# One subplot per pair of the four iris features (6 pairs in a 2x3 grid).
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
                                [1, 2], [1, 3], [2, 3]]):
    # We only take the two corresponding features
    X = iris.data[:, pair]
    y = iris.target
    # Shuffle (fixed seed so every pair uses the same permutation)
    idx = np.arange(X.shape[0])
    np.random.seed(13)
    np.random.shuffle(idx)
    X = X[idx]
    y = y[idx]
    # Standardize
    mean = X.mean(axis=0)
    std = X.std(axis=0)
    X = (X - mean) / std
    # Train
    clf = DecisionTreeClassifier().fit(X, y)
    # Plot the decision boundary by classifying every point of a dense grid
    plt.subplot(2, 3, pairidx + 1)
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                         np.arange(y_min, y_max, plot_step))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.xlabel(iris.feature_names[pair[0]])
    plt.ylabel(iris.feature_names[pair[1]])
    plt.axis("tight")
    # Plot the training points
    for i, color in zip(range(n_classes), plot_colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
                    cmap=plt.cm.Paired)
    plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
allisony/aplpy | aplpy/labels.py | 2 | 16255 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
import warnings
import numpy as np
import matplotlib.pyplot as mpl
from matplotlib.font_manager import FontProperties
from . import wcs_util
from . import angle_util as au
from .decorators import auto_refresh, fixdocstring
class TickLabels(object):
    """Manage the tick labels on both axes of an APLpy figure.

    Wraps the figure's two matplotlib axes (``_ax1``: bottom/left,
    ``_ax2``: top/right), installing :class:`WCSFormatter` instances to
    render world coordinates, and exposing format, font, visibility and
    position controls for the labels.
    """
    def __init__(self, parent):
        # Store references to axes
        self._ax1 = parent._ax1
        self._ax2 = parent._ax2
        self._wcs = parent._wcs
        self._figure = parent._figure
        # Save plotting parameters (required for @auto_refresh)
        self._parameters = parent._parameters
        # Set font
        self._label_fontproperties = FontProperties()
        self.set_style('plain')
        system, equinox, units = wcs_util.system(self._wcs)
        # Set default label format: sexagesimal for equatorial coordinates
        # (hours for longitude/RA, degrees for latitude/Dec), decimal degrees
        # for other celestial systems, '%g' for scalar axes.
        if self._wcs.xaxis_coord_type in ['longitude', 'latitude']:
            if system['name'] == 'equatorial':
                if self._wcs.xaxis_coord_type == 'longitude':
                    self.set_xformat("hh:mm:ss.ss")
                else:
                    self.set_xformat("dd:mm:ss.s")
            else:
                self.set_xformat("ddd.dddd")
        else:
            self.set_xformat('%g')
        if self._wcs.yaxis_coord_type in ['longitude', 'latitude']:
            if system['name'] == 'equatorial':
                if self._wcs.yaxis_coord_type == 'longitude':
                    self.set_yformat("hh:mm:ss.ss")
                else:
                    self.set_yformat("dd:mm:ss.s")
            else:
                self.set_yformat("ddd.dddd")
        else:
            self.set_yformat('%g')
        # Set major tick formatters: world-coordinate labels on ax1,
        # no labels on ax2 by default (see set_xposition/set_yposition).
        fx1 = WCSFormatter(wcs=self._wcs, coord='x')
        fy1 = WCSFormatter(wcs=self._wcs, coord='y')
        self._ax1.xaxis.set_major_formatter(fx1)
        self._ax1.yaxis.set_major_formatter(fy1)
        fx2 = mpl.NullFormatter()
        fy2 = mpl.NullFormatter()
        self._ax2.xaxis.set_major_formatter(fx2)
        self._ax2.yaxis.set_major_formatter(fy2)
        # Cursor display: start in world-coordinate readout mode; the 'c'
        # key toggles it (see _set_cursor_prefs).
        self._ax1._cursor_world = True
        self._figure.canvas.mpl_connect('key_press_event', self._set_cursor_prefs)
    @auto_refresh
    def set_xformat(self, format):
        '''
        Set the format of the x-axis tick labels.
        If the x-axis type is ``longitude`` or ``latitude``, then the options
        are:
        * ``ddd.ddddd`` - decimal degrees, where the number of decimal places can be varied
        * ``hh`` or ``dd`` - hours (or degrees)
        * ``hh:mm`` or ``dd:mm`` - hours and minutes (or degrees and arcminutes)
        * ``hh:mm:ss`` or ``dd:mm:ss`` - hours, minutes, and seconds (or degrees, arcminutes, and arcseconds)
        * ``hh:mm:ss.ss`` or ``dd:mm:ss.ss`` - hours, minutes, and seconds (or degrees, arcminutes, and arcseconds), where the number of decimal places can be varied.
        If the x-axis type is ``scalar``, then the format should be a valid
        python string format beginning with a ``%``.
        If one of these arguments is not specified, the format for that axis
        is left unchanged.
        '''
        if self._wcs.xaxis_coord_type in ['longitude', 'latitude']:
            if format.startswith('%'):
                raise Exception("Cannot specify Python format for longitude or latitude")
            try:
                # Reject formats coarser than the current manual tick spacing.
                if not self._ax1.xaxis.apl_auto_tick_spacing:
                    au._check_format_spacing_consistency(format, self._ax1.xaxis.apl_tick_spacing)
            except au.InconsistentSpacing:
                warnings.warn("WARNING: Requested label format is not accurate enough to display ticks. The label format will not be changed.")
                return
        else:
            if not format.startswith('%'):
                raise Exception("For scalar tick labels, format should be a Python format beginning with %")
        self._ax1.xaxis.apl_label_form = format
        self._ax2.xaxis.apl_label_form = format
    @auto_refresh
    def set_yformat(self, format):
        '''
        Set the format of the y-axis tick labels.
        If the y-axis type is ``longitude`` or ``latitude``, then the options
        are:
        * ``ddd.ddddd`` - decimal degrees, where the number of decimal places can be varied
        * ``hh`` or ``dd`` - hours (or degrees)
        * ``hh:mm`` or ``dd:mm`` - hours and minutes (or degrees and arcminutes)
        * ``hh:mm:ss`` or ``dd:mm:ss`` - hours, minutes, and seconds (or degrees, arcminutes, and arcseconds)
        * ``hh:mm:ss.ss`` or ``dd:mm:ss.ss`` - hours, minutes, and seconds (or degrees, arcminutes, and arcseconds), where the number of decimal places can be varied.
        If the y-axis type is ``scalar``, then the format should be a valid
        python string format beginning with a ``%``.
        If one of these arguments is not specified, the format for that axis
        is left unchanged.
        '''
        if self._wcs.yaxis_coord_type in ['longitude', 'latitude']:
            if format.startswith('%'):
                raise Exception("Cannot specify Python format for longitude or latitude")
            try:
                # Reject formats coarser than the current manual tick spacing.
                if not self._ax1.yaxis.apl_auto_tick_spacing:
                    au._check_format_spacing_consistency(format, self._ax1.yaxis.apl_tick_spacing)
            except au.InconsistentSpacing:
                warnings.warn("WARNING: Requested label format is not accurate enough to display ticks. The label format will not be changed.")
                return
        else:
            if not format.startswith('%'):
                raise Exception("For scalar tick labels, format should be a Python format beginning with %")
        self._ax1.yaxis.apl_label_form = format
        self._ax2.yaxis.apl_label_form = format
    @auto_refresh
    def set_style(self, style):
        """
        Set the format of the x-axis tick labels.
        This can be 'colons' or 'plain':
        * 'colons' uses colons as separators, for example 31:41:59.26 +27:18:28.1
        * 'plain' uses letters and symbols as separators, for example 31h41m59.26s +27º18'28.1"
        """
        # 'latex' is accepted for backward compatibility but folded into
        # 'plain'; LaTeX rendering is controlled elsewhere.
        if style == 'latex':
            warnings.warn("latex has now been merged with plain - whether or not to use LaTeX is controlled through set_system_latex")
            style = 'plain'
        if not style in ['colons', 'plain']:
            raise Exception("Label style should be one of colons/plain")
        self._ax1.xaxis.apl_labels_style = style
        self._ax1.yaxis.apl_labels_style = style
        self._ax2.xaxis.apl_labels_style = style
        self._ax2.yaxis.apl_labels_style = style
    @auto_refresh
    @fixdocstring
    def set_font(self, family=None, style=None, variant=None, stretch=None, weight=None, size=None, fontproperties=None):
        """
        Set the font of the tick labels.
        Parameters
        ----------
        common: family, style, variant, stretch, weight, size, fontproperties
        Notes
        -----
        Default values are set by matplotlib or previously set values if
        set_font has already been called. Global default values can be set by
        editing the matplotlibrc file.
        """
        if family:
            self._label_fontproperties.set_family(family)
        if style:
            self._label_fontproperties.set_style(style)
        if variant:
            self._label_fontproperties.set_variant(variant)
        if stretch:
            self._label_fontproperties.set_stretch(stretch)
        if weight:
            self._label_fontproperties.set_weight(weight)
        if size:
            self._label_fontproperties.set_size(size)
        if fontproperties:
            self._label_fontproperties = fontproperties
        # Propagate the font to every existing tick label on both axes.
        for tick in self._ax1.get_xticklabels():
            tick.set_fontproperties(self._label_fontproperties)
        for tick in self._ax1.get_yticklabels():
            tick.set_fontproperties(self._label_fontproperties)
        for tick in self._ax2.get_xticklabels():
            tick.set_fontproperties(self._label_fontproperties)
        for tick in self._ax2.get_yticklabels():
            tick.set_fontproperties(self._label_fontproperties)
    @auto_refresh
    def show(self):
        """
        Show the x- and y-axis tick labels.
        """
        self.show_x()
        self.show_y()
    @auto_refresh
    def hide(self):
        """
        Hide the x- and y-axis tick labels.
        """
        self.hide_x()
        self.hide_y()
    @auto_refresh
    def show_x(self):
        """
        Show the x-axis tick labels.
        """
        for tick in self._ax1.get_xticklabels():
            tick.set_visible(True)
        for tick in self._ax2.get_xticklabels():
            tick.set_visible(True)
    @auto_refresh
    def hide_x(self):
        """
        Hide the x-axis tick labels.
        """
        for tick in self._ax1.get_xticklabels():
            tick.set_visible(False)
        for tick in self._ax2.get_xticklabels():
            tick.set_visible(False)
    @auto_refresh
    def show_y(self):
        """
        Show the y-axis tick labels.
        """
        for tick in self._ax1.get_yticklabels():
            tick.set_visible(True)
        for tick in self._ax2.get_yticklabels():
            tick.set_visible(True)
    @auto_refresh
    def hide_y(self):
        """
        Hide the y-axis tick labels.
        """
        for tick in self._ax1.get_yticklabels():
            tick.set_visible(False)
        for tick in self._ax2.get_yticklabels():
            tick.set_visible(False)
    @auto_refresh
    def set_xposition(self, position):
        """
        Set the position of the x-axis tick labels ('top' or 'bottom')
        """
        # Swap the WCSFormatter/NullFormatter pair between ax1 (bottom)
        # and ax2 (top) so labels are drawn on the requested side only.
        if position == 'bottom':
            fx1 = WCSFormatter(wcs=self._wcs, coord='x')
            self._ax1.xaxis.set_major_formatter(fx1)
            fx2 = mpl.NullFormatter()
            self._ax2.xaxis.set_major_formatter(fx2)
        elif position == 'top':
            fx1 = mpl.NullFormatter()
            self._ax1.xaxis.set_major_formatter(fx1)
            fx2 = WCSFormatter(wcs=self._wcs, coord='x')
            self._ax2.xaxis.set_major_formatter(fx2)
        else:
            raise ValueError("position should be one of 'top' or 'bottom'")
    @auto_refresh
    def set_yposition(self, position):
        """
        Set the position of the y-axis tick labels ('left' or 'right')
        """
        # Same swap as set_xposition, for ax1 (left) / ax2 (right).
        if position == 'left':
            fy1 = WCSFormatter(wcs=self._wcs, coord='y')
            self._ax1.yaxis.set_major_formatter(fy1)
            fy2 = mpl.NullFormatter()
            self._ax2.yaxis.set_major_formatter(fy2)
        elif position == 'right':
            fy1 = mpl.NullFormatter()
            self._ax1.yaxis.set_major_formatter(fy1)
            fy2 = WCSFormatter(wcs=self._wcs, coord='y')
            self._ax2.yaxis.set_major_formatter(fy2)
        else:
            raise ValueError("position should be one of 'left' or 'right'")
    def _set_cursor_prefs(self, event, **kwargs):
        # Key handler: 'c' toggles the cursor readout between world and
        # pixel coordinates (used by _cursor_position).
        if event.key == 'c':
            self._ax1._cursor_world = not self._ax1._cursor_world
    def _cursor_position(self, x, y):
        """Format the cursor position (x, y) in pixel coordinates as a
        world- or pixel-coordinate status string, following the axis
        label formats and styles."""
        xaxis = self._ax1.xaxis
        yaxis = self._ax1.yaxis
        if self._ax1._cursor_world:
            xw, yw = wcs_util.pix2world(self._wcs, x, y)
            if self._wcs.xaxis_coord_type in ['longitude', 'latitude']:
                xw = au.Angle(degrees=xw, latitude=self._wcs.xaxis_coord_type == 'latitude')
                hours = 'h' in xaxis.apl_label_form
                if hours:
                    xw = xw.tohours()
                if xaxis.apl_labels_style in ['plain', 'latex']:
                    sep = ('d', 'm', 's')
                    if hours:
                        sep = ('h', 'm', 's')
                elif xaxis.apl_labels_style == 'colons':
                    sep = (':', ':', '')
                xlabel = xw.tostringlist(format=xaxis.apl_label_form, sep=sep)
                xlabel = "".join(xlabel)
            else:
                xlabel = xaxis.apl_label_form % xw
            if self._wcs.yaxis_coord_type in ['longitude', 'latitude']:
                yw = au.Angle(degrees=yw, latitude=self._wcs.yaxis_coord_type == 'latitude')
                hours = 'h' in yaxis.apl_label_form
                if hours:
                    yw = yw.tohours()
                if yaxis.apl_labels_style in ['plain', 'latex']:
                    sep = ('d', 'm', 's')
                    if hours:
                        sep = ('h', 'm', 's')
                elif yaxis.apl_labels_style == 'colons':
                    sep = (':', ':', '')
                ylabel = yw.tostringlist(format=yaxis.apl_label_form, sep=sep)
                ylabel = "".join(ylabel)
            else:
                ylabel = yaxis.apl_label_form % yw
            return "%s %s (world)" % (xlabel, ylabel)
        else:
            return "%g %g (pixel)" % (x, y)
class WCSFormatter(mpl.Formatter):
    """Matplotlib tick formatter that renders tick values (pixel positions)
    as world coordinates, using the tick positions pre-computed on the axis
    (``apl_tick_positions_pix`` / ``apl_tick_positions_world``)."""
    def __init__(self, wcs=False, coord='x'):
        self._wcs = wcs
        self.coord = coord
    def __call__(self, x, pos=None):
        """
        Return the format for tick val x at position pos; pos=None indicated
        unspecified
        """
        self.coord_type = self._wcs.xaxis_coord_type if self.coord == 'x' else self._wcs.yaxis_coord_type
        if self.coord_type in ['longitude', 'latitude']:
            au._check_format_spacing_consistency(self.axis.apl_label_form, self.axis.apl_tick_spacing)
            hours = 'h' in self.axis.apl_label_form
            # Resolve 'plain' into a TeX or non-TeX separator style.
            if self.axis.apl_labels_style == 'plain':
                if mpl.rcParams['text.usetex']:
                    label_style = 'plain_tex'
                else:
                    label_style = 'plain_notex'
            else:
                label_style = self.axis.apl_labels_style
            if label_style == 'plain_notex':
                sep = ('\u00b0', "'", '"')
                if hours:
                    sep = ('h', 'm', 's')
            elif label_style == 'colons':
                sep = (':', ':', '')
            elif label_style == 'plain_tex':
                if hours:
                    sep = ('^{h}', '^{m}', '^{s}')
                else:
                    sep = ('^{\circ}', '^{\prime}', '^{\prime\prime}')
            # Use the pre-computed tick nearest to the requested pixel value.
            ipos = np.argmin(np.abs(self.axis.apl_tick_positions_pix - x))
            label = self.axis.apl_tick_spacing * self.axis.apl_tick_positions_world[ipos]
            if hours:
                label = label.tohours()
            label = label.tostringlist(format=self.axis.apl_label_form, sep=sep)
            # Check if neighboring label is similar and if so whether some
            # elements of the current label are redundant and can be dropped.
            # This should only be done for sexagesimal coordinates
            if len(label) > 1:
                # NOTE(review): `self.coord == x` compares a string ('x'/'y')
                # with the numeric tick value and is therefore always False,
                # so the neighbour choice depends only on the sign of the
                # world coordinate — presumably `'x'` (string) was intended;
                # confirm against upstream APLpy before changing.
                if self.coord == x or self.axis.apl_tick_positions_world[ipos] > 0:
                    comp_ipos = ipos - 1
                else:
                    comp_ipos = ipos + 1
                if comp_ipos >= 0 and comp_ipos <= len(self.axis.apl_tick_positions_pix) - 1:
                    comp_label = self.axis.apl_tick_spacing * self.axis.apl_tick_positions_world[comp_ipos]
                    if hours:
                        comp_label = comp_label.tohours()
                    comp_label = comp_label.tostringlist(format=self.axis.apl_label_form, sep=sep)
                    # Drop leading components shared with the neighbour label.
                    for iter in range(len(label)):
                        if comp_label[0] == label[0]:
                            label.pop(0)
                            comp_label.pop(0)
                        else:
                            break
        else:
            # Scalar axis: plain printf-style formatting of the world value.
            ipos = np.argmin(np.abs(self.axis.apl_tick_positions_pix - x))
            label = self.axis.apl_tick_spacing * self.axis.apl_tick_positions_world[ipos]
            label = self.axis.apl_label_form % label
        if mpl.rcParams['text.usetex']:
            return "$" + "".join(label) + "$"
        else:
            return "".join(label)
| mit |
datapythonista/pandas | pandas/tests/indexes/object/test_indexing.py | 3 | 4370 | import numpy as np
import pytest
import pandas as pd
from pandas import Index
import pandas._testing as tm
class TestGetLoc:
    # Object-dtype indexes cannot do the arithmetic that inexact lookups
    # require, so `method=` based get_loc must raise TypeError.
    def test_get_loc_raises_object_nearest(self):
        index = Index(["a", "c"])
        with pytest.raises(TypeError, match="unsupported operand type"):
            index.get_loc("a", method="nearest")
    def test_get_loc_raises_object_tolerance(self):
        index = Index(["a", "c"])
        with pytest.raises(TypeError, match="unsupported operand type"):
            index.get_loc("a", method="pad", tolerance="invalid")
class TestGetIndexer:
    # Inexact matching on string indexes: pad/backfill work (pure ordering),
    # nearest/tolerance need subtraction and must raise.
    @pytest.mark.parametrize(
        "method,expected",
        [
            ("pad", np.array([-1, 0, 1, 1], dtype=np.intp)),
            ("backfill", np.array([0, 0, 1, -1], dtype=np.intp)),
        ],
    )
    def test_get_indexer_strings(self, method, expected):
        index = Index(["b", "c"])
        actual = index.get_indexer(["a", "b", "c", "d"], method=method)
        tm.assert_numpy_array_equal(actual, expected)
    def test_get_indexer_strings_raises(self):
        index = Index(["b", "c"])
        msg = r"unsupported operand type\(s\) for -: 'str' and 'str'"
        with pytest.raises(TypeError, match=msg):
            index.get_indexer(["a", "b", "c", "d"], method="nearest")
        with pytest.raises(TypeError, match=msg):
            index.get_indexer(["a", "b", "c", "d"], method="pad", tolerance=2)
        with pytest.raises(TypeError, match=msg):
            index.get_indexer(
                ["a", "b", "c", "d"], method="pad", tolerance=[2, 2, 2, 2]
            )
    def test_get_indexer_with_NA_values(
        self, unique_nulls_fixture, unique_nulls_fixture2
    ):
        # GH#22332
        # check pairwise, that no pair of na values
        # is mangled
        if unique_nulls_fixture is unique_nulls_fixture2:
            return  # skip it, values are not unique
        arr = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object)
        index = Index(arr, dtype=object)
        result = index.get_indexer(
            [unique_nulls_fixture, unique_nulls_fixture2, "Unknown"]
        )
        expected = np.array([0, 1, -1], dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)
class TestSliceLocs:
    # Negative-step label slicing on an object index: `expected` is the
    # concatenation of labels the slice should select from list("bcdxy").
    @pytest.mark.parametrize(
        "in_slice,expected",
        [
            # error: Slice index must be an integer or None
            (pd.IndexSlice[::-1], "yxdcb"),
            (pd.IndexSlice["b":"y":-1], ""),  # type: ignore[misc]
            (pd.IndexSlice["b"::-1], "b"),  # type: ignore[misc]
            (pd.IndexSlice[:"b":-1], "yxdcb"),  # type: ignore[misc]
            (pd.IndexSlice[:"y":-1], "y"),  # type: ignore[misc]
            (pd.IndexSlice["y"::-1], "yxdcb"),  # type: ignore[misc]
            (pd.IndexSlice["y"::-4], "yb"),  # type: ignore[misc]
            # absent labels
            (pd.IndexSlice[:"a":-1], "yxdcb"),  # type: ignore[misc]
            (pd.IndexSlice[:"a":-2], "ydb"),  # type: ignore[misc]
            (pd.IndexSlice["z"::-1], "yxdcb"),  # type: ignore[misc]
            (pd.IndexSlice["z"::-3], "yc"),  # type: ignore[misc]
            (pd.IndexSlice["m"::-1], "dcb"),  # type: ignore[misc]
            (pd.IndexSlice[:"m":-1], "yx"),  # type: ignore[misc]
            (pd.IndexSlice["a":"a":-1], ""),  # type: ignore[misc]
            (pd.IndexSlice["z":"z":-1], ""),  # type: ignore[misc]
            (pd.IndexSlice["m":"m":-1], ""),  # type: ignore[misc]
        ],
    )
    def test_slice_locs_negative_step(self, in_slice, expected):
        index = Index(list("bcdxy"))
        s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop, in_slice.step)
        result = index[s_start : s_stop : in_slice.step]
        expected = Index(list(expected))
        tm.assert_index_equal(result, expected)
    def test_slice_locs_dup(self):
        # Duplicated labels: slice_locs spans all occurrences, in both
        # ascending and (reversed) descending order.
        index = Index(["a", "a", "b", "c", "d", "d"])
        assert index.slice_locs("a", "d") == (0, 6)
        assert index.slice_locs(end="d") == (0, 6)
        assert index.slice_locs("a", "c") == (0, 4)
        assert index.slice_locs("b", "d") == (2, 6)
        index2 = index[::-1]
        assert index2.slice_locs("d", "a") == (0, 6)
        assert index2.slice_locs(end="a") == (0, 6)
        assert index2.slice_locs("d", "b") == (0, 4)
        assert index2.slice_locs("c", "a") == (2, 6)
| bsd-3-clause |
alekz112/statsmodels | statsmodels/datasets/cpunish/data.py | 25 | 2597 | """US Capital Punishment dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = __doc__
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Number of state executions in 1997"""
DESCRLONG = """This data describes the number of times capital punishment is implemented
at the state level for the year 1997. The outcome variable is the number of
executions. There were executions in 17 states.
Included in the data are explanatory variables for median per capita income
in dollars, the percent of the population classified as living in poverty,
the percent of Black citizens in the population, the rate of violent
crimes per 100,000 residents for 1996, a dummy variable indicating
whether the state is in the South, and (an estimate of) the proportion
of the population with a college degree of some kind.
"""
NOTE = """::
Number of Observations - 17
Number of Variables - 7
Variable name definitions::
EXECUTIONS - Executions in 1996
INCOME - Median per capita income in 1996 dollars
PERPOVERTY - Percent of the population classified as living in poverty
PERBLACK - Percent of black citizens in the population
VC100k96 - Rate of violent crimes per 100,00 residents for 1996
SOUTH - SOUTH == 1 indicates a state in the South
DEGREE - An esimate of the proportion of the state population with a
college degree of some kind
State names are included in the data file, though not returned by load.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
    """
    Load the cpunish data and return a Dataset class.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    # Column 0 of the record array (EXECUTIONS) is the endogenous variable.
    return du.process_recarray(_get_data(), endog_idx=0, dtype=float)
def load_pandas():
    """
    Load the cpunish data and return a Dataset class.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    # Same as load(), but with pandas objects for endog/exog.
    return du.process_recarray_pandas(_get_data(), endog_idx=0, dtype=float)
def _get_data():
    """Read cpunish.csv (shipped next to this module) into a record array.

    The state-name column (index 0) is skipped; the remaining seven
    columns (usecols 1-7) are parsed as floats with the header row
    providing field names.
    """
    filepath = dirname(abspath(__file__))
    # Use a context manager so the file handle is closed even if parsing
    # fails (the original left the handle dangling until garbage collection).
    with open(filepath + '/cpunish.csv', 'rb') as f:
        data = recfromtxt(f, delimiter=",", names=True, dtype=float,
                          usecols=(1, 2, 3, 4, 5, 6, 7))
    return data
| bsd-3-clause |
sld/computer_vision_workshop | kaggle/MNIST_recognizer/digit_recognizer.py | 1 | 3248 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import numpy.random as random
import matplotlib.pyplot as plt
from sklearn import neighbors, datasets, cross_validation
import cv2
import csv
import sys
class DigitClassifier:
    """k-nearest-neighbours digit classifier over flattened image pixels.

    Consumes (label, image) pairs from a data source and wraps a
    sklearn KNeighborsClassifier."""
    def __init__(self, data_source, k=10):
        # data_source.data() yields [label, image] pairs (see DigitDataProcessor).
        self.data = data_source.data()
        self.__process_data()
        self.__make_classifier(k)
    def train(self):
        # Fit on all processed feature vectors.
        self.classifier.fit(self.feature_vectors, self.labels)
    def classify(self, feature_vectors):
        # Predict labels for pre-built feature vectors.
        labels = self.classifier.predict(feature_vectors)
        return labels
    def score(self):
        # Hold-out accuracy on a fixed 80/20 split (random_state=100).
        vectors_train, vectors_test, labels_train, labels_test = cross_validation \
            .train_test_split(self.feature_vectors, self.labels, test_size=0.2, random_state=100)
        self.classifier.fit(vectors_train, labels_train)
        score = self.classifier.score(vectors_test, labels_test)
        return score
    def make_feature_vector(self, image):
        # Feature vector = raw pixels, flattened.
        return image.flatten()
    def __process_data(self):
        # Split the (label, image) pairs into parallel label/feature lists.
        self.labels = []
        self.feature_vectors = []
        for image_and_label in self.data:
            label = image_and_label[0]
            image = image_and_label[1]
            feature_vector = self.make_feature_vector(image)
            self.labels.append(label)
            self.feature_vectors.append(feature_vector)
    def __make_classifier(self, k):
        self.classifier = neighbors.KNeighborsClassifier(k)
class DigitDataProcessor:
    """Load an MNIST-style CSV and expose it as [label, 28x28 image] pairs.

    For test files (is_train=False) the CSV has no label column, so the
    label is set to 0."""
    def __init__(self, csv_file, is_train=True, thresh_val=233):
        self.pandas_data = pd.read_csv(csv_file)
        # thresh_val is only used by the commented-out thresholding below.
        self.thresh_val = thresh_val
        self.is_train = is_train
        self.__process_data()
    # Format: [[label, image], ...]
    def data(self):
        return self.processed_data
    def __process_data(self):
        self.__make_processed_data()
    def __make_processed_data(self):
        self.processed_data = []
        for arr in self.pandas_data.values:
            label, image = self.__get_label_and_image(arr)
            self.processed_data.append([label, self.__process_image(image)])
    def __process_image(self, image):
        # Reshape the flat 784-pixel row into a 28x28 uint8 image.
        image = image.astype(np.uint8).reshape(28,28)
        # resized = cv2.resize(image, (10,10))
        # ret, thresholded = cv2.threshold(image, self.thresh_val, 255, cv2.THRESH_BINARY)
        return image
    def __get_label_and_image(self, arr):
        # Training rows carry the label in column 0; test rows do not.
        if self.is_train:
            return [arr[0], arr[1:]]
        else:
            return [0, arr[0:]]
class Runner:
    """Entry points: local cross-validation or Kaggle submission file.

    NOTE: Python 2 module (print statements below)."""
    def cross_validation(self):
        # Report a hold-out accuracy score on the training data.
        train_data_source = DigitDataProcessor('./data/train.csv')
        dc = DigitClassifier(train_data_source)
        print dc.score()
    def make_what_kaggle_needs(self):
        # Training classifier
        train_data_source = DigitDataProcessor('./data/train.csv')
        dc = DigitClassifier(train_data_source, 10)
        dc.train()
        print "After Train!"
        # Predicting values
        test_data_source = DigitDataProcessor('./data/test.csv', False)
        images = [label_and_image[1] for label_and_image in test_data_source.data()]
        feature_vectors = [dc.make_feature_vector(image) for image in images]
        labels = dc.classify(feature_vectors)
        print "After Classifying!"
        # Write the 1-based ImageId / Label submission CSV.
        c = csv.writer(open("./predict2.csv", "wb"))
        c.writerow(["ImageId", "Label"])
        for index, label in enumerate(labels):
            c.writerow([index+1, label])
# Module runs the full submission pipeline on import/execution.
Runner().make_what_kaggle_needs()
| mit |
sobomax/virtualbox_64bit_edd | src/VBox/ValidationKit/testmanager/webui/wuihlpgraph.py | 2 | 4253 | # -*- coding: utf-8 -*-
# $Id: wuihlpgraph.py $
"""
Test Manager Web-UI - Graph Helpers.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 100880 $"
class WuiHlpGraphDataTable(object):  # pylint: disable=R0903
    """
    Container for tabular graph data: a header row followed by data rows.
    """

    class Row(object):  # pylint: disable=R0903
        """One table row: a group name plus its member values."""
        def __init__(self, sGroup, aoValues, asValues = None):
            self.sName = sGroup
            self.aoValues = aoValues
            if asValues is not None:
                # Caller supplied pre-formatted strings; they must map 1:1
                # onto the raw values.
                assert len(asValues) == len(aoValues)
                self.asValues = asValues
            else:
                self.asValues = [str(oValue) for oValue in aoValues]

    def __init__(self, sGroupLable, asMemberLabels):
        # The first row doubles as the column-header row.
        self.aoTable = [WuiHlpGraphDataTable.Row(sGroupLable, asMemberLabels)]

    def addRow(self, sGroup, aoValues, asValues = None):
        """Adds a row to the data table."""
        self.aoTable.append(WuiHlpGraphDataTable.Row(sGroup, aoValues, asValues))
        return True

    def getGroupCount(self):
        """Gets the number of data groups (rows), excluding the header row."""
        return len(self.aoTable) - 1
class WuiHlpGraphDataTableEx(object):  # pylint: disable=R0903
    """
    Data container for a table/graph with optional error bars on the Y values.
    """

    class DataSeries(object):  # pylint: disable=R0903
        """
        A single data series.

        The aoXValues, aoYValues and aoYErrorBars are parallel arrays, making a
        series of (X,Y,Y-err-above-delta,Y-err-below-delta) points.
        The error bars are optional.
        """
        def __init__(self, sName, aoXValues, aoYValues, asHtmlTooltips = None, aoYErrorBarBelow = None, aoYErrorBarAbove = None):
            self.sName = sName
            self.aoXValues = aoXValues
            self.aoYValues = aoYValues
            self.asHtmlTooltips = asHtmlTooltips
            self.aoYErrorBarBelow = aoYErrorBarBelow
            self.aoYErrorBarAbove = aoYErrorBarAbove

    def __init__(self, sXUnit, sYUnit):
        self.sXUnit = sXUnit
        self.sYUnit = sYUnit
        self.aoSeries = []

    def addDataSeries(self, sName, aoXValues, aoYValues, asHtmlTooltips = None, aoYErrorBarBelow = None, aoYErrorBarAbove = None):
        """Adds a data series to the table."""
        oNewSeries = WuiHlpGraphDataTableEx.DataSeries(sName, aoXValues, aoYValues, asHtmlTooltips,
                                                       aoYErrorBarBelow, aoYErrorBarAbove)
        self.aoSeries.append(oNewSeries)
        return True

    def getDataSeriesCount(self):
        """Gets the number of data series."""
        return len(self.aoSeries)
#
# Dynamically choose implementation.
#
# The Google-chart backend is currently hard-wired in via 'if True:'.  The
# else branch keeps the intended fallback chain (matplotlib if importable,
# otherwise the simple renderer) for easy switching during development.
if True:
    from testmanager.webui import wuihlpgraphgooglechart as GraphImplementation;
else:
    try:
        import matplotlib; # pylint: disable=W0611,F0401
        from testmanager.webui import wuihlpgraphmatplotlib as GraphImplementation;
    except:
        from testmanager.webui import wuihlpgraphsimple as GraphImplementation;
# Re-export the chosen backend's graph classes under stable module-level names.
# pylint: disable=C0103
WuiHlpBarGraph = GraphImplementation.WuiHlpBarGraph;
WuiHlpLineGraph = GraphImplementation.WuiHlpLineGraph;
WuiHlpLineGraphErrorbarY = GraphImplementation.WuiHlpLineGraphErrorbarY;
| gpl-2.0 |
hitszxp/scikit-learn | sklearn/learning_curve.py | 28 | 13300 | """Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import _check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
                   cv=None, scoring=None, exploit_incremental_learning=False,
                   n_jobs=1, pre_dispatch="all", verbose=0):
    """Learning curve.
    Determines cross-validated training and test scores for different training
    set sizes.
    A cross-validation generator splits the whole dataset k times in training
    and test data. Subsets of the training set with varying sizes will be used
    to train the estimator and a score for each training subset size and the
    test set will be computed. Afterwards, the scores will be averaged over
    all k runs for each training subset size.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    train_sizes : array-like, shape (n_ticks,), dtype float or int
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually have to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))
    cv : integer, cross-validation generator, optional
        If an integer is passed, it is the number of folds (defaults to 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    exploit_incremental_learning : boolean, optional, default: False
        If the estimator supports incremental learning, this will be
        used to speed up fitting for different training set sizes.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.
    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.
    Returns
    -------
    train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that has been used to generate the
        learning curve. Note that the number of ticks might be less
        than n_ticks because duplicate entries will be removed.
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.
    Notes
    -----
    See :ref:`examples/model_selection/plot_learning_curve.py
    <example_model_selection_plot_learning_curve.py>`
    """
    if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
        raise ValueError("An estimator must support the partial_fit interface "
                         "to exploit incremental learning")
    X, y = indexable(X, y)
    # Make a list since we will be iterating multiple times over the folds
    cv = list(_check_cv(cv, X, y, classifier=is_classifier(estimator)))
    scorer = check_scoring(estimator, scoring=scoring)
    # HACK as long as boolean indices are allowed in cv generators
    # (convert every boolean-mask fold into integer index arrays).
    if cv[0][0].dtype == bool:
        new_cv = []
        for i in range(len(cv)):
            new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
        cv = new_cv
    n_max_training_samples = len(cv[0][0])
    # Because the lengths of folds can be significantly different, it is
    # not guaranteed that we use all of the available training data when we
    # use the first 'n_max_training_samples' samples.
    train_sizes_abs = _translate_train_sizes(train_sizes,
                                             n_max_training_samples)
    n_unique_ticks = train_sizes_abs.shape[0]
    if verbose > 0:
        print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    if exploit_incremental_learning:
        # One job per fold; each job grows the training set incrementally.
        classes = np.unique(y) if is_classifier(estimator) else None
        out = parallel(delayed(_incremental_fit_estimator)(
            clone(estimator), X, y, classes, train, test, train_sizes_abs,
            scorer, verbose) for train, test in cv)
    else:
        # One full fit per (fold, training-set size); tick index varies
        # fastest within each fold.
        out = parallel(delayed(_fit_and_score)(
            clone(estimator), X, y, scorer, train[:n_train_samples], test,
            verbose, parameters=None, fit_params=None, return_train_score=True)
            for train, test in cv for n_train_samples in train_sizes_abs)
        # Keep only the first two columns (train score, test score) of each
        # _fit_and_score result, then regroup by fold and tick.
        out = np.array(out)[:, :2]
        n_cv_folds = out.shape[0] // n_unique_ticks
        out = out.reshape(n_cv_folds, n_unique_ticks, 2)
    out = np.asarray(out).transpose((2, 1, 0))
    return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
                               train_sizes, scorer, verbose):
    """Train estimator on training subsets incrementally and compute scores.

    Returns an array of shape (n_ticks, 2): one (train_score, test_score)
    row per entry in 'train_sizes'.
    """
    train_scores, test_scores = [], []
    # Split 'train' at the tick boundaries: each partial_train chunk contains
    # only the samples added since the previous tick, so partial_fit sees each
    # sample exactly once.
    partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
    for n_train_samples, partial_train in partitions:
        train_subset = train[:n_train_samples]
        X_train, y_train = _safe_split(estimator, X, y, train_subset)
        X_partial_train, y_partial_train = _safe_split(estimator, X, y,
                                                       partial_train)
        X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
        if y_partial_train is None:
            # Unsupervised case: no targets to forward.
            estimator.partial_fit(X_partial_train, classes=classes)
        else:
            estimator.partial_fit(X_partial_train, y_partial_train,
                                  classes=classes)
        train_scores.append(_score(estimator, X_train, y_train, scorer))
        test_scores.append(_score(estimator, X_test, y_test, scorer))
    return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
                     scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
    """Validation curve.
    Determine training and test scores for varying parameter values.
    Compute scores for an estimator with different values of a specified
    parameter. This is similar to grid search with one parameter. However, this
    will also compute training scores and is merely a utility for plotting the
    results.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    param_name : string
        Name of the parameter that will be varied.
    param_range : array-like, shape (n_values,)
        The values of the parameter that will be evaluated.
    cv : integer, cross-validation generator, optional
        If an integer is passed, it is the number of folds (defaults to 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.
    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.
    Returns
    -------
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.
    Notes
    -----
    See
    :ref:`examples/model_selection/plot_validation_curve.py
    <example_model_selection_plot_validation_curve.py>`
    """
    X, y = indexable(X, y)
    cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    # One fit per (fold, parameter value); the parameter value varies fastest.
    out = parallel(delayed(_fit_and_score)(
        estimator, X, y, scorer, train, test, verbose,
        parameters={param_name: v}, fit_params=None, return_train_score=True)
        for train, test in cv for v in param_range)
    # Keep only the first two columns (train score, test score) of each
    # _fit_and_score result, then regroup as (2, n_params, n_cv_folds).
    out = np.asarray(out)[:, :2]
    n_params = len(param_range)
    n_cv_folds = out.shape[0] // n_params
    out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
    return out[0], out[1]
| bsd-3-clause |
zorroblue/scikit-learn | examples/svm/plot_iris.py | 65 | 3742 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
    """Create a mesh of points to plot in

    Parameters
    ----------
    x: data to base x-axis meshgrid on
    y: data to base y-axis meshgrid on
    h: stepsize for meshgrid, optional

    Returns
    -------
    xx, yy : ndarray
    """
    # Pad the data range by one unit on each side so the plotted surface
    # extends slightly beyond the outermost samples.
    x_ticks = np.arange(x.min() - 1, x.max() + 1, h)
    y_ticks = np.arange(y.min() - 1, y.max() + 1, h)
    xx, yy = np.meshgrid(x_ticks, y_ticks)
    return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
    """Plot the decision boundaries for a classifier.

    Parameters
    ----------
    ax: matplotlib axes object
    clf: a classifier
    xx: meshgrid ndarray
    yy: meshgrid ndarray
    params: dictionary of params to pass to contourf, optional

    Returns
    -------
    out: the result of ax.contourf
    """
    # Predict a class for every grid point, then restore the 2-D grid shape
    # so the predictions can be drawn as filled contours.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    out = ax.contourf(xx, yy, Z, **params)
    return out
# import some data to play with
iris = datasets.load_iris()
# Take the first two features. We could avoid this by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
models = (svm.SVC(kernel='linear', C=C),
          svm.LinearSVC(C=C),
          svm.SVC(kernel='rbf', gamma=0.7, C=C),
          svm.SVC(kernel='poly', degree=3, C=C))
# NOTE: this is a generator expression, so the actual fitting happens lazily
# while iterating the plotting loop below.
models = (clf.fit(X, y) for clf in models)
# title for the plots
titles = ('SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel')
# Set-up 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
# One subplot per classifier: decision surface plus the raw samples.
for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel('Sepal length')
    ax.set_ylabel('Sepal width')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
plt.show()
lazywei/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 213 | 11911 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
                      strategy, statistics, missing_values):
    """Utility function for testing imputation for a given strategy.
    Test:
        - along the two axes
        - with dense and sparse arrays
    Check that:
        - the statistics (mean, median, mode) are correct
        - the missing values are imputed correctly"""
    err_msg = "Parameters: strategy = %s, missing_values = %s, " \
              "axis = {0}, sparse = {1}" % (strategy, missing_values)
    # Normal matrix, axis = 0
    imputer = Imputer(missing_values, strategy=strategy, axis=0)
    X_trans = imputer.fit(X).transform(X.copy())
    assert_array_equal(imputer.statistics_, statistics,
                       err_msg.format(0, False))
    assert_array_equal(X_trans, X_true, err_msg.format(0, False))
    # Normal matrix, axis = 1
    # A NaN in 'statistics' marks a column with no observed values; along
    # axis=1 such input is expected to make transform() raise.
    imputer = Imputer(missing_values, strategy=strategy, axis=1)
    imputer.fit(X.transpose())
    if np.isnan(statistics).any():
        assert_raises(ValueError, imputer.transform, X.copy().transpose())
    else:
        X_trans = imputer.transform(X.copy().transpose())
        assert_array_equal(X_trans, X_true.transpose(),
                           err_msg.format(1, False))
    # Sparse matrix, axis = 0
    imputer = Imputer(missing_values, strategy=strategy, axis=0)
    imputer.fit(sparse.csc_matrix(X))
    X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
    if sparse.issparse(X_trans):
        X_trans = X_trans.toarray()
    assert_array_equal(imputer.statistics_, statistics,
                       err_msg.format(0, True))
    assert_array_equal(X_trans, X_true, err_msg.format(0, True))
    # Sparse matrix, axis = 1
    imputer = Imputer(missing_values, strategy=strategy, axis=1)
    imputer.fit(sparse.csc_matrix(X.transpose()))
    if np.isnan(statistics).any():
        assert_raises(ValueError, imputer.transform,
                      sparse.csc_matrix(X.copy().transpose()))
    else:
        X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
        if sparse.issparse(X_trans):
            X_trans = X_trans.toarray()
        assert_array_equal(X_trans, X_true.transpose(),
                           err_msg.format(1, True))
def test_imputation_shape():
    # Verify the shapes of the imputed matrix for different strategies.
    X = np.random.randn(10, 2)
    # Every other row is fully missing; imputation must still keep the shape.
    X[::2] = np.nan
    for strategy in ['mean', 'median', 'most_frequent']:
        imputer = Imputer(strategy=strategy)
        X_imputed = imputer.fit_transform(X)
        assert_equal(X_imputed.shape, (10, 2))
        # Same expectation for sparse input.
        X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
        assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
    # Test imputation using the mean and median strategies, when
    # missing_values == 0.
    X = np.array([
        [np.nan, 0, 0, 0, 5],
        [np.nan, 1, 0, np.nan, 3],
        [np.nan, 2, 0, 0, 0],
        [np.nan, 6, 0, 5, 13],
    ])
    # Expected output keeps only the imputable columns (all-missing columns
    # such as the all-NaN and all-zero ones are dropped by the imputer).
    X_imputed_mean = np.array([
        [3, 5],
        [1, 3],
        [2, 7],
        [6, 13],
    ])
    statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
    # Behaviour of median with NaN is undefined, e.g. different results in
    # np.median and np.ma.median
    X_for_median = X[:, [0, 1, 2, 4]]
    X_imputed_median = np.array([
        [2, 5],
        [1, 3],
        [2, 5],
        [6, 13],
    ])
    statistics_median = [np.nan, 2, np.nan, 5]
    _check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
    _check_statistics(X_for_median, X_imputed_median, "median",
                      statistics_median, 0)
def test_imputation_mean_median():
    # Test imputation using the mean and median strategies, when
    # missing_values != 0.
    rng = np.random.RandomState(0)
    dim = 10
    dec = 10
    shape = (dim * dim, dim + dec)
    zeros = np.zeros(shape[0])
    values = np.arange(1, shape[0]+1)
    # Mix in negative values so means/medians are non-trivial.
    values[4::2] = - values[4::2]
    # Each test case: (strategy, missing marker, reference statistic computed
    # from the zero part 'z', value part 'v' and missing part 'p').
    tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
             ("mean", 0, lambda z, v, p: np.mean(v)),
             ("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
             ("median", 0, lambda z, v, p: np.median(v))]
    for strategy, test_missing_values, true_value_fun in tests:
        X = np.empty(shape)
        X_true = np.empty(shape)
        true_statistics = np.empty(shape[1])
        # Create a matrix X with columns
        #    - with only zeros,
        #    - with only missing values
        #    - with zeros, missing values and values
        # And a matrix X_true containing all true values
        for j in range(shape[1]):
            nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
            nb_missing_values = max(shape[0] + dec * dec
                                    - (j + dec) * (j + dec), 0)
            nb_values = shape[0] - nb_zeros - nb_missing_values
            z = zeros[:nb_zeros]
            p = np.repeat(test_missing_values, nb_missing_values)
            v = values[rng.permutation(len(values))[:nb_values]]
            true_statistics[j] = true_value_fun(z, v, p)
            # Create the columns
            X[:, j] = np.hstack((v, z, p))
            if 0 == test_missing_values:
                # Zeros count as missing: they get imputed in X_true too.
                X_true[:, j] = np.hstack((v,
                                          np.repeat(
                                              true_statistics[j],
                                              nb_missing_values + nb_zeros)))
            else:
                X_true[:, j] = np.hstack((v,
                                          z,
                                          np.repeat(true_statistics[j],
                                                    nb_missing_values)))
            # Shuffle them the same way
            np.random.RandomState(j).shuffle(X[:, j])
            np.random.RandomState(j).shuffle(X_true[:, j])
        # Mean doesn't support columns containing NaNs, median does
        if strategy == "median":
            cols_to_keep = ~np.isnan(X_true).any(axis=0)
        else:
            cols_to_keep = ~np.isnan(X_true).all(axis=0)
        X_true = X_true[:, cols_to_keep]
        _check_statistics(X, X_true, strategy,
                          true_statistics, test_missing_values)
def test_imputation_median_special_cases():
    # Test median imputation with sparse boundary cases
    # Each row below (a column after transpose) exercises one odd/even
    # median case; the per-row comment names the case.
    X = np.array([
        [0, np.nan, np.nan],  # odd: implicit zero
        [5, np.nan, np.nan],  # odd: explicit nonzero
        [0, 0, np.nan],    # even: average two zeros
        [-5, 0, np.nan],   # even: avg zero and neg
        [0, 5, np.nan],    # even: avg zero and pos
        [4, 5, np.nan],    # even: avg nonzeros
        [-4, -5, np.nan],  # even: avg negatives
        [-1, 2, np.nan],   # even: crossing neg and pos
    ]).transpose()
    X_imputed_median = np.array([
        [0, 0, 0],
        [5, 5, 5],
        [0, 0, 0],
        [-5, 0, -2.5],
        [0, 5, 2.5],
        [4, 5, 4.5],
        [-4, -5, -4.5],
        [-1, 2, .5],
    ]).transpose()
    statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
    _check_statistics(X, X_imputed_median, "median",
                      statistics_median, 'NaN')
def test_imputation_most_frequent():
    # Test imputation using the most-frequent strategy.
    # -1 is the missing marker here; the first (all -1) column is dropped.
    X = np.array([
        [-1, -1, 0, 5],
        [-1, 2, -1, 3],
        [-1, 1, 3, -1],
        [-1, 2, 3, 7],
    ])
    X_true = np.array([
        [2, 0, 5],
        [2, 3, 3],
        [1, 3, 3],
        [2, 3, 7],
    ])
    # scipy.stats.mode, used in Imputer, doesn't return the first most
    # frequent as promised in the doc but the lowest most frequent. When this
    # test will fail after an update of scipy, Imputer will need to be updated
    # to be consistent with the new (correct) behaviour
    _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
    # Test imputation within a pipeline + gridsearch.
    # Smoke test only: fitting must succeed for every parameter combination.
    pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
                         ('tree', tree.DecisionTreeRegressor(random_state=0))])
    parameters = {
        'imputer__strategy': ["mean", "median", "most_frequent"],
        'imputer__axis': [0, 1]
    }
    l = 100
    X = sparse_random_matrix(l, l, density=0.10)
    Y = sparse_random_matrix(l, 1, density=0.10).toarray()
    gs = grid_search.GridSearchCV(pipeline, parameters)
    gs.fit(X, Y)
def test_imputation_pickle():
    # Test for pickling imputers.
    # A round-tripped imputer must transform identically to the original.
    import pickle
    l = 100
    X = sparse_random_matrix(l, l, density=0.10)
    for strategy in ["mean", "median", "most_frequent"]:
        imputer = Imputer(missing_values=0, strategy=strategy)
        imputer.fit(X)
        imputer_pickled = pickle.loads(pickle.dumps(imputer))
        assert_array_equal(imputer.transform(X.copy()),
                           imputer_pickled.transform(X.copy()),
                           "Fail to transform the data after pickling "
                           "(strategy = %s)" % (strategy))
def test_imputation_copy():
    # Test imputation with copy
    # Strategy: mutate the transformed output and check whether the input
    # was (copy expected) or wasn't (in-place expected) left untouched.
    X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
    # copy=True, dense => copy
    X = X_orig.copy().toarray()
    imputer = Imputer(missing_values=0, strategy="mean", copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert_false(np.all(X == Xt))
    # copy=True, sparse csr => copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))
    # copy=False, dense => no copy
    X = X_orig.copy().toarray()
    imputer = Imputer(missing_values=0, strategy="mean", copy=False)
    Xt = imputer.fit(X).transform(X)
    Xt[0, 0] = -1
    assert_true(np.all(X == Xt))
    # copy=False, sparse csr, axis=1 => no copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_true(np.all(X.data == Xt.data))
    # copy=False, sparse csc, axis=0 => no copy
    X = X_orig.copy().tocsc()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=0)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_true(np.all(X.data == Xt.data))
    # copy=False, sparse csr, axis=0 => copy
    # (the imputer must convert the layout, which forces a copy even with
    # copy=False; same for the remaining cases below)
    X = X_orig.copy()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=0)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))
    # copy=False, sparse csc, axis=1 => copy
    X = X_orig.copy().tocsc()
    imputer = Imputer(missing_values=X.data[0], strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    Xt.data[0] = -1
    assert_false(np.all(X.data == Xt.data))
    # copy=False, sparse csr, axis=1, missing_values=0 => copy
    X = X_orig.copy()
    imputer = Imputer(missing_values=0, strategy="mean",
                      copy=False, axis=1)
    Xt = imputer.fit(X).transform(X)
    assert_false(sparse.issparse(Xt))
    # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
    # made, even if copy=False.
| bsd-3-clause |
raghavrv/scikit-learn | sklearn/utils/tests/test_testing.py | 7 | 8098 | import warnings
import unittest
import sys
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (
assert_raises,
assert_less,
assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message,
assert_allclose_dense_sparse,
ignore_warnings)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
def test_assert_less():
    # assert_less passes for first < second and raises otherwise.
    assert_less(0, 1)
    assert_raises(AssertionError, assert_less, 1, 0)
def test_assert_greater():
    # assert_greater passes for first > second and raises otherwise.
    assert_greater(1, 0)
    assert_raises(AssertionError, assert_greater, 0, 1)
def test_assert_less_equal():
    # assert_less_equal accepts equality, unlike assert_less.
    assert_less_equal(0, 1)
    assert_less_equal(1, 1)
    assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
    # assert_greater_equal accepts equality, unlike assert_greater.
    assert_greater_equal(1, 0)
    assert_greater_equal(1, 1)
    assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
    # set_random_state must set random_state on estimators that have it and
    # silently skip those that don't.
    lda = LinearDiscriminantAnalysis()
    tree = DecisionTreeClassifier()
    # Linear Discriminant Analysis doesn't have random state: smoke test
    set_random_state(lda, 3)
    set_random_state(tree, 3)
    assert_equal(tree.random_state, 3)
def test_assert_allclose_dense_sparse():
    x = np.arange(9).reshape(3, 3)
    msg = "Not equal to tolerance "
    y = sparse.csc_matrix(x)
    for X in [x, y]:
        # basic compare
        assert_raise_message(AssertionError, msg, assert_allclose_dense_sparse,
                             X, X * 2)
        assert_allclose_dense_sparse(X, X)
    # Mixing a dense and a sparse argument is rejected outright.
    assert_raise_message(ValueError, "Can only compare two sparse",
                         assert_allclose_dense_sparse, x, y)
    # Same values but different shapes must not compare equal.
    A = sparse.diags(np.ones(5), offsets=0).tocsr()
    B = sparse.csr_matrix(np.ones((1, 5)))
    assert_raise_message(AssertionError, "Arrays are not equal",
                         assert_allclose_dense_sparse, B, A)
def test_assert_raise_message():
    # assert_raise_message must check both the exception type and that the
    # given fragment appears in the exception message.
    def _raise_ValueError(message):
        raise ValueError(message)
    def _no_raise():
        pass
    assert_raise_message(ValueError, "test",
                         _raise_ValueError, "test")
    # Right type, wrong message -> AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "something else",
                  _raise_ValueError, "test")
    # Wrong expected type -> the original exception propagates.
    assert_raises(ValueError,
                  assert_raise_message, TypeError, "something else",
                  _raise_ValueError, "test")
    # No exception at all -> AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "test",
                  _no_raise)
    # multiple exceptions in a tuple
    assert_raises(AssertionError,
                  assert_raise_message, (ValueError, AttributeError),
                  "test", _no_raise)
def test_ignore_warning():
    # This check that ignore_warning decorateur and context manager are working
    # as expected
    # _warning_function emits one DeprecationWarning;
    # _multiple_warning_function additionally emits a UserWarning.
    def _warning_function():
        warnings.warn("deprecation warning", DeprecationWarning)
    def _multiple_warning_function():
        warnings.warn("deprecation warning", DeprecationWarning)
        warnings.warn("deprecation warning")
    # Check the function directly
    assert_no_warnings(ignore_warnings(_warning_function))
    assert_no_warnings(ignore_warnings(_warning_function,
                                       category=DeprecationWarning))
    # Filtering the wrong category must let the other one through.
    assert_warns(DeprecationWarning, ignore_warnings(_warning_function,
                                                     category=UserWarning))
    assert_warns(UserWarning,
                 ignore_warnings(_multiple_warning_function,
                                 category=DeprecationWarning))
    assert_warns(DeprecationWarning,
                 ignore_warnings(_multiple_warning_function,
                                 category=UserWarning))
    assert_no_warnings(ignore_warnings(_warning_function,
                                       category=(DeprecationWarning,
                                                 UserWarning)))
    # Check the decorator
    @ignore_warnings
    def decorator_no_warning():
        _warning_function()
        _multiple_warning_function()
    @ignore_warnings(category=(DeprecationWarning, UserWarning))
    def decorator_no_warning_multiple():
        _multiple_warning_function()
    @ignore_warnings(category=DeprecationWarning)
    def decorator_no_deprecation_warning():
        _warning_function()
    @ignore_warnings(category=UserWarning)
    def decorator_no_user_warning():
        _warning_function()
    @ignore_warnings(category=DeprecationWarning)
    def decorator_no_deprecation_multiple_warning():
        _multiple_warning_function()
    @ignore_warnings(category=UserWarning)
    def decorator_no_user_multiple_warning():
        _multiple_warning_function()
    assert_no_warnings(decorator_no_warning)
    assert_no_warnings(decorator_no_warning_multiple)
    assert_no_warnings(decorator_no_deprecation_warning)
    assert_warns(DeprecationWarning, decorator_no_user_warning)
    assert_warns(UserWarning, decorator_no_deprecation_multiple_warning)
    assert_warns(DeprecationWarning, decorator_no_user_multiple_warning)
    # Check the context manager
    def context_manager_no_warning():
        with ignore_warnings():
            _warning_function()
    def context_manager_no_warning_multiple():
        with ignore_warnings(category=(DeprecationWarning, UserWarning)):
            _multiple_warning_function()
    def context_manager_no_deprecation_warning():
        with ignore_warnings(category=DeprecationWarning):
            _warning_function()
    def context_manager_no_user_warning():
        with ignore_warnings(category=UserWarning):
            _warning_function()
    def context_manager_no_deprecation_multiple_warning():
        with ignore_warnings(category=DeprecationWarning):
            _multiple_warning_function()
    def context_manager_no_user_multiple_warning():
        with ignore_warnings(category=UserWarning):
            _multiple_warning_function()
    assert_no_warnings(context_manager_no_warning)
    assert_no_warnings(context_manager_no_warning_multiple)
    assert_no_warnings(context_manager_no_deprecation_warning)
    assert_warns(DeprecationWarning, context_manager_no_user_warning)
    assert_warns(UserWarning, context_manager_no_deprecation_multiple_warning)
    assert_warns(DeprecationWarning, context_manager_no_user_multiple_warning)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
    """Exercise the assert_warns / assert_no_warnings helpers.

    Inspired by the numpy 1.7 test suite, with an alteration to check that
    the warning filters are reset after calls to assert_warns.  This
    assert_warns behavior is specific to scikit-learn because
    `clean_warning_registry()` is called internally by assert_warns and
    clears all previous filters.
    """

    def test_warn(self):
        # A callable that both warns and returns a value, so we can verify
        # that assert_warns propagates the wrapped function's return value.
        def f():
            warnings.warn("yo")
            return 3

        # Test that assert_warns is not impacted by externally set
        # filters and is reset internally.
        # This is because `clean_warning_registry()` is called internally by
        # assert_warns and clears all previous filters.
        warnings.simplefilter("ignore", UserWarning)
        assert_equal(assert_warns(UserWarning, f), 3)
        # Test that the warning registry is empty after assert_warns
        assert_equal(sys.modules['warnings'].filters, [])
        # f does warn, so assert_no_warnings must fail on it ...
        assert_raises(AssertionError, assert_no_warnings, f)
        # ... while a silent callable passes and its return value is forwarded.
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)

    def test_warn_wrong_warning(self):
        # f emits a DeprecationWarning, not the UserWarning expected below.
        def f():
            warnings.warn("yo", DeprecationWarning)

        failed = False
        # Save the filters so they can be restored afterwards: assert_warns
        # clears them as a side effect.
        filters = sys.modules['warnings'].filters[:]
        try:
            try:
                # Should raise an AssertionError, since the wrong warning
                # category is emitted.
                assert_warns(UserWarning, f)
                failed = True
            except AssertionError:
                pass
        finally:
            sys.modules['warnings'].filters = filters
        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/statistics/multiple_histograms_side_by_side.py | 1 | 3066 | """
==========================================
Producing multiple histograms side by side
==========================================
This example plots horizontal histograms of different samples along
a categorical x-axis. Additionally, the histograms are plotted to
be symmetrical about their x-position, thus making them very similar
to violin plots.
To make this highly specialized plot, we can't use the standard ``hist``
method. Instead we use ``barh`` to draw the horizontal bars directly. The
vertical positions and lengths of the bars are computed via the
``np.histogram`` function. The histograms for all the samples are
computed using the same range (min and max values) and number of bins,
so that the bins for each sample are in the same vertical positions.
Selecting different bin counts and sizes can significantly affect the
shape of a histogram. The Astropy docs have a great section on how to
select these parameters:
http://docs.astropy.org/en/stable/visualization/histogram.html
"""
import numpy as np
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
    # We're running inside NodeBox: there __name__ is '__builtin__'
    # instead of '__main__'.
    import os
    import tempfile

    # Canvas geometry (pixels).
    W = 800
    inset = 20
    size(W, 600)  # NodeBox builtin: set the canvas size.
    # Reset any matplotlib state left over from a previous run.
    plt.cla()
    plt.clf()
    plt.close('all')

    def tempimage():
        # Create a closed, named temporary PNG file and return its path;
        # matplotlib will write the rendered figure into it.
        fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
        fname = fob.name
        fob.close()
        return fname

    # Paste position of the next figure on the NodeBox canvas;
    # figures are stacked vertically.
    imgx = 20
    imgy = 0

    def pltshow(plt, dpi=150):
        # Render the current matplotlib figure to a temporary PNG, place it
        # on the NodeBox canvas, then advance the paste position and grow
        # the canvas to fit.
        global imgx, imgy
        temppath = tempimage()
        plt.savefig(temppath, dpi=dpi)
        dx,dy = imagesize(temppath)  # NodeBox builtin: (width, height) of an image file.
        w = min(W,dx)
        image(temppath,imgx,imgy,width=w)  # NodeBox builtin: draw the image file.
        imgy = imgy + dy + 20
        os.remove(temppath)
        # NOTE(review): HEIGHT is not defined anywhere in this file --
        # presumably a NodeBox-provided global (current canvas height);
        # verify against the NodeBox runtime.
        size(W, HEIGHT+dy+40)
else:
    def pltshow(mplpyplot):
        # Plain Python: just show the figure interactively.
        mplpyplot.show()
# nodebox section end
# Reproducible random data.
np.random.seed(19680801)

number_of_bins = 20

# An example of three data sets to compare: Gaussian samples with different
# means.  The draws are made in this exact order so the RNG stream (and
# therefore the plot) stays reproducible.
number_of_data_points = 387
labels = ["A", "B", "C"]
data_sets = [np.random.normal(0, 1, number_of_data_points),
             np.random.normal(6, 1, number_of_data_points),
             np.random.normal(-3, 1, number_of_data_points)]

# Computed quantities to aid plotting.  All histograms are computed over the
# same range and bin count so their bins line up vertically.
hist_range = (np.min(data_sets), np.max(data_sets))
binned_data_sets = [
    np.histogram(d, range=hist_range, bins=number_of_bins)[0]
    for d in data_sets
]
# Space the categorical x positions far enough apart that neighbouring
# histograms cannot overlap.
binned_maximums = np.max(binned_data_sets, axis=1)
x_locations = np.arange(0, sum(binned_maximums), np.max(binned_maximums))

# The bin_edges are the same for all of the histograms.
bin_edges = np.linspace(hist_range[0], hist_range[1], number_of_bins + 1)
heights = np.diff(bin_edges)
# Each bar is centred on the midpoint of its bin.
# BUG FIX: the previous expression, 0.5 * (bin_edges + np.roll(bin_edges, 1))[:-1],
# was off by one -- np.roll wraps the *last* edge around to index 0, so the
# first "center" was the midpoint of the whole range rather than of the
# first bin, pairing every count with the wrong vertical position.
centers = bin_edges[:-1] + 0.5 * heights
# Cycle through and plot each histogram.
fig, ax = plt.subplots()
for x_loc, binned_data in zip(x_locations, binned_data_sets):
    # Centre each histogram about its categorical x position by shifting
    # the left bar edges half a count to the left; this makes the shape
    # symmetric about x_loc, similar to a violin plot.
    lefts = x_loc - 0.5 * binned_data
    ax.barh(centers, binned_data, height=heights, left=lefts)

# Label the categorical axis with one tick per sample.
ax.set_xticks(x_locations)
ax.set_xticklabels(labels)

ax.set_ylabel("Data values")
ax.set_xlabel("Data sets")

pltshow(plt)  # defined in the nodebox section above
| mit |
glennq/scikit-learn | examples/ensemble/plot_feature_transformation.py | 115 | 4327 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <betatim@gmail.com>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
n_estimator = 10
X, y = make_classification(n_samples=80000)
# Half of the data is held out for evaluating the ROC curves.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)

# It is important to train the ensemble of trees on a different subset
# of the training data than the linear regression model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
                                                            y_train,
                                                            test_size=0.5)

# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator,
    random_state=0)

rt_lm = LogisticRegression()
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
# Probability of the positive class, as required by roc_curve.
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)

# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
# rf.apply() yields the index of the leaf each sample lands in for every
# tree; one-hot encoding those indices gives the sparse embedding that the
# linear model is trained on.
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)

y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)

grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
# For gradient boosting, apply() returns a 3-D array (per the sklearn
# docs: samples x estimators x classes); [:, :, 0] selects the single
# output column of the binary problem.
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)

y_pred_grd_lm = grd_lm.predict_proba(
    grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)

# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)

# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)

# Full ROC curves for every model; the dashed diagonal is chance level.
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()

# The same curves, zoomed in on the top-left corner where they differ most.
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
paulbrodersen/netgraph | netgraph/_interactive_variants.py | 1 | 23119 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
InteractiveGraph variants.
"""
import numpy as np
import matplotlib.pyplot as plt
import itertools
from functools import partial
from matplotlib.patches import Rectangle
try:
from ._main import InteractiveGraph, BASE_SCALE
from ._line_supercover import line_supercover
except ValueError:
from _main import InteractiveGraph, BASE_SCALE
from _line_supercover import line_supercover
class InteractiveGrid(InteractiveGraph):
    """
    As InteractiveGraph, but node positions are fixed to a grid with unit spacing.

    Pressing 'g' will show the grid lines.
    Pressing 't' will show all tiles occupied by a node or crossed over by an edge.

    NOTE:
    -----
    For this class, the default netgraph node size and edge width are probably far too small for a medium sized graph.
    In my experience, for a graph with 20-50 nodes, a node size of 45 and an edge width of 15 tend to work well.
    Change your code accordingly. For example:

        g = InteractiveGrid(graph, node_size=45, edge_width=15)
    """

    def __init__(self, *args, **kwargs):
        super(InteractiveGrid, self).__init__(*args, **kwargs)

        # Toggle state and artist caches for the optional tile/grid overlays.
        self.show_tiles = False
        self.tiles = []

        self.gridlines = []
        self.show_grid = False

        self.fig.canvas.mpl_connect('key_press_event', self._on_key_toggle)

    def _get_node_positions(self, edge_list, **kwargs):
        """
        Initialise node positions to be on a grid with unit spacing.
        Prevent two points from occupying the same position.
        """
        node_positions = super(InteractiveGrid, self)._get_node_positions(edge_list, **kwargs)

        if len(kwargs) == 0:  # i.e. using defaults
            # check if any two points will occupy the same grid point
            unique_grid_positions = set([(int(x), int(y)) for (x, y) in node_positions.values()])
            if len(unique_grid_positions) < len(node_positions):
                # Rescale node positions such that each node occupies its own
                # grid point: walk the node pairs from closest to farthest,
                # find the first pair that still collides after rounding, and
                # stretch all positions by the inverse of that pair's
                # smallest coordinate difference.
                keys = np.array(list(node_positions.keys()))
                values = np.array(list(node_positions.values()))

                # pairwise euclidean distances; keep only the upper triangle
                # so each pair is considered once
                distances = np.sqrt(np.sum(np.power(values[None, :, :] - values[:, None, :], 2), axis=-1))
                distances = np.triu(distances, 1)
                sources, targets = np.where(distances)
                distances = distances[sources, targets]
                order = np.argsort(distances)

                for ii in order:
                    s = keys[sources[ii]]
                    t = keys[targets[ii]]
                    ps = node_positions[s]
                    pt = node_positions[t]
                    # NOTE: np.int was removed in recent numpy releases; the
                    # builtin int behaves identically for this truncation.
                    if np.all(np.isclose(ps.astype(int), pt.astype(int))):
                        minimum_difference = np.min(np.abs(ps - pt))
                        scale = 1. / minimum_difference
                        break

                node_positions = {k: (v * scale).astype(int) for k, v in node_positions.items()}

        return node_positions

    def _on_release(self, event):
        if self._currently_dragging:
            nodes = [self._draggable_artist_to_node[artist] for artist in self._selected_artists]

            # Snap the dragged nodes to the nearest grid point.
            for node in nodes:
                x, y = self.node_positions[node]
                x = int(round(x))
                y = int(round(y))
                self.node_positions[node] = (x, y)

            self._update_nodes(nodes)
            self._update_edges(nodes)

            # Refresh the overlays so they track the snapped positions.
            if self.show_grid:
                self._draw_grid()

            if self.show_tiles:
                self._draw_tiles(color='b', alpha=0.1)

            self.fig.canvas.draw_idle()

        super(InteractiveGrid, self)._on_release(event)

    def _draw_grid(self):
        # Draw dashed grid lines at every integer coordinate of the current
        # axis limits.
        xlim = [int(x) for x in self.ax.get_xlim()]
        for x in range(*xlim):
            line = self.ax.axvline(x, color='k', alpha=0.1, linestyle='--')
            self.gridlines.append(line)
        ylim = [int(y) for y in self.ax.get_ylim()]
        for y in range(*ylim):
            line = self.ax.axhline(y, color='k', alpha=0.1, linestyle='--')
            self.gridlines.append(line)

    def _remove_grid(self):
        for line in self.gridlines:
            line.remove()
        self.gridlines = []

    def _get_tile_positions(self):
        # find tiles through which each edge crosses using the line supercover
        # (an extension of Bresenham's algorithm)
        tile_positions = []
        for (v0, v1) in self.edge_list:
            x0, y0 = self.node_positions[v0]
            x1, y1 = self.node_positions[v1]
            x0 = int(round(x0))
            y0 = int(round(y0))
            x1 = int(round(x1))
            y1 = int(round(y1))
            x, y = line_supercover(x0, y0, x1, y1)
            tile_positions.extend(zip(x.tolist(), y.tolist()))
        # remove duplicates
        tile_positions = list(set(tile_positions))
        return tile_positions

    def _draw_tiles(self, *args, **kwargs):
        # remove old tiles:
        # TODO: only remove tiles that are no longer in the set of positions
        self._remove_tiles()
        dx = 1.  # TODO: generalise to arbitrary tile sizes
        dy = 1.
        positions = self._get_tile_positions()
        for (x, y) in positions:
            # Rectangle is anchored at its lower-left corner; shift by half
            # a tile so the tile is centred on the grid point.
            x -= dx / 2.
            y -= dy / 2.
            rect = Rectangle((x, y), dx, dy, *args, **kwargs)
            self.tiles.append(rect)
            self.ax.add_artist(rect)

    def _remove_tiles(self):
        for tile in self.tiles:
            tile.remove()
        self.tiles = []

    def _on_key_toggle(self, event):
        # Toggle the tile ('t') and grid ('g') overlays.
        # BUG FIX: the original compared strings with `is`, which relies on
        # CPython string interning (and raises a SyntaxWarning on 3.8+);
        # `==` is the correct comparison.
        if event.key == 't':
            if not self.show_tiles:
                self.show_tiles = True
                self._draw_tiles(color='b', alpha=0.1)
            else:
                self.show_tiles = False
                self._remove_tiles()
        if event.key == 'g':
            if not self.show_grid:
                self.show_grid = True
                self._draw_grid()
            else:
                self.show_grid = False
                self._remove_grid()
        self.fig.canvas.draw_idle()
def demo_InteractiveGrid():
    """Create a small, fully connected demo graph on a 10 x 5 canvas."""
    total_nodes = 4

    # Upper-triangular all-ones matrix: exactly one edge per node pair.
    adjacency = np.triu(np.ones((total_nodes, total_nodes)), 1)

    # Random initial positions, stretched to the canvas extent.
    coordinates = np.random.rand(total_nodes, 2)
    coordinates[:, 0] *= 10
    coordinates[:, 1] *= 5
    positions = dict(enumerate(coordinates))

    fig, ax = plt.subplots()
    ax.set(xlim=[0, 10], ylim=[0, 5])
    return InteractiveGrid(adjacency, positions, ax=ax, node_size=15.)
class InteractiveHypergraph(InteractiveGraph):
    """
    As InteractiveGraph, but nodes can be combined into a hypernode.

    Pressing 'c' will fuse selected node artists into a single node.
    """

    def __init__(self, *args, **kwargs):
        super(InteractiveHypergraph, self).__init__(*args, **kwargs)

        # bookkeeping: map each hypernode to the nodes it was fused from
        self.hypernode_to_nodes = dict()

        # keep the draw arguments around for redrawing after fusion
        self.kwargs = kwargs

        # set up ability to trigger fusion by key-press
        self.fig.canvas.mpl_connect('key_press_event', self._on_key_group_ungroup)

    def _on_key_group_ungroup(self, event):
        """Fuse the currently selected nodes into a hypernode on 'c'."""
        if event.key == 'c':
            if len(self._selected_artists) > 1:
                nodes = [self._draggable_artist_to_node[artist] for artist in self._selected_artists]
                self._deselect_all_artists()
                self._combine(nodes)
            else:
                print("Only a single artist selected! Nothing to combine.")

    def _combine(self, nodes):
        """
        Fuse `nodes` into a single hypernode: create the new node and its
        edges, then remove the obsolete nodes/edges and redraw.
        """
        # create hypernode ID
        hypernode = tuple(set(nodes))

        # bookkeeping
        self.hypernode_to_nodes[hypernode] = nodes

        # create hypernode
        self._create_hypernode(nodes, hypernode)

        # create corresponding edges; new_edge_list is an element-wise
        # remapping of edge_list, so the two filtered lists below are
        # parallel (same index order)
        new_edge_list = self._transfer_edges_to_hypernode(self.edge_list, nodes, hypernode)
        new_edges = [edge for edge in new_edge_list if not edge in self.edge_list]
        old_edges = [edge for edge in self.edge_list if not edge in new_edge_list]
        self._create_hypernode_edges(old_edges, new_edges)

        # update graph structure
        self.edge_list = list(set(new_edge_list))

        # clean up data structures and remove obsolete artists
        for edge in old_edges:
            self._delete_edge(edge)
        for node in nodes:
            self._delete_node(node)

        # draw new state
        self.fig.canvas.draw_idle()

    def _create_hypernode(self, nodes, hypernode, combine_properties=partial(np.mean, axis=0)):
        """
        Combine properties of nodes that will form hypernode.
        Draw hypernode.
        """
        # combine node / node artist properties
        pos = combine_properties([self.node_positions[node] for node in nodes])
        node_size = combine_properties([self.node_edge_artists[node].radius for node in nodes])
        node_edge_width = combine_properties([self.node_face_artists[node].radius for node in nodes]); node_edge_width = node_size - node_edge_width
        node_color = combine_properties([self.node_face_artists[node].get_facecolor() for node in nodes])  # NB: this only makes sense for a gray cmap
        node_edge_color = combine_properties([self.node_edge_artists[node].get_facecolor() for node in nodes])  # NB: this only makes sense for a gray cmap
        node_alpha = combine_properties([self.node_face_artists[node].get_alpha() for node in nodes])
        node_edge_alpha = combine_properties([self.node_edge_artists[node].get_alpha() for node in nodes])

        # update data
        self.node_positions[hypernode] = pos
        self._base_alpha[hypernode] = node_alpha

        # draw hypernode; the argument has to be a dict literal, not dict()
        self.draw_nodes({hypernode:pos},
                        node_size=node_size / BASE_SCALE,
                        node_edge_width=node_edge_width / BASE_SCALE,
                        node_color=node_color,
                        node_edge_color=node_edge_color,
                        node_alpha=node_alpha,
                        node_edge_alpha=node_edge_alpha,
                        ax=self.ax)

        # add to draggable artists
        hypernode_artist = self.node_face_artists[hypernode]
        self._draggable_artists.append(hypernode_artist)
        self._node_to_draggable_artist[hypernode] = hypernode_artist
        self._draggable_artist_to_node[hypernode_artist] = hypernode
        self._base_alpha[hypernode_artist] = hypernode_artist.get_alpha()

        if hasattr(self, 'node_labels'):
            # TODO: querying the user via `input` results in an unresponsive
            # plot and terminal; until fixed, join the constituent labels.
            # BUG FIX: the original read `self.node_label[node]`, but the
            # attribute is `node_labels` (as checked by hasattr above and
            # written below) -- the old code raised an AttributeError.
            hypernode_label = [self.node_labels[node] for node in nodes]
            hypernode_label = ',\n'.join(hypernode_label)
            self.node_labels[hypernode] = hypernode_label
            if hasattr(self, 'node_label_font_size'):
                self.draw_node_labels({hypernode:hypernode_label}, {hypernode:pos}, node_label_font_size=self.node_label_font_size)
            else:
                self.draw_node_labels({hypernode:hypernode_label}, {hypernode:pos})

    def _delete_node(self, node):
        # Hide the artists rather than removing them, and unlink the node
        # from the draggable-artist bookkeeping.
        self.node_face_artists[node].set_visible(False)
        self.node_edge_artists[node].set_visible(False)
        if hasattr(self, 'node_labels'):
            self.node_label_artists[node].set_visible(False)

        artist = self._node_to_draggable_artist[node]
        del self._draggable_artist_to_node[artist]
        del self._node_to_draggable_artist[node]

    def _transfer_edges_to_hypernode(self, edge_list, nodes, hypernode):
        """
        Replace every occurrence of a node in `nodes` with `hypernode`.

        Note:
        - does not remove self-loops
        - may contain duplicate edges after fusion
        """
        new_edge_list = []
        for (source, target) in edge_list:
            if source in nodes:
                source = hypernode
            if target in nodes:
                target = hypernode
            new_edge_list.append((source, target))
        return new_edge_list

    def _create_hypernode_edges(self, old_edges, new_edges, combine_properties=partial(np.mean, axis=0)):
        """
        For each unique new edge, take corresponding old edges.
        Create new edge artists based on properties of corresponding old edge artists.
        """
        # group the old edges by the new edge that replaces them;
        # old_edges and new_edges are parallel lists (same index order)
        new_to_old = dict()
        for new_edge, old_edge in zip(new_edges, old_edges):
            try:
                new_to_old[new_edge].append(old_edge)
            except KeyError:
                new_to_old[new_edge] = [old_edge]

        # combine edge properties
        edge_width = dict()
        edge_color = dict()
        edge_alpha = dict()
        for new_edge, old_edges in new_to_old.items():
            # filter old_edges: self-loops have no edge artists
            old_edges = [(source, target) for (source, target) in old_edges if source != target]

            # combine properties
            edge_width[new_edge] = combine_properties([self.edge_artists[edge].width for edge in old_edges]) / BASE_SCALE
            edge_color[new_edge] = combine_properties([self.edge_artists[edge].get_facecolor() for edge in old_edges])  # NB: this only makes sense for a gray cmap; combine weights instead?
            # edge_alpha[new_edge] = combine_properties([self.edge_artists[edge].get_alpha() for edge in old_edges])  # TODO: .get_alpha() returns None?

        # remove duplicates in new_edges
        new_edges = new_to_old.keys()

        # don't plot self-loops
        new_edges = [(source, target) for (source, target) in new_edges if source != target]

        self.draw_edges(new_edges,
                        node_positions=self.node_positions,
                        edge_width=edge_width,
                        edge_color=edge_color,
                        ax=self.ax)

    def _delete_edge(self, edge):
        # TODO: also remove the edge's entries from edge_weight / edge_color /
        # edge_zorder / edge_labels once those structures support hyperedge
        # keys (deleting them here raises KeyError for hyperedges).
        source, target = edge
        if source != target:  # i.e. skip self-loops as they have no corresponding artist
            self.edge_artists[edge].remove()
            del self.edge_artists[edge]
class InteractivelyConstructDestroyGraph(InteractiveGraph):
    """
    Interactively add and remove nodes and edges.

    Pressing 'A' will add a node to the graph.
    Pressing 'D' will remove a selected node.
    Pressing 'a' will add edges between all selected nodes.
    Pressing 'd' will remove edges between all selected nodes.
    Pressing 'r' will reverse the direction of edges between all selected nodes.

    See also:
    ---------
    InteractiveGraph, Graph, draw
    """

    def __init__(self, *args, **kwargs):
        super(InteractivelyConstructDestroyGraph, self).__init__(*args, **kwargs)
        # link node/edge construction/destruction to key presses
        self.fig.canvas.mpl_connect('key_press_event', self._on_key_add_or_destroy)

    def _on_key_add_or_destroy(self, event):
        # Dispatch on the pressed key; see the class docstring for the mapping.
        if event.key == 'A':
            self._add_node(event)
        elif event.key == 'a':
            self._add_edges()
        elif event.key == 'D':
            self._delete_nodes()
        elif event.key == 'd':
            self._delete_edges()
        elif event.key == 'r':
            self._reverse_edges()
        else:
            pass
        self.fig.canvas.draw_idle()

    def _add_node(self, event):
        # create node ID; use smallest unused int
        node = 0
        while node in self.node_positions.keys():
            node += 1

        # get position of cursor; place node at cursor position
        pos = event.xdata, event.ydata
        self.node_positions[node] = pos

        # draw node
        # NOTE(review): `self.kwargs` is not set in this class's __init__;
        # presumably the InteractiveGraph base class stores it -- verify.
        self.draw_nodes({node:pos}, **self.kwargs)

        # add to draggable artists so the new node can be moved
        node_artist = self.node_face_artists[node]
        self._draggable_artists.append(node_artist)
        self._node_to_draggable_artist[node] = node_artist
        self._draggable_artist_to_node[node_artist] = node
        self._base_alpha[node_artist] = node_artist.get_alpha()

    def _add_edges(self):
        # translate selected artists into nodes
        nodes = [self._draggable_artist_to_node[artist] for artist in self._selected_artists]

        # iterate over all pairs of selected nodes and create edges between nodes
        # that are not already connected (one edge per unordered pair)
        # new_edges = [(source, target) for source, target in itertools.permutations(nodes, 2) if (source != target) and (not (source, target) in self.edge_list)] # bidirectional
        new_edges = [(source, target) for source, target in itertools.combinations(nodes, 2) if (source != target) and (not (source, target) in self.edge_list)] # unidirectional

        # add new edges to edge_list and corresponding artists to canvas
        self.edge_list.extend(new_edges)
        self.draw_edges(self.edge_list, node_positions=self.node_positions, **self.kwargs)

    def _delete_nodes(self):
        # translate selected artists into nodes
        nodes = [self._draggable_artist_to_node[artist] for artist in self._selected_artists]

        # delete edges to and from selected nodes first, so no artist is orphaned
        edges = [(source, target) for (source, target) in self.edge_list if ((source in nodes) or (target in nodes))]
        for edge in edges:
            self._delete_edge(edge)

        # delete nodes
        for node in nodes:
            self._delete_node(node)

    def _delete_node(self, node):
        # c.f. InteractiveHypergraph !
        # Remove the node's label, draggable bookkeeping, and artists.
        if hasattr(self, 'node_labels'):
            self.node_label_artists[node].remove()
            del self.node_label_artists[node]

        artist = self._node_to_draggable_artist[node]
        del self._draggable_artist_to_node[artist]
        # del self._node_to_draggable_artist[node] # -> self.node_face_artists[node].remove()

        self.node_face_artists[node].remove()
        self.node_edge_artists[node].remove()
        del self.node_face_artists[node]
        del self.node_edge_artists[node]

    def _delete_edges(self):
        nodes = [self._draggable_artist_to_node[artist] for artist in self._selected_artists]

        # delete edges between selected nodes (both endpoints must be selected)
        edges = [(source, target) for (source, target) in self.edge_list if ((source in nodes) and (target in nodes))]
        for edge in edges:
            self._delete_edge(edge)

    def _delete_edge(self, edge):
        # c.f. InteractiveHypergraph !
        # delete attributes of edge (only when stored per-edge in dicts)
        if hasattr(self, 'edge_weight'):
            if isinstance(self.edge_weight, dict):
                if edge in self.edge_weight:
                    del self.edge_weight[edge]

        if hasattr(self, 'edge_color'):
            if isinstance(self.edge_color, dict):
                if edge in self.edge_color:
                    del self.edge_color[edge]

        if hasattr(self, 'edge_zorder'):
            if isinstance(self.edge_zorder, dict):
                if edge in self.edge_zorder:
                    del self.edge_zorder[edge]

        # delete artists
        source, target = edge
        if source != target: # i.e. skip self-loops as they have no corresponding artist
            self.edge_artists[edge].remove()
            del self.edge_artists[edge]

        if hasattr(self, 'edge_labels'):
            del self.edge_labels[edge]
            self.edge_label_artists[edge].remove()
            del self.edge_label_artists[edge]

        # delete edge
        self.edge_list.remove(edge)

    def _reverse_edges(self):
        # translate selected artists into nodes
        nodes = [self._draggable_artist_to_node[artist] for artist in self._selected_artists]

        # grab all edges between selected nodes
        old_edges = [(source, target) for source, target in itertools.permutations(nodes, 2) if (source, target) in self.edge_list]

        # reverse edges
        new_edges = [edge[::-1] for edge in old_edges]

        # copy attributes from each old edge onto its reversed counterpart
        for old_edge, new_edge in zip(old_edges, new_edges):
            self._copy_edge_attributes(old_edge, new_edge)

        # remove edges that are being replaced
        for edge in old_edges:
            self._delete_edge(edge)

        # add new edges to edge_list and corresponding artists to canvas
        self.edge_list.extend(new_edges)
        self.draw_edges(self.edge_list, node_positions=self.node_positions, **self.kwargs)

    def _copy_edge_attributes(self, source, target):
        # NOTE: `source` and `target` are *edges* here (the attribute donor
        # and recipient), not node endpoints.
        if hasattr(self, 'edge_weight'):
            if isinstance(self.edge_weight, dict):
                if source in self.edge_weight:
                    self.edge_weight[target] = self.edge_weight[source]

        if hasattr(self, 'edge_color'):
            if isinstance(self.edge_color, dict):
                if source in self.edge_color:
                    self.edge_color[target] = self.edge_color[source]

        if hasattr(self, 'edge_zorder'):
            if isinstance(self.edge_zorder, dict):
                if source in self.edge_zorder:
                    self.edge_zorder[target] = self.edge_zorder[source]
| gpl-3.0 |
GGoussar/scikit-image | doc/examples/transform/plot_register_translation.py | 14 | 2717 | """
=====================================
Cross-Correlation (Phase Correlation)
=====================================
In this example, we use phase correlation to identify the relative shift
between two similar-sized images.
The ``register_translation`` function uses cross-correlation in Fourier space,
optionally employing an upsampled matrix-multiplication DFT to achieve
arbitrary subpixel precision. [1]_
.. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms," Optics Letters 33,
156-158 (2008).
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import register_translation
from skimage.feature.register_translation import _upsampled_dft
from scipy.ndimage import fourier_shift
image = data.camera()
shift = (-2.4, 1.32)
# Apply a known (-2.4, 1.32) pixel offset relative to the reference image
# by shifting in Fourier space (allows subpixel shifts).
offset_image = fourier_shift(np.fft.fftn(image), shift)
offset_image = np.fft.ifftn(offset_image)
print("Known offset (y, x):")
print(shift)

# pixel precision first
shift, error, diffphase = register_translation(image, offset_image)

# NOTE(review): adjustable='box-forced' was removed in later matplotlib
# releases (use adjustable='box' there); kept as-is for the matplotlib
# version this example targets.
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3)

ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')

ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')

# View the output of a cross-correlation to show what the algorithm is
# doing behind the scenes
image_product = np.fft.fft2(image) * np.fft.fft2(offset_image).conj()
cc_image = np.fft.fftshift(np.fft.ifft2(image_product))
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Cross-correlation")

plt.show()

print("Detected pixel offset (y, x):")
print(shift)

# subpixel precision: upsample the correlation peak by a factor of 100
shift, error, diffphase = register_translation(image, offset_image, 100)

fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3)

ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')

ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')

# Calculate the upsampled DFT, again to show what the algorithm is doing
# behind the scenes. Constants correspond to calculated values in routine.
# See source code for details.
cc_image = _upsampled_dft(image_product, 150, 100, (shift*100)+75).conj()
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Supersampled XC sub-area")

plt.show()

print("Detected subpixel offset (y, x):")
print(shift)
| bsd-3-clause |
ahealy19/F-IDE-2016 | make_fig1.py | 1 | 1660 | import numpy as np
import matplotlib.pyplot as plt
from pandas import DataFrame
"""
read a csv file and render a stacked bar chart showing each prover's
results for 60 second timeout
Andrew Healy, Aug. 2016
"""
import pandas as pd

# Load the results table; the first column (prover names) becomes the index.
# BUG FIX: DataFrame.from_csv and .ix were removed from modern pandas;
# read_csv(index_col=0) and iloc are the supported equivalents.  xrange is
# Python-2-only and is replaced by range.
df = pd.read_csv('fig1_data.csv', index_col=0)

provers = df.index
N = len(provers)
valids = list(df['Valid'])
unknown = list(df['Unknown'])
timeout = list(df['Timeout'])
failure = list(df['Failure'])

ind = np.arange(N)    # the x locations for the groups
width = 0.35          # the width of the bars

# Stack the four outcome categories on top of one another, light to dark.
p1 = plt.bar(ind, valids, width, color='1.0')
p2 = plt.bar(ind, unknown, width, color='0.55',
             bottom=valids)
bottom = [valids[i] + unknown[i] for i in range(N)]
p3 = plt.bar(ind, timeout, width, bottom=bottom, color='0.8')
bottom = [bottom[i] + timeout[i] for i in range(N)]
p4 = plt.bar(ind, failure, width, bottom=bottom, color='0.3')

plt.ylabel('Number of proof obligations')
plt.xticks(ind, provers, rotation=30)
# assumes every prover row sums to the same total number of proof
# obligations, so the first row's sum gives the y-axis extent -- TODO confirm
plt.yticks(np.arange(0, df.iloc[0].sum(), 100))
plt.legend((p1[0], p2[0], p3[0], p4[0]),
           ('Valid', 'Unknown', 'Timeout', 'Failure'),
           loc='upper center', ncol=4,
           bbox_to_anchor=(0.5, 1.05))

ind = np.arange(N)
# Annotate each segment with its count, placed just to the right of the bar
# and vertically centred within the segment (running sums give the middles).
for i, v in enumerate(valids):
    plt.annotate(str(v), xy=(ind[i] + width + 0.05, v / 2. - 0.5))
for i, u in enumerate(unknown):
    plt.annotate(str(u), xy=(ind[i] + width + 0.05, valids[i] + u / 2. - 0.5))
for i, t in enumerate(timeout):
    plt.annotate(str(t), xy=(ind[i] + width + 0.05, valids[i] + unknown[i] + t / 2. - 0.5))
for i, f in enumerate(failure):
    plt.annotate(str(f), xy=(ind[i] + width + 0.05, valids[i] + unknown[i] + timeout[i] + f / 2. - 0.5))

plt.savefig('paper/barcharts.pdf', bbox_inches='tight')
klaus385/openpilot | panda/tests/tucan_loopback.py | 2 | 3331 | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import time
import random
import argparse
from hexdump import hexdump
from itertools import permutations
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
from panda import Panda
def get_test_string():
    """Return a 14-byte test payload: the literal prefix b"test" followed
    by 10 cryptographically random bytes."""
    random_tail = os.urandom(10)
    return b"test" + random_tail
def run_test(sleep_duration):
    """Discover attached pandas and run one loopback test pass.

    sleep_duration: seconds to sleep between sub-tests (forwarded on).
    Aborts (assert False) when no panda is found on USB.
    """
    pandas = Panda.list()
    print(pandas)
    if len(pandas) == 0:
        print("NO PANDAS")
        assert False
    if len(pandas) == 1:
        # if we only have one on USB, assume the other is on wifi
        pandas.append("WIFI")
    run_test_w_pandas(pandas, sleep_duration)
def run_test_w_pandas(pandas, sleep_duration):
    """Loopback-test every ordered pair of pandas: health check, K/L line
    (buses 2 and 3), then CAN (buses 0 and 1).

    pandas: list of panda serials (the string "WIFI" selects the wifi panda)
    sleep_duration: seconds to sleep after each bus test
    """
    h = list(map(lambda x: Panda(x), pandas))
    print("H", h)
    for hh in h:
        hh.set_controls_allowed(True)
    # test both directions
    for ho in permutations(range(len(h)), r=2):
        print("***************** TESTING", ho)
        panda0, panda1 = h[ho[0]], h[ho[1]]
        # The wifi panda cannot send CAN data in this test, so never use it
        # as the sender.
        if(panda0._serial == "WIFI"):
            print(" *** Can not send can data over wifi panda. Skipping! ***")
            continue
        # **** test health packet ****
        print("health", ho[0], h[ho[0]].health())
        # **** test K/L line loopback ****
        for bus in [2,3]:
            # flush the output
            h[ho[1]].kline_drain(bus=bus)
            # send the characters
            st = get_test_string()
            # Frame the payload: 0xAA marker + total-length byte + payload.
            st = b"\xaa"+chr(len(st)+3).encode()+st
            h[ho[0]].kline_send(st, bus=bus, checksum=False)
            # check for receive
            ret = h[ho[1]].kline_drain(bus=bus)
            print("ST Data:")
            hexdump(st)
            print("RET Data:")
            hexdump(ret)
            assert st == ret
            print("K/L pass", bus, ho, "\n")
            time.sleep(sleep_duration)
        # **** test can line loopback ****
        # for bus, gmlan in [(0, None), (1, False), (2, False), (1, True), (2, True)]:
        for bus, gmlan in [(0, None), (1, None)]:
            print("\ntest can", bus)
            # flush
            cans_echo = panda0.can_recv()
            cans_loop = panda1.can_recv()
            if gmlan is not None:
                panda0.set_gmlan(gmlan, bus)
                panda1.set_gmlan(gmlan, bus)
            # send the characters
            # pick addresses high enough to not conflict with honda code
            at = random.randint(1024, 2000)
            st = get_test_string()[0:8]
            panda0.can_send(at, st, bus)
            time.sleep(0.1)
            # check for receive
            cans_echo = panda0.can_recv()
            cans_loop = panda1.can_recv()
            print("Bus", bus, "echo", cans_echo, "loop", cans_loop)
            # Exactly one frame each side, with matching address and payload.
            assert len(cans_echo) == 1
            assert len(cans_loop) == 1
            assert cans_echo[0][0] == at
            assert cans_loop[0][0] == at
            assert cans_echo[0][2] == st
            assert cans_loop[0][2] == st
            # Expected: sender's own echo reports the bus with the high bit
            # set (0x80 | bus); the receiver reports the plain bus number.
            assert cans_echo[0][3] == 0x80 | bus
            if cans_loop[0][3] != bus:
                print("EXPECTED %d GOT %d" % (bus, cans_loop[0][3]))
            assert cans_loop[0][3] == bus
            print("CAN pass", bus, ho)
            time.sleep(sleep_duration)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", type=int, help="Number of test iterations to run")
    parser.add_argument("-sleep", type=int, help="Sleep time between tests", default=0)
    args = parser.parse_args()
    # Without -n, loop forever; with -n, run exactly that many iterations.
    if args.n is None:
        while True:
            run_test(sleep_duration=args.sleep)
    else:
        for i in range(args.n):
            run_test(sleep_duration=args.sleep)
| mit |
M-R-Houghton/euroscipy_2015 | bokeh/bokeh/charts/builder/tests/test_dot_builder.py | 33 | 3939 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Dot
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestDot(unittest.TestCase):
    """Tests for the Dot chart builder's internal data tables."""

    def test_supported_input(self):
        """Builder output must be identical for keyed inputs (OrderedDict,
        DataFrame) and analogous for unkeyed inputs (list, ndarray)."""
        xyvalues = OrderedDict()
        xyvalues['python']=[2, 5]
        xyvalues['pypy']=[12, 40]
        xyvalues['jython']=[22, 30]
        xyvaluesdf = pd.DataFrame(xyvalues, index=['lists', 'loops'])
        cat = ['lists', 'loops']
        # Expected per-series category positions: each series sits at a fixed
        # fractional offset (0.25 / 0.5 / 0.75) inside its category slot.
        catjython = ['lists:0.75', 'loops:0.75']
        catpypy = ['lists:0.5', 'loops:0.5']
        catpython = ['lists:0.25', 'loops:0.25']
        python = seg_top_python = [2, 5]
        pypy = seg_top_pypy = [12, 40]
        jython = seg_top_jython = [22, 30]
        zero = [0, 0]
        # Keyed inputs: group names come from the dict/DataFrame keys.
        for i, _xy in enumerate([xyvalues, xyvaluesdf]):
            hm = create_chart(Dot, _xy, cat=cat)
            builder = hm._builders[0]
            self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
            assert_array_equal(builder._data['cat'], cat)
            assert_array_equal(builder._data['catjython'], catjython)
            assert_array_equal(builder._data['catpython'], catpython)
            assert_array_equal(builder._data['catpypy'], catpypy)
            assert_array_equal(builder._data['python'], python)
            assert_array_equal(builder._data['jython'], jython)
            assert_array_equal(builder._data['pypy'], pypy)
            assert_array_equal(builder._data['seg_top_python'], seg_top_python)
            assert_array_equal(builder._data['seg_top_jython'], seg_top_jython)
            assert_array_equal(builder._data['seg_top_pypy'], seg_top_pypy)
            assert_array_equal(builder._data['z_python'], zero)
            assert_array_equal(builder._data['z_pypy'], zero)
            assert_array_equal(builder._data['z_jython'], zero)
            assert_array_equal(builder._data['zero'], zero)
        # Unkeyed inputs: groups are auto-named '0', '1', '2' in input order.
        lvalues = [[2, 5], [12, 40], [22, 30]]
        for _xy in [lvalues, np.array(lvalues)]:
            hm = create_chart(Dot, _xy, cat=cat)
            builder = hm._builders[0]
            self.assertEqual(builder._groups, ['0', '1', '2'])
            assert_array_equal(builder._data['cat'], cat)
            assert_array_equal(builder._data['cat0'], catpython)
            assert_array_equal(builder._data['cat1'], catpypy)
            assert_array_equal(builder._data['cat2'], catjython)
            assert_array_equal(builder._data['0'], python)
            assert_array_equal(builder._data['1'], pypy)
            assert_array_equal(builder._data['2'], jython)
            assert_array_equal(builder._data['seg_top_0'], seg_top_python)
            assert_array_equal(builder._data['seg_top_1'], seg_top_pypy)
            assert_array_equal(builder._data['seg_top_2'], seg_top_jython)
            assert_array_equal(builder._data['z_0'], zero)
            assert_array_equal(builder._data['z_1'], zero)
            assert_array_equal(builder._data['z_2'], zero)
            assert_array_equal(builder._data['zero'], zero)
| mit |
cjayb/mne-python | examples/inverse/plot_source_space_snr.py | 3 | 3461 | # -*- coding: utf-8 -*-
"""
===============================
Computing source space SNR
===============================
This example shows how to compute and plot source space SNR as in [1]_.
"""
# Author: Padma Sundaram <tottochan@gmail.com>
# Kaisu Lankinen <klankinen@mgh.harvard.edu>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 2
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
import numpy as np
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
# Read data
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
evoked = mne.read_evokeds(fname_evoked, condition='Left Auditory',
                          baseline=(None, 0))
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
fwd = mne.read_forward_solution(fname_fwd)
cov = mne.read_cov(fname_cov)
###############################################################################
# MEG-EEG
# -------
# Read inverse operator:
inv_op = make_inverse_operator(evoked.info, fwd, cov, fixed=True, verbose=True)
# Calculate MNE:
# Regularization parameter lambda2 = 1 / SNR^2, with an assumed SNR of 3.
snr = 3.0
lambda2 = 1.0 / snr ** 2
stc = apply_inverse(evoked, inv_op, lambda2, 'MNE', verbose=True)
# Calculate SNR in source space:
snr_stc = stc.estimate_snr(evoked.info, fwd, cov)
# Plot an average SNR across source points over time:
ave = np.mean(snr_stc.data, axis=0)
fig, ax = plt.subplots()
ax.plot(evoked.times, ave)
ax.set(xlabel='Time (sec)', ylabel='SNR MEG-EEG')
fig.tight_layout()
# Find time point of maximum SNR:
maxidx = np.argmax(ave)
# Plot SNR on source space at the time point of maximum SNR:
# These kwargs are reused for the EEG-only and MEG-only plots below so all
# three figures share the same view and color scale.
kwargs = dict(initial_time=evoked.times[maxidx], hemi='split',
              views=['lat', 'med'], subjects_dir=subjects_dir, size=(600, 600),
              clim=dict(kind='value', lims=(-100, -70, -40)),
              transparent=True, colormap='viridis')
snr_stc.plot(**kwargs)
###############################################################################
# EEG
# ---
# Next we do the same for EEG and plot the result on the cortex:
evoked_eeg = evoked.copy().pick_types(eeg=True, meg=False)
inv_op_eeg = make_inverse_operator(evoked_eeg.info, fwd, cov, fixed=True,
                                   verbose=True)
stc_eeg = apply_inverse(evoked_eeg, inv_op_eeg, lambda2, 'MNE', verbose=True)
snr_stc_eeg = stc_eeg.estimate_snr(evoked_eeg.info, fwd, cov)
snr_stc_eeg.plot(**kwargs)
###############################################################################
# MEG
# ---
# Finally we do this for MEG:
evoked_meg = evoked.copy().pick_types(eeg=False, meg=True)
inv_op_meg = make_inverse_operator(evoked_meg.info, fwd, cov, fixed=True,
                                   verbose=True)
stc_meg = apply_inverse(evoked_meg, inv_op_meg, lambda2, 'MNE', verbose=True)
snr_stc_meg = stc_meg.estimate_snr(evoked_meg.info, fwd, cov)
snr_stc_meg.plot(**kwargs)
##############################################################################
# References
# ----------
# .. [1] Goldenholz, D. M., Ahlfors, S. P., Hämäläinen, M. S., Sharon, D.,
#        Ishitobi, M., Vaina, L. M., & Stufflebeam, S. M. (2009). Mapping the
#        Signal-To-Noise-Ratios of Cortical Sources in Magnetoencephalography
#        and Electroencephalography. Human Brain Mapping, 30(4), 1077–1086.
#        doi:10.1002/hbm.20571
| bsd-3-clause |
syedjafri/ThinkStats2 | code/analytic.py | 69 | 6265 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import numpy as np
import pandas
import nsfg
import thinkplot
import thinkstats2
def ParetoMedian(xmin, alpha):
    """Computes the median of a Pareto distribution.

    xmin: minimum value (scale parameter)
    alpha: shape parameter

    returns: median, xmin * 2**(1/alpha)

    The exponent uses float division so that integer arguments work under
    Python 2 as well: the original `1/alpha` truncated to 0 for any integer
    alpha > 1, silently returning xmin.
    """
    return xmin * pow(2, 1.0 / alpha)
def MakeExpoCdf():
    """Generates a plot of the exponential CDF.

    Writes analytic_expo_cdf.* via thinkplot.Save.
    """
    thinkplot.PrePlot(3)
    # One curve per rate parameter lambda, rendered on x in [0, 3].
    for lam in [2.0, 1, 0.5]:
        xs, ps = thinkstats2.RenderExpoCdf(lam, 0, 3.0, 50)
        label = r'$\lambda=%g$' % lam
        thinkplot.Plot(xs, ps, label=label)
    thinkplot.Save(root='analytic_expo_cdf',
                   title='Exponential CDF',
                   xlabel='x',
                   ylabel='CDF')
def ReadBabyBoom(filename='babyboom.dat'):
    """Reads the babyboom data.

    filename: string path to the fixed-width data file

    returns: DataFrame with columns time, sex, weight_g, minutes
    """
    # Fixed-width column spec: (name, start column, end column, type),
    # with 1-based column indices (index_base=1 below).
    var_info = [
        ('time', 1, 8, int),
        ('sex', 9, 16, int),
        ('weight_g', 17, 24, int),
        ('minutes', 25, 32, int),
    ]
    columns = ['name', 'start', 'end', 'type']
    variables = pandas.DataFrame(var_info, columns=columns)
    # Shift 'end' by one -- presumably the reader treats end as exclusive;
    # TODO confirm against thinkstats2.FixedWidthVariables.
    variables.end += 1
    dct = thinkstats2.FixedWidthVariables(variables, index_base=1)
    # Skip the 59-line file header before the data rows.
    df = dct.ReadFixedWidth(filename, skiprows=59)
    return df
def MakeBabyBoom():
    """Plot CDF of interarrival time on log and linear scales.

    Writes analytic_interarrivals.* via thinkplot.Save.
    """
    # compute the interarrival times
    df = ReadBabyBoom()
    diffs = df.minutes.diff()
    cdf = thinkstats2.Cdf(diffs, label='actual')
    # Left panel: CDF on linear scales.
    thinkplot.PrePlot(cols=2)
    thinkplot.Cdf(cdf)
    thinkplot.Config(xlabel='minutes',
                     ylabel='CDF',
                     legend=False)
    # Right panel: complementary CDF on a log y-scale; a straight line on
    # this scale is the signature of an exponential distribution.
    thinkplot.SubPlot(2)
    thinkplot.Cdf(cdf, complement=True)
    thinkplot.Config(xlabel='minutes',
                     ylabel='CCDF',
                     yscale='log',
                     legend=False)
    thinkplot.Save(root='analytic_interarrivals',
                   legend=False)
def MakeParetoCdf():
    """Generates a plot of the Pareto CDF.

    Writes analytic_pareto_cdf.* via thinkplot.Save.
    """
    xmin = 0.5
    thinkplot.PrePlot(3)
    # One curve per shape parameter alpha, rendered on x in [0, 10].
    for alpha in [2.0, 1.0, 0.5]:
        xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 10.0, n=100)
        thinkplot.Plot(xs, ps, label=r'$\alpha=%g$' % alpha)
    thinkplot.Save(root='analytic_pareto_cdf',
                   title='Pareto CDF',
                   xlabel='x',
                   ylabel='CDF')
def MakeParetoCdf2():
    """Generates a plot of the CDF of height in Pareto World.

    Writes analytic_pareto_height.* via thinkplot.Save.
    """
    # Hypothetical height distribution: xmin = 100 cm, alpha = 1.7.
    xmin = 100
    alpha = 1.7
    xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 1000.0, n=100)
    thinkplot.Plot(xs, ps)
    thinkplot.Save(root='analytic_pareto_height',
                   title='Pareto CDF',
                   xlabel='height (cm)',
                   ylabel='CDF',
                   legend=False)
def MakeNormalCdf():
    """Generates a plot of the normal CDF.

    Writes analytic_normal_cdf.* via thinkplot.Save.
    """
    thinkplot.PrePlot(3)
    # Paired (mu, sigma) parameters; one curve per pair.
    mus = [1.0, 2.0, 3.0]
    sigmas = [0.5, 0.4, 0.3]
    for mu, sigma in zip(mus, sigmas):
        xs, ps = thinkstats2.RenderNormalCdf(mu=mu, sigma=sigma,
                                             low=-1.0, high=4.0)
        label = r'$\mu=%g$, $\sigma=%g$' % (mu, sigma)
        thinkplot.Plot(xs, ps, label=label)
    thinkplot.Save(root='analytic_normal_cdf',
                   title='Normal CDF',
                   xlabel='x',
                   ylabel='CDF',
                   loc=2)
def MakeNormalModel(weights):
    """Plot the CDF of birthweights with a normal model.

    weights: sequence of birth weights (lbs)

    Writes analytic_birthwgt_model.* via thinkplot.Save.
    """
    # estimate parameters: trimming outliers yields a better fit
    mu, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
    print('Mean, Var', mu, var)
    # plot the model
    sigma = math.sqrt(var)
    print('Sigma', sigma)
    xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=12.5)
    # Model drawn first in light gray so the data curve overlays it.
    thinkplot.Plot(xs, ps, label='model', color='0.8')
    # plot the data
    cdf = thinkstats2.Cdf(weights, label='data')
    thinkplot.PrePlot(1)
    thinkplot.Cdf(cdf)
    thinkplot.Save(root='analytic_birthwgt_model',
                   title='Birth weights',
                   xlabel='birth weight (lbs)',
                   ylabel='CDF')
def MakeExampleNormalPlot():
    """Generates a sample normal probability plot.

    Writes analytic_normal_prob_example.* via thinkplot.Save.
    """
    n = 1000
    thinkplot.PrePlot(3)
    # Paired (mu, sigma): each normal sample should fall on a straight line
    # with intercept mu and slope sigma on a normal probability plot.
    mus = [0, 1, 5]
    sigmas = [1, 1, 2]
    for mu, sigma in zip(mus, sigmas):
        sample = np.random.normal(mu, sigma, n)
        xs, ys = thinkstats2.NormalProbability(sample)
        label = '$\mu=%d$, $\sigma=%d$' % (mu, sigma)
        thinkplot.Plot(xs, ys, label=label)
    thinkplot.Save(root='analytic_normal_prob_example',
                   title='Normal probability plot',
                   xlabel='standard normal sample',
                   ylabel='sample values')
def MakeNormalPlot(weights, term_weights):
    """Generates a normal probability plot of birth weights.

    weights: birth weights for all live births (lbs)
    term_weights: birth weights for full-term births (lbs)

    Writes analytic_birthwgt_normal.* via thinkplot.Save.
    """
    mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
    std = math.sqrt(var)
    # Gray reference line for a normal distribution with the trimmed
    # estimates, spanning -4..4 standard deviations.
    xs = [-4, 4]
    fxs, fys = thinkstats2.FitLine(xs, mean, std)
    thinkplot.Plot(fxs, fys, linewidth=4, color='0.8')
    thinkplot.PrePlot(2)
    xs, ys = thinkstats2.NormalProbability(weights)
    thinkplot.Plot(xs, ys, label='all live')
    xs, ys = thinkstats2.NormalProbability(term_weights)
    thinkplot.Plot(xs, ys, label='full term')
    thinkplot.Save(root='analytic_birthwgt_normal',
                   title='Normal probability plot',
                   xlabel='Standard deviations from mean',
                   ylabel='Birth weight (lbs)')
def main():
    """Generate every figure for this chapter; seeded for reproducibility."""
    thinkstats2.RandomSeed(18)
    MakeExampleNormalPlot()
    # make the analytic CDFs
    MakeExpoCdf()
    MakeBabyBoom()
    MakeParetoCdf()
    MakeParetoCdf2()
    MakeNormalCdf()
    # test the distribution of birth weights for normality
    preg = nsfg.ReadFemPreg()
    # Full term defined here as pregnancy length >= 37 weeks.
    full_term = preg[preg.prglngth >= 37]
    weights = preg.totalwgt_lb.dropna()
    term_weights = full_term.totalwgt_lb.dropna()
    MakeNormalModel(weights)
    MakeNormalPlot(weights, term_weights)


if __name__ == "__main__":
    main()
| gpl-3.0 |
OXPHOS/shogun | examples/undocumented/python/graphical/classifier_perceptron_graphical.py | 10 | 2302 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import latex_plot_inits
# Two example parameter sets: (n, distance, learn_rate, max_iter,
# num_threads, seed, nperceptrons).
parameter_list = [[20, 5, 1., 1000, 1, None, 5], [100, 5, 1., 1000, 1, None, 10]]

def classifier_perceptron_graphical(n=100, distance=5, learn_rate=1., max_iter=1000, num_threads=1, seed=None, nperceptrons=5):
    """Train `nperceptrons` randomly-initialized perceptrons on two
    (probably) linearly separable 2-D Gaussian clouds and plot each learned
    decision boundary together with the data.

    n: points per class
    distance: offset added to the positive-class cloud
    learn_rate, max_iter: perceptron training parameters
    num_threads: unused in this function body
    seed: numpy random seed (None for nondeterministic)
    nperceptrons: number of random restarts to train and draw

    returns: the last trained Perceptron
    """
    from shogun import RealFeatures, BinaryLabels
    from shogun import Perceptron
    from shogun import MSG_INFO
    # 2D data
    _DIM = 2
    # To get the nice message that the perceptron has converged
    dummy = BinaryLabels()
    dummy.io.set_loglevel(MSG_INFO)
    np.random.seed(seed)
    # Produce some (probably) linearly separable training data by hand
    # Two Gaussians at a far enough distance
    X = np.array(np.random.randn(_DIM,n))+distance
    Y = np.array(np.random.randn(_DIM,n))
    label_train_twoclass = np.hstack((np.ones(n), -np.ones(n)))
    fm_train_real = np.hstack((X,Y))
    feats_train = RealFeatures(fm_train_real)
    labels = BinaryLabels(label_train_twoclass)
    perceptron = Perceptron(feats_train, labels)
    perceptron.set_learn_rate(learn_rate)
    perceptron.set_max_iter(max_iter)
    # Keep the random w/bias set below instead of letting train() reset them.
    perceptron.set_initialize_hyperplane(False)
    # Find limits for visualization
    x_min = min(np.min(X[0,:]), np.min(Y[0,:]))
    x_max = max(np.max(X[0,:]), np.max(Y[0,:]))
    y_min = min(np.min(X[1,:]), np.min(Y[1,:]))
    y_max = max(np.max(X[1,:]), np.max(Y[1,:]))
    # NOTE(review): xrange is Python 2 only; use range on Python 3.
    for i in xrange(nperceptrons):
        # Initialize randomly weight vector and bias
        perceptron.set_w(np.random.random(2))
        perceptron.set_bias(np.random.random())
        # Run the perceptron algorithm
        perceptron.train()
        # Construct the hyperplane for visualization
        # Equation of the decision boundary is w^T x + b = 0
        b = perceptron.get_bias()
        w = perceptron.get_w()
        hx = np.linspace(x_min-1,x_max+1)
        # NOTE(review): hy is computed but never used; the plot call below
        # recomputes the boundary including the bias term.
        hy = -w[1]/w[0] * hx
        plt.plot(hx, -1/w[1]*(w[0]*hx+b))
    # Plot the two-class data
    plt.scatter(X[0,:], X[1,:], s=40, marker='o', facecolors='none', edgecolors='b')
    plt.scatter(Y[0,:], Y[1,:], s=40, marker='s', facecolors='none', edgecolors='r')
    # Customize the plot
    plt.axis([x_min-1, x_max+1, y_min-1, y_max+1])
    plt.title('Rosenblatt\'s Perceptron Algorithm')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()
    return perceptron
if __name__ == '__main__':
    print('Perceptron graphical')
    # Run the demo with the first (smaller) parameter set.
    classifier_perceptron_graphical(*parameter_list[0])
| gpl-3.0 |
stharrold/bench_fastq | bench_fastq/utils.py | 2 | 15946 | #!/usr/bin/env python
"""Utils to parse the terminal output from bench_compress.sh
"""
from __future__ import print_function, division, absolute_import
import os
import sys
import json
import datetime as dt
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def parse_elapsed(elapsed):
    """Parse string of elapsed time from output of Unix 'time' command into
    `datetime.timedelta`.

    Parameters
    ----------
    elapsed : string
        Elapsed time field output from Unix 'time' command.
        Format: [HH:]MM:SS[.SSS]

    Returns
    -------
    elapsed_dt : datetime.timedelta
        Elapsed time as ``datetime.timedelta``.

    Raises
    ------
    AssertionError
        If `elapsed` does not have 2 or 3 ':'-separated fields.
    """
    elapsed_arr = elapsed.split(':')
    if len(elapsed_arr) == 2:
        hours = '0'
        [minutes, seconds] = elapsed_arr
    elif len(elapsed_arr) == 3:
        [hours, minutes, seconds] = elapsed_arr
    else:
        raise AssertionError(("Program error. Elapsed time does not have 2 or 3 fields:\n" +
                              "{ea}").format(ea=elapsed_arr))
    hours_int = int(float(hours))
    minutes_int = int(float(minutes))
    seconds_int = int(float(seconds))
    # Round instead of truncating: the original int((1.234 - 1) / 0.001)
    # evaluates to 233 because of binary float representation, silently
    # dropping a millisecond.
    milliseconds_int = int(round((float(seconds) - seconds_int) / 0.001))
    elapsed_dt = dt.timedelta(hours=hours_int,
                              minutes=minutes_int,
                              seconds=seconds_int,
                              milliseconds=milliseconds_int)
    return elapsed_dt
def recursive_timedelta_to_totsec(dobj):
    """Return a copy of ``dobj`` with every ``datetime.timedelta`` value
    replaced by its total seconds, recursing into nested ``dict`` values.

    Call this before serializing the dict to JSON, since ``timedelta``
    objects are not JSON-serializable.

    Parameters
    ----------
    dobj : dict
        Possibly nested ``dict`` whose values may include ``timedelta``.

    Returns
    -------
    dict
        New ``dict`` with ``timedelta`` values converted to total seconds;
        all other values are carried over unchanged.
    """
    converted = {}
    for key, value in dobj.items():
        if isinstance(value, dt.timedelta):
            converted[key] = value.total_seconds()
        elif isinstance(value, dict):
            converted[key] = recursive_timedelta_to_totsec(dobj=value)
        else:
            converted[key] = value
    return converted
def parse_compress(fin, fout=None):
    """Parse terminal output from bench_compress.sh
    Parse by filename, file size, compression method, compression ratio, compression and decompression speed.
    Note: This function is rigidly dependent upon bench_compress.sh.
    Parameters
    ----------
    fin : string
    Path to text file with terminal output.
    fout : {None}, string, optional
    Path to output .json file of parsed terminal output.
    Returns
    -------
    parsed : dict
    ``dict`` of parsed terminal output.
    """
    # Check input.
    fpath = os.path.abspath(fin)
    if not os.path.isfile(fpath):
        raise IOError("File does not exist:\n{fpath}".format(fpath=fpath))
    if fout is not None:
        if not os.path.splitext(fout)[1] == '.json':
            raise IOError(("File extension is not '.json':\n" +
                           "{fout}").format(fout=fout))
    # Parse text file into dict.
    # This is a line-oriented state machine: the catch_* flags record which
    # section of the script output the parser expects next, and skip_lines
    # counts filler lines before a value line.
    parsed = {}
    skip_lines = None
    catch_initial_size = None
    catch_comp_cmd = None
    catch_comp_time = None
    catch_comp_size = None
    catch_decomp_cmd = None
    catch_decomp_time = None
    catch_decomp_size = None
    # NOTE(review): opened in 'rb' but compared against str literals below;
    # this parser presumably targets Python 2 -- confirm before running on 3.
    with open(fpath, 'rb') as fobj:
        for line in fobj:
            line = line.rstrip()
            # New input file: start a fresh per-file record.
            if line.startswith('Begin processing:'):
                line_arr = line.split(':')
                fname = os.path.splitext(os.path.basename(line_arr[1]))[0]
                parsed[fname] = {}
                continue
            # Note: Typo in original script "Intial". Do not correct.
            elif line.startswith('Intial .fastq size:'):
                catch_initial_size = True
                skip_lines = 1
                continue
            # Uncompressed size from `du` output (after one filler line).
            elif catch_initial_size and skip_lines >= 0:
                if skip_lines > 0:
                    skip_lines -= 1
                    continue
                elif skip_lines == 0:
                    line_arr = line.split()
                    parsed[fname]['size_bytes'] = int(line_arr[0])
                    assert os.path.basename(line_arr[1]) == fname
                    catch_initial_size = False
                    skip_lines = None
                    continue
            elif line.startswith('Iteration:'):
                line_arr = line.split(':')
                iteration = int(line_arr[1])
                parsed[fname][iteration] = {}
                continue
            # 'Testing <method>:' opens a compress/decompress section.
            elif line.startswith('Testing'):
                line_arr = line.rstrip(':').split()
                method = line_arr[1]
                parsed[fname][iteration][method] = {}
                catch_comp_cmd = True
                continue
            elif catch_comp_cmd and line.startswith('+ sudo time'):
                parsed[fname][iteration][method]['compress'] = {}
                parsed[fname][iteration][method]['compress']['command'] = line
                catch_comp_cmd = False
                catch_comp_time = True
                continue
            # `time` summary line: elapsed wall clock and %CPU.
            elif catch_comp_time and ('elapsed' in line) and ('CPU' in line):
                line_arr = line.split()
                elapsed = parse_elapsed(elapsed=line_arr[2].strip('elapsed'))
                parsed[fname][iteration][method]['compress']['elapsed_time'] = elapsed
                # `time` prints '?' for %CPU when it could not measure it.
                pct_cpu = line_arr[3].strip('%CPU')
                if pct_cpu == '?':
                    pct_cpu = np.NaN
                else:
                    pct_cpu = float(pct_cpu)
                parsed[fname][iteration][method]['compress']['CPU_percent'] = pct_cpu
                catch_comp_time = False
                catch_comp_size = True
                continue
            elif catch_comp_size:
                if line.startswith('+ du --bytes'):
                    skip_lines = 0
                    continue
                elif skip_lines == 0:
                    line_arr = line.split()
                    parsed[fname][iteration][method]['compress']['size_bytes'] = int(line_arr[0])
                    catch_comp_size = False
                    skip_lines = None
                    catch_decomp_cmd = True
                    continue
            # Decompression section mirrors the compression section above.
            elif catch_decomp_cmd and line.startswith('+ sudo time'):
                parsed[fname][iteration][method]['decompress'] = {}
                parsed[fname][iteration][method]['decompress']['command'] = line
                catch_decomp_cmd = False
                catch_decomp_time = True
                continue
            elif catch_decomp_time and ('elapsed' in line) and ('CPU' in line):
                line_arr = line.split()
                elapsed = parse_elapsed(elapsed=line_arr[2].strip('elapsed'))
                parsed[fname][iteration][method]['decompress']['elapsed_time'] = elapsed
                pct_cpu = line_arr[3].strip('%CPU')
                if pct_cpu == '?':
                    pct_cpu = np.NaN
                else:
                    pct_cpu = float(pct_cpu)
                parsed[fname][iteration][method]['decompress']['CPU_percent'] = pct_cpu
                catch_decomp_time = False
                catch_decomp_size = True
                continue
            elif catch_decomp_size:
                if line.startswith('+ du --bytes'):
                    skip_lines = 0
                    continue
                elif skip_lines == 0:
                    line_arr = line.split()
                    parsed[fname][iteration][method]['decompress']['size_bytes'] = int(line_arr[0])
                    # Round-trip check: warn (not fail) when the decompressed
                    # size differs from the original file size.
                    if parsed[fname]['size_bytes'] != parsed[fname][iteration][method]['decompress']['size_bytes']:
                        # noinspection PyPep8
                        print(("WARNING: File size before and after compression test do not match.\n" +
                               "file name = {fname}\n" +
                               "method = {method}\n" +
                               "initial size (bytes) = {init_size}\n" +
                               "final size (bytes) = {finl_size}").format(fname=fname, method=method,
                                                                          init_size=parsed[fname]['size_bytes'],
                                                                          finl_size=parsed[fname][iteration][method]['decompress']['size_bytes']),
                              file=sys.stderr)
                    catch_decomp_size = False
                    skip_lines = None
                    continue
    # Write out dict as JSON.
    if fout is not None:
        # timedelta values are not JSON-serializable; convert to seconds.
        parsed_converted = recursive_timedelta_to_totsec(dobj=parsed)
        print("Writing parsed text to: {fout}".format(fout=fout))
        with open(fout, "wb") as fobj:
            json.dump(parsed_converted, fobj, indent=4, sort_keys=True)
    return parsed
def parsed_dict_to_df(parsed_dict):
    """Convert the nested ``dict`` from ``parse_compress`` into a
    ``pandas.DataFrame`` with hierarchical columns.

    Parameters
    ----------
    parsed_dict : dict
        Nested parsed terminal output keyed by
        filename -> iteration -> method -> process -> quantity.

    Returns
    -------
    pandas.DataFrame
        Index named 'quantity'; four column levels named
        filename, iteration, method, process.
    """
    per_file = {}
    for fname, file_data in parsed_dict.items():
        per_iteration = {}
        for iteration, iteration_data in file_data.items():
            # File-level scalars (e.g. 'size_bytes') live beside the
            # iteration dicts; only nested dicts are iterations.
            if isinstance(iteration_data, dict):
                per_method = {
                    method: pd.DataFrame.from_dict(method_data, orient='columns')
                    for method, method_data in iteration_data.items()
                }
                per_iteration[iteration] = pd.concat(per_method, axis=1)
        per_file[fname] = pd.concat(per_iteration, axis=1)
    parsed_df = pd.concat(per_file, axis=1)
    parsed_df.index.names = ['quantity']
    parsed_df.columns.names = ['filename', 'iteration', 'method', 'process']
    return parsed_df
def condense_parsed_df(parsed_df, parsed_dict):
    """Condense ``pandas.dataframe`` from parsed terminal output.
    Calculate compression/decompression rate in GB per minute and compression ratio, averaging over iterations and
    taking median of results.
    Parameters
    ----------
    parsed_df : pandas.DataFrame
    ``pandas.DataFrame`` from `parsed_dict_to_df`.
    Index name: quantity
    Heirarchical column names: filename, method, process, iteration
    parsed_dict : dict
    Nested ``dict`` from parse_compress.
    Returns
    -------
    condensed_df : pandas.DataFrame
    Heirarchical index names: method, process, quantity
    Column name: quantity
    See Also
    --------
    parsed_dict_to_df, parse_compress, reduce_condensed_df
    """
    # Calculate compression/decompression rate in GB per minute and compression ratio.
    # Drop quantities except for 'GB_per_minute' and 'compression_ratio'. Drop test files and incomplete tests.
    # Average over iterations. Take median of results.
    # Reshape so quantities become columns; .copy() detaches from parsed_df.
    condensed_df = parsed_df.stack(['filename', 'method', 'process', 'iteration']).unstack('quantity').copy()
    condensed_df['elapsed_seconds'] = condensed_df['elapsed_time'].apply(
        lambda x: x.total_seconds() if isinstance(x, dt.timedelta) else x)
    # Zero elapsed time would make the rate infinite; mark as missing instead.
    condensed_df['elapsed_seconds'] = condensed_df['elapsed_seconds'].apply(lambda x: np.NaN if x == 0.0 else x)
    condensed_df['GB_per_minute'] = np.NaN
    condensed_df['compression_ratio'] = np.NaN
    # TODO: Use .values to vectorize
    for fname in condensed_df.index.levels[0].values:
        # TODO: remove SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame
        # rate = original bytes / elapsed seconds, scaled to GB per minute.
        condensed_df.loc[fname, 'GB_per_minute'].update(
            (parsed_dict[fname]['size_bytes'] / condensed_df.loc[fname, 'elapsed_seconds']).multiply(60.0 / 1.0E9))
        # ratio = compressed size / original (decompressed) size.
        condensed_df.loc[fname, 'compression_ratio'].update(
            condensed_df.loc[fname, 'size_bytes'].div(parsed_dict[fname]['size_bytes']))
    return condensed_df
def reduce_condensed_df(condensed_df):
    """Reduce the output of ``condense_parsed_df``: average each quantity
    over iterations, then take the median over file names.

    Parameters
    ----------
    condensed_df : pandas.DataFrame
        Hierarchical index names: filename, method, process, iteration.
        Column level name: quantity.

    Returns
    -------
    pandas.Series
        Hierarchical index names: method, process, quantity.

    See Also
    --------
    condense_parsed_df, plot_rate, plot_ratio
    """
    # Pivot so iterations form the rows, then average them away.
    stacked = condensed_df.stack()
    mean_over_iterations = stacked.unstack(
        ['filename', 'method', 'process', 'quantity']).mean()
    # Pivot so filenames form the rows, then take the median over files.
    reduced_ser = mean_over_iterations.unstack(
        ['method', 'process', 'quantity']).median()
    return reduced_ser
def plot_rate(reduced_ser, fout=None):
    """Plot processing rate vs compression method.
    Parameters
    ----------
    reduced_ser : pandas.Series
    ``pandas.Series`` from `reduce_condensed_df`.
    Heirarchical index names: method, process, quantity
    fout : {None}, string, optional
    Path to save plot as image. Extension must be supported by ``matplotlib.pyplot.savefig()``
    Returns
    -------
    None
    See Also
    --------
    reduce_condensed_df, plot_ratio
    """
    plt.figure()
    # Grouped bar chart: one bar pair (compress/decompress) per method.
    pd.DataFrame.plot(reduced_ser.unstack(['quantity'])['GB_per_minute'].unstack(['process']),
                      title="Processing rate vs compression method\nmedian results over all files",
                      sort_columns=True, kind='bar')
    # Rename legend entries from raw index labels to display names.
    legend = plt.legend(loc='best', title="Process")
    legend.get_texts()[0].set_text('Compress')
    legend.get_texts()[1].set_text('Decompress')
    # NOTE(review): tick labels are hard-coded and assume exactly these four
    # methods in sorted order.
    xtick_labels = ('(bzip2, --fast)', '(fqz_comp, default)', '(gzip, --fast)', '(quip, default)')
    # NOTE(review): xrange is Python 2 only.
    plt.xticks(xrange(len(xtick_labels)), xtick_labels, rotation=45)
    plt.xlabel("Compression method with options")
    plt.ylabel("Processing rate (GB per minute)")
    if fout is not None:
        print("Writing plot to: {fout}".format(fout=fout))
        plt.savefig(fout, bbox_inches='tight')
    plt.show()
    return None
def plot_ratio(reduced_ser, fout=None):
    """Plot compression ratio vs compression method.
    Parameters
    ----------
    reduced_ser : pandas.Series
    ``pandas.Series`` from `reduce_condensed_df`.
    Heirarchical index names: method, process, quantity
    fout : {None}, string, optional
    Path to save plot as image. Extension must be supported by ``matplotlib.pyplot.savefig()``
    Returns
    -------
    None
    See Also
    --------
    reduce_condensed_df, plot_rate
    """
    plt.figure()
    # Only the 'compress' process has a meaningful size ratio.
    pd.Series.plot(reduced_ser.unstack(['quantity'])['compression_ratio'].unstack(['process'])['compress'],
                   title="Compression size ratio vs compression method\nmedian results over all files",
                   sort_columns=True, kind='bar')
    # NOTE(review): hard-coded labels assume these four methods in sorted
    # order; xrange is Python 2 only.
    xtick_labels = ('(bzip2, --fast)', '(fqz_comp, default)', '(gzip, --fast)', '(quip, default)')
    plt.xticks(xrange(len(xtick_labels)), xtick_labels, rotation=45)
    plt.xlabel("Compression method with options")
    plt.ylabel("Compression size ratio\n(compressed size / decompressed size)")
    if fout is not None:
        print("Writing plot to: {fout}".format(fout=fout))
        plt.savefig(fout, bbox_inches='tight')
    plt.show()
    return None
| mit |
jdmcbr/geopandas | geopandas/io/tests/test_infer_schema.py | 2 | 8015 | from collections import OrderedDict
from shapely.geometry import (
LineString,
MultiLineString,
MultiPoint,
MultiPolygon,
Point,
Polygon,
)
import pandas as pd
import numpy as np
from geopandas import GeoDataFrame
from geopandas.io.file import infer_schema
# Credit: Polygons below come from Montreal city Open Data portal
# http://donnees.ville.montreal.qc.ca/dataset/unites-evaluation-fonciere
# Shared geometry fixtures, all (longitude, latitude) pairs around Montreal
# city hall (see the Open Data credit above). Ring coordinates repeat the
# first point to close each polygon.
city_hall_boundaries = Polygon(
    (
        (-73.5541107525234, 45.5091983609661),
        (-73.5546126200639, 45.5086813829106),
        (-73.5540185061397, 45.5084409343852),
        (-73.5539986525799, 45.5084323044531),
        (-73.5535801792994, 45.5089539203786),
        (-73.5541107525234, 45.5091983609661),
    )
)
vauquelin_place = Polygon(
    (
        (-73.5542465586147, 45.5081555487952),
        (-73.5540185061397, 45.5084409343852),
        (-73.5546126200639, 45.5086813829106),
        (-73.5548825850032, 45.5084033554357),
        (-73.5542465586147, 45.5081555487952),
    )
)
# Two open line segments along the boundary vertices above.
city_hall_walls = [
    LineString(
        (
            (-73.5541107525234, 45.5091983609661),
            (-73.5546126200639, 45.5086813829106),
            (-73.5540185061397, 45.5084409343852),
        )
    ),
    LineString(
        (
            (-73.5539986525799, 45.5084323044531),
            (-73.5535801792994, 45.5089539203786),
            (-73.5541107525234, 45.5091983609661),
        )
    ),
]
city_hall_entrance = Point(-73.553785, 45.508722)
city_hall_balcony = Point(-73.554138, 45.509080)
city_hall_council_chamber = Point(-73.554246, 45.508931)
# 3-D variants (z = 300) for the mixed-dimensionality schema test.
point_3D = Point(-73.553785, 45.508722, 300)
linestring_3D = LineString(
    (
        (-73.5541107525234, 45.5091983609661, 300),
        (-73.5546126200639, 45.5086813829106, 300),
        (-73.5540185061397, 45.5084409343852, 300),
    )
)
polygon_3D = Polygon(
    (
        (-73.5541107525234, 45.5091983609661, 300),
        (-73.5535801792994, 45.5089539203786, 300),
        (-73.5541107525234, 45.5091983609661, 300),
    )
)
def test_infer_schema_only_points():
df = GeoDataFrame(geometry=[city_hall_entrance, city_hall_balcony])
assert infer_schema(df) == {"geometry": "Point", "properties": OrderedDict()}
def test_infer_schema_points_and_multipoints():
df = GeoDataFrame(
geometry=[
MultiPoint([city_hall_entrance, city_hall_balcony]),
city_hall_balcony,
]
)
assert infer_schema(df) == {
"geometry": ["MultiPoint", "Point"],
"properties": OrderedDict(),
}
def test_infer_schema_only_multipoints():
df = GeoDataFrame(
geometry=[
MultiPoint(
[city_hall_entrance, city_hall_balcony, city_hall_council_chamber]
)
]
)
assert infer_schema(df) == {"geometry": "MultiPoint", "properties": OrderedDict()}
def test_infer_schema_only_linestrings():
    """Only LineStrings yield a 'LineString' schema."""
    frame = GeoDataFrame(geometry=city_hall_walls)
    expected = {"geometry": "LineString", "properties": OrderedDict()}
    assert infer_schema(frame) == expected
def test_infer_schema_linestrings_and_multilinestrings():
    """Mixed multi/plain linestrings are reported as a list of both."""
    geoms = [MultiLineString(city_hall_walls), city_hall_walls[0]]
    frame = GeoDataFrame(geometry=geoms)
    expected = {
        "geometry": ["MultiLineString", "LineString"],
        "properties": OrderedDict(),
    }
    assert infer_schema(frame) == expected
def test_infer_schema_only_multilinestrings():
    """Only MultiLineStrings yield a 'MultiLineString' schema."""
    frame = GeoDataFrame(geometry=[MultiLineString(city_hall_walls)])
    expected = {
        "geometry": "MultiLineString",
        "properties": OrderedDict(),
    }
    assert infer_schema(frame) == expected
def test_infer_schema_only_polygons():
    """Only Polygons yield a 'Polygon' schema."""
    frame = GeoDataFrame(geometry=[city_hall_boundaries, vauquelin_place])
    expected = {"geometry": "Polygon", "properties": OrderedDict()}
    assert infer_schema(frame) == expected
def test_infer_schema_polygons_and_multipolygons():
    """Mixed multi/plain polygons are reported as a list of both."""
    geoms = [
        MultiPolygon((city_hall_boundaries, vauquelin_place)),
        city_hall_boundaries,
    ]
    frame = GeoDataFrame(geometry=geoms)
    expected = {
        "geometry": ["MultiPolygon", "Polygon"],
        "properties": OrderedDict(),
    }
    assert infer_schema(frame) == expected
def test_infer_schema_only_multipolygons():
    """Only MultiPolygons yield a 'MultiPolygon' schema."""
    multi = MultiPolygon((city_hall_boundaries, vauquelin_place))
    frame = GeoDataFrame(geometry=[multi])
    expected = {"geometry": "MultiPolygon", "properties": OrderedDict()}
    assert infer_schema(frame) == expected
def test_infer_schema_multiple_shape_types():
    """All six 2D shape types at once are listed, multi-types first."""
    geoms = [
        MultiPolygon((city_hall_boundaries, vauquelin_place)),
        city_hall_boundaries,
        MultiLineString(city_hall_walls),
        city_hall_walls[0],
        MultiPoint([city_hall_entrance, city_hall_balcony]),
        city_hall_balcony,
    ]
    frame = GeoDataFrame(geometry=geoms)
    expected = {
        "geometry": [
            "MultiPolygon",
            "Polygon",
            "MultiLineString",
            "LineString",
            "MultiPoint",
            "Point",
        ],
        "properties": OrderedDict(),
    }
    assert infer_schema(frame) == expected
def test_infer_schema_mixed_3D_shape_type():
    """Adding one 3D point to the 2D mix prepends '3D Point' to the list."""
    geoms = [
        MultiPolygon((city_hall_boundaries, vauquelin_place)),
        city_hall_boundaries,
        MultiLineString(city_hall_walls),
        city_hall_walls[0],
        MultiPoint([city_hall_entrance, city_hall_balcony]),
        city_hall_balcony,
        point_3D,
    ]
    frame = GeoDataFrame(geometry=geoms)
    expected = {
        "geometry": [
            "3D Point",
            "MultiPolygon",
            "Polygon",
            "MultiLineString",
            "LineString",
            "MultiPoint",
            "Point",
        ],
        "properties": OrderedDict(),
    }
    assert infer_schema(frame) == expected
def test_infer_schema_mixed_3D_Point():
    """2D and 3D points together list both '3D Point' and 'Point'."""
    frame = GeoDataFrame(geometry=[city_hall_balcony, point_3D])
    expected = {
        "geometry": ["3D Point", "Point"],
        "properties": OrderedDict(),
    }
    assert infer_schema(frame) == expected
def test_infer_schema_only_3D_Points():
    """Only 3D points yield a '3D Point' schema."""
    frame = GeoDataFrame(geometry=[point_3D, point_3D])
    expected = {"geometry": "3D Point", "properties": OrderedDict()}
    assert infer_schema(frame) == expected
def test_infer_schema_mixed_3D_linestring():
    """2D and 3D linestrings together list both variants."""
    frame = GeoDataFrame(geometry=[city_hall_walls[0], linestring_3D])
    expected = {
        "geometry": ["3D LineString", "LineString"],
        "properties": OrderedDict(),
    }
    assert infer_schema(frame) == expected
def test_infer_schema_only_3D_linestrings():
    """Only 3D linestrings yield a '3D LineString' schema."""
    frame = GeoDataFrame(geometry=[linestring_3D, linestring_3D])
    expected = {
        "geometry": "3D LineString",
        "properties": OrderedDict(),
    }
    assert infer_schema(frame) == expected
def test_infer_schema_mixed_3D_Polygon():
    """2D and 3D polygons together list both variants."""
    frame = GeoDataFrame(geometry=[city_hall_boundaries, polygon_3D])
    expected = {
        "geometry": ["3D Polygon", "Polygon"],
        "properties": OrderedDict(),
    }
    assert infer_schema(frame) == expected
def test_infer_schema_only_3D_Polygons():
    """Only 3D polygons yield a '3D Polygon' schema."""
    frame = GeoDataFrame(geometry=[polygon_3D, polygon_3D])
    expected = {"geometry": "3D Polygon", "properties": OrderedDict()}
    assert infer_schema(frame) == expected
def test_infer_schema_null_geometry_and_2D_point():
    """A missing geometry is omitted when a 2D point is present."""
    frame = GeoDataFrame(geometry=[None, city_hall_entrance])
    # None geometry type is then omitted
    expected = {"geometry": "Point", "properties": OrderedDict()}
    assert infer_schema(frame) == expected
def test_infer_schema_null_geometry_and_3D_point():
    """A missing geometry is omitted when a 3D point is present."""
    frame = GeoDataFrame(geometry=[None, point_3D])
    # None geometry type is then omitted
    expected = {"geometry": "3D Point", "properties": OrderedDict()}
    assert infer_schema(frame) == expected
def test_infer_schema_null_geometry_all():
    """All-missing geometries fall back to Fiona's default 'Unknown' type."""
    frame = GeoDataFrame(geometry=[None, None])
    expected = {"geometry": "Unknown", "properties": OrderedDict()}
    assert infer_schema(frame) == expected
def test_infer_schema_int64():
    """Nullable Int64 columns map to the generic 'int' property type."""
    frame = GeoDataFrame(geometry=[city_hall_entrance, city_hall_balcony])
    frame["int64"] = pd.array([1, np.nan], dtype=pd.Int64Dtype())
    expected = {
        "geometry": "Point",
        "properties": OrderedDict([("int64", "int")]),
    }
    assert infer_schema(frame) == expected
| bsd-3-clause |
zrhans/pythonanywhere | w3/bns/bns.py | 1 | 14997 | #!/usr/bin/python3
#-*- coding:utf-8 -*-
"""
Created on Sat May 30 14:34:56 2015
@author: hans
"""
import os
import subprocess
import pandas as pd
import matplotlib
# Headless backend: this script runs on a server and only writes PNGs.
matplotlib.use('Agg')
#matplotlib.rcParams['font.family'] = 'sans-serif'
#matplotlib.rcParams['font.sans-serif'] = ['Tahoma']#,'Bitstream Vera Sans','Lucida Grande','Verdana']
#matplotlib.rcParams['lines.linewidth'] = 2
#matplotlib.rcParams['figure.max_num_figures'] = 30
import matplotlib.pyplot as plt
import time
import datetime
#print(matplotlib.__version__ )
#print(plt.style.available)
#matplotlib.style.use('ggplot')
matplotlib.style.use('ggplot')
#print(plt.__file__)
# Timestamp used in the log below; `data` drives the output file names.
version = datetime.datetime.now()
data = datetime.date.today()
ano = data.year
mes = data.month
dia = data.day
# Monthly CSV exports (second one re-indexed for Prof. Osvaldo).
odatafile = "bns_%s-%s.csv" % (ano, mes)
osvdatafile = "bns_%s-%s-osvaldo.csv" % (ano, mes)
#DATAFILE = "bns01_%s-%s.csv" % (ano, mes) #Arquivo gerado no ecologger
DATAFILE = 'bns01.csv'
#df = pd.read_csv('ftp://fortran-zrhans.c9.io/csdapy/sr311-2014.csv')
#os.chdir(r'/home/zrhans/Web/py/bns')
os.chdir(r'/home/zrhans/w3/bns')
#os.system("wget -c micrometeorologia.org/incomming/bns01.csv")
#P2 var = commands.getoutput("wget -c micrometeorologia.org/incomming/"+DATAFILE)
#var = subprocess.check_output(["wget -c micrometeorologia.org/incomming/"+DATAFILE, shell=True])
# Fetch the latest datalogger dump; the wget exit status is logged below.
cmdout = subprocess.call("wget -c micrometeorologia.org/incomming/bns01.csv", shell=True)
f = open('bns.log','a')
f.write('\n UTC\n '+50*'-'+'\n %s | em %s\n' % (cmdout, version))
f.close()
"""
Lendo arquivos de entrada
"""
# Long format: one row per (timestamp, parameter) with mean/min/max columns.
df = pd.read_csv(DATAFILE, parse_dates=True, sep=',', header=0, index_col='DATA')
print("\n Datafile Carregado...\n")
#print(df.head())
#print("\n Listando indices\n")
#print("df.index")
#print("\nOK\n")
"""
datafile = 'http://zrhans.koding.io/py/bns/bns01.csv'
df = pd.read_csv(datafile, sep=',', index_col='DATA',parse_dates=True, engine='python')
colunas = list(df.PA[:38].unique()) #padrao de saida eh array, entao tranformo para lista para iterar
for var in colunas:
#Remove o ponto do nome das variaveis#
cols.append(var.replace('.',''))
for var in colunas:
#Gerando DataFrame para cada Parametro e Mudando o nome das colunas
de acordo com o parametro
#
daf = "df_%s"%(var.replace('.',''))
print "%s = df[df.PA == '%s']; %s.rename(columns={'MEDIA':'%s','QT_MEVAL_MIN':'%s_min','QT_MEVAL_MAX':'%s_max'}, inplace=True);" % (daf,var,daf,var,var,var)
"""
colunas = list(df.PA.unique()) #padrao de saida eh array, entao tranformo para lista para iterar
# Comandos para criar um dataframe para cada parametro como nome das colunas jah renomeadas
#print("\n Lista de Colunas...\n")
#print(colunas)
print("\n Criando DataFrames...\n")
df_Chuva = [[]]
df_Chuva = df[df.PA == 'Chuva']
cols = {'MEDIA':'Chuva','QT_MEVAL_MIN':'Chuva_min','QT_MEVAL_MAX':'Chuva_max'}
new_df = df_Chuva.rename(columns=cols);
df_Chuva = new_df.copy()
#df_Chuva.rename(columns={'QT_MEVAL_MAX':'C----'}, inplace=True)
df_VVE = df[df.PA == 'VVE']
df_VVE.rename(columns={'MEDIA':'VVE','QT_MEVAL_MIN':'VVE_min',r'QT_MEVAL_MAX':u'VVE_max'}, inplace=True);
df_DVE = df[df.PA == 'DVE']
df_DVE.rename(columns={'MEDIA':'DVE','QT_MEVAL_MIN':'DVE_min','QT_MEVAL_MAX':'DVE_max'}, inplace=True);
df_Temp = df[df.PA == 'Temp.']
df_Temp.rename(columns={'MEDIA':'Temp.','QT_MEVAL_MIN':'Temp._min','QT_MEVAL_MAX':'Temp._max'}, inplace=True)
df_Umidade = df[df.PA == 'Umidade']
df_Umidade.rename(columns={'MEDIA':'Umidade','QT_MEVAL_MIN':'Umidade_min','QT_MEVAL_MAX':'Umidade_max'}, inplace=True)
df_Rad = df[df.PA == 'Rad.'];
df_Rad.rename(columns={'MEDIA':'Rad.','QT_MEVAL_MIN':'Rad._min','QT_MEVAL_MAX':'Rad._max'}, inplace=True);
df_PresAtm = df[df.PA == 'Pres.Atm.'];
df_PresAtm.rename(columns={'MEDIA':'Pres.Atm.','QT_MEVAL_MIN':'Pres.Atm._min','QT_MEVAL_MAX':'Pres.Atm._max'}, inplace=True);
df_TempInt = df[df.PA == 'Temp.Int.'];
df_TempInt.rename(columns={'MEDIA':'Temp.Int.','QT_MEVAL_MIN':'Temp.Int._min','QT_MEVAL_MAX':'Temp.Int._max'}, inplace=True);
df_CH4 = df[df.PA == 'CH4']; df_CH4.rename(columns={'MEDIA':'CH4','QT_MEVAL_MIN':'CH4_min','QT_MEVAL_MAX':'CH4_max'}, inplace=True);
df_HCnM = df[df.PA == 'HCnM']; df_HCnM.rename(columns={'MEDIA':'HCnM','QT_MEVAL_MIN':'HCnM_min','QT_MEVAL_MAX':'HCnM_max'}, inplace=True);
df_HCT = df[df.PA == 'HCT']; df_HCT.rename(columns={'MEDIA':'HCT','QT_MEVAL_MIN':'HCT_min','QT_MEVAL_MAX':'HCT_max'}, inplace=True);
df_SO2 = df[df.PA == 'SO2']; df_SO2.rename(columns={'MEDIA':'SO2','QT_MEVAL_MIN':'SO2_min','QT_MEVAL_MAX':'SO2_max'}, inplace=True);
df_O3 = df[df.PA == 'O3']; df_O3.rename(columns={'MEDIA':'O3','QT_MEVAL_MIN':'O3_min','QT_MEVAL_MAX':'O3_max'}, inplace=True);
df_NO = df[df.PA == 'NO']; df_NO.rename(columns={'MEDIA':'NO','QT_MEVAL_MIN':'NO_min','QT_MEVAL_MAX':'NO_max'}, inplace=True);
df_NO2 = df[df.PA == 'NO2']; df_NO2.rename(columns={'MEDIA':'NO2','QT_MEVAL_MIN':'NO2_min','QT_MEVAL_MAX':'NO2_max'}, inplace=True);
df_NOx = df[df.PA == 'NOx']; df_NOx.rename(columns={'MEDIA':'NOx','QT_MEVAL_MIN':'NOx_min','QT_MEVAL_MAX':'NOx_max'}, inplace=True);
df_CO = df[df.PA == 'CO']; df_CO.rename(columns={'MEDIA':'CO','QT_MEVAL_MIN':'CO_min','QT_MEVAL_MAX':'CO_max'}, inplace=True);
df_MP10 = df[df.PA == 'MP10']; df_MP10.rename(columns={'MEDIA':'MP10','QT_MEVAL_MIN':'MP10_min','QT_MEVAL_MAX':'MP10_max'}, inplace=True);
df_MPT = df[df.PA == 'MPT']; df_MPT.rename(columns={'MEDIA':'MPT','QT_MEVAL_MIN':'MPT_min','QT_MEVAL_MAX':'MPT_max'}, inplace=True);
df_Fin = df[df.PA == 'Fin'];
df_Fin.rename(columns={'MEDIA':'Fin','QT_MEVAL_MIN':'Fin_min','QT_MEVAL_MAX':'Fin_max'}, inplace=True);
df_Vin = df[df.PA == 'Vin'];
df_Vin.rename(columns={'MEDIA':'Vin','QT_MEVAL_MIN':'Vin_min','QT_MEVAL_MAX':'Vin_max'}, inplace=True);
df_Vout = df[df.PA == 'Vout'];
df_Vout.rename(columns={'MEDIA':'Vout','QT_MEVAL_MIN':'Vout_min','QT_MEVAL_MAX':'Vout_max'}, inplace=True);
print("OK\n")
"""
for var in colunas:
# Gera comandos de execucao de joins para criar a tabela final
daf = "df_%s"%(var.replace('.',''))
print "df = df.join(%s[['%s','%s_min','%s_max']])" % (daf,var,var,var)
"""
print("Limpando DataFrame...\n")
df = df[[]]
print( "Pronto!\n")
print( "\nExecutando join...")
# (Historical, commented-out experiment: day-of-year column for Prof. Osvaldo.)
print( "\nExecutando joins...")
# Re-assemble the wide table: every per-parameter frame contributes its
# mean/min/max columns. The list order reproduces the original join order.
_param_frames = [
    (df_Chuva, "Chuva"),
    (df_VVE, "VVE"),
    (df_DVE, "DVE"),
    (df_Temp, "Temp."),
    (df_Umidade, "Umidade"),
    (df_Rad, "Rad."),
    (df_PresAtm, "Pres.Atm."),
    (df_TempInt, "Temp.Int."),
    (df_CH4, "CH4"),
    (df_HCnM, "HCnM"),
    (df_HCT, "HCT"),
    (df_SO2, "SO2"),
    (df_O3, "O3"),
    (df_NO, "NO"),
    (df_NO2, "NO2"),
    (df_NOx, "NOx"),
    (df_CO, "CO"),
    (df_MP10, "MP10"),
    (df_MPT, "MPT"),
    (df_Fin, "Fin"),
    (df_Vin, "Vin"),
    (df_Vout, "Vout"),
]
for _frame, _name in _param_frames:
    df = df.join(_frame[[_name, _name + "_min", _name + "_max"]])
print( "OK\n")
#print(df.columns.tolist())
print( "\n Exportando DataFrame para .csv ...")
# BUG FIX: DataFrame.to_csv() has no `parse_dates` keyword (that is a
# read_csv option); passing it raises TypeError on current pandas.
df.to_csv(odatafile)
dfo = df.copy()
#del(dfo['index'])
# NOTE(review): summing dayofyear + hour + minute collapses distinct
# timestamps onto the same integer index value — looks suspicious; confirm
# the intended format with the data consumer before changing it.
dfo.index = df.index.dayofyear + df.index.hour + df.index.minute
dfo.to_csv(osvdatafile)
print( "OK\n")
#print( "\n Exportando DataFrame para .xlsx ...")
#DataFrame.to_excel(excel_writer, sheet_name='Sheet1', na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, startrow=0, startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep='inf', verbose=True)¶
# `Sheet1` doubles as the PNG file-name prefix used by graficos() below.
outputxlsx = "bns_%s-%s.xlsx" % (ano, mes)
Sheet1 = "bns_%s-%s" % (ano, mes)
##Demora df.to_excel(outputxlsx, sheet_name=Sheet1, header=True)
#print( "OK\n")
#df.rename(columns={'MEDIA':'%s','QT_MEVAL_MIN':'%s_min','QT_MEVAL_MAX':'%s_max'}, inplace=True);" % (daf,
#pandas.read_excel(io, sheetname=0, header=0, skiprows=None, skip_footer=0,
#index_col=None, parse_cols=None, parse_dates=False, date_parser=None,
# na_values=None, thousands=None, convert_float=True, has_index_names=None,
# converters=None, engine=None, **kwds)
#print(df.head())
def graficos(prefix,var,namevar,varunit):
    """Plot one parameter of the module-global ``df`` time series and save
    it as ``graf/<prefix>_<var>.png``.

    Parameters
    ----------
    prefix: str, file name prefix (the worksheet/month tag).
    var: str, column of ``df`` to plot.
    namevar: str, human-readable title for the figure.
    varunit: str, y-axis label (ignored for rain, which is always mm).
    """
    if var == 'Chuva':
        # BUG FIX: ``resample('1D', how='sum')`` uses the ``how=`` keyword
        # removed in pandas 0.18; the method-chain form is the supported API.
        rainD = df['Chuva'].resample('1D').sum()
        # Running total kept as a proper Series. The original assigned a whole
        # DataFrame into ``rainD['ACUM']``, which corrupted that element; the
        # value is only needed by the (commented-out) accumulation plot.
        rain_acum = df['Chuva'].cumsum()
        #print(rainD)
        df[[var]].plot(title=namevar)
        #rain_acum.plot(title='Acumulado')
        #plt.bar(df.index, df['Chuva'], align='center', width=0.04, alpha=0.5 )
        plt.ylabel('mm')
        namefig = "graf/%s_%s.png" % (prefix,var)
        plt.savefig(namefig)
    else:
        #print("Gerando grafico: %s | %s | %s | %s" %(prefix,var,namevar,varunit))
        plt.figure(figsize=(18,12))
        namevar = "BNS01 - %s" % (namevar)
        df[[var]].plot(title=namevar)
        plt.ylabel(varunit)
        namefig = "graf/%s_%s.png" % (prefix,var)
        plt.savefig(namefig)
    # BUG FIX: this function is called once per parameter (~22 times) and the
    # figures were never closed, so matplotlib kept them all alive in memory.
    plt.close('all')
    return
#@todo Criar dicionario com o Parametro sendo a chave
# Maps each parameter tag to (description, tag, unit) used by graficos().
param = {'MPT':('Analisador de Partículas Totais','MPT','mg/m³'), 'MP10':('Analisador de Particulas Inalaveis','MP10','mg/m³'),'CO':('Analisador de Monóxido de Carbono','CO','ppm'), 'NO':('Analisador de Óxidos de Nitrogênio','NO','ppm'),'NO2':('Analisador de Óxidos de Nitrogênio','NO2','ppm'),'NOx':('Analisador de Óxidos de Nitrogênio','NOx','ppm'),'O3':('Analisador de Ozônio','O3','ppm'),'SO2':('Analisador de Dióxido de Enxofre','SO2','ppm'),'CH4':('Analisador de Hidrocarbonetos','CH4','ppm'),'HCT':('Analisador de Hidrocarbonetos','HCT','ppm'),'HCnM':('Analisador de Hidrocarbonetos','HCnM','ppm'),'DVE':('Direção do Vento','DVE','Graus'), 'Pres.Atm.':('Pressão Atmosférica','Pres.Atm.','mbar'),'Rad.':('Radiação Solar','Rad.','W/m²'),'Temp.':('Temperatura do Ar','Temp.','°C'), 'Temp.Int.':('Temperatura Interna Estação','Temp.Int.','°C'),'Umidade':('Umidade Relativa','Umidade','%'),'VVE':('Velocidade do Vento','VVE','m/s'), 'Chuva':("Precipitação Pluviométrica",'Chuva','mm'),'Fin':('Suprimento de Energia','Fin','Hz'),'Vin':('Suprimento de Energia','Vin','V'),'Vout':('Suprimento de Energia ','Vout','V')}
# One PNG per parameter found in the input file.
for i, c in enumerate (colunas):
    # c e param[c][1] representam a mesma cisa (sigla da variável) [var]
    graficos(Sheet1,param[c][1],param[c][0],param[c][2])
    #print("graficos(Sheet1,'%s','%s'" % (i,param[i]))
    #print("item:%s -> %s \t Desc: %s" %(i,c,param[c][0]))
"""
,{'CO':'Analisador de Monóxido de Carbono - CO'}
,{'NO':'Analisador de Óxidos de Nitrogênio - NO'}
,{'NO2':'Analisador de Óxidos de Nitrogênio - NO2'}
,{'NOx':'Analisador de Óxidos de Nitrogênio - NOx'}
,{'O3':'Analisador de Ozônio - O3'}
,{'SO2':'Analisador de Dióxido de Enxofre - SO2'}
,{'CH4':'Analisador de Hidrocarbonetos - CH4'}
,{'HCT':'Analisador de Hidrocarbonetos - HCT'}
,{'HCnM':'Analisador de Hidrocarbonetos - HCnM'}
,{'DVE':'Entradas Analógicas - DVE'}
,{'Pres.Atm.':'Entradas Analógicas - Pres.Atm.'}
,{'Rad.':'Entradas Analógicas - Rad.'}
,{'Temp.':'Entradas Analógicas - Temp.'}
,{'Temp.Int.':'Entradas Analógicas - Temp.Int.'}
,{'Umidade':'Entradas Analógicas - Umidade'}
,{'VVE':'Entradas Analógicas - VVE'}
,{'Chuva':'Entradas Digitais - Chuva'}
,{'Fin':'Suprimento de Energia - Fin'}
,{'Vin':'Suprimento de Energia - Vin'}
,{'Vout':'Suprimento de Energia - Vout'}}
"""
# `version` is immediately overwritten with today's date object.
version = datetime.date.today().strftime("%Y-%m")
version = data
# Switch to the PNG output directory; gera_html() lists it via `cwd`.
os.chdir(r'/home/zrhans/w3/bns/graf/')
cwd = os.getcwd()
def gera_html():
    """Generate three HTML fragments (index, owl carousel, thumbnail grid)
    listing every PNG found in the module-global ``cwd`` directory.

    Writes ``index.html``, ``owl.html`` and ``thb.html`` in the current
    directory; they are moved into the web templates by the caller.
    """
    versao = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
    # Timestamp of the last datalogger upload, read from the raw CSV.
    last_line = subprocess.getoutput('tail -1 /home/zrhans/w3/bns/bns01.csv | cut -d"," -f1')
    vnpath = 'http://www.qualidadedoar.com/static/images/graf/'
    HTML = ('index.html')
    owl = ('owl.html')
    thb = ('thb.html')
    title = "Graficos - BNS01 - UTERG" # Alterar para o titulo do site
    # BUG FIX: the three handles were opened and closed manually, leaking
    # them whenever a write failed; `with` guarantees the close.
    with open(HTML, "w") as ficheiro, open(owl, "w") as fowl, open(thb, "w") as fthb:
        ficheiro.write("<html><head><title>"+title+"</title></head><body>")
        ficheiro.write("<h1>%s </h1><i class='fi-loop'> </i><small> Versão atualizada em - <i>%s</i> UTC" % (title,versao))
        ficheiro.write("<br><i class='fi-rss'> </i>Ultimo envio do dados - <i> %s </i> LTC</small><hr> <center>" % last_line)
        # sorted(): os.listdir order is arbitrary; keep the page deterministic.
        for arquivo in sorted(os.listdir(cwd)):
            if arquivo.endswith('.png'):
                ficheiro.write('<img src="%s%s"/><hr>' % (vnpath, arquivo))
                fowl.write('<div class="item"><img src="http://www.qualidadedoar.com/static/images/graf/%s" /></div>' % arquivo)
                # NOTE(review): arquivo[:-5] drops 5 chars but '.png' is only
                # 4 — the anchor ids keep a trailing character; confirm intent.
                fthb.write('<div class="column"><a data-toggle="%s"><img class="thumbnail s550" src="http://www.qualidadedoar.com/static/images/graf/%s"></a>' % (arquivo[:-5],arquivo))
                fthb.write('<div class="full reveal" id="%s" data-reveal><p>%s</p><img src="http://www.qualidadedoar.com/static/images/graf/%s" alt="%s"> <button class="close-button" data-close aria-label="Close reveal" type="button"><span aria-hidden="true">×</span></button></div></div>' % (arquivo[:-5],arquivo,arquivo,arquivo))
        ficheiro.write("</center></body></html>")
gera_html()
# Publish the generated fragments and images into the web application tree.
cmdout = subprocess.call("mv index.html /home/zrhans/csda/templates/bns_graficos.html", shell=True)
cmdout = subprocess.call("mv owl.html /home/zrhans/csda/templates/bns_owl.html", shell=True)
cmdout = subprocess.call("mv thb.html /home/zrhans/csda/templates/thb_uterg.html", shell=True)
cmdout = subprocess.call("mv *.png /home/zrhans/csda/static/images/graf/", shell=True)
#graficos(Sheet1,'MPT','Material Particulado Total')
print(10*'=' + ' Completo \n') | apache-2.0 |
jorge2703/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
# On-disk fixture files shipped with the test suite (./data/ next to
# this module); the "invalid" ones are deliberately malformed.
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
    """Smoke-test the classification fixture: shape, sparse values, targets."""
    X, y = load_svmlight_file(datafile)
    # test X's shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 21)
    assert_equal(y.shape[0], 6)
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
                      (1, 5, 1.0), (1, 12, -3),
                      (2, 20, 27)):
        assert_equal(X[i, j], val)
    # tests X's zero values
    assert_equal(X[0, 3], 0)
    assert_equal(X[0, 5], 0)
    assert_equal(X[1, 8], 0)
    assert_equal(X[1, 16], 0)
    assert_equal(X[2, 18], 0)
    # test can change X's values
    X[0, 2] *= 2
    assert_equal(X[0, 2], 5)
    # test y
    assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
    """Loading from an open OS file descriptor matches loading by path."""
    # test loading from file descriptor
    X1, y1 = load_svmlight_file(datafile)
    fd = os.open(datafile, os.O_RDONLY)
    try:
        X2, y2 = load_svmlight_file(fd)
        assert_array_equal(X1.data, X2.data)
        assert_array_equal(y1, y2)
    finally:
        os.close(fd)
def test_load_svmlight_file_multilabel():
    """multilabel=True parses comma-separated targets into tuples."""
    X, y = load_svmlight_file(multifile, multilabel=True)
    assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
    """load_svmlight_files shares dtype and feature space across files."""
    X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
                                                           dtype=np.float32)
    assert_array_equal(X_train.toarray(), X_test.toarray())
    assert_array_equal(y_train, y_test)
    assert_equal(X_train.dtype, np.float32)
    assert_equal(X_test.dtype, np.float32)
    X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
                                                 dtype=np.float64)
    assert_equal(X1.dtype, X2.dtype)
    assert_equal(X2.dtype, X3.dtype)
    assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
    """n_features widens the matrix; asking for fewer than present raises."""
    X, y = load_svmlight_file(datafile, n_features=22)
    # test X'shape
    assert_equal(X.indptr.shape[0], 7)
    assert_equal(X.shape[0], 6)
    assert_equal(X.shape[1], 22)
    # test X's non-zero values
    for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
                      (1, 5, 1.0), (1, 12, -3)):
        assert_equal(X[i, j], val)
    # 21 features in file
    assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
    """gzip- and bz2-compressed copies load identically to the plain file."""
    X, y = load_svmlight_file(datafile)
    with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
        tmp.close() # necessary under windows
        with open(datafile, "rb") as f:
            shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
        Xgz, ygz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
        assert_array_equal(X.toarray(), Xgz.toarray())
        assert_array_equal(y, ygz)
    with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
        tmp.close() # necessary under windows
        with open(datafile, "rb") as f:
            shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
        Xbz, ybz = load_svmlight_file(tmp.name)
        # because we "close" it manually and write to it,
        # we need to remove it manually.
        os.remove(tmp.name)
        assert_array_equal(X.toarray(), Xbz.toarray())
        assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
    """A malformed svmlight file must raise ValueError."""
    load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
    """Out-of-order feature indices must raise ValueError."""
    load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
    """zero_based=False forbids a zero feature index in the data."""
    f = BytesIO(b("-1 4:1.\n1 0:1\n"))
    load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
    """zero_based='auto' infers the indexing convention per file set."""
    data1 = b("-1 1:1 2:2 3:3\n")
    data2 = b("-1 0:0 1:1\n")
    f1 = BytesIO(data1)
    X, y = load_svmlight_file(f1, zero_based="auto")
    assert_equal(X.shape, (1, 3))
    f1 = BytesIO(data1)
    f2 = BytesIO(data2)
    X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
    assert_equal(X1.shape, (1, 4))
    assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
    """qid annotations are skipped or returned depending on query_id."""
    # load svmfile with qid attribute
    data = b("""
    3 qid:1 1:0.53 2:0.12
    2 qid:1 1:0.13 2:0.1
    7 qid:2 1:0.87 2:0.12""")
    X, y = load_svmlight_file(BytesIO(data), query_id=False)
    assert_array_equal(y, [3, 2, 7])
    assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
    res1 = load_svmlight_files([BytesIO(data)], query_id=True)
    res2 = load_svmlight_file(BytesIO(data), query_id=True)
    for X, y, qid in (res1, res2):
        assert_array_equal(y, [3, 2, 7])
        assert_array_equal(qid, [1, 1, 2])
        assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
    """One malformed file in a batch fails the whole load."""
    load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
    """A float is neither a path nor a file descriptor."""
    # in python 3 integers are valid file opening arguments (taken as unix
    # file descriptors)
    load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
    """A non-existent path raises IOError."""
    load_svmlight_file("trou pic nic douille")
def test_dump():
    """Round-trip: dump then reload preserves values, dtype and index order
    for sparse, dense and sliced-sparse inputs."""
    Xs, y = load_svmlight_file(datafile)
    Xd = Xs.toarray()
    # slicing a csr_matrix can unsort its .indices, so test that we sort
    # those correctly
    Xsliced = Xs[np.arange(Xs.shape[0])]
    for X in (Xs, Xd, Xsliced):
        for zero_based in (True, False):
            for dtype in [np.float32, np.float64, np.int32]:
                f = BytesIO()
                # we need to pass a comment to get the version info in;
                # LibSVM doesn't grok comments so they're not put in by
                # default anymore.
                dump_svmlight_file(X.astype(dtype), y, f, comment="test",
                                   zero_based=zero_based)
                f.seek(0)
                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError: # fails in Python 2.x
                    pass
                assert_in("scikit-learn %s" % sklearn.__version__, comment)
                comment = f.readline()
                try:
                    comment = str(comment, "utf-8")
                except TypeError: # fails in Python 2.x
                    pass
                assert_in(["one", "zero"][zero_based] + "-based", comment)
                X2, y2 = load_svmlight_file(f, dtype=dtype,
                                            zero_based=zero_based)
                assert_equal(X2.dtype, dtype)
                assert_array_equal(X2.sorted_indices().indices, X2.indices)
                if dtype == np.float32:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 4)
                else:
                    assert_array_almost_equal(
                        # allow a rounding error at the last decimal place
                        Xd.astype(dtype), X2.toarray(), 15)
                assert_array_equal(y, y2)
def test_dump_multilabel():
    """Multilabel targets are written as comma-separated label lists."""
    X = [[1, 0, 3, 0, 5],
         [0, 0, 0, 0, 0],
         [0, 5, 0, 1, 0]]
    y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
    f = BytesIO()
    dump_svmlight_file(X, y, f, multilabel=True)
    f.seek(0)
    # make sure it dumps multilabel correctly
    assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
    assert_equal(f.readline(), b("0,2 \n"))
    assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
    """Dumps use the most concise numeric text that still round-trips."""
    one = 1
    two = 2.1
    three = 3.01
    exact = 1.000000000000001
    # loses the last decimal place
    almost = 1.0000000000000001
    X = [[one, two, three, exact, almost],
         [1e9, 2e18, 3e27, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0]]
    y = [one, two, three, exact, almost]
    f = BytesIO()
    dump_svmlight_file(X, y, f)
    f.seek(0)
    # make sure it's using the most concise format possible
    assert_equal(f.readline(),
                 b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
    assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
    assert_equal(f.readline(), b("3.01 \n"))
    assert_equal(f.readline(), b("1.000000000000001 \n"))
    assert_equal(f.readline(), b("1 \n"))
    f.seek(0)
    # make sure it's correct too :)
    X2, y2 = load_svmlight_file(f)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
def test_dump_comment():
    """User comments survive dumping; NUL bytes and raw UTF-8 bytes fail."""
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    f = BytesIO()
    ascii_comment = "This is a comment\nspanning multiple lines."
    dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
    # XXX we have to update this to support Python 3.x
    utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
    f = BytesIO()
    assert_raises(UnicodeDecodeError,
                  dump_svmlight_file, X, y, f, comment=utf8_comment)
    unicode_comment = utf8_comment.decode("utf-8")
    f = BytesIO()
    dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
    f.seek(0)
    X2, y2 = load_svmlight_file(f, zero_based=False)
    assert_array_almost_equal(X, X2.toarray())
    assert_array_equal(y, y2)
    f = BytesIO()
    assert_raises(ValueError,
                  dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
    """Mis-shaped or mis-sized targets raise ValueError on dump."""
    X, y = load_svmlight_file(datafile)
    f = BytesIO()
    y2d = [y]
    assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
    f = BytesIO()
    assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
    """query_id values round-trip through dump and load."""
    # test dumping a file with query_id
    X, y = load_svmlight_file(datafile)
    X = X.toarray()
    query_id = np.arange(X.shape[0]) // 2
    f = BytesIO()
    dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
    f.seek(0)
    X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
    assert_array_almost_equal(X, X1.toarray())
    assert_array_almost_equal(y, y1)
    assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
CellModels/tyssue | tyssue/particles/point_cloud.py | 2 | 10119 | """Utilities to generate point clouds
The positions of the points are generated along the architecture
of the epithelium.
"""
from collections import abc
import numpy as np
import pandas as pd
from ..config.subdiv import bulk_spec
class EdgeSubdiv:
    """
    Container class to ease discretisation along the edges
    """

    def __init__(self, edge_df, **kwargs):
        """Creates an indexer and an offset array to ease
        discretisation along the edges.

        Parameters
        ----------
        edge_df: pd.DataFrame,
            must provide a ``length`` column; a ``density`` column is
            filled from the specs when absent.

        Keyword parameters
        ------------------
        density: number of points per edge

        Attributes
        ----------
        upcaster: np.ndarray, shape (Np,)
            edge indices repeated to match the lookup table
        offset: np.ndarray, shape (Np,)
            piecewise linear offset along the edges, such that
            ::math:M_{ij}^n = offset[n]*r_{ij}:
        """
        self.edge_df = edge_df.copy()
        self.n_edges = self.edge_df.shape[0]
        self.specs = bulk_spec()
        self.specs.update(**kwargs)
        # mean edge length — normalises lengths so that `density` is
        # expressed in points per average-length edge
        self.unit_length = edge_df.length.mean()
        if "density" not in edge_df:
            self.edge_df["density"] = self.specs["density"]
        self.n_points = 0
        self.points = None
        self.offset_lut = None
        self.update_all()

    def update_all(self):
        """Recomputes the lookup table, point count, upcaster and offsets,
        in that (required) order."""
        self.update_offset_lut()
        self.update_particles()
        self.update_upcaster()
        self.update_offset()

    @staticmethod
    def from_eptm_edges(eptm, **kwargs):
        """Creates an EdgeSubdiv instance and computes the point
        grid allong the edges from the source vertex to its target.

        Returns
        -------
        subdiv: a :class:`EdgeSubdiv` instance
        """
        subdiv = EdgeSubdiv(eptm.edge_df[["length", "density"]], **kwargs)
        srce_pos = eptm.upcast_srce(eptm.vert_df[eptm.coords])
        r_ij = eptm.edge_df[eptm.dcoords]
        subdiv.edge_point_cloud(srce_pos, r_ij)
        return subdiv

    @staticmethod
    def _offset_lut_(num):
        # `num` regularly spaced offsets in ]0, 1[, shifted half a step
        # away from both edge extremities
        return np.arange(0.5, num + 0.5) / num

    def update_offset_lut(self, offset_lut=None):
        """
        Updates the density lookup table function.
        The `offset_lut` can be any function
        with a single `num` argument

        Parameters
        ----------
        offset_lut: function, default None,
           edge-wise function of the number of points
           giving the offset positions
           Default is a shifted regular grid:
           `np.arange(0.5, num+0.5) / num`
        """
        if offset_lut is None:
            self.offset_lut = self._offset_lut_
        else:
            self.offset_lut = offset_lut

    def update_particles(self):
        """
        * Updates the number of particles per edge from edges length
        and density values:
        `num_particles = length * density`
        * Also updates the self.points df
        """
        self.edge_df["norm_length"] = self.edge_df["length"] / self.unit_length
        # BUG FIX: `np.int` was removed in NumPy 1.24; the builtin `int`
        # designates the same (platform default) integer dtype.
        points_per_edges = np.round(self.edge_df.eval("norm_length * density")).astype(
            int
        )
        self.edge_df["num_particles"] = points_per_edges
        self.n_points = points_per_edges.sum()
        self.points = pd.DataFrame(
            np.zeros((self.n_points, 2)), columns=["upcaster", "offset"]
        )

    def update_upcaster(self):
        """
        resets the 'upcaster' column of self.points,
        'upcaster' indexes over self.edge_df repeated to
        upcast data from the edge df to the points df
        """
        self.points["upcaster"] = np.repeat(
            np.arange(self.edge_df.shape[0]), self.edge_df["num_particles"]
        )

    def update_offset(self):
        # one offset grid per edge, concatenated in edge order
        self.points["offset"] = np.concatenate(
            [self.offset_lut(num=ns) for ns in self.edge_df["num_particles"]]
        )

    def validate(self):
        """Returns True when the points table is consistent with the edge
        table (one upcaster entry per edge, `n_points` rows overall)."""
        if not self.points["upcaster"].max() + 1 == self.n_edges:
            return False
        if not self.points["upcaster"].shape[0] == self.n_points:
            return False
        # BUG FIX: `.shape` is an attribute, not a method; the original
        # `.shape()[0]` raised TypeError instead of validating.
        if not self.points["offset"].shape[0] == self.n_points:
            return False
        return True

    def upcast(self, df):
        """Repeats the edge-indexed data `df` so that it aligns with the
        points table (one row per point).

        `df` may be a column name of `self.edge_df`, a sequence of such
        names, or any Series/DataFrame indexed like `self.edge_df`.
        """
        if isinstance(df, str) and df in self.edge_df:
            return self.edge_df.loc[self.points["upcaster"], df]
        elif (
            isinstance(df, abc.Iterable)
            and isinstance(df[0], str)
            and set(df).issubset(self.edge_df.columns)
        ):
            return self.edge_df.loc[self.points["upcaster"], df]
        elif hasattr(df, "loc"):
            return df.loc[self.points["upcaster"]]
        else:
            raise ValueError(
                """
            Argument df should be a column name or a sequence of column names
            or a Series or Dataframe indexed like self.edge_df
            """
            )

    def edge_point_cloud(
        self,
        srce_pos,
        r_ij,
        offset_modulation=None,
        modulation_kwargs=None,
        coords=["x", "y", "z"],
        dcoords=["dx", "dy", "dz"],
    ):
        """Generates a point cloud along the edges of the epithelium.

        if a offset_modulation function is provided, it is used to
        transform the offsets

        Parameters
        ----------
        srce_pos: DataFrame of shape (self.Ne, ndim)
           with the origins of the points for each edge
           (usually the edge upcasted source vertex)
        r_ij: DataFrame of shape (self.Ne, ndim)
           the edge vector coordiantes
        offset_modulation: function of self returning
           an array with shape (self.Np,) containing
           the modified offsets. self.points['offset']
           is used by default.
        modulation_kwargs: keyword arguments to the modulation
           function

        Returns
        -------
        points: (Np, 3) pd.DataFrame with the points positions
        """
        # BUG FIX: `**None` raised TypeError whenever a modulation was
        # supplied without explicit kwargs; default to an empty mapping.
        if modulation_kwargs is None:
            modulation_kwargs = {}
        for u, du in zip(coords, dcoords):
            self.edge_df[u] = srce_pos[u]
            self.edge_df["d" + u] = r_ij[du]
        cols = coords + dcoords
        upcast = self.edge_df.loc[self.points["upcaster"], cols]
        if offset_modulation is None:
            upcast["offset"] = self.points["offset"].values
        else:
            upcast["offset"] = offset_modulation(self, **modulation_kwargs)
        for c in coords:
            self.points[c] = upcast.eval("{} + offset * {}".format(c, "d" + c)).values
        if self.specs["noise"] > 0.0:
            self.points[coords] += np.random.normal(
                scale=self.specs["noise"], size=(self.n_points, 3)
            )
        return self.points[coords]
def get_edge_bases(eptm, base=("face", "srce", "trgt")):
    """Return per-edge coordinate bases anchored at ``base[0]``.

    For every other element of ``base``, builds a DataFrame holding the
    anchor position, the displacement vector toward that element and its
    norm, keyed ``"<origin>_<vertex>"``.
    """
    positions = {elem: eptm.upcast_cols(elem, eptm.coords) for elem in base}
    anchor = base[0]
    anchor_pos = positions[anchor].values
    columns = eptm.coords + eptm.dcoords + ["length"]
    bases = {}
    for target in base[1:]:
        frame = pd.DataFrame(0, columns=columns, index=eptm.edge_df.index)
        frame[eptm.dcoords] = positions[target].values - anchor_pos
        frame["length"] = np.linalg.norm(frame[eptm.dcoords].values, axis=1)
        frame[eptm.coords] = anchor_pos
        bases["{}_{}".format(anchor, target)] = frame.copy()
    return bases
class FaceGrid:
    """Point grid spanning faces, built from per-edge subdivisions.

    Each entry of `base` beyond the first defines an edge family
    (e.g. ``face_srce``, ``face_trgt``); an `EdgeSubdiv` is created for
    each, and their offsets are combined on a local grid per face edge.
    """

    def __init__(self, edges_df, base, **kwargs):
        """
        Parameters
        ----------
        edges_df : dict of DataFrame
            keyed by ``'<origin>_<other>'`` strings (see `get_edge_bases`)
        base : sequence of str
            element names; the first one is the grid origin
        kwargs : specification overrides, forwarded to `bulk_spec` and
            to every `EdgeSubdiv`
        """
        self.origin = base[0]
        self.base = ["{}_{}".format(base[0], other) for other in base[1:]]
        self.specs = bulk_spec()
        self.specs.update(kwargs)
        e_specs = kwargs
        self.subdivs = {key: EdgeSubdiv(edges_df[key], **e_specs) for key in self.base}
        # Grid size: per-edge product of particle counts over the
        # subdivisions, summed over edges.
        # FIX: `np.product` was deprecated and removed in NumPy 2.0;
        # `np.prod` is the supported, behavior-identical spelling.
        self.n_points = np.prod(
            [
                subdiv.edge_df["num_particles"].values
                for subdiv in self.subdivs.values()
            ],
            axis=0,
        ).sum()
        self.dim = len(self.subdivs)
        self.up_cols = ["up_{}".format(key) for key in self.base]
        self.of_cols = ["of_{}".format(key) for key in self.base]
        # Filled by `update_grid`.
        self.points = None

    def update_grid(self):
        """Rebuild ``self.points`` as the local cartesian product of the
        subdivisions' upcaster/offset columns, grouped per origin edge.
        """
        upcasters = {}
        for key, subdiv in self.subdivs.items():
            upcasters["up_" + key] = subdiv.points["upcaster"]
            upcasters["of_" + key] = subdiv.points["offset"]
        upcasters = pd.DataFrame.from_dict(upcasters)
        points = {}
        u_axis = "up_{}".format(self.base[0])
        upcasters.set_index(u_axis, drop=False, inplace=True)
        # Build the local offset-by-offset and upcaster-by-upcaster
        # grids edge by edge.
        for cols in (self.of_cols, self.up_cols):
            df = upcasters.groupby(level=u_axis).apply(_local_grid, *cols)
            points.update({col: df[col].values for col in cols})
        self.points = pd.DataFrame.from_dict(points)

    def face_point_cloud(self, coords=["x", "y", "z"], dcoords=["dx", "dy", "dz"]):
        """Return the positions of the points inside the faces.

        Points whose combined offsets fall outside the unit simplex are
        discarded, so the cloud covers the triangle spanned by the edge
        vectors.
        """
        upcast = {}
        offsets = self.points[self.of_cols]
        for key, subdiv in self.subdivs.items():
            upcast[key] = subdiv.edge_df.loc[
                self.points["up_{}".format(key)], coords + dcoords + ["length"]
            ].copy()
            upcast[key].reset_index(inplace=True)
            upcast[key]["offset"] = offsets["of_{}".format(key)].values
        for u, du in zip(coords, dcoords):
            self.points[u] = upcast[self.base[0]].eval(
                "{} + offset * {}".format(u, du)
            ).values + np.sum(
                [
                    upcast[other].eval("offset * {}".format(du)).values
                    for other in self.base[1:]
                ],
                axis=0,
            )
        # Keep only points with offset_0 + offset_k < 1 (inside the face).
        in_out = np.zeros(self.points.shape[0], dtype=bool)
        for other in self.base[1:]:
            xx = upcast[self.base[0]]["offset"].values
            yy = upcast[other]["offset"].values
            in_out += (xx + yy) < 1.0
        self.points = self.points[in_out]
        return self.points[coords]
def _local_grid(df, *cols):
grid = np.meshgrid(*(df[col] for col in cols))
out = pd.DataFrame.from_dict({col: mm.ravel() for col, mm in zip(cols, grid)})
return out
| gpl-2.0 |
madjelan/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
    """Smoke-test the 20 newsgroups loader and its category filtering."""
    try:
        full = datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")

    # Extract a reduced dataset: the last two categories, reversed
    last_two = full.target_names[-1:-3:-1]
    reduced = datasets.fetch_20newsgroups(
        subset='all', categories=last_two, shuffle=False)

    # The category ordering must match the ordering in the full dataset
    assert_equal(reduced.target_names, full.target_names[-2:])

    # Only 0 and 1 may appear as labels
    assert_equal(np.unique(reduced.target).tolist(), [0, 1])

    # filenames, target and data must all have the same length
    assert_equal(len(reduced.filenames), len(reduced.target))
    assert_equal(len(reduced.filenames), len(reduced.data))

    # The first entry of the reduced dataset corresponds to the first
    # entry of the matching category in the full dataset
    entry_reduced = reduced.data[0]
    category = reduced.target_names[reduced.target[0]]
    label = full.target_names.index(category)
    entry_full = full.data[np.where(full.target == label)[0][0]]
    assert_equal(entry_reduced, entry_full)
def test_20news_length_consistency():
    """Checks the length consistencies within the bunch

    This is a non-regression test for a bug present in 0.16.1.
    """
    # Skip when the data is not available locally
    try:
        datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")

    # Extract the full dataset
    bunch = datasets.fetch_20newsgroups(subset='all')
    # Mapping access and attribute access must agree in length
    for field in ('data', 'target', 'filenames'):
        assert_equal(len(bunch[field]), len(getattr(bunch, field)))
def test_20news_vectorized():
    # This test is slow.
    raise SkipTest("Test too slow.")

    # The checks below are unreachable while the skip above stands; they
    # are kept for when it is lifted.
    for subset, n_rows in (("train", 11314), ("test", 7532),
                           ("all", 11314 + 7532)):
        bunch = datasets.fetch_20newsgroups_vectorized(subset=subset)
        assert_true(sp.isspmatrix_csr(bunch.data))
        assert_equal(bunch.data.shape, (n_rows, 107428))
        assert_equal(bunch.target.shape[0], n_rows)
        assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
alimuldal/numpy | numpy/core/code_generators/ufunc_docstrings.py | 9 | 90842 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
from __future__ import division, absolute_import, print_function
docdict = {}
def get(name):
    """Return the docstring registered under `name`, or None if absent."""
    return docdict.get(name)
def add_newdoc(place, name, doc):
    """Record `doc` in the module-level `docdict` under the key "place.name".

    Mirrors the signature of numpy.lib's add_newdoc, but only stores the
    docstring for later use by generate_umath.py.
    """
    docdict['.'.join((place, name))] = doc
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
    """
    Inverse hyperbolic sine element-wise.
    Parameters
    ----------
    x : array_like
        Input array.
    out : ndarray, optional
        Array into which the output is placed. Its type is preserved and it
        must be of the right shape to hold the output. See `doc.ufuncs`.
    Returns
    -------
    out : ndarray
        Array of the same shape as `x`.
    Notes
    -----
    `arcsinh` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that `sinh(z) = x`. The convention is to return the
    `z` whose imaginary part lies in `[-pi/2, pi/2]`.
    For real-valued input data types, `arcsinh` always returns real output.
    For each value that cannot be expressed as a real number or infinity, it
    returns ``nan`` and sets the `invalid` floating point error flag.
    For complex-valued input, `arcsinh` is a complex analytical function that
    has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
    the right on the former and from the left on the latter.
    The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
    References
    ----------
    .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
           10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
    .. [2] Wikipedia, "Inverse hyperbolic function",
           http://en.wikipedia.org/wiki/Arcsinh
    Examples
    --------
    >>> np.arcsinh(np.array([np.e, 10.0]))
    array([ 1.72538256, 2.99822295])
    """)
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
    """
    Compute the bit-wise AND of two arrays element-wise.
    Computes the bit-wise AND of the underlying binary representation of
    the integers in the input arrays. This ufunc implements the C/Python
    operator ``&``.
    Parameters
    ----------
    x1, x2 : array_like
        Only integer and boolean types are handled.
    Returns
    -------
    out : array_like
        Result.
    See Also
    --------
    logical_and
    bitwise_or
    bitwise_xor
    binary_repr :
        Return the binary representation of the input number as a string.
    Examples
    --------
    The number 13 is represented by ``00001101``. Likewise, 17 is
    represented by ``00010001``. The bit-wise AND of 13 and 17 is
    therefore ``00000001``, or 1:
    >>> np.bitwise_and(13, 17)
    1
    >>> np.bitwise_and(14, 13)
    12
    >>> np.binary_repr(12)
    '1100'
    >>> np.bitwise_and([14,3], 13)
    array([12, 1])
    >>> np.bitwise_and([11,7], [4,25])
    array([0, 1])
    >>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
    array([ 2, 4, 16])
    >>> np.bitwise_and([True, True], [False, True])
    array([False, True], dtype=bool)
    """)
add_newdoc('numpy.core.umath', 'bitwise_or',
    """
    Compute the bit-wise OR of two arrays element-wise.
    Computes the bit-wise OR of the underlying binary representation of
    the integers in the input arrays. This ufunc implements the C/Python
    operator ``|``.
    Parameters
    ----------
    x1, x2 : array_like
        Only integer and boolean types are handled.
    out : ndarray, optional
        Array into which the output is placed. Its type is preserved and it
        must be of the right shape to hold the output. See doc.ufuncs.
    Returns
    -------
    out : array_like
        Result.
    See Also
    --------
    logical_or
    bitwise_and
    bitwise_xor
    binary_repr :
        Return the binary representation of the input number as a string.
    Examples
    --------
    The number 13 has the binary representation ``00001101``. Likewise,
    16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
    then ``00011101``, or 29:
    >>> np.bitwise_or(13, 16)
    29
    >>> np.binary_repr(29)
    '11101'
    >>> np.bitwise_or(32, 2)
    34
    >>> np.bitwise_or([33, 4], 1)
    array([33, 5])
    >>> np.bitwise_or([33, 4], [1, 2])
    array([33, 6])
    >>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
    array([ 6, 5, 255])
    >>> np.array([2, 5, 255]) | np.array([4, 4, 4])
    array([ 6, 5, 255])
    >>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
    ... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
    array([ 6, 5, 255, 2147483647])
    >>> np.bitwise_or([True, True], [False, True])
    array([ True, True], dtype=bool)
    """)
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The complex conjugate of `x`, with same dtype as `y`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
    """
    Convert angles from radians to degrees.
    Parameters
    ----------
    x : array_like
        Input array in radians.
    out : ndarray, optional
        Output array of same shape as x.
    Returns
    -------
    y : ndarray of floats
        The corresponding degree values; if `out` was supplied this is a
        reference to it.
    See Also
    --------
    rad2deg : equivalent function
    Examples
    --------
    Convert a radian array to degrees
    >>> rad = np.arange(12.)*np.pi/6
    >>> np.degrees(rad)
    array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
            270., 300., 330.])
    >>> out = np.zeros((rad.shape))
    >>> r = np.degrees(rad, out)
    >>> np.all(r == out)
    True
    """)
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise. Returns a scalar if
both ``x1`` and ``x2`` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`. For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python ``//`` operator and pairs with the
Python ``%`` (`remainder`) function so that ``a = a % b + b * (a // b)``
up to roundoff.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
remainder : Remainder complementary to floor_divide.
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod, the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> np.invert(np.array([13], dtype=np.uint8))
array([242], dtype=uint8)
>>> np.binary_repr(13, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
>>> np.invert(np.array([13], dtype=np.uint16))
array([65522], dtype=uint16)
>>> np.binary_repr(13, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the
corresponding element of the input is finite; otherwise the values
are False (element is either positive infinity, negative infinity
or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity. Errors result if the
second argument is also supplied when `x` is a scalar input, or if
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or boolean ndarray
For scalar input, the result is a new boolean with value True if
the input is positive or negative infinity; otherwise the value is
False.
For array input, the result is a boolean array with the same shape
as the input and the values are True where the corresponding
element of the input is positive or negative infinity; elsewhere
the values are False. If a second argument was supplied the result
is stored there. If the type of that array is a numeric type the
result is represented as zeros and ones, if the type is boolean
then as False and True, respectively. The return value `y` is then
a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray or bool
For scalar input, the result is a new boolean with value True if
the input is NaN; otherwise the value is False.
For array input, the result is a boolean array of the same
dimensions as the input and the values are True if the
corresponding element of the input is NaN; otherwise the values are
False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0.0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be the numpy.ones_like, but now a specific
function for that has been written for consistency with the other
*_like functions. It is only used internally in a limited fashion now.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
# np.remainder: element-wise modulus with the sign of the divisor.
# Docstring fix: restored the missing space before the inline-literal
# ("operator``x1 % x2``" -> "operator ``x1 % x2``") so the reST renders.
add_newdoc('numpy.core.umath', 'remainder',
    """
    Return element-wise remainder of division.
    Computes the remainder complementary to the `floor_divide` function. It is
    equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
    as the divisor `x2`. It should not be confused with the Matlab(TM) ``rem``
    function.
    Parameters
    ----------
    x1 : array_like
        Dividend array.
    x2 : array_like
        Divisor array.
    out : ndarray, optional
        Array into which the output is placed. Its type is preserved and it
        must be of the right shape to hold the output. See doc.ufuncs.
    Returns
    -------
    y : ndarray
        The element-wise remainder of the quotient ``floor_divide(x1, x2)``.
        Returns a scalar if both `x1` and `x2` are scalars.
    See Also
    --------
    floor_divide : Equivalent of Python ``//`` operator.
    fmod : Equivalent of the Matlab(TM) ``rem`` function.
    divide, floor
    Notes
    -----
    Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
    integers.
    Examples
    --------
    >>> np.remainder([4, 7], [2, 3])
    array([0, 1])
    >>> np.remainder(np.arange(7), 5)
    array([0, 1, 2, 3, 4, 0, 1])
    """)
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right `x2`. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. nan
is returned for nan inputs.
For complex inputs, the `sign` function returns
``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``.
complex(nan, 0) is returned for complex nan inputs.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Notes
-----
There is more than one definition of sign in common use for complex
numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}`
which is different from a common alternative, :math:`x/|x|`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
>>> np.sign(5-2j)
(1+0j)
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1 : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
# np.sqrt: element-wise positive square root.
# Docstring fix: the last example used the unbound name ``numpy.inf``;
# within these examples numpy is imported as ``np``.
add_newdoc('numpy.core.umath', 'sqrt',
    """
    Return the positive square-root of an array, element-wise.
    Parameters
    ----------
    x : array_like
        The values whose square-roots are required.
    out : ndarray, optional
        Alternate array object in which to put the result; if provided, it
        must have the same shape as `x`
    Returns
    -------
    y : ndarray
        An array of the same shape as `x`, containing the positive
        square-root of each element in `x`. If any element in `x` is
        complex, a complex array is returned (and the square-roots of
        negative reals are calculated). If all of the elements in `x`
        are real, so is `y`, with negative elements returning ``nan``.
        If `out` was provided, `y` is a reference to it.
    See Also
    --------
    lib.scimath.sqrt
        A version which returns complex numbers when given negative reals.
    Notes
    -----
    *sqrt* has--consistent with common convention--as its branch cut the
    real "interval" [`-inf`, 0), and is continuous from above on it.
    A branch cut is a curve in the complex plane across which a given
    complex function fails to be continuous.
    Examples
    --------
    >>> np.sqrt([1,4,9])
    array([ 1., 2., 3.])
    >>> np.sqrt([4, -1, -3+4J])
    array([ 2.+0.j, 0.+1.j, 1.+2.j])
    >>> np.sqrt([4, -1, np.inf])
    array([ 2., NaN, Inf])
    """)
# np.cbrt: element-wise real cube root.
# Docstring fix: removed the duplicated word in "containing the cube
# cube-root of each element".
add_newdoc('numpy.core.umath', 'cbrt',
    """
    Return the cube-root of an array, element-wise.
    .. versionadded:: 1.10.0
    Parameters
    ----------
    x : array_like
        The values whose cube-roots are required.
    out : ndarray, optional
        Alternate array object in which to put the result; if provided, it
        must have the same shape as `x`
    Returns
    -------
    y : ndarray
        An array of the same shape as `x`, containing the
        cube-root of each element in `x`.
        If `out` was provided, `y` is a reference to it.
    Examples
    --------
    >>> np.cbrt([1,8,27])
    array([ 1., 2., 3.])
    """)
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
# np.tan: element-wise tangent.
# Docstring fix: the "optional output parameter" examples called ``np.cos``
# instead of ``np.tan`` (copy-paste from the cos docstring).
add_newdoc('numpy.core.umath', 'tan',
    """
    Compute tangent element-wise.
    Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
    Parameters
    ----------
    x : array_like
        Input array.
    out : ndarray, optional
        Output array of same shape as `x`.
    Returns
    -------
    y : ndarray
        The corresponding tangent values.
    Raises
    ------
    ValueError: invalid return array shape
        if `out` is provided and `out.shape` != `x.shape` (See Examples)
    Notes
    -----
    If `out` is provided, the function writes the result into it,
    and returns a reference to `out`. (See Examples)
    References
    ----------
    M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
    New York, NY: Dover, 1972.
    Examples
    --------
    >>> from math import pi
    >>> np.tan(np.array([-pi,pi/2,pi]))
    array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
    >>>
    >>> # Example of providing the optional output parameter illustrating
    >>> # that what is returned is a reference to said parameter
    >>> out2 = np.tan([0.1], out1)
    >>> out2 is out1
    True
    >>>
    >>> # Example of ValueError due to provision of shape mis-matched `out`
    >>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    ValueError: invalid return array shape
    """)
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making
``//`` and ``/`` equivalent operators. The default floor division
operation of ``/`` can be replaced by true division with ``from
__future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
# np.frexp: split floats into mantissa and twos exponent.
# Docstring fixes: balanced the backticks around the mantissa formula and
# repaired the grammar typos "is lies" / "interval(-1, 1)".
add_newdoc('numpy.core.umath', 'frexp',
    """
    Decompose the elements of x into mantissa and twos exponent.
    Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
    The mantissa lies in the open interval (-1, 1), while the twos
    exponent is a signed integer.
    Parameters
    ----------
    x : array_like
        Array of numbers to be decomposed.
    out1 : ndarray, optional
        Output array for the mantissa. Must have the same shape as `x`.
    out2 : ndarray, optional
        Output array for the exponent. Must have the same shape as `x`.
    Returns
    -------
    (mantissa, exponent) : tuple of ndarrays, (float, int)
        `mantissa` is a float array with values between -1 and 1.
        `exponent` is an int array which represents the exponent of 2.
    See Also
    --------
    ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
    Notes
    -----
    Complex dtypes are not supported, they will raise a TypeError.
    Examples
    --------
    >>> x = np.arange(9)
    >>> y1, y2 = np.frexp(x)
    >>> y1
    array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
            0.5 ])
    >>> y2
    array([0, 1, 2, 2, 3, 3, 3, 3, 4])
    >>> y1 * 2**y2
    array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
    """)
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
out : ndarray, optional
Output array for the result.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`, if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float32)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
| bsd-3-clause |
feiyanzhandui/tware | vizdom/server.py | 2 | 5144 | #!/usr/bin/python
import json
import pandas as pd
import traceback
import uuid
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from csv import DictReader
from cache import Cache
from executor import BasicExecutor
from util import Timer
# ICD-9 diagnosis groups as (name, first_code, last_code) ranges, used to
# bucket diagnoses into coarse disease categories.
icd9s = [('infectious',1,140),
         ('metabolic',240,280),
         ('blood',280,290),
         ('neurologic',320,390),
         ('heart_hypertensive',401,406),
         ('heart_ischemic',410,415),
         ('heart_failure',428,429),
         ('pulmonary',460,520),
         ('digestive',520,580),
         ('renal_insufficiency',580,630)]
# Dataset schema exposed through the catalog: a nested list of
# [group_name, fields] entries where each field is a (name, flag) pair.
# NOTE(review): the boolean flag presumably marks continuous/numeric
# fields (True) vs. categorical ones (False) -- confirm against the client.
schema = [['icd9',[(icd9[0],False) for icd9 in icd9s]],
          ['demo',[('sex',False),
                   ('age',True),
                   ('race',False),
                   ('marital',False),
                   ('religion',False)]],
          ['phys',[('height',True),
                   ('weight',True),
                   ('bmi',True),
                   ('temperature',True),
                   ('heart_rate',True),
                   ('resp_rate',True),
                   ('systolic_bp',True),
                   ('diastolic_bp',True),
                   ('spo2',True),
                   ('sapsi',False),
                   ('sofa',False),
                   ('gcs',False),
                   ('braden',False)]],
          ['blood',[['bmp',[('sodium',True),
                            ('potassium',True),
                            ('chloride',True),
                            ('magnesium',True),
                            ('calcium',True),
                            ('anion_gap',True),
                            ('bun',True),
                            ('creatinine',True),
                            ('glucose',True)]],
                    ['abg',[('ph',True),
                            ('be',True),
                            ('total_co2',True),
                            ('total_o2',True),
                            ('pco2',True),
                            ('po2',True)]],
                    ['cbc',[('wbc',True),
                            ('rbc',True),
                            ('hgb',True),
                            ('hct',True),
                            ('mcv',True),
                            ('mch',True),
                            ('mchc',True),
                            ('rdw',True),
                            ('plates',True),
                            ('neuts',True),
                            ('lymphs',True),
                            ('monos',True),
                            ('basos',True),
                            ('eos',True),
                            ('pt',True),
                            ('inr_pt',True),
                            ('ptt',True)]],
                    ['cardiac',[('ckmb',True),
                                ('cpkmb',True),
                                ('ldh',True),
                                ('bnp',True),
                                ('tropi',True),
                                ('tropt',True)]],
                    ['hepatic',[('total_bili',True),
                                ('direct_bili',True),
                                ('indirect_bili',True),
                                ('albumin',True),
                                ('tg',True)]]]]]
class Server(HTTPServer):
    """HTTP server holding a result cache and a catalog of datasets.

    On construction the mimic2 CSV is loaded into the cache under a fixed
    identifier so clients can address it immediately.
    """
    def __init__(self, addr, handler, file_dir):
        HTTPServer.__init__(self, addr, handler)
        self.cache = Cache()
        # A fixed id is used instead of a random one; the uuid variant is
        # kept commented out for reference.
        #dataset_id = str(uuid.uuid4())
        dataset_id = '0'
        self.cache[dataset_id] = pd.read_csv(file_dir)
        self.catalog = {'mimic2': {'uuid': dataset_id,
                                   'schema': schema}}
class RequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
t = Timer()
t.start()
response = 200
result = {}
try:
content_length = int(self.headers.getheader('content-length'))
req = json.loads(self.rfile.read(content_length))
print req
req_type = req['type']
result = None
if req_type == 'catalog':
result = json.dumps(self.server.catalog)
elif req_type == 'execute':
task = req['args']['task']
json.dumps(BasicExecutor(self.server.cache, task).execute())
elif req_type == 'lookup':
uuid = req['args']['uuid']
result = self.server.cache[uuid]
if type(result) is pd.DataFrame:
page_size = int(req['args']['page_size'])
page_num = int(req['args']['page_num'])
i = page_size * page_num
j = i + page_size
result = result[i:j]
result = result.to_json()
except:
print traceback.format_exc()
response = 500
result = '{}'
t.stop()
self.send_response(response)
self.send_header('Content-type','application/json')
self.end_headers()
self.wfile.write(result)
print 'Run Time:', t.time()
| apache-2.0 |
bcimontreal/bci_workshop | python/lsl-viewer.py | 1 | 6793 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter, lfilter_zi, firwin
from time import sleep
from pylsl import StreamInlet, resolve_byprop
from optparse import OptionParser
import seaborn as sns
from threading import Thread
sns.set(style="whitegrid")
# Command-line options controlling the viewer display.
# Fixed user-facing typos: "lenght" -> "length", "aquiring" -> "acquiring",
# "Cant" -> "Can't".
parser = OptionParser()
parser.add_option("-w", "--window",
                  dest="window", type='float', default=5.,
                  help="window length to display in seconds.")
parser.add_option("-s", "--scale",
                  dest="scale", type='float', default=100,
                  help="scale in uV")
parser.add_option("-r", "--refresh",
                  dest="refresh", type='float', default=0.2,
                  help="refresh rate in seconds.")
parser.add_option("-f", "--figure",
                  dest="figure", type='string', default="15x6",
                  help="window size.")
# Display/acquisition constants.
filt = True       # NOTE(review): appears unused here; LSLViewer keeps its own flag.
subsample = 2     # plot every other sample to lighten drawing
buf = 12          # LSL chunk length, in samples
(options, args) = parser.parse_args()
window = options.window
scale = options.scale
figsize = np.int16(options.figure.split('x'))
# Locate an EEG stream on the network; fail fast if none is found.
print("looking for an EEG stream...")
streams = resolve_byprop('type', 'EEG', timeout=2)
if len(streams) == 0:
    raise(RuntimeError("Can't find EEG stream"))
print("Start acquiring data")
class LSLViewer():
    """Scrolling real-time viewer for an LSL EEG stream.

    A background thread pulls sample chunks from a pylsl StreamInlet,
    band-pass filters them incrementally, and periodically redraws the
    matplotlib axes with the last `window` seconds of data. Keyboard
    shortcuts adjust the vertical scale and time window and toggle
    filtering.
    """
    def __init__(self, stream, fig, axes, window, scale, dejitter=True):
        """Open the inlet, set up the filter state and create plot lines.

        Parameters
        ----------
        stream : pylsl stream info to open an inlet on.
        fig, axes : matplotlib Figure and Axes to draw into.
        window : display window length, in seconds.
        scale : vertical scale, in uV.
        dejitter : if True, replace device timestamps by a uniform grid
            derived from the nominal sampling rate.
        """
        self.stream = stream
        self.window = window
        self.scale = scale
        self.dejitter = dejitter
        self.inlet = StreamInlet(stream, max_chunklen=buf)
        self.filt = True
        info = self.inlet.info()
        description = info.desc()
        self.sfreq = info.nominal_srate()
        self.n_samples = int(self.sfreq * self.window)
        self.n_chan = info.channel_count()
        # Read channel labels from the stream metadata.
        # NOTE(review): this collects n_chan + 1 labels (first child plus
        # n_chan siblings); only the first n_chan are used below -- confirm
        # against the stream's XML layout.
        ch = description.child('channels').first_child()
        ch_names = [ch.child_value('label')]
        for i in range(self.n_chan):
            ch = ch.next_sibling()
            ch_names.append(ch.child_value('label'))
        self.ch_names = ch_names
        fig.canvas.mpl_connect('key_press_event', self.OnKeypress)
        fig.canvas.mpl_connect('button_press_event', self.onclick)
        self.fig = fig
        self.axes = axes
        sns.despine(left=True)
        self.data = np.zeros((self.n_samples, self.n_chan))
        self.times = np.arange(-self.window, 0, 1./self.sfreq)
        impedances = np.std(self.data, axis=0)
        # One line per channel, vertically offset by the channel index.
        lines = []
        for ii in range(self.n_chan):
            line, = axes.plot(self.times[::subsample],
                              self.data[::subsample, ii] - ii, lw=1)
            lines.append(line)
        self.lines = lines
        axes.set_ylim(-self.n_chan + 0.5, 0.5)
        ticks = np.arange(0, -self.n_chan, -1)
        axes.set_xlabel('Time (s)')
        axes.xaxis.grid(False)
        axes.set_yticks(ticks)
        # Y tick labels show channel name and current signal std.
        ticks_labels = ['%s - %.1f' % (ch_names[ii], impedances[ii])
                        for ii in range(self.n_chan)]
        axes.set_yticklabels(ticks_labels)
        # Redraw roughly every 0.2 s worth of 12-sample chunks.
        self.display_every = int(0.2 / (12/self.sfreq))
        # self.bf, self.af = butter(4, np.array([1, 40])/(self.sfreq/2.),
        #                           'bandpass')
        # 32-tap FIR band-pass (1-40 Hz); applied incrementally via
        # lfilter's state so successive chunks stay continuous.
        self.bf = firwin(32, np.array([1, 40])/(self.sfreq/2.), width=0.05,
                         pass_zero=False)
        self.af = [1.0]
        zi = lfilter_zi(self.bf, self.af)
        self.filt_state = np.tile(zi, (self.n_chan, 1)).transpose()
        self.data_f = np.zeros((self.n_samples, self.n_chan))
    def update_plot(self):
        """Background loop: pull chunks, filter, and periodically redraw."""
        k = 0
        while self.started:
            samples, timestamps = self.inlet.pull_chunk(timeout=1.0,
                                                        max_samples=12)
            if timestamps:
                if self.dejitter:
                    # Replace device timestamps by a uniform grid that
                    # continues from the last plotted time.
                    timestamps = np.float64(np.arange(len(timestamps)))
                    timestamps /= self.sfreq
                    timestamps += self.times[-1] + 1./self.sfreq
                self.times = np.concatenate([self.times, timestamps])
                self.n_samples = int(self.sfreq * self.window)
                self.times = self.times[-self.n_samples:]
                self.data = np.vstack([self.data, samples])
                self.data = self.data[-self.n_samples:]
                filt_samples, self.filt_state = lfilter(
                    self.bf, self.af,
                    samples,
                    axis=0, zi=self.filt_state)
                self.data_f = np.vstack([self.data_f, filt_samples])
                self.data_f = self.data_f[-self.n_samples:]
                k += 1
                # Only redraw every `display_every` chunks to keep up.
                if k == self.display_every:
                    if self.filt:
                        plot_data = self.data_f
                    elif not self.filt:
                        # Unfiltered view: just remove the per-channel mean.
                        plot_data = self.data - self.data.mean(axis=0)
                    for ii in range(self.n_chan):
                        self.lines[ii].set_xdata(self.times[::subsample] -
                                                 self.times[-1])
                        self.lines[ii].set_ydata(plot_data[::subsample, ii] /
                                                 self.scale - ii)
                    impedances = np.std(plot_data, axis=0)
                    ticks_labels = ['%s - %.2f' % (self.ch_names[ii],
                                                   impedances[ii])
                                    for ii in range(self.n_chan)]
                    self.axes.set_yticklabels(ticks_labels)
                    self.axes.set_xlim(-self.window, 0)
                    self.fig.canvas.draw()
                    k = 0
            else:
                # No data this round; back off briefly.
                sleep(0.2)
    def onclick(self, event):
        # Debug helper: print where the user clicked.
        print((event.button, event.x, event.y, event.xdata, event.ydata))
    def OnKeypress(self, event):
        """Shortcuts: / and * change scale, + and - change window, d toggles filtering."""
        if event.key == '/':
            self.scale *= 1.2
        elif event.key == '*':
            self.scale /= 1.2
        elif event.key == '+':
            self.window += 1
        elif event.key == '-':
            if self.window > 1:
                self.window -= 1
        elif event.key == 'd':
            self.filt = not(self.filt)
    def start(self):
        """Start pulling data on a daemon thread."""
        self.started = True
        self.thread = Thread(target=self.update_plot)
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        """Signal the acquisition thread to stop."""
        self.started = False
# Build the figure, start streaming, and block in the matplotlib main loop.
# Fixed user-facing typo in the on-screen help: "toogle" -> "toggle".
fig, axes = plt.subplots(1, 1, figsize=figsize, sharex=True)
lslv = LSLViewer(streams[0], fig, axes, window, scale)
help_str = """
                toggle filter : d
           toggle full screen : f
                     zoom out : /
                      zoom in : *
          increase time scale : -
          decrease time scale : +
"""
print(help_str)
lslv.start()
plt.show()
lslv.stop()
| mit |
TomAugspurger/pandas | pandas/core/internals/construction.py | 1 | 23734 | """
Functions for preparing various inputs passed to the DataFrame or Series
constructors before passing them to a BlockManager.
"""
from collections import abc
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._typing import Axis, DtypeObj, Scalar
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
maybe_cast_to_datetime,
maybe_convert_platform,
maybe_infer_to_datetimelike,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCIndexClass,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas.core import algorithms, common as com
from pandas.core.arrays import Categorical
from pandas.core.construction import extract_array, sanitize_array
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import (
Index,
ensure_index,
get_objs_combined_axis,
union_indexes,
)
from pandas.core.internals.managers import (
create_block_manager_from_arrays,
create_block_manager_from_blocks,
)
if TYPE_CHECKING:
from pandas import Series # noqa:F401
# ---------------------------------------------------------------------
# BlockManager Interface
def arrays_to_mgr(
    arrays,
    arr_names,
    index,
    columns,
    dtype: Optional[DtypeObj] = None,
    verify_integrity: bool = True,
):
    """
    Segregate Series based on type and coerce into matrices.

    Needs to handle a lot of exceptional cases.

    When `verify_integrity` is True, the index is inferred from the arrays
    if not supplied and the arrays are coerced to a common index/dtype;
    otherwise the inputs are trusted as-is.
    """
    arr_names = ensure_index(arr_names)

    if verify_integrity:
        # figure out the index, if necessary
        index = extract_index(arrays) if index is None else ensure_index(index)
        # don't force copy because getting jammed in an ndarray anyway
        arrays = _homogenize(arrays, index, dtype)
    else:
        index = ensure_index(index)

    columns = ensure_index(columns)

    # axes are [columns, index] from the BlockManager's perspective
    return create_block_manager_from_arrays(arrays, arr_names, [columns, index])
def masked_rec_array_to_mgr(
    data, index, columns, dtype: Optional[DtypeObj], copy: bool
):
    """
    Extract from a masked rec array and create the manager.

    Masked entries are replaced by the record array's per-field fill
    values (upcasting the column dtype when the fill value does not fit)
    before the arrays are handed to ``arrays_to_mgr``.
    """
    # essentially process a record array then fill it
    fill_value = data.fill_value
    fdata = ma.getdata(data)
    # Derive an index: from the record array itself if possible, else a
    # default RangeIndex.
    if index is None:
        index = get_names_from_index(fdata)
        if index is None:
            index = ibase.default_index(len(data))
    index = ensure_index(index)
    if columns is not None:
        columns = ensure_index(columns)
    arrays, arr_columns = to_arrays(fdata, columns)
    # fill if needed
    new_arrays = []
    for fv, arr, col in zip(fill_value, arrays, arr_columns):
        # TODO: numpy docs suggest fv must be scalar, but could it be
        # non-scalar for object dtype?
        assert lib.is_scalar(fv), fv
        mask = ma.getmaskarray(data[col])
        if mask.any():
            # Upcast so the fill value is representable, then fill in place.
            arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
            arr[mask] = fv
        new_arrays.append(arr)
    # create the manager
    arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
    if columns is None:
        columns = arr_columns
    mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)
    if copy:
        mgr = mgr.copy()
    return mgr
# ---------------------------------------------------------------------
# DataFrame Constructor Interface
def init_ndarray(values, index, columns, dtype: Optional[DtypeObj], copy: bool):
    """
    Build a BlockManager from an ndarray, list, Series, or Index.

    Categorical and extension-array inputs are routed through
    ``arrays_to_mgr``; everything else is coerced to a 2-D ndarray and
    wrapped into blocks via ``create_block_manager_from_blocks``.
    """
    # input must be a ndarray, list, Series, index
    if isinstance(values, ABCSeries):
        if columns is None:
            if values.name is not None:
                columns = [values.name]
        if index is None:
            index = values.index
        else:
            values = values.reindex(index)
        # zero len case (GH #2234)
        if not len(values) and columns is not None and len(columns):
            values = np.empty((0, 1), dtype=object)
    # we could have a categorical type passed or coerced to 'category'
    # recast this to an arrays_to_mgr
    if is_categorical_dtype(getattr(values, "dtype", None)) or is_categorical_dtype(
        dtype
    ):
        if not hasattr(values, "dtype"):
            values = _prep_ndarray(values, copy=copy)
            values = values.ravel()
        elif copy:
            values = values.copy()
        # categorical data is stored as a single 1-D column
        index, columns = _get_axes(len(values), 1, index, columns)
        return arrays_to_mgr([values], columns, index, columns, dtype=dtype)
    elif is_extension_array_dtype(values) or is_extension_array_dtype(dtype):
        # GH#19157
        if isinstance(values, np.ndarray) and values.ndim > 1:
            # GH#12513 a EA dtype passed with a 2D array, split into
            # multiple EAs that view the values
            values = [values[:, n] for n in range(values.shape[1])]
        else:
            values = [values]
        if columns is None:
            columns = Index(range(len(values)))
        return arrays_to_mgr(values, columns, index, columns, dtype=dtype)
    # by definition an array here
    # the dtypes will be coerced to a single dtype
    values = _prep_ndarray(values, copy=copy)
    if dtype is not None:
        if not is_dtype_equal(values.dtype, dtype):
            try:
                values = values.astype(dtype)
            except Exception as orig:
                # e.g. ValueError when trying to cast object dtype to float64
                raise ValueError(
                    f"failed to cast to '{dtype}' (Exception was: {orig})"
                ) from orig
    # _prep_ndarray ensures that values.ndim == 2 at this point
    index, columns = _get_axes(
        values.shape[0], values.shape[1], index=index, columns=columns
    )
    # blocks store columns along the first axis, hence the transpose
    values = values.T
    # if we don't have a dtype specified, then try to convert objects
    # on the entire block; this is to convert if we have datetimelike's
    # embedded in an object type
    if dtype is None and is_object_dtype(values):
        if values.ndim == 2 and values.shape[0] != 1:
            # transpose and separate blocks
            dvals_list = [maybe_infer_to_datetimelike(row) for row in values]
            for n in range(len(dvals_list)):
                if isinstance(dvals_list[n], np.ndarray):
                    dvals_list[n] = dvals_list[n].reshape(1, -1)
            from pandas.core.internals.blocks import make_block
            # TODO: What about re-joining object columns?
            block_values = [
                make_block(dvals_list[n], placement=[n]) for n in range(len(dvals_list))
            ]
        else:
            datelike_vals = maybe_infer_to_datetimelike(values)
            block_values = [datelike_vals]
    else:
        block_values = [values]
    return create_block_manager_from_blocks(block_values, [columns, index])
def init_dict(data: Dict, index, columns, dtype: Optional[DtypeObj] = None):
    """
    Segregate Series based on type and coerce into matrices.
    Needs to handle a lot of exceptional cases.

    Builds a BlockManager from a dict of array-likes/scalars, either keyed by
    the requested ``columns`` or, when ``columns`` is None, by the dict's own
    insertion order.
    """
    arrays: Union[Sequence[Any], "Series"]
    if columns is not None:
        from pandas.core.series import Series  # noqa:F811
        # Reindexing by `columns` both selects and orders the dict's values;
        # keys absent from `data` show up as missing entries.
        arrays = Series(data, index=columns, dtype=object)
        data_names = arrays.index
        missing = arrays.isna()
        if index is None:
            # GH10856
            # raise ValueError if only scalars in dict
            index = extract_index(arrays[~missing])
        else:
            index = ensure_index(index)
        # no obvious "empty" int column
        if missing.any() and not is_integer_dtype(dtype):
            if dtype is None or (
                not is_extension_array_dtype(dtype)
                and np.issubdtype(dtype, np.flexible)
            ):
                # GH#1783
                nan_dtype = np.dtype(object)
            else:
                nan_dtype = dtype
            # fill requested-but-absent columns with length-matched NaN arrays
            val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
            arrays.loc[missing] = [val] * missing.sum()
    else:
        keys = list(data.keys())
        columns = data_names = Index(keys)
        arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]
        # GH#24096 need copy to be deep for datetime64tz case
        # TODO: See if we can avoid these copies
        arrays = [
            arr if not isinstance(arr, ABCIndexClass) else arr._data for arr in arrays
        ]
        arrays = [
            arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays
        ]
    return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
# ---------------------------------------------------------------------
def _prep_ndarray(values, copy: bool = True) -> np.ndarray:
    """
    Coerce ``values`` to a 2-D ndarray.

    1-D input becomes a single-column 2-D array; input with more than two
    dimensions raises ValueError.
    """
    if not isinstance(values, (np.ndarray, ABCSeries, Index)):
        if len(values) == 0:
            return np.empty((0, 0), dtype=object)
        elif isinstance(values, range):
            arr = np.arange(values.start, values.stop, values.step, dtype="int64")
            return arr[..., np.newaxis]
        def convert(v):
            # platform-dependent int/float dtype coercion for list input
            return maybe_convert_platform(v)
        # we could have a 1-dim or 2-dim list here
        # this is equiv of np.asarray, but does object conversion
        # and platform dtype preservation
        try:
            # NOTE(review): this checks for an attribute literally named
            # "len", not "__len__" -- confirm this is intentional upstream.
            if is_list_like(values[0]) or hasattr(values[0], "len"):
                values = np.array([convert(v) for v in values])
            elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
                # GH#21861
                values = np.array([convert(v) for v in values])
            else:
                values = convert(values)
        except (ValueError, TypeError):
            # fall back to converting the whole object at once
            values = convert(values)
    else:
        # drop subclass info, do not copy data
        values = np.asarray(values)
        if copy:
            values = values.copy()
    if values.ndim == 1:
        values = values.reshape((values.shape[0], 1))
    elif values.ndim != 2:
        raise ValueError("Must pass 2-d input")
    return values
def _homogenize(data, index, dtype: Optional[DtypeObj]):
    """
    Align each element of ``data`` to ``index`` and coerce to ``dtype``.

    Returns a list with one aligned array/Series per input element.
    """
    # lazily-built object-dtype version of `index`, used for dict lookups
    oindex = None
    homogenized = []
    for val in data:
        if isinstance(val, ABCSeries):
            if dtype is not None:
                val = val.astype(dtype)
            if val.index is not index:
                # Forces alignment. No need to copy data since we
                # are putting it into an ndarray later
                val = val.reindex(index, copy=False)
        else:
            if isinstance(val, dict):
                if oindex is None:
                    oindex = index.astype("O")
                if isinstance(index, (ABCDatetimeIndex, ABCTimedeltaIndex)):
                    # convert datetime-like keys so they match oindex entries
                    val = com.dict_compat(val)
                else:
                    val = dict(val)
                # keys missing from the dict become NaN
                val = lib.fast_multiget(val, oindex._values, default=np.nan)
            val = sanitize_array(
                val, index, dtype=dtype, copy=False, raise_cast_failure=False
            )
        homogenized.append(val)
    return homogenized
def extract_index(data) -> Index:
    """
    Try to infer an Index from the passed data, raise ValueError on failure.
    """
    index = None
    if len(data) == 0:
        index = Index([])
    elif len(data) > 0:
        raw_lengths = []
        indexes = []
        # Track which kinds of inputs were seen; these flags decide how the
        # index is built and which length checks apply below.
        have_raw_arrays = False
        have_series = False
        have_dicts = False
        for val in data:
            if isinstance(val, ABCSeries):
                have_series = True
                indexes.append(val.index)
            elif isinstance(val, dict):
                have_dicts = True
                indexes.append(list(val.keys()))
            elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
                have_raw_arrays = True
                raw_lengths.append(len(val))
        if not indexes and not raw_lengths:
            raise ValueError("If using all scalar values, you must pass an index")
        if have_series:
            index = union_indexes(indexes)
        elif have_dicts:
            # dict keys keep insertion order; don't sort them away
            index = union_indexes(indexes, sort=False)
        if have_raw_arrays:
            lengths = list(set(raw_lengths))
            if len(lengths) > 1:
                raise ValueError("arrays must all be same length")
            if have_dicts:
                raise ValueError(
                    "Mixing dicts with non-Series may lead to ambiguous ordering."
                )
            if have_series:
                assert index is not None  # for mypy
                # raw arrays must agree with the index inferred from Series
                if lengths[0] != len(index):
                    msg = (
                        f"array length {lengths[0]} does not match index "
                        f"length {len(index)}"
                    )
                    raise ValueError(msg)
            else:
                index = ibase.default_index(lengths[0])
    return ensure_index(index)
def reorder_arrays(arrays, arr_columns, columns):
    """Reorder ``arrays``/``arr_columns`` to match the order of ``columns``."""
    # Only reorder when both the target and source column listings are
    # non-empty; otherwise pass the inputs through untouched.
    have_target = columns is not None and len(columns)
    have_source = arr_columns is not None and len(arr_columns)
    if have_target and have_source:
        indexer = ensure_index(arr_columns).get_indexer(columns)
        arr_columns = ensure_index([arr_columns[pos] for pos in indexer])
        arrays = [arrays[pos] for pos in indexer]
    return arrays, arr_columns
def get_names_from_index(data):
    """
    Build row labels from the ``name`` attributes of the given sequence.

    Returns a default integer index when nothing is named; otherwise unnamed
    entries are labelled ``"Unnamed <k>"`` in order of appearance.
    """
    names = [getattr(s, "name", None) for s in data]
    if all(name is None for name in names):
        return ibase.default_index(len(data))
    index = []
    unnamed_count = 0
    for name in names:
        if name is None:
            index.append(f"Unnamed {unnamed_count}")
            unnamed_count += 1
        else:
            index.append(name)
    return index
def _get_axes(N, K, index, columns) -> Tuple[Index, Index]:
    """Return ``(index, columns)`` for an N x K frame, defaulting missing axes."""
    # An absent axis falls back to a default integer index of the right size.
    index = ibase.default_index(N) if index is None else ensure_index(index)
    columns = ibase.default_index(K) if columns is None else ensure_index(columns)
    return index, columns
def dataclasses_to_dicts(data):
    """
    Converts a list of dataclass instances to a list of dictionaries.

    Parameters
    ----------
    data : List[Type[dataclass]]

    Returns
    --------
    list_dict : List[dict]

    Examples
    --------
    >>> @dataclass
    ... class Point:
    ...     x: int
    ...     y: int
    >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)])
    [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]
    """
    from dataclasses import asdict
    return [asdict(instance) for instance in data]
# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays
def to_arrays(
    data, columns, coerce_float: bool = False, dtype: Optional[DtypeObj] = None
):
    """
    Return list of arrays, columns.

    Dispatches on the type of ``data``: DataFrame, empty input, list of
    tuples/dicts/Series/Categoricals, or a structured ndarray.
    """
    if isinstance(data, ABCDataFrame):
        if columns is not None:
            # keep only the requested columns, in the frame's own order
            arrays = [
                data._ixs(i, axis=1).values
                for i, col in enumerate(data.columns)
                if col in columns
            ]
        else:
            columns = data.columns
            arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
        return arrays, columns
    if not len(data):
        # empty input: a structured dtype still contributes column names
        if isinstance(data, np.ndarray):
            columns = data.dtype.names
            if columns is not None:
                return [[]] * len(columns), columns
        return [], []  # columns if columns is not None else []
    if isinstance(data[0], (list, tuple)):
        return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype)
    elif isinstance(data[0], abc.Mapping):
        return _list_of_dict_to_arrays(
            data, columns, coerce_float=coerce_float, dtype=dtype
        )
    elif isinstance(data[0], ABCSeries):
        return _list_of_series_to_arrays(
            data, columns, coerce_float=coerce_float, dtype=dtype
        )
    elif isinstance(data[0], Categorical):
        if columns is None:
            columns = ibase.default_index(len(data))
        return data, columns
    elif (
        isinstance(data, (np.ndarray, ABCSeries, Index))
        and data.dtype.names is not None
    ):
        # structured/record array: one column per named field
        columns = list(data.dtype.names)
        arrays = [data[k] for k in columns]
        return arrays, columns
    else:
        # last ditch effort
        data = [tuple(x) for x in data]
        return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype)
def _list_to_arrays(
    data: List[Scalar],
    columns: Union[Index, List],
    coerce_float: bool = False,
    dtype: Optional[DtypeObj] = None,
) -> Tuple[List[Scalar], Union[Index, List[Axis]]]:
    """
    Convert a list of lists/tuples into per-column object arrays.

    Returns the converted columns together with validated column labels.
    """
    if len(data) > 0 and isinstance(data[0], tuple):
        content = list(lib.to_object_array_tuples(data).T)
    else:
        # list of lists
        content = list(lib.to_object_array(data).T)
    # gh-26429 do not raise user-facing AssertionError
    try:
        columns = _validate_or_indexify_columns(content, columns)
        result = _convert_object_array(content, dtype=dtype, coerce_float=coerce_float)
    except AssertionError as e:
        raise ValueError(e) from e
    return result, columns
def _list_of_series_to_arrays(
    data: List,
    columns: Union[Index, List],
    coerce_float: bool = False,
    dtype: Optional[DtypeObj] = None,
) -> Tuple[List[Scalar], Union[Index, List[Axis]]]:
    """
    Align a list of Series to a common column axis and stack their values.
    """
    if columns is None:
        # We know pass_data is non-empty because data[0] is a Series
        pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
        columns = get_objs_combined_axis(pass_data, sort=False)
    # cache indexers keyed by id(index) so identical index objects are
    # only aligned once
    indexer_cache: Dict[int, Scalar] = {}
    aligned_values = []
    for s in data:
        index = getattr(s, "index", None)
        if index is None:
            index = ibase.default_index(len(s))
        if id(index) in indexer_cache:
            indexer = indexer_cache[id(index)]
        else:
            indexer = indexer_cache[id(index)] = index.get_indexer(columns)
        values = extract_array(s, extract_numpy=True)
        aligned_values.append(algorithms.take_1d(values, indexer))
    values = np.vstack(aligned_values)
    if values.dtype == np.object_:
        # object columns get the usual soft-conversion treatment
        content = list(values.T)
        columns = _validate_or_indexify_columns(content, columns)
        content = _convert_object_array(content, dtype=dtype, coerce_float=coerce_float)
        return content, columns
    else:
        return values.T, columns
def _list_of_dict_to_arrays(
    data: List,
    columns: Union[Index, List],
    coerce_float: bool = False,
    dtype: Optional[DtypeObj] = None,
) -> Tuple[List[Scalar], Union[Index, List[Axis]]]:
    """
    Convert list of dicts to numpy arrays

    if `columns` is not passed, column names are inferred from the records
    - for OrderedDict and dicts, the column names match
      the key insertion-order from the first record to the last.
    - For other kinds of dict-likes, the keys are lexically sorted.

    Parameters
    ----------
    data : iterable
        collection of records (OrderedDict, dict)
    columns: iterables or None
    coerce_float : bool
    dtype : np.dtype

    Returns
    -------
    tuple
        arrays, columns
    """
    if columns is None:
        gen = (list(x.keys()) for x in data)
        # sort keys only when no plain dicts are present (plain dicts keep
        # insertion order)
        sort = not any(isinstance(d, dict) for d in data)
        columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
    # assure that they are of the base dict class and not of derived
    # classes
    # (conditional expression replaces the old fragile `X and d or dict(d)`
    # idiom, which also needlessly copied empty plain dicts)
    data = [d if type(d) is dict else dict(d) for d in data]
    content = list(lib.dicts_to_array(data, list(columns)).T)
    columns = _validate_or_indexify_columns(content, columns)
    content = _convert_object_array(content, dtype=dtype, coerce_float=coerce_float)
    return content, columns
def _validate_or_indexify_columns(
content: List, columns: Optional[Union[Index, List]]
) -> Union[Index, List[Axis]]:
"""
If columns is None, make numbers as column names; Otherwise, validate that
columns have valid length.
Parameters
----------
content: list of data
columns: Iterable or None
Returns
-------
columns: If columns is Iterable, return as is; If columns is None, assign
positional column index value as columns.
Raises
------
1. AssertionError when content is not composed of list of lists, and if
length of columns is not equal to length of content.
2. ValueError when content is list of lists, but length of each sub-list
is not equal
3. ValueError when content is list of lists, but length of sub-list is
not equal to length of content
"""
if columns is None:
columns = ibase.default_index(len(content))
else:
# Add mask for data which is composed of list of lists
is_mi_list = isinstance(columns, list) and all(
isinstance(col, list) for col in columns
)
if not is_mi_list and len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError(
f"{len(columns)} columns passed, passed data had "
f"{len(content)} columns"
)
elif is_mi_list:
# check if nested list column, length of each sub-list should be equal
if len({len(col) for col in columns}) > 1:
raise ValueError(
"Length of columns passed for MultiIndex columns is different"
)
# if columns is not empty and length of sublist is not equal to content
elif columns and len(columns[0]) != len(content):
raise ValueError(
f"{len(columns[0])} columns passed, passed data had "
f"{len(content)} columns"
)
return columns
def _convert_object_array(
content: List[Scalar], coerce_float: bool = False, dtype: Optional[DtypeObj] = None
) -> List[Scalar]:
"""
Internal function ot convert object array.
Parameters
----------
content: list of processed data records
coerce_float: bool, to coerce floats or not, default is False
dtype: np.dtype, default is None
Returns
-------
arrays: casted content if not object dtype, otherwise return as is in list.
"""
# provide soft conversion of object dtypes
def convert(arr):
if dtype != np.dtype("O"):
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays
# ---------------------------------------------------------------------
# Series-Based
def sanitize_index(data, index: Index):
    """
    Sanitize an index type to return an ndarray of the underlying, pass
    through a non-Index.
    """
    if len(data) != len(index):
        raise ValueError("Length of values does not match length of index")
    # only datetime64/timedelta64 ndarrays need coercion; everything else
    # passes through untouched
    if isinstance(data, np.ndarray) and data.dtype.kind in ["M", "m"]:
        data = sanitize_array(data, index, copy=False)
    return data
| bsd-3-clause |
arokem/scipy | scipy/linalg/basic.py | 2 | 56439 | #
# Author: Pearu Peterson, March 2002
#
# w/ additions by Travis Oliphant, March 2002
# and Jake Vanderplas, August 2012
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy import atleast_1d, atleast_2d
from .flinalg import get_flinalg_funcs
from .lapack import get_lapack_funcs, _compute_lwork
from .misc import LinAlgError, _datacopied, LinAlgWarning
from .decomp import _asarray_validated
from . import decomp, decomp_svd
from ._solve_toeplitz import levinson
__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
'pinv', 'pinv2', 'pinvh', 'matrix_balance']
# Linear equations
def _solve_check(n, info, lamch=None, rcond=None):
""" Check arguments during the different steps of the solution phase """
if info < 0:
raise ValueError('LAPACK reported an illegal value in {}-th argument'
'.'.format(-info))
elif 0 < info:
raise LinAlgError('Matrix is singular.')
if lamch is None:
return
E = lamch('E')
if rcond < E:
warn('Ill-conditioned matrix (rcond={:.6g}): '
'result may not be accurate.'.format(rcond),
LinAlgWarning, stacklevel=3)
def solve(a, b, sym_pos=False, lower=False, overwrite_a=False,
          overwrite_b=False, debug=None, check_finite=True, assume_a='gen',
          transposed=False):
    """
    Solves the linear equation set ``a * x = b`` for the unknown ``x``
    for square ``a`` matrix.

    If the data matrix is known to be a particular type then supplying the
    corresponding string to ``assume_a`` key chooses the dedicated solver.
    The available options are

    ===================  ========
     generic matrix       'gen'
     symmetric            'sym'
     hermitian            'her'
     positive definite    'pos'
    ===================  ========

    If omitted, ``'gen'`` is the default structure.

    The datatype of the arrays define which solver is called regardless
    of the values. In other words, even when the complex array entries have
    precisely zero imaginary parts, the complex solver will be called based
    on the data type of the array.

    Parameters
    ----------
    a : (N, N) array_like
        Square input data
    b : (N, NRHS) array_like
        Input data for the right hand side.
    sym_pos : bool, optional
        Assume `a` is symmetric and positive definite. This key is deprecated
        and assume_a = 'pos' keyword is recommended instead. The functionality
        is the same. It will be removed in the future.
    lower : bool, optional
        If True, only the data contained in the lower triangle of `a`. Default
        is to use upper triangle. (ignored for ``'gen'``)
    overwrite_a : bool, optional
        Allow overwriting data in `a` (may enhance performance).
        Default is False.
    overwrite_b : bool, optional
        Allow overwriting data in `b` (may enhance performance).
        Default is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    assume_a : str, optional
        Valid entries are explained above.
    transposed: bool, optional
        If True, ``a^T x = b`` for real matrices, raises `NotImplementedError`
        for complex matrices (only for True).

    Returns
    -------
    x : (N, NRHS) ndarray
        The solution array.

    Raises
    ------
    ValueError
        If size mismatches detected or input a is not square.
    LinAlgError
        If the matrix is singular.
    LinAlgWarning
        If an ill-conditioned input a is detected.
    NotImplementedError
        If transposed is True and input a is a complex matrix.

    Examples
    --------
    Given `a` and `b`, solve for `x`:

    >>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
    >>> b = np.array([2, 4, -1])
    >>> from scipy import linalg
    >>> x = linalg.solve(a, b)
    >>> x
    array([ 2., -2.,  9.])
    >>> np.dot(a, x) == b
    array([ True,  True,  True], dtype=bool)

    Notes
    -----
    If the input b matrix is a 1-D array with N elements, when supplied
    together with an NxN input a, it is assumed as a valid column vector
    despite the apparent size mismatch. This is compatible with the
    numpy.dot() behavior and the returned result is still 1-D array.

    The generic, symmetric, hermitian and positive definite solutions are
    obtained via calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of
    LAPACK respectively.
    """
    # Flags for 1-D or N-D right-hand side
    b_is_1D = False
    a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite))
    b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite))
    n = a1.shape[0]
    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if a1.shape[0] != a1.shape[1]:
        raise ValueError('Input a needs to be a square matrix.')
    if n != b1.shape[0]:
        # Last chance to catch 1x1 scalar a and 1-D b arrays
        if not (n == 1 and b1.size != 0):
            raise ValueError('Input b has to have same number of rows as '
                             'input a')
    # accommodate empty arrays
    if b1.size == 0:
        return np.asfortranarray(b1.copy())
    # regularize 1-D b arrays to 2D
    if b1.ndim == 1:
        if n == 1:
            b1 = b1[None, :]
        else:
            b1 = b1[:, None]
        b_is_1D = True
    # Backwards compatibility - old keyword.
    if sym_pos:
        assume_a = 'pos'
    if assume_a not in ('gen', 'sym', 'her', 'pos'):
        raise ValueError('{} is not a recognized matrix structure'
                         ''.format(assume_a))
    # Deprecate keyword "debug"
    if debug is not None:
        warn('Use of the "debug" keyword is deprecated '
             'and this keyword will be removed in future '
             'versions of SciPy.', DeprecationWarning, stacklevel=2)
    # Get the correct lamch function.
    # The LAMCH functions only exists for S and D
    # So for complex values we have to convert to real/double.
    if a1.dtype.char in 'fF':  # single precision
        lamch = get_lapack_funcs('lamch', dtype='f')
    else:
        lamch = get_lapack_funcs('lamch', dtype='d')
    # Currently we do not have the other forms of the norm calculators
    # lansy, lanpo, lanhe.
    # However, in any case they only reduce computations slightly...
    lange = get_lapack_funcs('lange', (a1,))
    # Since the I-norm and 1-norm are the same for symmetric matrices
    # we can collect them all in this one call
    # Note however, that when issuing 'gen' and form!='none', then
    # the I-norm should be used
    if transposed:
        trans = 1
        norm = 'I'
        if np.iscomplexobj(a1):
            raise NotImplementedError('scipy.linalg.solve can currently '
                                      'not solve a^T x = b or a^H x = b '
                                      'for complex matrices.')
    else:
        trans = 0
        norm = '1'
    # matrix norm feeds the ?gecon/?sycon/... condition estimate below
    anorm = lange(norm, a1)
    # Generalized case 'gesv'
    if assume_a == 'gen':
        # LU factorize, solve, then estimate the reciprocal condition number
        gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'),
                                               (a1, b1))
        lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a)
        _solve_check(n, info)
        x, info = getrs(lu, ipvt, b1,
                        trans=trans, overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = gecon(lu, anorm, norm=norm)
    # Hermitian case 'hesv'
    elif assume_a == 'her':
        hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv',
                                                 'hesv_lwork'), (a1, b1))
        lwork = _compute_lwork(hesv_lw, n, lower)
        lu, ipvt, x, info = hesv(a1, b1, lwork=lwork,
                                 lower=lower,
                                 overwrite_a=overwrite_a,
                                 overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = hecon(lu, ipvt, anorm)
    # Symmetric case 'sysv'
    elif assume_a == 'sym':
        sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv',
                                                 'sysv_lwork'), (a1, b1))
        lwork = _compute_lwork(sysv_lw, n, lower)
        lu, ipvt, x, info = sysv(a1, b1, lwork=lwork,
                                 lower=lower,
                                 overwrite_a=overwrite_a,
                                 overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = sycon(lu, ipvt, anorm)
    # Positive definite case 'posv'
    else:
        pocon, posv = get_lapack_funcs(('pocon', 'posv'),
                                       (a1, b1))
        lu, x, info = posv(a1, b1, lower=lower,
                           overwrite_a=overwrite_a,
                           overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = pocon(lu, anorm)
    # final check also warns if the system was ill-conditioned
    _solve_check(n, info, lamch, rcond)
    if b_is_1D:
        x = x.ravel()
    return x
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
                     overwrite_b=False, debug=None, check_finite=True):
    """
    Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.

    Parameters
    ----------
    a : (M, M) array_like
        A triangular matrix
    b : (M,) or (M, N) array_like
        Right-hand side matrix in `a x = b`
    lower : bool, optional
        Use only data contained in the lower triangle of `a`.
        Default is to use upper triangle.
    trans : {0, 1, 2, 'N', 'T', 'C'}, optional
        Type of system to solve:

        ========  =========
        trans     system
        ========  =========
        0 or 'N'  a x  = b
        1 or 'T'  a^T x = b
        2 or 'C'  a^H x = b
        ========  =========
    unit_diagonal : bool, optional
        If True, diagonal elements of `a` are assumed to be 1 and
        will not be referenced.
    overwrite_b : bool, optional
        Allow overwriting data in `b` (may enhance performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution to the system `a x = b`.  Shape of return matches `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular

    Notes
    -----
    .. versionadded:: 0.9.0

    Examples
    --------
    Solve the lower triangular system a x = b, where::

             [3  0  0  0]       [4]
        a =  [2  1  0  0]   b = [2]
             [1  0  1  0]       [4]
             [1  1  1  1]       [2]

    >>> from scipy.linalg import solve_triangular
    >>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
    >>> b = np.array([4, 2, 4, 2])
    >>> x = solve_triangular(a, b, lower=True)
    >>> x
    array([ 1.33333333, -0.66666667,  2.66666667, -1.33333333])
    >>> a.dot(x)  # Check the result
    array([ 4.,  2.,  4.,  2.])
    """
    # Deprecate keyword "debug"
    if debug is not None:
        warn('Use of the "debug" keyword is deprecated '
             'and this keyword will be removed in the future '
             'versions of SciPy.', DeprecationWarning, stacklevel=2)
    a1 = _asarray_validated(a, check_finite=check_finite)
    b1 = _asarray_validated(b, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    if a1.shape[0] != b1.shape[0]:
        raise ValueError('incompatible dimensions')
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if debug:
        print('solve:overwrite_b=', overwrite_b)
    # normalize letter codes to the integer codes trtrs expects
    trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
    trtrs, = get_lapack_funcs(('trtrs',), (a1, b1))
    if a1.flags.f_contiguous or trans == 2:
        x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower,
                        trans=trans, unitdiag=unit_diagonal)
    else:
        # transposed system is solved since trtrs expects Fortran ordering
        # (flipping `lower` and `trans` together keeps the system equivalent)
        x, info = trtrs(a1.T, b1, overwrite_b=overwrite_b, lower=not lower,
                        trans=not trans, unitdiag=unit_diagonal)
    if info == 0:
        return x
    if info > 0:
        raise LinAlgError("singular matrix: resolution failed at diagonal %d" %
                          (info-1))
    raise ValueError('illegal value in %dth argument of internal trtrs' %
                     (-info))
def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
                 debug=None, check_finite=True):
    """
    Solve the equation a x = b for x, assuming a is banded matrix.

    The matrix a is stored in `ab` using the matrix diagonal ordered form::

        ab[u + i - j, j] == a[i,j]

    Example of `ab` (shape of a is (6,6), `u` =1, `l` =2)::

        *    a01  a12  a23  a34  a45
        a00  a11  a22  a33  a44  a55
        a10  a21  a32  a43  a54   *
        a20  a31  a42  a53   *    *

    Parameters
    ----------
    (l, u) : (integer, integer)
        Number of non-zero lower and upper diagonals
    ab : (`l` + `u` + 1, M) array_like
        Banded matrix
    b : (M,) or (M, K) array_like
        Right-hand side
    overwrite_ab : bool, optional
        Discard data in `ab` (may enhance performance)
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system a x = b. Returned shape depends on the
        shape of `b`.

    Examples
    --------
    Solve the banded system a x = b, where::

            [5  2 -1  0  0]       [0]
            [1  4  2 -1  0]       [1]
        a = [0  1  3  2 -1]   b = [2]
            [0  0  1  2  2]       [2]
            [0  0  0  1  1]       [3]

    There is one nonzero diagonal below the main diagonal (l = 1), and
    two above (u = 2). The diagonal banded form of the matrix is::

             [*  * -1 -1 -1]
        ab = [*  2  2  2  2]
             [5  4  3  2  1]
             [1  1  1  1  *]

    >>> from scipy.linalg import solve_banded
    >>> ab = np.array([[0,  0, -1, -1, -1],
    ...                [0,  2,  2,  2,  2],
    ...                [5,  4,  3,  2,  1],
    ...                [1,  1,  1,  1,  0]])
    >>> b = np.array([0, 1, 2, 2, 3])
    >>> x = solve_banded((1, 2), ab, b)
    >>> x
    array([-2.37288136,  3.93220339, -4.        ,  4.3559322 , -1.3559322 ])
    """
    # Deprecate keyword "debug"
    if debug is not None:
        warn('Use of the "debug" keyword is deprecated '
             'and this keyword will be removed in the future '
             'versions of SciPy.', DeprecationWarning, stacklevel=2)
    a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True)
    b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True)
    # Validate shapes.
    if a1.shape[-1] != b1.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    (nlower, nupper) = l_and_u
    if nlower + nupper + 1 != a1.shape[0]:
        raise ValueError("invalid values for the number of lower and upper "
                         "diagonals: l+u+1 (%d) does not equal ab.shape[0] "
                         "(%d)" % (nlower + nupper + 1, ab.shape[0]))
    overwrite_b = overwrite_b or _datacopied(b1, b)
    # 1x1 system: the solution is a plain scalar division
    if a1.shape[-1] == 1:
        b2 = np.array(b1, copy=(not overwrite_b))
        b2 /= a1[1, 0]
        return b2
    if nlower == nupper == 1:
        # tridiagonal system: use the specialized gtsv driver
        overwrite_ab = overwrite_ab or _datacopied(a1, ab)
        gtsv, = get_lapack_funcs(('gtsv',), (a1, b1))
        du = a1[0, 1:]
        d = a1[1, :]
        dl = a1[2, :-1]
        du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab,
                                   overwrite_ab, overwrite_b)
    else:
        # general band: gbsv requires nlower extra rows of storage for the
        # LU fill-in, so the band is copied into a taller workspace array
        gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
        a2 = np.zeros((2*nlower + nupper + 1, a1.shape[1]), dtype=gbsv.dtype)
        a2[nlower:, :] = a1
        lu, piv, x, info = gbsv(nlower, nupper, a2, b1, overwrite_ab=True,
                                overwrite_b=overwrite_b)
    if info == 0:
        return x
    if info > 0:
        raise LinAlgError("singular matrix")
    raise ValueError('illegal value in %d-th argument of internal '
                     'gbsv/gtsv' % -info)
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
                  check_finite=True):
    """
    Solve equation a x = b. a is Hermitian positive-definite banded matrix.

    The matrix a is stored in `ab` either in lower diagonal or upper
    diagonal ordered form:

        ab[u + i - j, j] == a[i,j]        (if upper form; i <= j)
        ab[    i - j, j] == a[i,j]        (if lower form; i >= j)

    Example of `ab` (shape of a is (6, 6), `u` =2)::

        upper form:
        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55

        lower form:
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *

    Cells marked with * are not used.

    Parameters
    ----------
    ab : (`u` + 1, M) array_like
        Banded matrix
    b : (M,) or (M, K) array_like
        Right-hand side
    overwrite_ab : bool, optional
        Discard data in `ab` (may enhance performance)
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance)
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system a x = b. Shape of return matches shape
        of `b`.

    Examples
    --------
    Solve the banded system A x = b, where::

            [ 4  2 -1  0  0  0]       [1]
            [ 2  5  2 -1  0  0]       [2]
        A = [-1  2  6  2 -1  0]   b = [2]
            [ 0 -1  2  7  2 -1]       [3]
            [ 0  0 -1  2  8  2]       [3]
            [ 0  0  0 -1  2  9]       [3]

    >>> from scipy.linalg import solveh_banded

    `ab` contains the main diagonal and the nonzero diagonals below the
    main diagonal. That is, we use the lower form:

    >>> ab = np.array([[ 4,  5,  6,  7, 8, 9],
    ...                [ 2,  2,  2,  2, 2, 0],
    ...                [-1, -1, -1, -1, 0, 0]])
    >>> b = np.array([1, 2, 2, 3, 3, 3])
    >>> x = solveh_banded(ab, b, lower=True)
    >>> x
    array([ 0.03431373,  0.45938375,  0.05602241,  0.47759104,  0.17577031,
            0.34733894])

    Solve the Hermitian banded system H x = b, where::

            [ 8   2-1j   0     0  ]        [ 1  ]
        H = [2+1j  5     1j    0  ]    b = [1+1j]
            [ 0   -1j    9   -2-1j]        [1-2j]
            [ 0    0   -2+1j   6  ]        [ 0  ]

    In this example, we put the upper diagonals in the array `hb`:

    >>> hb = np.array([[0, 2-1j, 1j, -2-1j],
    ...                [8,  5,    9,   6  ]])
    >>> b = np.array([1, 1+1j, 1-2j, 0])
    >>> x = solveh_banded(hb, b)
    >>> x
    array([ 0.07318536-0.02939412j,  0.11877624+0.17696461j,
            0.10077984-0.23035393j, -0.00479904-0.09358128j])
    """
    a1 = _asarray_validated(ab, check_finite=check_finite)
    b1 = _asarray_validated(b, check_finite=check_finite)
    # Validate shapes.
    if a1.shape[-1] != b1.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    overwrite_b = overwrite_b or _datacopied(b1, b)
    overwrite_ab = overwrite_ab or _datacopied(a1, ab)
    if a1.shape[0] == 2:
        # two bands means tridiagonal: use the specialized ptsv driver,
        # extracting the (real) diagonal and the off-diagonal
        ptsv, = get_lapack_funcs(('ptsv',), (a1, b1))
        if lower:
            d = a1[0, :].real
            e = a1[1, :-1]
        else:
            d = a1[1, :].real
            # upper-form storage holds the superdiagonal; conjugate to get
            # the subdiagonal ptsv expects
            e = a1[0, 1:].conj()
        d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab,
                              overwrite_b)
    else:
        # general positive-definite band: Cholesky-based pbsv driver
        pbsv, = get_lapack_funcs(('pbsv',), (a1, b1))
        c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab,
                          overwrite_b=overwrite_b)
    if info > 0:
        raise LinAlgError("%dth leading minor not positive definite" % info)
    if info < 0:
        raise ValueError('illegal value in %dth argument of internal '
                         'pbsv' % -info)
    return x
def solve_toeplitz(c_or_cr, b, check_finite=True):
    """Solve a Toeplitz system using Levinson Recursion

    The Toeplitz matrix has constant diagonals, with c as its first column
    and r as its first row. If r is not given, ``r == conjugate(c)`` is
    assumed.

    Parameters
    ----------
    c_or_cr : array_like or tuple of (array_like, array_like)
        The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
        actual shape of ``c``, it will be converted to a 1-D array. If not
        supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
        real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
        of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
        of ``r``, it will be converted to a 1-D array.
    b : (M,) or (M, K) array_like
        Right-hand side in ``T x = b``.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (result entirely NaNs) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system ``T x = b``. Shape of return matches shape
        of `b`.

    See Also
    --------
    toeplitz : Toeplitz matrix

    Notes
    -----
    The solution is computed using Levinson-Durbin recursion, which is faster
    than generic least-squares methods, but can be less numerically stable.

    Examples
    --------
    Solve the Toeplitz system T x = b, where::

            [ 1 -1 -2 -3]       [1]
        T = [ 3  1 -1 -2]   b = [2]
            [ 6  3  1 -1]       [2]
            [10  6  3  1]       [5]

    To specify the Toeplitz matrix, only the first column and the first
    row are needed.

    >>> c = np.array([1, 3, 6, 10])    # First column of T
    >>> r = np.array([1, -1, -2, -3])  # First row of T
    >>> b = np.array([1, 2, 2, 5])

    >>> from scipy.linalg import solve_toeplitz, toeplitz
    >>> x = solve_toeplitz((c, r), b)
    >>> x
    array([ 1.66666667, -1.        , -2.66666667,  2.33333333])

    Check the result by creating the full Toeplitz matrix and
    multiplying it by `x`.  We should get `b`.

    >>> T = toeplitz(c, r)
    >>> T.dot(x)
    array([ 1.,  2.,  2.,  5.])
    """
    # If numerical stability of this algorithm is a problem, a future
    # developer might consider implementing other O(N^2) Toeplitz solvers,
    # such as GKO (https://www.jstor.org/stable/2153371) or Bareiss.
    if isinstance(c_or_cr, tuple):
        c, r = c_or_cr
        c = _asarray_validated(c, check_finite=check_finite).ravel()
        r = _asarray_validated(r, check_finite=check_finite).ravel()
    else:
        c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
        r = c.conjugate()
    # Form a 1-D array of values to be used in the matrix, containing a
    # reversed copy of r[1:], followed by c.
    vals = np.concatenate((r[-1:0:-1], c))
    if b is None:
        raise ValueError('illegal value, `b` is a required argument')
    # BUG FIX: forward `check_finite` for `b` as well.  Previously `b` was
    # always checked (the default), negating the documented speed-up when
    # callers pass check_finite=False.
    b = _asarray_validated(b, check_finite=check_finite)
    if vals.shape[0] != (2*b.shape[0] - 1):
        raise ValueError('incompatible dimensions')
    # The Cython `levinson` kernel requires contiguous double/complex input.
    if np.iscomplexobj(vals) or np.iscomplexobj(b):
        vals = np.asarray(vals, dtype=np.complex128, order='c')
        b = np.asarray(b, dtype=np.complex128)
    else:
        vals = np.asarray(vals, dtype=np.double, order='c')
        b = np.asarray(b, dtype=np.double)
    if b.ndim == 1:
        x, _ = levinson(vals, np.ascontiguousarray(b))
    else:
        # Solve each right-hand-side column independently, then restore the
        # original (possibly >2-D) shape of `b`.
        b_shape = b.shape
        b = b.reshape(b.shape[0], -1)
        x = np.column_stack([levinson(vals, np.ascontiguousarray(b[:, i]))[0]
                             for i in range(b.shape[1])])
        x = x.reshape(*b_shape)
    return x
def _get_axis_len(aname, a, axis):
ax = axis
if ax < 0:
ax += a.ndim
if 0 <= ax < a.ndim:
return a.shape[ax]
raise ValueError("'%saxis' entry is out of bounds" % (aname,))
def solve_circulant(c, b, singular='raise', tol=None,
                    caxis=-1, baxis=0, outaxis=0):
    """Solve C x = b for x, where C is a circulant matrix.

    `C` is the circulant matrix associated with the vector `c`.  The system
    is solved in Fourier space as ``x = ifft(fft(b) / fft(c))``, which for a
    large vector `c` is *much* faster than forming and solving with the full
    circulant matrix.

    Parameters
    ----------
    c : array_like
        The coefficients of the circulant matrix.
    b : array_like
        Right-hand side matrix in ``a x = b``.
    singular : str, optional
        Controls the handling of a near singular circulant matrix.  If
        "raise" (the default), a `LinAlgError` is raised; if "lstsq", the
        least squares solution is returned.
    tol : float, optional
        Eigenvalues of the circulant matrix whose absolute value is less
        than or equal to `tol` are treated as zero.  If not given, `tol`
        is set to ``abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps``
        where ``abs_eigs`` holds the absolute values of the eigenvalues of
        the circulant matrix.
    caxis : int
        Axis of `c` that holds the vectors of circulant coefficients when
        `c` has dimension greater than 1.
    baxis : int
        Axis of `b` that holds the right-hand side vectors when `b` has
        dimension greater than 1.
    outaxis : int
        Axis of the result that holds the solution vectors when the result
        is multidimensional.

    Returns
    -------
    x : ndarray
        Solution to the system ``C x = b``.

    Raises
    ------
    LinAlgError
        If the circulant matrix associated with `c` is near singular and
        ``singular == 'raise'``.

    See Also
    --------
    circulant : circulant matrix

    Notes
    -----
    For a 1-D vector `c` with length m and an array `b` with shape
    ``(m, ...)``, ``solve_circulant(c, b)`` returns the same result as
    ``solve(circulant(c), b)`` with `solve` and `circulant` from
    `scipy.linalg`.  Multidimensional `c` and `b` broadcast against each
    other once their coefficient axes (`caxis`, `baxis`) are set aside.

    .. versionadded:: 0.16.0

    Examples
    --------
    >>> from scipy.linalg import solve_circulant
    >>> solve_circulant(np.array([2, 2, 4]), np.array([1, 2, 3]))
    array([ 0.75, -0.25,  0.25])

    For a singular circulant matrix, request the least squares solution:

    >>> c = np.array([1, 1, 0, 0])
    >>> b = np.array([1, 2, 3, 4])
    >>> solve_circulant(c, b, singular='lstsq')
    array([ 0.25,  1.25,  2.25,  1.25])
    """
    c = np.atleast_1d(c)
    b = np.atleast_1d(b)
    nc = _get_axis_len("c", c, caxis)
    nb = _get_axis_len("b", b, baxis)
    if nc != nb:
        raise ValueError('Incompatible c and b axis lengths')

    # Move the coefficient axis of `c` to the end so the FFT runs along -1.
    c_fft = np.fft.fft(np.rollaxis(c, caxis, c.ndim), axis=-1)
    eig_mag = np.abs(c_fft)
    if tol is None:
        # Same tolerance rule as np.linalg.matrix_rank.
        tol = eig_mag.max(axis=-1) * nc * np.finfo(np.float64).eps
        if tol.shape != ():
            # Append a trivial axis so `tol` broadcasts against `eig_mag`.
            tol.shape = tol.shape + (1,)
    else:
        tol = np.atleast_1d(tol)

    tiny = eig_mag <= tol
    has_tiny = np.any(tiny)
    if has_tiny and singular == 'raise':
        raise LinAlgError("near singular circulant matrix.")
    if has_tiny:
        # Substitute 1 for the (near) zero eigenvalues so the division
        # below is safe; those quotient entries are zeroed afterwards.
        c_fft[tiny] = 1

    b_fft = np.fft.fft(np.rollaxis(b, baxis, b.ndim), axis=-1)
    quot = b_fft / c_fft
    if has_tiny:
        # `tiny` has the shape of `c_fft`; broadcasting it against a
        # True-mask shaped like `b` yields a mask with the shape of `quot`.
        bad = np.ones_like(b, dtype=bool) & tiny
        quot[bad] = 0

    x = np.fft.ifft(quot, axis=-1)
    if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
        x = x.real
    if outaxis != -1:
        x = np.rollaxis(x, -1, outaxis)
    return x
# matrix inversion
def inv(a, overwrite_a=False, check_finite=True):
    """
    Compute the inverse of a matrix.

    Parameters
    ----------
    a : array_like
        Square matrix to be inverted.
    overwrite_a : bool, optional
        Discard data in `a` (may improve performance). Default is False.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.

    Returns
    -------
    ainv : ndarray
        Inverse of the matrix `a`.

    Raises
    ------
    LinAlgError
        If `a` is singular.
    ValueError
        If `a` is not square, or not 2D.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> linalg.inv(a)
    array([[-2. ,  1. ],
           [ 1.5, -0.5]])
    >>> np.dot(a, linalg.inv(a))
    array([[ 1.,  0.],
           [ 0.,  1.]])
    """
    mat = _asarray_validated(a, check_finite=check_finite)
    if mat.ndim != 2 or mat.shape[0] != mat.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(mat, a)
    # Historically a Fortran-level 'inv' wrapper was also tried here with no
    # measurable benefit, so LAPACK's getrf/getri pair is called directly.
    getrf, getri, getri_lwork = get_lapack_funcs(
        ('getrf', 'getri', 'getri_lwork'), (mat,))
    lu, piv, info = getrf(mat, overwrite_a=overwrite_a)
    if info == 0:
        lwork = _compute_lwork(getri_lwork, mat.shape[0])
        # Pad the workspace slightly: the minimal lwork has triggered
        # SEGFAULTs in some LAPACK ?getri builds (e.g. 500x500 inverses);
        # all tests pass with the 1% padding.
        lwork = int(1.01 * lwork)
        inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
    if info > 0:
        raise LinAlgError("singular matrix")
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'getrf|getri' % -info)
    return inv_a
# Determinant
def det(a, overwrite_a=False, check_finite=True):
    """
    Compute the determinant of a matrix

    The determinant of a square matrix is a value derived arithmetically
    from the coefficients of the matrix.  For a 3x3 matrix, for example::

        a    b    c
        d    e    f = A
        g    h    i

        det(A) = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h

    Parameters
    ----------
    a : (M, M) array_like
        A square matrix.
    overwrite_a : bool, optional
        Allow overwriting data in a (may enhance performance).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.

    Returns
    -------
    det : float or complex
        Determinant of `a`.

    Notes
    -----
    The determinant is computed via LU factorization, LAPACK routine z/dgetrf.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> linalg.det(a)
    0.0
    >>> a = np.array([[0,2,3], [4,5,6], [7,8,9]])
    >>> linalg.det(a)
    3.0
    """
    mat = _asarray_validated(a, check_finite=check_finite)
    if mat.ndim != 2 or mat.shape[0] != mat.shape[1]:
        raise ValueError('expected square matrix')
    # Fortran-level LU-based determinant routine.
    fdet, = get_flinalg_funcs(('det',), (mat,))
    a_det, info = fdet(mat, overwrite_a=overwrite_a or _datacopied(mat, a))
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'det.getrf' % -info)
    return a_det
# Linear Least Squares
def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
          check_finite=True, lapack_driver=None):
    """
    Compute least-squares solution to equation Ax = b.

    Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.

    Parameters
    ----------
    a : (M, N) array_like
        Left-hand side array
    b : (M,) or (M, K) array_like
        Right hand side array
    cond : float, optional
        Cutoff for 'small' singular values; used to determine effective
        rank of a. Singular values smaller than
        ``rcond * largest_singular_value`` are considered zero.
    overwrite_a : bool, optional
        Discard data in `a` (may enhance performance). Default is False.
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance). Default is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.
    lapack_driver : str, optional
        Which LAPACK driver is used to solve the least-squares problem.
        Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default
        (``'gelsd'``) is a good choice.  However, ``'gelsy'`` can be slightly
        faster on many problems.  ``'gelss'`` was used historically.  It is
        generally slow but uses less memory.

        .. versionadded:: 0.17.0

    Returns
    -------
    x : (N,) or (N, K) ndarray
        Least-squares solution. Return shape matches shape of `b`.
    residues : (K,) ndarray or float
        Square of the 2-norm for each column in ``b - a x``, if ``M > N`` and
        ``ndim(A) == n`` (returns a scalar if b is 1-D). Otherwise a
        (0,)-shaped array is returned.
    rank : int
        Effective rank of `a`.
    s : (min(M, N),) ndarray or None
        Singular values of `a`. The condition number of a is
        ``abs(s[0] / s[-1])``.

    Raises
    ------
    LinAlgError
        If computation does not converge.
    ValueError
        When parameters are not compatible.

    See Also
    --------
    scipy.optimize.nnls : linear least squares with non-negativity constraint

    Notes
    -----
    When ``'gelsy'`` is used as a driver, `residues` is set to a (0,)-shaped
    array and `s` is always ``None``.

    Examples
    --------
    >>> from scipy.linalg import lstsq
    >>> import matplotlib.pyplot as plt

    Suppose we have the following data:

    >>> x = np.array([1, 2.5, 3.5, 4, 5, 7, 8.5])
    >>> y = np.array([0.3, 1.1, 1.5, 2.0, 3.2, 6.6, 8.6])

    We want to fit a quadratic polynomial of the form ``y = a + b*x**2``
    to this data.  We first form the "design matrix" M, with a constant
    column of 1s and a column containing ``x**2``:

    >>> M = x[:, np.newaxis]**[0, 2]
    >>> M
    array([[  1.  ,   1.  ],
           [  1.  ,   6.25],
           [  1.  ,  12.25],
           [  1.  ,  16.  ],
           [  1.  ,  25.  ],
           [  1.  ,  49.  ],
           [  1.  ,  72.25]])

    We want to find the least-squares solution to ``M.dot(p) = y``,
    where ``p`` is a vector with length 2 that holds the parameters
    ``a`` and ``b``.

    >>> p, res, rnk, s = lstsq(M, y)
    >>> p
    array([ 0.20925829,  0.12013861])

    Plot the data and the fitted curve.

    >>> plt.plot(x, y, 'o', label='data')
    >>> xx = np.linspace(0, 9, 101)
    >>> yy = p[0] + p[1]*xx**2
    >>> plt.plot(xx, yy, label='least squares fit, $y = a + bx^2$')
    >>> plt.xlabel('x')
    >>> plt.ylabel('y')
    >>> plt.legend(framealpha=1, shadow=True)
    >>> plt.grid(alpha=0.25)
    >>> plt.show()
    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    b1 = _asarray_validated(b, check_finite=check_finite)
    if len(a1.shape) != 2:
        raise ValueError('Input array a should be 2D')
    m, n = a1.shape
    if len(b1.shape) == 2:
        nrhs = b1.shape[1]
    else:
        nrhs = 1
    if m != b1.shape[0]:
        raise ValueError('Shape mismatch: a and b should have the same number'
                         ' of rows ({} != {}).'.format(m, b1.shape[0]))
    if m == 0 or n == 0:  # Zero-sized problem, confuses LAPACK
        x = np.zeros((n,) + b1.shape[1:], dtype=np.common_type(a1, b1))
        if n == 0:
            # With no unknowns, every entry of b is residual.
            residues = np.linalg.norm(b1, axis=0)**2
        else:
            residues = np.empty((0,))
        return x, residues, 0, np.empty((0,))

    driver = lapack_driver
    if driver is None:
        driver = lstsq.default_lapack_driver
    if driver not in ('gelsd', 'gelsy', 'gelss'):
        raise ValueError('LAPACK driver "%s" is not found' % driver)

    lapack_func, lapack_lwork = get_lapack_funcs((driver,
                                                  '%s_lwork' % driver),
                                                 (a1, b1))
    real_data = True if (lapack_func.dtype.kind == 'f') else False

    if m < n:
        # need to extend b matrix as it will be filled with
        # a larger solution matrix
        if len(b1.shape) == 2:
            b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype)
            b2[:m, :] = b1
        else:
            b2 = np.zeros(n, dtype=lapack_func.dtype)
            b2[:m] = b1
        b1 = b2

    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)

    if cond is None:
        cond = np.finfo(lapack_func.dtype).eps

    if driver in ('gelss', 'gelsd'):
        if driver == 'gelss':
            lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
            v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork,
                                                    overwrite_a=overwrite_a,
                                                    overwrite_b=overwrite_b)
        elif driver == 'gelsd':
            if real_data:
                lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
                x, s, rank, info = lapack_func(a1, b1, lwork,
                                               iwork, cond, False, False)
            else:  # complex data
                lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n,
                                                     nrhs, cond)
                x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork,
                                               cond, False, False)
        if info > 0:
            raise LinAlgError("SVD did not converge in Linear Least Squares")
        if info < 0:
            # BUG FIX: report the resolved driver name.  `lapack_driver` is
            # None when the default driver is in use, which previously
            # produced messages like "... of internal None".
            raise ValueError('illegal value in %d-th argument of internal %s'
                             % (-info, driver))
        resids = np.asarray([], dtype=x.dtype)
        if m > n:
            # Overdetermined system: the first n rows hold the solution;
            # when a has full rank the trailing rows hold the residuals.
            x1 = x[:n]
            if rank == n:
                resids = np.sum(np.abs(x[n:])**2, axis=0)
            x = x1
        return x, resids, rank, s

    elif driver == 'gelsy':
        lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
        # jpvt: column-pivoting array required by ?gelsy (zeros = free).
        jpvt = np.zeros((a1.shape[1], 1), dtype=np.int32)
        v, x, j, rank, info = lapack_func(a1, b1, jpvt, cond,
                                          lwork, False, False)
        if info < 0:
            raise ValueError("illegal value in %d-th argument of internal "
                             "gelsy" % -info)
        if m > n:
            x1 = x[:n]
            x = x1
        # gelsy provides neither residues nor singular values.
        return x, np.array([], x.dtype), rank, None


lstsq.default_lapack_driver = 'gelsd'
def pinv(a, cond=None, rcond=None, return_rank=False, check_finite=True):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    Calculate a generalized inverse of a matrix using a least-squares
    solver.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    cond, rcond : float, optional
        Cutoff factor for 'small' singular values.  In `lstsq`, singular
        values less than ``cond*largest_singular_value`` will be considered
        as zero.  If both are omitted, the default value ``max(M, N) * eps``
        is passed to `lstsq` where ``eps`` is the corresponding machine
        precision value of the datatype of ``a``.

        .. versionchanged:: 1.3.0
            Previously the default cutoff value was just `eps` without the
            factor ``max(M, N)``.
    return_rank : bool, optional
        if True, return the effective rank of the matrix
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if return_rank == True

    Raises
    ------
    LinAlgError
        If computation does not converge.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.random.randn(9, 6)
    >>> B = linalg.pinv(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True
    """
    a = _asarray_validated(a, check_finite=check_finite)
    # Solving ``a x = I`` in the least-squares sense yields the
    # pseudo-inverse directly.
    eye = np.identity(a.shape[0], dtype=a.dtype)
    if rcond is not None:
        cond = rcond
    if cond is None:
        # np.spacing(1) is eps for the (real part of the) dtype of `a`.
        cond = max(a.shape) * np.spacing(a.real.dtype.type(1))
    B, _, rank, _ = lstsq(a, eye, cond=cond, check_finite=False)
    if return_rank:
        return B, rank
    return B
def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    Calculate a generalized inverse of a matrix using its
    singular-value decomposition and including all 'large' singular
    values.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    cond, rcond : float or None
        Cutoff for 'small' singular values; singular values smaller than
        this value are considered as zero.  If both are omitted, the default
        value ``max(M,N)*largest_singular_value*eps`` is used where ``eps``
        is the machine precision value of the datatype of ``a``.

        .. versionchanged:: 1.3.0
            Previously the default cutoff value was just ``eps*f`` where
            ``f`` was ``1e3`` for single precision and ``1e6`` for double
            precision.
    return_rank : bool, optional
        If True, return the effective rank of the matrix.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if `return_rank` is True.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.random.randn(9, 6)
    >>> B = linalg.pinv2(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True
    """
    a = _asarray_validated(a, check_finite=check_finite)
    left, sigma, right_h = decomp_svd.svd(a, full_matrices=False,
                                          check_finite=False)
    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        # Default cutoff: eps of the (real) dtype, scaled by the largest
        # singular value and the larger matrix dimension.
        eps = np.finfo(left.dtype.char.lower()).eps
        cond = np.max(sigma) * max(a.shape) * eps

    # Keep only the part of the spectrum above the cutoff and invert it:
    # B = V diag(1/sigma) U^H.
    rank = np.sum(sigma > cond)
    scaled = left[:, :rank] / sigma[:rank]
    B = np.conjugate(np.dot(scaled, right_h[:rank])).T

    if return_rank:
        return B, rank
    return B
def pinvh(a, cond=None, rcond=None, lower=True, return_rank=False,
          check_finite=True):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.

    Calculate a generalized inverse of a Hermitian or real symmetric matrix
    using its eigenvalue decomposition and including all eigenvalues with
    'large' absolute value.

    Parameters
    ----------
    a : (N, N) array_like
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
    cond, rcond : float or None
        Cutoff for 'small' singular values; singular values smaller than
        this value are considered as zero.  If both are omitted, the default
        ``max(M,N)*largest_eigenvalue*eps`` is used where ``eps`` is the
        machine precision value of the datatype of ``a``.

        .. versionchanged:: 1.3.0
            Previously the default cutoff value was just ``eps*f`` where
            ``f`` was ``1e3`` for single precision and ``1e6`` for double
            precision.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of `a`. (Default: lower)
    return_rank : bool, optional
        If True, return the effective rank of the matrix.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.

    Returns
    -------
    B : (N, N) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if `return_rank` is True.

    Raises
    ------
    LinAlgError
        If eigenvalue does not converge

    Examples
    --------
    >>> from scipy.linalg import pinvh
    >>> a = np.random.randn(9, 6)
    >>> a = np.dot(a, a.T)
    >>> B = pinvh(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True
    """
    a = _asarray_validated(a, check_finite=check_finite)
    s, u = decomp.eigh(a, lower=lower, check_finite=False)

    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        eps = np.finfo(u.dtype.char.lower()).eps
        cond = np.max(np.abs(s)) * max(a.shape) * eps

    # For Hermitian matrices, singular values equal abs(eigenvalues), so
    # keep the eigenpairs whose magnitude clears the cutoff and invert them.
    keep = abs(s) > cond
    inv_eigs = 1.0 / s[keep]
    basis = u[:, keep]
    B = np.dot(basis * inv_eigs, np.conjugate(basis).T)

    if return_rank:
        return B, len(inv_eigs)
    return B
def matrix_balance(A, permute=True, scale=True, separate=False,
                   overwrite_a=False):
    """
    Compute a diagonal similarity transformation for row/column balancing.

    The balancing tries to equalize the row and column 1-norms by applying
    a similarity transformation such that the magnitude variation of the
    matrix entries is reflected to the scaling matrices.

    Moreover, if enabled, the matrix is first permuted to isolate the upper
    triangular parts of the matrix and, again if scaling is also enabled,
    only the remaining subblocks are subjected to scaling.

    The balanced matrix satisfies the following equality

    .. math::

        B = T^{-1} A T

    The scaling coefficients are approximated to the nearest power of 2
    to avoid round-off errors.

    Parameters
    ----------
    A : (n, n) array_like
        Square data matrix for the balancing.
    permute : bool, optional
        The selector to define whether permutation of A is also performed
        prior to scaling.
    scale : bool, optional
        The selector to turn on and off the scaling. If False, the matrix
        will not be scaled.
    separate : bool, optional
        This switches from returning a full matrix of the transformation
        to a tuple of two separate 1-D permutation and scaling arrays.
    overwrite_a : bool, optional
        This is passed to xGEBAL directly. Essentially, overwrites the result
        to the data. It might increase the space efficiency. See LAPACK manual
        for details. This is False by default.

    Returns
    -------
    B : (n, n) ndarray
        Balanced matrix
    T : (n, n) ndarray
        A possibly permuted diagonal matrix whose nonzero entries are
        integer powers of 2 to avoid numerical truncation errors.
    scale, perm : (n,) ndarray
        If ``separate`` keyword is set to True then instead of the array
        ``T`` above, the scaling and the permutation vectors are given
        separately as a tuple without allocating the full array ``T``.

    Notes
    -----
    This algorithm is particularly useful for eigenvalue and matrix
    decompositions and in many cases it is already called by various
    LAPACK routines.

    The algorithm is based on the well-known technique of [1]_ and has
    been modified to account for special cases. See [2]_ for details
    which have been implemented since LAPACK v3.5.0. Before this version
    there are corner cases where balancing can actually worsen the
    conditioning. See [3]_ for such examples.

    The code is a wrapper around LAPACK's xGEBAL routine family for matrix
    balancing.

    .. versionadded:: 0.19.0

    Examples
    --------
    >>> from scipy import linalg
    >>> x = np.array([[1,2,0], [9,1,0.01], [1,2,10*np.pi]])

    >>> y, permscale = linalg.matrix_balance(x)
    >>> np.abs(x).sum(axis=0) / np.abs(x).sum(axis=1)
    array([ 3.66666667,  0.4995005 ,  0.91312162])

    >>> np.abs(y).sum(axis=0) / np.abs(y).sum(axis=1)
    array([ 1.2       ,  1.27041742,  0.92658316])  # may vary

    >>> permscale  # only powers of 2 (0.5 == 2^(-1))
    array([[  0.5,   0. ,  0. ],  # may vary
           [  0. ,   1. ,  0. ],
           [  0. ,   0. ,  1. ]])

    References
    ----------
    .. [1] : B.N. Parlett and C. Reinsch, "Balancing a Matrix for
       Calculation of Eigenvalues and Eigenvectors", Numerische Mathematik,
       Vol.13(4), 1969, DOI:10.1007/BF02165404

    .. [2] : R. James, J. Langou, B.R. Lowery, "On matrix balancing and
       eigenvector computation", 2014, Available online:
       https://arxiv.org/abs/1401.5766

    .. [3] : D.S. Watkins. A case where balancing is harmful.
       Electron. Trans. Numer. Anal, Vol.23, 2006.
    """
    A = np.atleast_2d(_asarray_validated(A, check_finite=True))
    if not np.equal(*A.shape):
        raise ValueError('The data matrix for balancing should be square.')
    # NOTE: ('gebal') is just the string 'gebal' (not a tuple); the
    # subsequent call below shows a single callable is returned here.
    gebal = get_lapack_funcs(('gebal'), (A,))
    # xGEBAL returns the balanced matrix B, the active range [lo, hi], and
    # a packed vector `ps` combining scaling factors and permutation info.
    B, lo, hi, ps, info = gebal(A, scale=scale, permute=permute,
                                overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('xGEBAL exited with the internal error '
                         '"illegal value in argument number {}.". See '
                         'LAPACK documentation for the xGEBAL error codes.'
                         ''.format(-info))
    # Separate the permutations from the scalings and then convert to int
    # Inside [lo, hi] the entries of `ps` are scaling factors; outside
    # that range they encode swap targets, handled in the loops below.
    scaling = np.ones_like(ps, dtype=float)
    scaling[lo:hi+1] = ps[lo:hi+1]
    # gebal uses 1-indexing
    ps = ps.astype(int, copy=False) - 1
    n = A.shape[0]
    perm = np.arange(n)
    # LAPACK permutes with the ordering n --> hi, then 0--> lo
    # Replay the recorded swaps in that order to reconstruct the full
    # permutation vector `perm`.
    if hi < n:
        for ind, x in enumerate(ps[hi+1:][::-1], 1):
            if n-ind == x:
                continue
            perm[[x, n-ind]] = perm[[n-ind, x]]
    if lo > 0:
        for ind, x in enumerate(ps[:lo]):
            if ind == x:
                continue
            perm[[x, ind]] = perm[[ind, x]]
    if separate:
        return B, (scaling, perm)
    # get the inverse permutation
    iperm = np.empty_like(perm)
    iperm[perm] = np.arange(n)
    # Materialize T as the permuted diagonal scaling matrix.
    return B, np.diag(scaling)[iperm, :]
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/linear_model/bayes.py | 50 | 16145 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
    def fit(self, X, y):
        """Fit the model
        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        # Center/scale the data per the fit_intercept/normalize options; the
        # offsets are re-applied at the end via _set_intercept.
        X, y, X_offset, y_offset, X_scale = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        n_samples, n_features = X.shape
        # Initialization of the values of the parameters
        # alpha_ = noise precision, lambda_ = weight precision.
        alpha_ = 1. / np.var(y)
        lambda_ = 1.
        # Hoist hyper-parameters into locals for the update loop below.
        verbose = self.verbose
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        self.scores_ = list()
        coef_old_ = None
        # Precompute quantities reused every iteration: X^T y and the SVD of
        # X (eigen_vals_ = S**2 are the eigenvalues of X^T X).
        XT_y = np.dot(X.T, y)
        U, S, Vh = linalg.svd(X, full_matrices=False)
        eigen_vals_ = S ** 2
        # Convergence loop of the bayesian ridge regression
        for iter_ in range(self.n_iter):
            # Compute mu and sigma
            # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
            # coef_ = sigma_^-1 * XT * y
            # Two algebraically equivalent formulations, chosen so the solve
            # happens in the smaller of the two dimensions.
            if n_samples > n_features:
                coef_ = np.dot(Vh.T,
                               Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
                coef_ = np.dot(coef_, XT_y)
                if self.compute_score:
                    logdet_sigma_ = - np.sum(
                        np.log(lambda_ + alpha_ * eigen_vals_))
            else:
                coef_ = np.dot(X.T, np.dot(
                    U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
                coef_ = np.dot(coef_, y)
                if self.compute_score:
                    logdet_sigma_ = lambda_ * np.ones(n_features)
                    logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
                    logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
            # Update alpha and lambda
            # (evidence-maximization updates; gamma_ is the effective number
            # of well-determined parameters).
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = (np.sum((alpha_ * eigen_vals_) /
                      (lambda_ + alpha_ * eigen_vals_)))
            lambda_ = ((gamma_ + 2 * lambda_1) /
                       (np.sum(coef_ ** 2) + 2 * lambda_2))
            alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
                      (rmse_ + 2 * alpha_2))
            # Compute the objective function
            # NOTE(review): ``log`` here is presumably ``math.log`` imported at
            # module top (not visible in this chunk); all arguments are scalars.
            if self.compute_score:
                s = lambda_1 * log(lambda_) - lambda_2 * lambda_
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (n_features * log(lambda_) +
                            n_samples * log(alpha_) -
                            alpha_ * rmse_ -
                            (lambda_ * np.sum(coef_ ** 2)) -
                            logdet_sigma_ -
                            n_samples * log(2 * np.pi))
                self.scores_.append(s)
            # Check for convergence
            # (L1 change in coefficients; skipped on the first iteration
            # because coef_old_ is still None).
            if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Convergence after ", str(iter_), " iterations")
                break
            coef_old_ = np.copy(coef_)
        # Expose the fitted quantities and restore the intercept.
        self.alpha_ = alpha_
        self.lambda_ = lambda_
        self.coef_ = coef_
        self._set_intercept(X_offset, y_offset, X_scale)
        return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
    """Bayesian ARD regression.
    Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to be in Gaussian distributions.
    Also estimate the parameters lambda (precisions of the distributions of the
    weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedures (Evidence Maximization)
    Read more in the :ref:`User Guide <bayesian_regression>`.
    Parameters
    ----------
    n_iter : int, optional
        Maximum number of iterations. Default is 300
    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.
    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6.
    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter. Default is 1.e-6.
    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.
    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter. Default is 1.e-6.
    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False.
    threshold_lambda : float, optional
        threshold for removing (pruning) weights with high precision from
        the computation. Default is 1.e+4.
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.
    copy_X : boolean, optional, default True.
        If True, X will be copied; else, it may be overwritten.
    verbose : boolean, optional, default False
        Verbose mode when fitting the model.
    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)
    alpha_ : float
        estimated precision of the noise.
    lambda_ : array, shape = (n_features)
        estimated precisions of the weights.
    sigma_ : array, shape = (n_features, n_features)
        estimated variance-covariance matrix of the weights
    scores_ : float
        if computed, value of the objective function (to be maximized)
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.ARDRegression()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
            copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
            n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
            verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])
    Notes
    --------
    See examples/linear_model/plot_ard.py for an example.
    """
    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
                 copy_X=True, verbose=False):
        # Plain hyper-parameter storage; all work is deferred to ``fit``.
        self.n_iter = n_iter
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.threshold_lambda = threshold_lambda
        self.copy_X = copy_X
        self.verbose = verbose
    def fit(self, X, y):
        """Fit the ARDRegression model according to the given training data
        and parameters.
        Iterative procedure to maximize the evidence
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        n_samples, n_features = X.shape
        coef_ = np.zeros(n_features)
        X, y, X_offset, y_offset, X_scale = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        # Launch the convergence loop
        # keep_lambda masks the features still active (not yet pruned).
        keep_lambda = np.ones(n_features, dtype=bool)
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        verbose = self.verbose
        # Initialization of the values of the parameters
        # alpha_ = noise precision (scalar); lambda_ = per-feature precisions.
        alpha_ = 1. / np.var(y)
        lambda_ = np.ones(n_features)
        self.scores_ = list()
        coef_old_ = None
        # Iterative procedure of ARDRegression
        for iter_ in range(self.n_iter):
            # Compute mu and sigma (using Woodbury matrix identity)
            # sigma_ is built over the active features only; the three
            # assignments below are one chained computation and must stay in
            # this exact order.
            sigma_ = pinvh(np.eye(n_samples) / alpha_ +
                           np.dot(X[:, keep_lambda] *
                                  np.reshape(1. / lambda_[keep_lambda], [1, -1]),
                                  X[:, keep_lambda].T))
            sigma_ = np.dot(sigma_, X[:, keep_lambda] *
                            np.reshape(1. / lambda_[keep_lambda], [1, -1]))
            sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
                              X[:, keep_lambda].T, sigma_)
            # Add 1/lambda to the diagonal in place.
            sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
            coef_[keep_lambda] = alpha_ * np.dot(
                sigma_, np.dot(X[:, keep_lambda].T, y))
            # Update alpha and lambda
            # gamma_ = per-feature "well-determinedness" of the active weights.
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
            lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
                                    ((coef_[keep_lambda]) ** 2 +
                                     2. * lambda_2))
            alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
                      (rmse_ + 2. * alpha_2))
            # Prune the weights with a precision over a threshold
            keep_lambda = lambda_ < self.threshold_lambda
            coef_[~keep_lambda] = 0
            # Compute the objective function
            # NOTE(review): ``log``/``pinvh``/``fast_logdet`` come from the
            # module top (not visible in this chunk).
            if self.compute_score:
                s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
                            np.sum(np.log(lambda_)))
                s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
                self.scores_.append(s)
            # Check for convergence
            if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Converged after %s iterations" % iter_)
                break
            coef_old_ = np.copy(coef_)
        # Expose fitted quantities and restore the intercept.
        self.coef_ = coef_
        self.alpha_ = alpha_
        self.sigma_ = sigma_
        self.lambda_ = lambda_
        self._set_intercept(X_offset, y_offset, X_scale)
        return self
| mit |
pratapvardhan/scikit-learn | sklearn/utils/deprecation.py | 77 | 2417 | import warnings
# Names exported by ``from sklearn.utils.deprecation import *``.
__all__ = ["deprecated", ]
class deprecated(object):
    """Decorator that flags a function or class as deprecated.

    Calling the decorated function (or instantiating the decorated class)
    issues a ``DeprecationWarning``, and a ``DEPRECATED`` banner is prepended
    to the docstring.  The optional ``extra`` text is appended to both the
    warning message and the docstring.  To use the default, write an empty
    pair of parentheses:

    >>> from sklearn.utils import deprecated
    >>> deprecated() # doctest: +ELLIPSIS
    <sklearn.utils.deprecation.deprecated object at ...>
    >>> @deprecated()
    ... def some_function(): pass
    """
    # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
    # but with many changes.
    def __init__(self, extra=''):
        """
        Parameters
        ----------
        extra: string
            to be added to the deprecation messages
        """
        self.extra = extra
    def __call__(self, obj):
        # Classes get their constructor wrapped; anything else is treated
        # as a plain callable and wrapped directly.
        decorate = (self._decorate_class if isinstance(obj, type)
                    else self._decorate_fun)
        return decorate(obj)
    def _decorate_class(self, cls):
        """Wrap ``cls.__init__`` so instantiation warns; return ``cls``."""
        message = "Class %s is deprecated" % cls.__name__
        if self.extra:
            message += "; %s" % self.extra
        # FIXME: we should probably reset __new__ for full generality
        original_init = cls.__init__
        def wrapper(*args, **kwargs):
            warnings.warn(message, category=DeprecationWarning)
            return original_init(*args, **kwargs)
        wrapper.__name__ = '__init__'
        wrapper.__doc__ = self._update_doc(original_init.__doc__)
        wrapper.deprecated_original = original_init
        cls.__init__ = wrapper
        return cls
    def _decorate_fun(self, fun):
        """Return a wrapper around ``fun`` that warns on every call."""
        message = "Function %s is deprecated" % fun.__name__
        if self.extra:
            message += "; %s" % self.extra
        def wrapper(*args, **kwargs):
            warnings.warn(message, category=DeprecationWarning)
            return fun(*args, **kwargs)
        # Preserve the wrapped function's identity and attributes by hand.
        wrapper.__name__ = fun.__name__
        wrapper.__dict__ = fun.__dict__
        wrapper.__doc__ = self._update_doc(fun.__doc__)
        return wrapper
    def _update_doc(self, olddoc):
        # Prepend the DEPRECATED banner, keeping any existing docstring below.
        newdoc = "DEPRECATED"
        if self.extra:
            newdoc = "%s: %s" % (newdoc, self.extra)
        if olddoc:
            newdoc = "%s\n\n%s" % (newdoc, olddoc)
        return newdoc
| bsd-3-clause |
themrmax/scikit-learn | sklearn/tree/tree.py | 11 | 50091 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
# Nelson Liu <nelson@nelsonliu.me>
#
# License: BSD 3 clause
from __future__ import division
import numbers
import warnings
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
# Dtype aliases re-exported from the Cython tree module: DTYPE is the input
# dtype (np.float32 — see the conversion notes in the predict docstrings),
# DOUBLE is used for targets and sample weights.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
# Map the public ``criterion`` string to its Cython implementation, for
# classification and regression trees respectively.
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
                "mae": _criterion.MAE}
# Map the public ``splitter`` string to its Cython implementation, chosen
# per input sparsity in BaseDecisionTree.fit.
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
                   "random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
                    "random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator)):
    """Base class for decision trees.
    Warning: This class should not be used directly.
    Use derived classes instead.
    """
    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state,
                 min_impurity_decrease,
                 min_impurity_split,
                 class_weight=None,
                 presort=False):
        # Plain hyper-parameter storage; validation happens in ``fit``.
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.min_impurity_split = min_impurity_split
        self.class_weight = class_weight
        self.presort = presort
    def fit(self, X, y, sample_weight=None, check_input=True,
            X_idx_sorted=None):
        """Build the tree from the training set (X, y).
        Shared implementation for the classifier and regressor subclasses;
        see the subclasses' ``fit`` docstrings for parameter details.
        """
        random_state = check_random_state(self.random_state)
        # --- Input validation -------------------------------------------
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csc")
            y = check_array(y, ensure_2d=False, dtype=None)
            if issparse(X):
                X.sort_indices()
                if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
                    raise ValueError("No support for np.int64 index based "
                                     "sparse matrices")
        # Determine output settings
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)
        y = np.atleast_1d(y)
        expanded_class_weight = None
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        # --- Target encoding --------------------------------------------
        # Classification: encode each output column's labels to 0..K-1 and
        # remember the original classes for decoding in ``predict``.
        if is_classification:
            check_classification_targets(y)
            y = np.copy(y)
            self.classes_ = []
            self.n_classes_ = []
            if self.class_weight is not None:
                y_original = np.copy(y)
            y_encoded = np.zeros(y.shape, dtype=np.int)
            for k in range(self.n_outputs_):
                classes_k, y_encoded[:, k] = np.unique(y[:, k],
                                                       return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])
            y = y_encoded
            if self.class_weight is not None:
                expanded_class_weight = compute_sample_weight(
                    self.class_weight, y_original)
        else:
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_
        self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        # --- Hyper-parameter validation ---------------------------------
        # Check parameters
        # None sentinels become the values the Cython builders expect:
        # "unbounded" depth is 2**31 - 1, "no leaf limit" is -1.
        max_depth = ((2 ** 31) - 1 if self.max_depth is None
                     else self.max_depth)
        max_leaf_nodes = (-1 if self.max_leaf_nodes is None
                          else self.max_leaf_nodes)
        if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
            if not 1 <= self.min_samples_leaf:
                raise ValueError("min_samples_leaf must be at least 1 "
                                 "or in (0, 0.5], got %s"
                                 % self.min_samples_leaf)
            min_samples_leaf = self.min_samples_leaf
        else:  # float
            if not 0. < self.min_samples_leaf <= 0.5:
                raise ValueError("min_samples_leaf must be at least 1 "
                                 "or in (0, 0.5], got %s"
                                 % self.min_samples_leaf)
            min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
        if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
            if not 2 <= self.min_samples_split:
                raise ValueError("min_samples_split must be an integer "
                                 "greater than 1 or a float in (0.0, 1.0]; "
                                 "got the integer %s"
                                 % self.min_samples_split)
            min_samples_split = self.min_samples_split
        else:  # float
            if not 0. < self.min_samples_split <= 1.:
                raise ValueError("min_samples_split must be an integer "
                                 "greater than 1 or a float in (0.0, 1.0]; "
                                 "got the float %s"
                                 % self.min_samples_split)
            min_samples_split = int(ceil(self.min_samples_split * n_samples))
            min_samples_split = max(2, min_samples_split)
        # A split must leave at least min_samples_leaf on each side.
        min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            if self.max_features > 0.0:
                max_features = max(1,
                                   int(self.max_features * self.n_features_))
            else:
                max_features = 0
        self.max_features_ = max_features
        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if not 0 <= self.min_weight_fraction_leaf <= 0.5:
            raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")
        if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
            raise ValueError("max_leaf_nodes must be integral number but was "
                             "%r" % max_leaf_nodes)
        if -1 < max_leaf_nodes < 2:
            raise ValueError(("max_leaf_nodes {0} must be either None "
                              "or larger than 1").format(max_leaf_nodes))
        # --- Sample weights ---------------------------------------------
        if sample_weight is not None:
            if (getattr(sample_weight, "dtype", None) != DOUBLE or
                    not sample_weight.flags.contiguous):
                sample_weight = np.ascontiguousarray(
                    sample_weight, dtype=DOUBLE)
            if len(sample_weight.shape) > 1:
                raise ValueError("Sample weights array has more "
                                 "than one dimension: %d" %
                                 len(sample_weight.shape))
            if len(sample_weight) != n_samples:
                raise ValueError("Number of weights=%d does not match "
                                 "number of samples=%d" %
                                 (len(sample_weight), n_samples))
        # Class weights multiply into (or replace) the sample weights.
        if expanded_class_weight is not None:
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Set min_weight_leaf from min_weight_fraction_leaf
        if sample_weight is None:
            min_weight_leaf = (self.min_weight_fraction_leaf *
                               n_samples)
        else:
            min_weight_leaf = (self.min_weight_fraction_leaf *
                               np.sum(sample_weight))
        if self.min_impurity_split is not None:
            warnings.warn("The min_impurity_split parameter is deprecated and"
                          " will be removed in version 0.21. "
                          "Use the min_impurity_decrease parameter instead.",
                          DeprecationWarning)
            min_impurity_split = self.min_impurity_split
        else:
            min_impurity_split = 1e-7
        if min_impurity_split < 0.:
            raise ValueError("min_impurity_split must be greater than "
                             "or equal to 0")
        if self.min_impurity_decrease < 0.:
            raise ValueError("min_impurity_decrease must be greater than "
                             "or equal to 0")
        # --- Presorting -------------------------------------------------
        presort = self.presort
        # Allow presort to be 'auto', which means True if the dataset is dense,
        # otherwise it will be False.
        if self.presort == 'auto' and issparse(X):
            presort = False
        elif self.presort == 'auto':
            presort = True
        if presort is True and issparse(X):
            raise ValueError("Presorting is not supported for sparse "
                             "matrices.")
        # If multiple trees are built on the same dataset, we only want to
        # presort once. Splitters now can accept presorted indices if desired,
        # but do not handle any presorting themselves. Ensemble algorithms
        # which desire presorting must do presorting themselves and pass that
        # matrix into each tree.
        if X_idx_sorted is None and presort:
            X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
                                             dtype=np.int32)
        if presort and X_idx_sorted.shape != X.shape:
            raise ValueError("The shape of X (X.shape = {}) doesn't match "
                             "the shape of X_idx_sorted (X_idx_sorted"
                             ".shape = {})".format(X.shape,
                                                   X_idx_sorted.shape))
        # Build tree
        # Resolve criterion/splitter strings to their Cython classes unless
        # the caller already supplied instances.
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            if is_classification:
                criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
                                                         self.n_classes_)
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
                                                         n_samples)
        SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](criterion,
                                                self.max_features_,
                                                min_samples_leaf,
                                                min_weight_leaf,
                                                random_state,
                                                self.presort)
        self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
        # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
        if max_leaf_nodes < 0:
            builder = DepthFirstTreeBuilder(splitter, min_samples_split,
                                            min_samples_leaf,
                                            min_weight_leaf,
                                            max_depth,
                                            self.min_impurity_decrease,
                                            min_impurity_split)
        else:
            builder = BestFirstTreeBuilder(splitter, min_samples_split,
                                           min_samples_leaf,
                                           min_weight_leaf,
                                           max_depth,
                                           max_leaf_nodes,
                                           self.min_impurity_decrease,
                                           min_impurity_split)
        builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
        # Single-output convenience: unwrap the per-output lists.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self
    def _validate_X_predict(self, X, check_input):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csr")
            if issparse(X) and (X.indices.dtype != np.intc or
                                X.indptr.dtype != np.intc):
                raise ValueError("No support for np.int64 index based "
                                 "sparse matrices")
        # The model was fit on self.n_features_ columns; reject anything else.
        n_features = X.shape[1]
        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is %s and "
                             "input n_features is %s "
                             % (self.n_features_, n_features))
        return X
    def predict(self, X, check_input=True):
        """Predict class or regression value for X.
        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predict values.
        """
        check_is_fitted(self, 'tree_')
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)
        n_samples = X.shape[0]
        # Classification
        # Decode argmax class indices back to the original labels.
        if isinstance(self, ClassifierMixin):
            if self.n_outputs_ == 1:
                return self.classes_.take(np.argmax(proba, axis=1), axis=0)
            else:
                predictions = np.zeros((n_samples, self.n_outputs_))
                for k in range(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1),
                        axis=0)
                return predictions
        # Regression
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0]
            else:
                return proba[:, :, 0]
    def apply(self, X, check_input=True):
        """
        Returns the index of the leaf that each sample is predicted as.
        .. versionadded:: 0.17
        Parameters
        ----------
        X : array_like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        X_leaves : array_like, shape = [n_samples,]
            For each datapoint x in X, return the index of the leaf x
            ends up in. Leaves are numbered within
            ``[0; self.tree_.node_count)``, possibly with gaps in the
            numbering.
        """
        check_is_fitted(self, 'tree_')
        X = self._validate_X_predict(X, check_input)
        return self.tree_.apply(X)
    def decision_path(self, X, check_input=True):
        """Return the decision path in the tree
        .. versionadded:: 0.18
        Parameters
        ----------
        X : array_like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non zero elements
            indicates that the samples goes through the nodes.
        """
        X = self._validate_X_predict(X, check_input)
        return self.tree_.decision_path(X)
    @property
    def feature_importances_(self):
        """Return the feature importances.
        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.
        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        check_is_fitted(self, 'tree_')
        return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data and
``max_features=n_features``, if the improvement of the criterion is
identical for several splits enumerated during the search of the best
split. To obtain a deterministic behaviour during fitting,
``random_state`` has to be fixed.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 class_weight=None,
                 presort=False):
        # No classifier-specific construction logic: every hyper-parameter is
        # forwarded unchanged to the parent class constructor.  The parameter
        # semantics are documented in the class docstring above.
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state,
            min_impurity_decrease=min_impurity_decrease,
            min_impurity_split=min_impurity_split,
            presort=presort)
    def fit(self, X, y, sample_weight=None, check_input=True,
            X_idx_sorted=None):
        """Build a decision tree classifier from the training set (X, y).

        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels) as integers or strings.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. Splits are also
            ignored if they would result in any single class carrying a
            negative weight in either child node.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many tree
            are grown on the same dataset, this allows the ordering to be
            cached between trees. If None, the data will be sorted here.
            Don't use this parameter unless you know what to do.

        Returns
        -------
        self : object
            Returns self.
        """
        # Tree construction is delegated entirely to the parent class; this
        # override exists to document classification-specific semantics of
        # ``y`` and to return ``self`` for call chaining.
        super(DecisionTreeClassifier, self).fit(
            X, y,
            sample_weight=sample_weight,
            check_input=check_input,
            X_idx_sorted=X_idx_sorted)
        return self
    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.

        The predicted class probability is the fraction of samples of the same
        class in a leaf.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        check_is_fitted(self, 'tree_')
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)

        if self.n_outputs_ == 1:
            proba = proba[:, :self.n_classes_]
            # Normalize each row so the class probabilities sum to 1.  Rows
            # that sum to zero are divided by 1.0 instead, leaving them
            # all-zero rather than producing NaNs.
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer

            return proba

        else:
            # Multi-output case: ``n_classes_`` is a list, so each output's
            # slice of the prediction is normalized independently.
            all_proba = []

            for k in range(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)

            return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        Split-quality measure: "mse" (mean squared error, equivalent to
        variance reduction) or "mae" (mean absolute error).

        .. versionadded:: 0.18
           Mean Absolute Error (MAE) criterion.

    splitter : string, optional (default="best")
        "best" chooses the best split, "random" the best random split.

    max_features : int, float, string or None, optional (default=None)
        Number of features considered per split: an int, a fraction of
        ``n_features``, one of "auto"/"sqrt"/"log2", or None (all
        features).  The search does not stop before at least one valid
        partition is found, even if that requires inspecting more than
        ``max_features`` features.

    max_depth : int or None, optional (default=None)
        Maximum tree depth; None expands nodes until leaves are pure or
        hold fewer than ``min_samples_split`` samples.

    min_samples_split : int, float, optional (default=2)
        Minimum number (int) or fraction (float, via
        ``ceil(min_samples_split * n_samples)``) of samples required to
        split an internal node.

        .. versionchanged:: 0.18
           Added float values for percentages.

    min_samples_leaf : int, float, optional (default=1)
        Minimum number (int) or fraction (float, via
        ``ceil(min_samples_leaf * n_samples)``) of samples required at a
        leaf node.

        .. versionchanged:: 0.18
           Added float values for percentages.

    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the total sample weight required at
        a leaf.  Samples have equal weight when sample_weight is not
        provided.

    max_leaf_nodes : int or None, optional (default=None)
        Grow at most this many leaves in best-first fashion (best =
        largest relative impurity reduction); None means unlimited.

    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator, or None (use ``np.random``) controlling the
        random permutation of features at each split.

    min_impurity_split : float,
        Impurity threshold for early stopping in tree growth.

        .. deprecated:: 0.19
           Deprecated in favor of ``min_impurity_decrease``; will be
           removed in 0.21.

    min_impurity_decrease : float, optional (default=0.)
        A node is split only if the split decreases the impurity by at
        least this value, computed as::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the (weighted) total sample count, ``N_t`` the
        count at the node, and ``N_t_L``/``N_t_R`` the counts in the left
        and right children.

        .. versionadded:: 0.19

    presort : bool, optional (default=False)
        Whether to presort the data to speed up the search for best
        splits; may slow training on large datasets but can help on
        small or depth-restricted problems.

    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        The (normalized) total criterion reduction contributed by each
        feature, also known as the Gini importance.

    max_features_ : int,
        The inferred value of max_features.

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree object
        The underlying Tree object.

    Notes
    -----
    The default parameter values lead to fully grown, unpruned trees that
    can be very large on some data sets; bound the tree size to reduce
    memory consumption.  Because features are randomly permuted at each
    split, the best found split may vary between fits even on identical
    data when several splits tie; fix ``random_state`` for deterministic
    behaviour.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.model_selection import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ...                    # doctest: +SKIP
    ...
    array([ 0.61...,  0.57..., -0.34...,  0.41...,  0.75...,
            0.07...,  0.29...,  0.33..., -1.42..., -1.77...])
    """

    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 presort=False):
        # Pure forwarding constructor: the shared base class implements all
        # of the tree-building machinery.
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            min_impurity_decrease=min_impurity_decrease,
            min_impurity_split=min_impurity_split,
            random_state=random_state,
            presort=presort)

    def fit(self, X, y, sample_weight=None, check_input=True,
            X_idx_sorted=None):
        """Build a decision tree regressor from the training set (X, y).

        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The training input samples; converted internally to
            ``dtype=np.float32``, and to a sparse ``csc_matrix`` when a
            sparse matrix is provided.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (real numbers). Use ``dtype=np.float64`` and
            ``order='C'`` for maximum efficiency.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight
            are ignored while searching for a split in each node.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples, allowing the
            ordering to be cached when many trees are grown on the same
            dataset. If None, the data is sorted here.
            Don't use this parameter unless you know what to do.

        Returns
        -------
        self : object
            Returns self.
        """
        # The base class implements the actual fitting; returning ``self``
        # keeps the scikit-learn estimator contract.
        super(DecisionTreeRegressor, self).fit(
            X, y,
            sample_weight=sample_weight,
            check_input=check_input,
            X_idx_sorted=X_idx_sorted)
        return self
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees differ from classic decision trees in how splits are
    chosen: for each of the `max_features` randomly selected candidate
    features one *random* split is drawn, and the best of those random
    splits is used.  With `max_features` set to 1 this builds a totally
    random decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    Notes
    -----
    The default parameter values lead to fully grown, unpruned trees that
    can be very large on some data sets; bound the tree size (e.g. via
    ``max_depth`` or ``min_samples_leaf``) to limit memory consumption.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 class_weight=None):
        # Same parameters as DecisionTreeClassifier except for the defaults
        # (splitter="random", max_features="auto") and the absence of the
        # ``presort`` option.
        super(ExtraTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            min_impurity_decrease=min_impurity_decrease,
            min_impurity_split=min_impurity_split,
            class_weight=class_weight,
            random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees differ from classic decision trees in how splits are
    chosen: for each of the `max_features` randomly selected candidate
    features one *random* split is drawn, and the best of those random
    splits is used.  With `max_features` set to 1 this builds a totally
    random decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    Notes
    -----
    The default parameter values lead to fully grown, unpruned trees that
    can be very large on some data sets; bound the tree size (e.g. via
    ``max_depth`` or ``min_samples_leaf``) to limit memory consumption.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 max_leaf_nodes=None):
        # Same parameters as DecisionTreeRegressor except for the defaults
        # (splitter="random", max_features="auto") and the absence of the
        # ``presort`` option.
        super(ExtraTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            min_impurity_decrease=min_impurity_decrease,
            min_impurity_split=min_impurity_split,
            random_state=random_state)
| bsd-3-clause |
jamestwebber/scipy | scipy/interpolate/ndgriddata.py | 2 | 7566 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbor interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
    """
    NearestNDInterpolator(x, y)

    Nearest-neighbor interpolation in N dimensions.

    .. versionadded:: 0.9

    Methods
    -------
    __call__

    Parameters
    ----------
    x : (Npoints, Ndims) ndarray of floats
        Data point coordinates.
    y : (Npoints,) ndarray of float or complex
        Data values.
    rescale : boolean, optional
        Rescale points to unit cube before performing interpolation.
        This is useful if some of the input dimensions have
        incommensurable units and differ by many orders of magnitude.

        .. versionadded:: 0.14.0
    tree_options : dict, optional
        Options passed to the underlying ``cKDTree``.

        .. versionadded:: 0.17.0

    Notes
    -----
    Uses ``scipy.spatial.cKDTree``
    """

    def __init__(self, x, y, rescale=False, tree_options=None):
        # The base class validates and stores the coordinates as
        # ``self.points``; values are kept separately on this class.
        NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
                                    need_contiguous=False,
                                    need_values=False)
        self.tree = cKDTree(self.points, **(tree_options or {}))
        self.values = np.asarray(y)

    def __call__(self, *args):
        """
        Evaluate interpolator at given points.

        Parameters
        ----------
        xi : ndarray of float, shape (..., ndim)
            Points where to interpolate data at.
        """
        query = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
        query = self._check_call_shape(query)
        query = self._scale_x(query)
        # Only the index of the nearest data point is needed; the distance
        # returned by the kd-tree query is discarded.
        _, nearest = self.tree.query(query)
        return self.values[nearest]
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
             rescale=False):
    """
    Interpolate unstructured D-D data.

    Parameters
    ----------
    points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
        Data point coordinates.
    values : ndarray of float or complex, shape (n,)
        Data values.
    xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
        Points at which to interpolate data.
    method : {'linear', 'nearest', 'cubic'}, optional
        Method of interpolation: 'nearest' returns the value at the closest
        data point (see `NearestNDInterpolator`); 'linear' tessellates the
        input points into simplices and interpolates linearly on each one
        (see `LinearNDInterpolator`); 'cubic' uses a cubic spline in 1-D, or
        a piecewise cubic, C1, approximately curvature-minimizing surface in
        2-D (see `CloughTocher2DInterpolator`).
    fill_value : float, optional
        Value used for requested points outside of the convex hull of the
        input points; defaults to ``nan``.  Has no effect for the 'nearest'
        method.
    rescale : bool, optional
        Rescale points to unit cube before performing interpolation.  Useful
        when input dimensions have incommensurable units and differ by many
        orders of magnitude.

        .. versionadded:: 0.14.0

    Returns
    -------
    ndarray
        Array of interpolated values.

    Notes
    -----
    .. versionadded:: 0.9
    """
    points = _ndim_coords_from_arrays(points)

    # A flat coordinate array means 1-D data; otherwise the trailing axis
    # holds the spatial dimension.
    if points.ndim < 2:
        ndim = points.ndim
    else:
        ndim = points.shape[-1]

    if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
        # 1-D data is handled by interp1d rather than the N-D machinery.
        from .interpolate import interp1d
        points = points.ravel()
        if isinstance(xi, tuple):
            if len(xi) != 1:
                raise ValueError("invalid number of dimensions in xi")
            xi, = xi
        # Sort points/values together, necessary as input for interp1d.
        order = np.argsort(points)
        points = points[order]
        values = values[order]
        if method == 'nearest':
            fill_value = 'extrapolate'
        interpolator = interp1d(points, values, kind=method, axis=0,
                                bounds_error=False, fill_value=fill_value)
        return interpolator(xi)

    if method == 'nearest':
        return NearestNDInterpolator(points, values, rescale=rescale)(xi)
    if method == 'linear':
        return LinearNDInterpolator(points, values, fill_value=fill_value,
                                    rescale=rescale)(xi)
    if method == 'cubic' and ndim == 2:
        return CloughTocher2DInterpolator(points, values,
                                          fill_value=fill_value,
                                          rescale=rescale)(xi)

    raise ValueError("Unknown interpolation method %r for "
                     "%d dimensional data" % (method, ndim))
RuthAngus/K2rotation | plots/raw_and_vbg.py | 1 | 3921 | import numpy as np
import matplotlib.pyplot as plt
from gatspy.periodic import LombScargle
import fitsio
import time
def raw_and_vbg():
    """Compare periodograms of one EPIC target's raw and detrended data.

    Builds a three-panel figure showing the Lomb-Scargle periodogram of
    (1) the raw K2 photometry, (2) the VJ14-detrended light curve, and
    (3) the SIP power spectrum, then saves it to
    ``../documents/rawvbg_<eid>.pdf`` and ``poster_rawvbg_<eid>``.  As a
    final step, a quick FFT-based power spectrum of the raw flux is
    written to ``fft``.

    Reads from ``../data/c1`` and ``../astero``; takes no arguments and
    returns nothing.
    """
    # BUGFIX: 'text.fontsize' is not a valid rcParams key in current
    # matplotlib (it was removed); 'font.size' is the supported key for
    # the default font size.
    plotpar = {'axes.labelsize': 12,
               'font.size': 18,
               'legend.fontsize': 18,
               'xtick.labelsize': 14,
               'ytick.labelsize': 14,
               'text.usetex': True}
    plt.rcParams.update(plotpar)

    eid = "201183188"  # alternative target: "201545182"
    fname = "../data/c1/ktwo%s-c01_lpd-lc.fits" % eid

    # Load raw data: pick the aperture with the smallest CDPP and keep
    # only finite, quality-flag-zero cadences.
    data = fitsio.read(fname)
    aps = fitsio.read(fname, 2)
    y = data["flux"][:, np.argmin(aps["cdpp6"])]
    x = data["time"]
    q = data["quality"]
    l = np.isfinite(y) * np.isfinite(x) * (q == 0)
    y, x = y[l], x[l]

    # BUGFIX: the median absolute deviation requires the abs().  The
    # original np.median(y - np.median(y)) is identically zero, which is
    # why the MAD == 0 guard below always fired.
    MAD = np.median(np.abs(y - np.median(y)))
    y /= np.median(y)
    y -= 1
    x *= 24 * 3600  # convert days to seconds
    fs = np.arange(.1, 300, 4e-2) * 1e-6  # asteroseismic frequency grid

    # Panel 1: periodogram of the raw light curve.  ``ax`` spans the top
    # two thirds of the figure and only carries the title.
    fig = plt.figure()
    ax = fig.add_subplot(211)
    ax1 = fig.add_subplot(311)
    model = LombScargle().fit(x, y, np.ones_like(y) * 1e-5)
    period = 1. / fs
    start = time.time()
    raw_pgram = model.periodogram(period)
    end = time.time()
    print("LS time = ", end - start)

    ax1.plot(fs[::3] * 1e6, raw_pgram[::3], "k", label="$\mathrm{Raw}$")
    ax.set_title("$\mathrm{EPIC~%s}$" % eid)
    ax1.set_xlim(10, 280)
    ax1.set_ylim(0, .015)
    plt.ylabel("$\mathrm{Power}$")
    plt.text(230, .012, "$\mathrm{Raw}$")
    ticks = ax1.get_yticks()
    ax1.set_yticks(ticks[1:-1])

    # Hide the spines/ticks of the title-only axes.
    ax.set_yticklabels(ax.get_yticklabels(), visible=False)
    ax.set_xticklabels(ax.get_xticklabels(), visible=False)
    ax.spines['top'].set_color('none')
    ax.spines['bottom'].set_color('none')
    ax.spines['left'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.tick_params(labelcolor='w', top='off', bottom='off', left='off',
                   right='off')

    # Panel 2: periodogram of the VJ14-detrended light curve.
    ax2 = fig.add_subplot(312)
    x, y = np.genfromtxt("../data/c1/ep%s.csv" % eid, skip_header=1).T
    x *= 24 * 3600
    model = LombScargle().fit(x, y, np.ones_like(y) * 1e-5)
    ps = 1. / fs
    pgram = model.periodogram(ps)
    ax2.plot(fs[::3] * 1e6, pgram[::3], "k", label="$\mathrm{Detrended}$")
    plt.text(200, .010, "$\mathrm{VJ14~Detrended}$")
    ax2.set_xlim(10, 280)
    ax2.set_ylim(0, .015)
    plt.ylabel("$\mathrm{Power}$")
    ticks = ax2.get_yticks()
    ax2.set_yticks(ticks[1:-1])

    # Panel 3: SIP periodogram, scaled by the flux MAD squared.
    fs, s2n = np.genfromtxt("../astero/%sastero_pgram.txt"
                            % str(int(eid))).T
    ax3 = fig.add_subplot(313)
    if MAD == 0.:
        MAD = 1.  # avoid dividing by zero for a constant light curve
    plt.plot(fs[::3], s2n[::3] * 10e4 / MAD**2, "k", label="$\mathrm{SIP}$")
    plt.text(230, 2.2, "$\mathrm{SIP}$")
    ax3.set_xlim(10, 280)
    plt.ylabel("$\mathrm{Relative~(S/N)}^2\mathrm{~(} \\times 10^{4}\mathrm{)}$")
    plt.xlabel("$\\nu\mathrm{~(}\mu\mathrm{Hz)}$")
    fig.subplots_adjust(hspace=0, bottom=.1)

    print("saving as ../documents/rawvbg_%s.pdf" % eid)
    print("saving as poster_rawvbg_%s.pdf" % eid)
    plt.savefig("../documents/rawvbg_%s.pdf" % eid)
    plt.savefig("poster_rawvbg_%s" % eid, transparent=True)

    # Quick-look FFT power spectrum of the raw flux.
    # BUGFIX: the original computed sp.real**2 * np.imag**2, which raises
    # a TypeError (np.imag is a function, not an attribute of sp) and is
    # mathematically wrong: the power spectrum is Re(F)^2 + Im(F)^2.
    sp = np.fft.fft(y)
    freq = np.fft.fftfreq(x.shape[-1])
    fft = sp.real**2 + sp.imag**2
    plt.clf()
    plt.plot(freq, fft)
    plt.savefig("fft")
if __name__ == "__main__":
    # Generate the figures when this module is run directly as a script.
    raw_and_vbg()
| mit |
mmilutinovic1313/zipline-with-algorithms | zipline/history/history_container.py | 18 | 33931 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bisect import insort_left
from collections import namedtuple
from itertools import groupby, product
import logbook
import numpy as np
import pandas as pd
from six import itervalues, iteritems, iterkeys
from . history import HistorySpec
from zipline.finance.trading import with_environment
from zipline.utils.data import RollingPanel, _ensure_index
from zipline.utils.munge import ffill, bfill
# Module-level logger shared by this file.
logger = logbook.Logger('History Container')

# The closing price is referred to by multiple names,
# allow both for price rollover logic etc.
CLOSING_PRICE_FIELDS = frozenset({'price', 'close_price'})
def ffill_buffer_from_prior_values(freq,
                                   field,
                                   buffer_frame,
                                   digest_frame,
                                   pv_frame,
                                   raw=False):
    """
    Forward-fill a buffer frame, falling back to the end-of-period values of a
    digest frame if the buffer frame has leading NaNs.
    """
    # When raw=True and DataFrames are passed, operate on the underlying
    # ndarrays instead of the pandas objects.
    digest_values = digest_frame.values if (
        raw and isinstance(digest_frame, pd.DataFrame)) else digest_frame
    buffer_values = buffer_frame.values if (
        raw and isinstance(buffer_frame, pd.DataFrame)) else buffer_frame

    leading_nans = pd.isnull(buffer_values[0])
    if np.any(leading_nans) and len(digest_values):
        # Seed leading NaNs in the buffer from the most recent digest row.
        buffer_values[0, leading_nans] = digest_values[-1, leading_nans]

    leading_nans = pd.isnull(buffer_values[0])
    if np.any(leading_nans):
        # Still missing after the digest: fall back to the last values
        # known before the digest for this (frequency, field) pair.
        key_loc = pv_frame.index.get_loc((freq.freq_str, field))
        buffer_values[0, leading_nans] = pv_frame.values[key_loc, leading_nans]

    if raw:
        return ffill(buffer_values)
    return buffer_frame.ffill()
def ffill_digest_frame_from_prior_values(freq,
                                         field,
                                         digest_frame,
                                         pv_frame,
                                         raw=False):
    """
    Forward-fill a digest frame, falling back to the last known prior values
    if necessary.
    """
    # When raw=True and a DataFrame is passed, operate on the underlying
    # ndarray instead of the pandas object.
    values = digest_frame.values if (
        raw and isinstance(digest_frame, pd.DataFrame)) else digest_frame

    missing = pd.isnull(values[0])
    if np.any(missing):
        # Seed leading NaNs from the values last known before this digest
        # for this (frequency, field) pair.
        key_loc = pv_frame.index.get_loc((freq.freq_str, field))
        values[0, missing] = pv_frame.values[key_loc, missing]

    if raw:
        return ffill(values)
    return digest_frame.ffill()
def freq_str_and_bar_count(history_spec):
    """
    Helper for getting the frequency string and bar count from a history
    spec, as a ``(freq_str, bar_count)`` tuple suitable for sorting.
    """
    return history_spec.frequency.freq_str, history_spec.bar_count
@with_environment()
def next_bar(spec, env):
    """
    Returns a function that will return the next bar for a given datetime.
    """
    # NOTE(review): ``env`` appears to be injected by the @with_environment
    # decorator, so callers presumably pass only ``spec`` — confirm against
    # zipline.finance.trading.
    if spec.frequency.unit_str == 'd':
        if spec.frequency.data_frequency == 'minute':
            # Daily bars over minute data: the "next bar" is the market
            # close of the next trading day.
            return lambda dt: env.get_open_and_close(
                env.next_trading_day(dt),
            )[1]
        else:
            return env.next_trading_day
    else:
        return env.next_market_minute
def compute_largest_specs(history_specs):
    """
    Maps a Frequency to the largest HistorySpec at that frequency from an
    iterable of HistorySpecs.
    """
    # Sort by (freq_str, bar_count) so that specs sharing a frequency are
    # adjacent, as itertools.groupby requires.
    ordered = sorted(
        history_specs,
        key=lambda spec: (spec.frequency.freq_str, spec.bar_count),
    )
    largest = {}
    for frequency, group in groupby(ordered, key=lambda spec: spec.frequency):
        largest[frequency] = max(group, key=lambda spec: spec.bar_count)
    return largest
# tuples to store a change to the shape of a HistoryContainer

# A change to the buffer size for a single frequency.
FrequencyDelta = namedtuple(
    'FrequencyDelta',
    ['freq', 'buffer_delta'],
)

# A change in bar count (``delta``) requested at a given frequency.
LengthDelta = namedtuple(
    'LengthDelta',
    ['freq', 'delta'],
)

# Base tuple for HistoryContainerDelta (defined below), which adds
# defaulted construction and an ``empty`` property on top of it.
HistoryContainerDeltaSuper = namedtuple(
    'HistoryContainerDelta',
    ['field', 'frequency_delta', 'length_delta'],
)
class HistoryContainerDelta(HistoryContainerDeltaSuper):
    """
    A class representing a resize of the history container.
    """

    def __new__(cls, field=None, frequency_delta=None, length_delta=None):
        """
        field is a new field that was added.
        frequency_delta is a FrequencyDelta representing a new frequency
        that was added.
        length_delta is a bar LengthDelta, i.e. a frequency and a bar_count.
        A component left as None means no change of that type occurred.
        """
        return super(HistoryContainerDelta, cls).__new__(
            cls, field, frequency_delta, length_delta,
        )

    @property
    def empty(self):
        """
        True when no field, frequency, or length change is recorded.
        """
        return all(
            component is None
            for component in (self.field,
                              self.frequency_delta,
                              self.length_delta)
        )
def normalize_to_data_freq(data_frequency, dt):
    # Minute-level data keeps its full timestamp; anything else is
    # normalized (time-of-day stripped) so bars align on dates.
    if data_frequency != 'minute':
        return pd.tslib.normalize_date(dt)
    return dt
class HistoryContainer(object):
    """
    Container for all history panels and frames used by an algoscript.

    To be used internally by TradingAlgorithm, but *not* passed directly to
    the algorithm.

    Entry point for the algoscript is the result of `get_history`.
    """

    # The only fields that history specs may request from this container.
    VALID_FIELDS = {
        'price', 'open_price', 'volume', 'high', 'low', 'close_price',
    }

    def __init__(self,
                 history_specs,
                 initial_sids,
                 initial_dt,
                 data_frequency,
                 bar_data=None):
        """
        A container to hold a rolling window of historical data within a
        user's algorithm.

        Args:
            history_specs (dict[Frequency:HistorySpec]): The starting history
                specs that this container should be able to service.
            initial_sids (set[Asset or Int]): The starting sids to watch.
            initial_dt (datetime): The datetime to start collecting history
                from.
            data_frequency (str): 'minute' or 'daily'; controls how
                initial_dt is normalized and how the buffer panel is keyed.
            bar_data (BarData): If this container is being constructed during
                handle_data, this is the BarData for the current bar to fill
                the buffer with. If this is constructed elsewhere, it is
                None.

        Returns:
            An instance of a new HistoryContainer
        """
        # History specs to be served by this container.
        self.history_specs = history_specs
        # Per frequency, keep only the spec with the largest bar_count --
        # it dictates the storage we must allocate for that frequency.
        self.largest_specs = compute_largest_specs(
            itervalues(self.history_specs)
        )

        # The set of fields specified by all history specs
        self.fields = pd.Index(
            sorted(set(spec.field for spec in itervalues(history_specs)))
        )
        self.sids = pd.Index(
            sorted(set(initial_sids or []))
        )

        self.data_frequency = data_frequency

        initial_dt = normalize_to_data_freq(self.data_frequency, initial_dt)

        # This panel contains raw minutes for periods that haven't been fully
        # completed. When a frequency period rolls over, these minutes are
        # digested using some sort of aggregation call on the panel (e.g.
        # `sum` for volume, `max` for high, `min` for low, etc.).
        self.buffer_panel = self.create_buffer_panel(initial_dt, bar_data)

        # Dictionaries with Frequency objects as keys.
        self.digest_panels, self.cur_window_starts, self.cur_window_closes = \
            self.create_digest_panels(initial_sids, initial_dt)

        # Helps prop up the prior day panel against having a nan, when the
        # data has been seen.
        self.last_known_prior_values = pd.DataFrame(
            data=None,
            index=self.prior_values_index,
            columns=self.prior_values_columns,
            # Note: For bizarre "intricacies of the spaghetti that is pandas
            # indexing logic" reasons, setting this dtype prevents indexing
            # errors in update_last_known_values. This is safe for the time
            # being because our only forward-fillable fields are floats. If
            # we need to add a non-float-typed forward-fillable field, then
            # we may find ourselves having to track down and fix a pandas
            # bug.
            dtype=np.float64,
        )

    # Cache backing the `ffillable_fields` property; reset to None whenever
    # `self.fields` changes (see `_add_field`).
    _ffillable_fields = None
@property
def ffillable_fields(self):
if self._ffillable_fields is None:
fillables = self.fields.intersection(HistorySpec.FORWARD_FILLABLE)
self._ffillable_fields = fillables
return self._ffillable_fields
@property
def prior_values_index(self):
index_values = list(
product(
(freq.freq_str for freq in self.unique_frequencies),
# Only store prior values for forward-fillable fields.
self.ffillable_fields,
)
)
if index_values:
return pd.MultiIndex.from_tuples(index_values)
else:
# MultiIndex doesn't gracefully support empty input, so we return
# an empty regular Index if we have values.
return pd.Index(index_values)
@property
def prior_values_columns(self):
return self.sids
@property
def all_panels(self):
yield self.buffer_panel
for panel in self.digest_panels.values():
yield panel
@property
def unique_frequencies(self):
"""
Return an iterator over all the unique frequencies serviced by this
container.
"""
return iterkeys(self.largest_specs)
    @with_environment()
    def _add_frequency(self, spec, dt, data, env=None):
        """
        Adds a new frequency to the container. This reshapes the buffer_panel
        if needed.

        Args:
            spec (HistorySpec): The spec whose frequency is being added.
            dt (datetime): The current algorithm datetime.
            data (BarData): Current bar; used only if a fresh buffer panel
                must be created.
            env: Trading environment, supplied by the decorator.

        Returns:
            FrequencyDelta describing the buffer change (buffer_delta is
            None when a fresh buffer was created, 0 when unchanged).
        """
        freq = spec.frequency
        self.largest_specs[freq] = spec
        new_buffer_len = 0

        if freq.max_bars > self.buffer_panel.window_length:
            # More bars need to be held in the buffer_panel to support this
            # freq
            if freq.data_frequency \
                    != self.buffer_spec.frequency.data_frequency:
                # If the data_frequencies are not the same, then we need to
                # create a fresh buffer.
                self.buffer_panel = self.create_buffer_panel(
                    dt, bar_data=data,
                )
                new_buffer_len = None
            else:
                # The frequencies are the same, we just need to add more bars.
                self._resize_panel(
                    self.buffer_panel,
                    freq.max_bars,
                    dt,
                    self.buffer_spec.frequency,
                )
                new_buffer_len = freq.max_minutes
                # update the current buffer_spec to reflect the new length.
                self.buffer_spec.bar_count = new_buffer_len + 1

        if spec.bar_count > 1:
            # This spec has more than one bar, construct a digest panel for
            # it.
            self.digest_panels[freq] = self._create_digest_panel(
                dt, spec=spec, env=env,
            )
        else:
            # Single-bar spec: no digest panel, just track the open window.
            self.cur_window_starts[freq] = dt
            self.cur_window_closes[freq] = freq.window_close(
                self.cur_window_starts[freq]
            )

        # New (freq, field) rows may now be needed for prior values.
        self.last_known_prior_values = self.last_known_prior_values.reindex(
            index=self.prior_values_index,
        )

        return FrequencyDelta(freq, new_buffer_len)
def _add_field(self, field):
"""
Adds a new field to the container.
"""
# self.fields is already sorted, so we just need to insert the new
# field in the correct index.
ls = list(self.fields)
insort_left(ls, field)
self.fields = pd.Index(ls)
# unset fillable fields cache
self._ffillable_fields = None
self._realign_fields()
self.last_known_prior_values = self.last_known_prior_values.reindex(
index=self.prior_values_index,
)
return field
    @with_environment()
    def _add_length(self, spec, dt, env=None):
        """
        Increases the length of the digest panel for spec.frequency. If this
        does not have a panel, and one is needed; a digest panel will be
        constructed.

        Returns:
            LengthDelta with the change in bar_count for this frequency.
        """
        old_count = self.largest_specs[spec.frequency].bar_count
        self.largest_specs[spec.frequency] = spec
        delta = spec.bar_count - old_count

        panel = self.digest_panels.get(spec.frequency)

        if panel is None:
            # The old length for this frequency was 1 bar, meaning no digest
            # panel was held. We must construct a new one here.
            panel = self._create_digest_panel(
                dt, spec=spec, env=env,
            )
        else:
            # Grow the existing panel to bar_count - 1 digest bars (the
            # currently-forming bar lives in the buffer panel).
            self._resize_panel(
                panel, spec.bar_count - 1, dt, freq=spec.frequency, env=env,
            )

        self.digest_panels[spec.frequency] = panel

        return LengthDelta(spec.frequency, delta)
    @with_environment()
    def _resize_panel(self, panel, size, dt, freq, env=None):
        """
        Resizes a panel, fills the date_buf with the correct values.

        Args:
            panel (RollingPanel): The panel to extend in place.
            size (int): The desired window length.
            dt (datetime): Current algorithm datetime.
            freq (Frequency): Frequency used to generate the missing dates.
            env: Trading environment, supplied by the decorator.
        """
        # This is the oldest datetime that will be shown in the current
        # window of the panel.
        oldest_dt = pd.Timestamp(panel.start_date, tz='utc',)
        delta = size - panel.window_length

        # Construct the missing dates.
        missing_dts = self._create_window_date_buf(
            delta, freq.unit_str, freq.data_frequency, oldest_dt,
        )

        panel.extend_back(missing_dts)
    @with_environment()
    def _create_window_date_buf(self,
                                window,
                                unit_str,
                                data_frequency,
                                dt,
                                env=None):
        """
        Creates a window length date_buf looking backwards from dt.

        Returns a numpy array of datetime64 values, oldest first.
        """
        if unit_str == 'd':
            # Get the properly key'd datetime64 out of the pandas Timestamp
            if data_frequency != 'daily':
                # Daily bars built from minute data are keyed by market
                # close.
                arr = env.open_close_window(
                    dt,
                    window,
                    offset=-window,
                ).market_close.astype('datetime64[ns]').values
            else:
                arr = env.open_close_window(
                    dt,
                    window,
                    offset=-window,
                ).index.values

            return arr
        else:
            # Minute frequency: the `window` market minutes strictly before
            # dt, reversed so they come back oldest-first.
            return env.market_minute_window(
                env.previous_market_minute(dt),
                window,
                step=-1,
            )[::-1].values
    @with_environment()
    def _create_panel(self, dt, spec, env=None):
        """
        Constructs a rolling panel with a properly aligned date_buf.
        """
        dt = normalize_to_data_freq(spec.frequency.data_frequency, dt)

        # The panel holds bar_count - 1 completed bars; the current,
        # still-forming bar is not stored here.
        window = spec.bar_count - 1

        date_buf = self._create_window_date_buf(
            window,
            spec.frequency.unit_str,
            spec.frequency.data_frequency,
            dt,
            env=env,
        )

        panel = RollingPanel(
            window=window,
            items=self.fields,
            sids=self.sids,
            initial_dates=date_buf,
        )

        return panel
    @with_environment()
    def _create_digest_panel(self,
                             dt,
                             spec,
                             window_starts=None,
                             window_closes=None,
                             env=None):
        """
        Creates a digest panel, setting the window_starts and window_closes.

        If window_starts or window_closes are None, then
        self.cur_window_starts or self.cur_window_closes will be used.
        """
        freq = spec.frequency

        window_starts = window_starts if window_starts is not None \
            else self.cur_window_starts
        window_closes = window_closes if window_closes is not None \
            else self.cur_window_closes

        # Record the open window boundaries for this frequency (mutates the
        # supplied dicts in place).
        window_starts[freq] = freq.normalize(dt)
        window_closes[freq] = freq.window_close(window_starts[freq])

        return self._create_panel(dt, spec, env=env)
def ensure_spec(self, spec, dt, bar_data):
"""
Ensure that this container has enough space to hold the data for the
given spec. This returns a HistoryContainerDelta to represent the
changes in shape that the container made to support the new
HistorySpec.
"""
updated = {}
if spec.field not in self.fields:
updated['field'] = self._add_field(spec.field)
if spec.frequency not in self.largest_specs:
updated['frequency_delta'] = self._add_frequency(
spec, dt, bar_data,
)
if spec.bar_count > self.largest_specs[spec.frequency].bar_count:
updated['length_delta'] = self._add_length(spec, dt)
return HistoryContainerDelta(**updated)
def add_sids(self, to_add):
"""
Add new sids to the container.
"""
self.sids = pd.Index(
sorted(self.sids.union(_ensure_index(to_add))),
)
self._realign_sids()
def drop_sids(self, to_drop):
"""
Remove sids from the container.
"""
self.sids = pd.Index(
sorted(self.sids.difference(_ensure_index(to_drop))),
)
self._realign_sids()
def _realign_sids(self):
"""
Realign our constituent panels after adding or removing sids.
"""
self.last_known_prior_values = self.last_known_prior_values.reindex(
columns=self.sids,
)
for panel in self.all_panels:
panel.set_minor_axis(self.sids)
def _realign_fields(self):
self.last_known_prior_values = self.last_known_prior_values.reindex(
index=self.prior_values_index,
)
for panel in self.all_panels:
panel.set_items(self.fields)
    @with_environment()
    def create_digest_panels(self,
                             initial_sids,
                             initial_dt,
                             env=None):
        """
        Initialize a RollingPanel for each unique panel frequency being
        stored by this container. Each RollingPanel pre-allocates enough
        storage space to service the highest bar-count of any history call
        that it serves.

        Returns:
            (panels, first_window_starts, first_window_closes), all dicts
            keyed by Frequency.
        """
        # Map from frequency -> first/last minute of the next digest to be
        # rolled for that frequency.
        first_window_starts = {}
        first_window_closes = {}

        # Map from frequency -> digest_panels.
        panels = {}
        for freq, largest_spec in iteritems(self.largest_specs):
            if largest_spec.bar_count == 1:
                # No need to allocate a digest panel; this frequency will
                # only ever use data drawn from self.buffer_panel.
                first_window_starts[freq] = freq.normalize(initial_dt)
                first_window_closes[freq] = freq.window_close(
                    first_window_starts[freq]
                )

                continue

            dt = initial_dt

            rp = self._create_digest_panel(
                dt,
                spec=largest_spec,
                window_starts=first_window_starts,
                window_closes=first_window_closes,
                env=env,
            )

            panels[freq] = rp

        # NOTE(review): initial_sids is unused here; panel sids come from
        # self.sids inside _create_panel.
        return panels, first_window_starts, first_window_closes
    def create_buffer_panel(self, initial_dt, bar_data):
        """
        Initialize a RollingPanel containing enough minutes to service all
        our frequencies.
        """
        max_bars_needed = max(
            freq.max_bars for freq in self.unique_frequencies
        )
        freq = '1m' if self.data_frequency == 'minute' else '1d'
        spec = HistorySpec(
            # _create_panel uses bar_count - 1 as the window, so +1 yields a
            # window of exactly max_bars_needed.
            max_bars_needed + 1, freq, None, None, self.data_frequency,
        )
        rp = self._create_panel(
            initial_dt, spec,
        )
        self.buffer_spec = spec

        if bar_data is not None:
            # We are being constructed during handle_data; seed the buffer
            # with the current bar.
            frame = self.frame_from_bardata(bar_data, initial_dt)
            rp.add_frame(initial_dt, frame)

        return rp
    def convert_columns(self, values):
        """
        If columns have a specific type you want to enforce, overwrite this
        method and return the transformed values.

        The base implementation is an identity hook for subclasses.
        """
        return values
    def digest_bars(self, history_spec, do_ffill):
        """
        Get the last (history_spec.bar_count - 1) bars from self.digest_panel
        for the requested HistorySpec.

        Returns:
            (values, dates): a 2d array of digest values and the matching
            array of bar dates.
        """
        bar_count = history_spec.bar_count
        if bar_count == 1:
            # slicing with [1 - bar_count:] doesn't work when
            # bar_count == 1, so special-casing this.
            res = pd.DataFrame(index=[], columns=self.sids, dtype=float)
            return res.values, res.index

        field = history_spec.field

        # Panel axes are (field, dates, sids). We want just the entries for
        # the requested field, the last (bar_count - 1) data points, and all
        # sids.
        digest_panel = self.digest_panels[history_spec.frequency]
        frame = digest_panel.get_current(field, raw=True)

        if do_ffill:
            # Do forward-filling *before* truncating down to the requested
            # number of bars. This protects us from losing data if an
            # illiquid stock has a gap in its price history.
            filled = ffill_digest_frame_from_prior_values(
                history_spec.frequency,
                history_spec.field,
                frame,
                self.last_known_prior_values,
                raw=True
            )
            # Truncate only after we've forward-filled
            indexer = slice(1 - bar_count, None)
            return filled[indexer], digest_panel.current_dates()[indexer]
        else:
            indexer = slice(1 - bar_count, None)
            return frame[indexer, :], digest_panel.current_dates()[indexer]
    def buffer_panel_minutes(self,
                             buffer_panel,
                             earliest_minute=None,
                             latest_minute=None,
                             raw=False):
        """
        Get the minutes in @buffer_panel between @earliest_minute and
        @latest_minute, inclusive.

        @buffer_panel can be a RollingPanel or a plain Panel. If a
        RollingPanel is supplied, we call `get_current` to extract a Panel
        object.

        If no value is specified for @earliest_minute, use all the minutes
        we have up until @latest minute.

        If no value for @latest_minute is specified, use all values up until
        the latest minute.
        """
        if isinstance(buffer_panel, RollingPanel):
            # get_current handles the start/end slicing (and raw extraction)
            # itself for RollingPanels.
            buffer_panel = buffer_panel.get_current(start=earliest_minute,
                                                    end=latest_minute,
                                                    raw=raw)
            return buffer_panel

        # Using .ix here rather than .loc because loc requires that the keys
        # are actually in the index, whereas .ix returns all the values
        # between earliest_minute and latest_minute, which is what we want.
        return buffer_panel.ix[:, earliest_minute:latest_minute, :]
def frame_from_bardata(self, data, algo_dt):
"""
Create a DataFrame from the given BarData and algo dt.
"""
data = data._data
frame_data = np.empty((len(self.fields), len(self.sids))) * np.nan
for j, sid in enumerate(self.sids):
sid_data = data.get(sid)
if not sid_data:
continue
if algo_dt != sid_data['dt']:
continue
for i, field in enumerate(self.fields):
frame_data[i, j] = sid_data.get(field, np.nan)
return pd.DataFrame(
frame_data,
index=self.fields.copy(),
columns=self.sids.copy(),
)
    def update(self, data, algo_dt):
        """
        Takes the bar at @algo_dt's @data, checks to see if we need to roll
        any new digests, then adds new data to the buffer panel.
        """
        frame = self.frame_from_bardata(data, algo_dt)

        # Snapshot prior values and roll any closed windows *before* the new
        # bar is appended to the buffer.
        self.update_last_known_values()
        self.update_digest_panels(algo_dt, self.buffer_panel)
        self.buffer_panel.add_frame(algo_dt, frame)
    def update_digest_panels(self, algo_dt, buffer_panel, freq_filter=None):
        """
        Check whether @algo_dt is greater than cur_window_close for any of
        our frequencies. If so, roll a digest for that frequency using data
        drawn from @buffer panel and insert it into the appropriate digest
        panels.

        If @freq_filter is specified, only use the given data to update
        frequencies on which the filter returns True.

        This takes `buffer_panel` as an argument rather than using
        self.buffer_panel so that this method can be used to add
        supplemental data from an external source.
        """
        for frequency in filter(freq_filter, self.unique_frequencies):

            # We don't keep a digest panel if we only have a length-1
            # history spec for a given frequency
            digest_panel = self.digest_panels.get(frequency, None)

            # A while loop because a single algo_dt can close more than one
            # window if time has jumped past several window closes.
            while algo_dt > self.cur_window_closes[frequency]:

                earliest_minute = self.cur_window_starts[frequency]
                latest_minute = self.cur_window_closes[frequency]
                minutes_to_process = self.buffer_panel_minutes(
                    buffer_panel,
                    earliest_minute=earliest_minute,
                    latest_minute=latest_minute,
                    raw=True
                )

                if digest_panel is not None:
                    # Create a digest from minutes_to_process and add it to
                    # digest_panel.
                    digest_frame = self.create_new_digest_frame(
                        minutes_to_process,
                        self.fields,
                        self.sids
                    )
                    digest_panel.add_frame(
                        latest_minute,
                        digest_frame,
                        self.fields,
                        self.sids
                    )

                # Update panel start/close for this frequency.
                self.cur_window_starts[frequency] = \
                    frequency.next_window_start(latest_minute)
                self.cur_window_closes[frequency] = \
                    frequency.window_close(self.cur_window_starts[frequency])
    def frame_to_series(self, field, frame, columns=None):
        """
        Convert a frame with a DatetimeIndex and sid columns into a series
        with a sid index, using the aggregator defined by the given field.
        """
        if isinstance(frame, pd.DataFrame):
            columns = frame.columns
            frame = frame.values

        if not len(frame):
            # No rows: volume aggregates to 0, all other fields to NaN.
            return pd.Series(
                data=(0 if field == 'volume' else np.nan),
                index=columns,
            ).values

        if field in ['price', 'close_price']:
            # shortcircuit for full last row
            vals = frame[-1]
            if np.all(~np.isnan(vals)):
                return vals
            # Otherwise forward-fill and take the last (most recent) row.
            return ffill(frame)[-1]
        elif field == 'open_price':
            # Open is the first non-NaN value, so back-fill and take row 0.
            return bfill(frame)[0]
        elif field == 'volume':
            return np.nansum(frame, axis=0)
        elif field == 'high':
            return np.nanmax(frame, axis=0)
        elif field == 'low':
            return np.nanmin(frame, axis=0)
        else:
            raise ValueError("Unknown field {}".format(field))
    def aggregate_ohlcv_panel(self,
                              fields,
                              ohlcv_panel,
                              items=None,
                              minor_axis=None):
        """
        Convert an OHLCV Panel into a DataFrame by aggregating each field's
        frame into a Series.

        NOTE(review): despite the docstring above, the return value is a 2d
        ndarray (len(fields) x len(minor_axis)), not a DataFrame.
        """
        vals = ohlcv_panel
        if isinstance(ohlcv_panel, pd.Panel):
            # A real Panel carries its own labels; raw ndarray input relies
            # on the items/minor_axis arguments instead.
            vals = ohlcv_panel.values
            items = ohlcv_panel.items
            minor_axis = ohlcv_panel.minor_axis

        data = [
            self.frame_to_series(
                field,
                vals[items.get_loc(field)],
                minor_axis
            )
            for field in fields
        ]

        return np.array(data)
    def create_new_digest_frame(self, buffer_minutes, items=None,
                                minor_axis=None):
        """
        Package up minutes in @buffer_minutes into a single digest frame.

        Thin wrapper over aggregate_ohlcv_panel using all of self.fields.
        """
        return self.aggregate_ohlcv_panel(
            self.fields,
            buffer_minutes,
            items=items,
            minor_axis=minor_axis
        )
    def update_last_known_values(self):
        """
        Store the non-NaN values from our oldest frame in each frequency.

        These values are the back-stop used by the ffill_* helpers (see
        digest_bars and get_history) when a window begins with NaNs.
        """
        ffillable = self.ffillable_fields
        if not len(ffillable):
            return

        for frequency in self.unique_frequencies:
            digest_panel = self.digest_panels.get(frequency, None)
            if digest_panel:
                oldest_known_values = digest_panel.oldest_frame(raw=True)
            else:
                # No digest panel for this frequency; fall back to the raw
                # buffer panel.
                oldest_known_values = self.buffer_panel.oldest_frame(raw=True)

            oldest_vals = oldest_known_values
            oldest_columns = self.fields
            for field in ffillable:
                f_idx = oldest_columns.get_loc(field)
                field_vals = oldest_vals[f_idx]
                # isnan would be fast, possible to use?
                non_nan_sids = np.where(pd.notnull(field_vals))
                key = (frequency.freq_str, field)
                key_loc = self.last_known_prior_values.index.get_loc(key)
                # Write straight into the underlying ndarray; see the dtype
                # note where last_known_prior_values is constructed.
                self.last_known_prior_values.values[
                    key_loc, non_nan_sids
                ] = field_vals[non_nan_sids]
    def get_history(self, history_spec, algo_dt):
        """
        Main API used by the algoscript is mapped to this function.

        Selects from the overarching history panel the values for the
        @history_spec at the given @algo_dt.
        """
        field = history_spec.field
        do_ffill = history_spec.ffill

        # Get our stored values from periods prior to the current period.
        digest_frame, index = self.digest_bars(history_spec, do_ffill)

        # Get minutes from our buffer panel to build the last row of the
        # returned frame.
        buffer_panel = self.buffer_panel_minutes(
            self.buffer_panel,
            earliest_minute=self.cur_window_starts[history_spec.frequency],
            raw=True
        )
        buffer_frame = buffer_panel[self.fields.get_loc(field)]

        if do_ffill:
            # Fill gaps in the partial bar using digest history and the last
            # values seen before the digest window.
            buffer_frame = ffill_buffer_from_prior_values(
                history_spec.frequency,
                field,
                buffer_frame,
                digest_frame,
                self.last_known_prior_values,
                raw=True
            )
        # Aggregate the open minutes into a single "current bar" row and
        # append it to the digest history.
        last_period = self.frame_to_series(field, buffer_frame, self.sids)
        return fast_build_history_output(digest_frame,
                                         last_period,
                                         algo_dt,
                                         index=index,
                                         columns=self.sids)
def fast_build_history_output(buffer_frame,
                              last_period,
                              algo_dt,
                              index=None,
                              columns=None):
    """
    Optimized concatenation of DataFrame and Series for use in
    HistoryContainer.get_history.

    Relies on the fact that the input arrays have compatible shapes.
    """
    if isinstance(buffer_frame, pd.DataFrame):
        # A DataFrame input supplies its own labels; override any passed
        # index/columns with them.
        index = buffer_frame.index
        columns = buffer_frame.columns
        buffer_values = buffer_frame.values
    else:
        buffer_values = buffer_frame

    stacked = np.vstack([buffer_values, last_period])
    return pd.DataFrame(
        data=stacked,
        index=fast_append_date_to_index(index, pd.Timestamp(algo_dt)),
        columns=columns,
    )
def fast_append_date_to_index(index, timestamp):
    """
    Return a new UTC DatetimeIndex equal to *index* with *timestamp*
    appended. Works around DatetimeIndex.append, which does not appear to
    work.
    """
    extended = np.hstack([index.values, [timestamp.asm8]])
    return pd.DatetimeIndex(extended, tz='UTC')
| apache-2.0 |
aalmah/pylearn2 | pylearn2/cross_validation/dataset_iterators.py | 29 | 19389 | """
Cross-validation dataset iterators.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import numpy as np
import warnings
try:
from sklearn.cross_validation import (KFold, StratifiedKFold, ShuffleSplit,
StratifiedShuffleSplit)
except ImportError:
warnings.warn("Could not import from sklearn.")
from pylearn2.compat import OrderedDict
from pylearn2.cross_validation.blocks import StackedBlocksCV
from pylearn2.cross_validation.subset_iterators import (
ValidationKFold, StratifiedValidationKFold, ValidationShuffleSplit,
StratifiedValidationShuffleSplit)
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.datasets.transformer_dataset import TransformerDataset
class DatasetCV(object):
    """
    Construct a new DenseDesignMatrix for each subset.

    Parameters
    ----------
    dataset : object
        Full dataset for use in cross validation.
    subset_iterator : iterable
        Iterable that returns (train, test) or (train, valid, test) indices
        for partitioning the dataset during cross-validation.
    preprocessor : Preprocessor or None
        Preprocessor to apply to child datasets.
    fit_preprocessor : bool
        Whether preprocessor can fit parameters when applied to training
        data.
    which_set : str, list or None
        If None, return all subset datasets. If one or more of 'train',
        'valid', or 'test', return only the dataset(s) corresponding to the
        given subset(s).
    return_dict : bool
        Whether to return subset datasets as a dictionary. If True,
        returns a dict with keys 'train', 'valid', and/or 'test' (if
        subset_iterator returns two subsets per partition, 'train' and
        'test' are used, and if subset_iterator returns three subsets per
        partition, 'train', 'valid', and 'test' are used). If False,
        returns a list of datasets matching the subset order given by
        subset_iterator.
    """
    def __init__(self, dataset, subset_iterator, preprocessor=None,
                 fit_preprocessor=False, which_set=None, return_dict=True):
        self.dataset = dataset
        self.subset_iterator = list(subset_iterator)  # allow generator reuse

        # Pull the full dataset into memory as a single sequential batch so
        # that subsets can later be extracted with index arrays.
        dataset_iterator = dataset.iterator(mode='sequential', num_batches=1,
                                            data_specs=dataset.data_specs,
                                            return_tuple=True)
        self._data = dataset_iterator.next()
        self.preprocessor = preprocessor
        self.fit_preprocessor = fit_preprocessor
        self.which_set = which_set
        if which_set is not None:
            which_set = np.atleast_1d(which_set)
            assert len(which_set)
            for label in which_set:
                if label not in ['train', 'valid', 'test']:
                    raise ValueError("Unrecognized subset '{}'".format(label))
            # Replace the raw argument with the validated array form.
            self.which_set = which_set
        self.return_dict = return_dict
    def get_data_subsets(self):
        """
        Partition the dataset according to cross-validation subsets and
        return the raw data in each subset.

        Yields
        ------
        data_subsets : OrderedDict
            Maps each subset label ('train', 'valid', 'test') to an (X, y)
            tuple, where y is None for unlabeled data.
        """
        for subsets in self.subset_iterator:
            labels = None
            if len(subsets) == 3:
                labels = ['train', 'valid', 'test']
            elif len(subsets) == 2:
                labels = ['train', 'test']
            # NOTE(review): a partition of any other length leaves labels
            # as None, and the indexing below will raise.
            # data_subsets is an OrderedDict to maintain label order
            data_subsets = OrderedDict()
            for i, subset in enumerate(subsets):
                subset_data = tuple(data[subset] for data in self._data)
                if len(subset_data) == 2:
                    X, y = subset_data
                else:
                    X, = subset_data
                    y = None
                data_subsets[labels[i]] = (X, y)
            yield data_subsets
    def __iter__(self):
        """
        Create a DenseDesignMatrix for each dataset subset and apply any
        preprocessing to the child datasets.
        """
        for data_subsets in self.get_data_subsets():
            datasets = {}
            for label, data in data_subsets.items():
                X, y = data
                datasets[label] = DenseDesignMatrix(X=X, y=y)

            # preprocessing: (optionally) fit on train, then apply to the
            # remaining subsets without fitting.
            if self.preprocessor is not None:
                self.preprocessor.apply(datasets['train'],
                                        can_fit=self.fit_preprocessor)
                for label, dataset in datasets.items():
                    if label == 'train':
                        continue
                    self.preprocessor.apply(dataset, can_fit=False)

            # which_set: drop any subsets the user did not request.
            if self.which_set is not None:
                for label, dataset in list(datasets.items()):
                    if label not in self.which_set:
                        del datasets[label]
                        del data_subsets[label]
                if not len(datasets):
                    raise ValueError("No matching dataset(s) for " +
                                     "{}".format(self.which_set))

            if not self.return_dict:
                # data_subsets is an OrderedDict to maintain label order
                datasets = list(datasets[label]
                                for label in data_subsets.keys())

                # If there is only one dataset, return it rather than a
                # one-element list.
                if len(datasets) == 1:
                    datasets, = datasets
            yield datasets
class StratifiedDatasetCV(DatasetCV):
    """
    Subclass of DatasetCV for stratified experiments, where
    the relative class proportions of the full dataset are maintained in
    each partition.

    Parameters
    ----------
    dataset : object
        Dataset to use in cross validation.
    subset_iterator : iterable
        Iterable that returns train/test or train/valid/test splits for
        partitioning the dataset during cross-validation.
    preprocessor : Preprocessor or None
        Preprocessor to apply to child datasets.
    fit_preprocessor : bool
        Whether preprocessor can fit parameters when applied to training
        data.
    which_set : str, list or None
        If None, return all subset datasets. If one or more of 'train',
        'valid', or 'test', return only the dataset(s) corresponding to the
        given subset(s).
    return_dict : bool
        Whether to return subset datasets as a dictionary. If True,
        returns a dict with keys 'train', 'valid', and/or 'test' (if
        subset_iterator returns two subsets per partition, 'train' and
        'test' are used, and if subset_iterator returns three subsets per
        partition, 'train', 'valid', and 'test' are used). If False,
        returns a list of datasets matching the subset order given by
        subset_iterator.
    """
    @staticmethod
    def get_y(dataset):
        """
        Stratified cross-validation requires label information for
        examples. This function gets target values for a dataset,
        converting from one-hot encoding to a 1D array as needed.

        Parameters
        ----------
        dataset : object
            Dataset containing target values for examples.
        """
        y = np.asarray(dataset.y)
        if y.ndim > 1:
            # One-hot targets: require strictly binary indicator values,
            # then collapse each row to its class index.
            assert np.array_equal(np.unique(y), [0, 1])
            y = np.argmax(y, axis=1)
        return y
class TransformerDatasetCV(object):
    """
    Cross-validation with dataset transformations. This class returns
    dataset subsets after transforming them with one or more pretrained
    models.

    Parameters
    ----------
    dataset_iterator : DatasetCV
        Cross-validation dataset iterator providing train/test or
        train/valid/test datasets.
    transformers : Model or iterable
        Transformer model(s) to use for transforming datasets.
    """
    def __init__(self, dataset_iterator, transformers):
        self.dataset_iterator = dataset_iterator
        self.transformers = transformers

    def __iter__(self):
        """
        Construct a Transformer dataset for each partition.
        """
        for k, datasets in enumerate(self.dataset_iterator):
            # Select the transformer for this fold: a list gives one model
            # per fold, a StackedBlocksCV selects its k-th fold, and any
            # other value is shared across all folds.
            if isinstance(self.transformers, list):
                transformer = self.transformers[k]
            elif isinstance(self.transformers, StackedBlocksCV):
                transformer = self.transformers.select_fold(k)
            else:
                transformer = self.transformers
            # Wrap each subset (list or dict form) in a TransformerDataset.
            if isinstance(datasets, list):
                for i, dataset in enumerate(datasets):
                    datasets[i] = TransformerDataset(dataset, transformer)
            else:
                for key, dataset in datasets.items():
                    datasets[key] = TransformerDataset(dataset, transformer)
            yield datasets
class DatasetKFold(DatasetCV):
    """
    K-fold cross-validation.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_folds : int
        Number of cross-validation folds.
    shuffle : bool
        Whether to shuffle the dataset before partitioning.
    random_state : int or RandomState
        Random number generator used for shuffling.
    kwargs : dict
        Keyword arguments for DatasetCV.
    """
    def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
                 **kwargs):
        n = dataset.X.shape[0]
        # sklearn's KFold partitions the n example indices.
        cv = KFold(n, n_folds=n_folds, shuffle=shuffle,
                   random_state=random_state)
        super(DatasetKFold, self).__init__(dataset, cv, **kwargs)
class StratifiedDatasetKFold(StratifiedDatasetCV):
    """
    Stratified K-fold cross-validation.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_folds : int
        Number of cross-validation folds.
    shuffle : bool
        Whether to shuffle the dataset before partitioning.
    random_state : int or RandomState
        Random number generator used for shuffling.
    kwargs : dict
        Keyword arguments for DatasetCV.
    """
    def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
                 **kwargs):
        y = self.get_y(dataset)
        try:
            cv = StratifiedKFold(y, n_folds=n_folds, shuffle=shuffle,
                                 random_state=random_state)
        except TypeError:
            # Older sklearn versions do not accept shuffle/random_state.
            assert not shuffle and not random_state, (
                "The 'shuffle' and 'random_state' arguments are not " +
                "supported by this version of sklearn. See "
                "http://scikit-learn.org/stable/developers/index.html" +
                "#git-repo for details on installing the development "
                "version.")
            cv = StratifiedKFold(y, n_folds=n_folds)
        super(StratifiedDatasetKFold, self).__init__(dataset, cv, **kwargs)
class DatasetShuffleSplit(DatasetCV):
    """
    Shuffle-split cross-validation.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_iter : int
        Number of shuffle-split iterations.
    test_size : float, int, or None
        If float, interpreted as the proportion of examples in the test
        set. If int, interpreted as the absolute number of examples in the
        test set. If None, adjusted to the complement of train_size.
    train_size : float, int, or None
        If float, interpreted as the proportion of examples in the training
        set. If int, interpreted as the absolute number of examples in the
        training set. If None, adjusted to the complement of test_size.
    random_state : int or RandomState
        Random number generator used for shuffling.
    kwargs : dict
        Keyword arguments for DatasetCV.
    """
    def __init__(self, dataset, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None, **kwargs):
        n = dataset.X.shape[0]
        cv = ShuffleSplit(n, n_iter=n_iter, test_size=test_size,
                          train_size=train_size, random_state=random_state)
        super(DatasetShuffleSplit, self).__init__(dataset, cv, **kwargs)
class StratifiedDatasetShuffleSplit(StratifiedDatasetCV):
    """
    Stratified shuffle-split cross-validation.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_iter : int
        Number of shuffle-split iterations.
    test_size : float, int, or None
        If float, interpreted as the proportion of examples in the test
        set. If int, interpreted as the absolute number of examples in the
        test set. If None, adjusted to the complement of train_size.
    train_size : float, int, or None
        If float, interpreted as the proportion of examples in the training
        set. If int, interpreted as the absolute number of examples in the
        training set. If None, adjusted to the complement of test_size.
    random_state : int or RandomState
        Random number generator used for shuffling.
    kwargs : dict
        Keyword arguments for DatasetCV.
    """
    def __init__(self, dataset, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None, **kwargs):
        # Class proportions are preserved by stratifying on the targets.
        y = self.get_y(dataset)
        cv = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size,
                                    train_size=train_size,
                                    random_state=random_state)
        super(StratifiedDatasetShuffleSplit, self).__init__(dataset, cv,
                                                            **kwargs)
class DatasetValidationKFold(DatasetCV):
    """
    K-fold cross-validation with train/valid/test subsets.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_folds : int
        Number of cross-validation folds. Must be at least 3.
    shuffle : bool
        Whether to shuffle the data before splitting.
    random_state : int, RandomState, or None
        Pseudorandom number seed or generator to use for shuffling.
    kwargs : dict
        Keyword arguments for DatasetCV.
    """
    def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
                 **kwargs):
        n = dataset.X.shape[0]
        # ValidationKFold yields (train, valid, test) index triples.
        cv = ValidationKFold(n, n_folds, shuffle, random_state)
        super(DatasetValidationKFold, self).__init__(dataset, cv, **kwargs)
class StratifiedDatasetValidationKFold(StratifiedDatasetCV):
    """
    Stratified K-fold cross-validation with train/valid/test subsets.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_folds : int
        Number of cross-validation folds. Must be at least 3.
    shuffle : bool
        Whether to shuffle the data before splitting.
    random_state : int, RandomState, or None
        Pseudorandom number seed or generator to use for shuffling.
    kwargs : dict
        Keyword arguments for DatasetCV.
    """
    def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
                 **kwargs):
        y = self.get_y(dataset)
        cv = StratifiedValidationKFold(y, n_folds, shuffle, random_state)
        super(StratifiedDatasetValidationKFold, self).__init__(dataset, cv,
                                                               **kwargs)
class DatasetValidationShuffleSplit(DatasetCV):
    """
    Shuffle-split cross-validation with train/valid/test subsets.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_iter : int
        Number of shuffle/split iterations.
    test_size : float, int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the entire dataset to include in the test
        split. If int, represents the absolute number of test
        samples. If None, the value is automatically set to the complement
        of train_size + valid_size.
    valid_size : float, int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the entire dataset to include in the validation
        split. If int, represents the absolute number of validation
        samples. If None, the value is automatically set to match
        test_size.
    train_size : float, int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the entire dataset to include in the training
        split. If int, represents the absolute number of training
        samples. If None, the value is automatically set to the complement
        of valid_size + test_size.
    random_state : int, RandomState, or None
        Pseudorandom number seed or generator to use for shuffling.
    kwargs : dict
        Keyword arguments for DatasetCV.
    """
    def __init__(self, dataset, n_iter=10, test_size=0.1, valid_size=None,
                 train_size=None, random_state=None, **kwargs):
        # The number of examples drives the index-based shuffle/split.
        n = dataset.X.shape[0]
        cv = ValidationShuffleSplit(n, n_iter, test_size, valid_size,
                                    train_size, random_state)
        super(DatasetValidationShuffleSplit, self).__init__(dataset, cv,
                                                            **kwargs)
class StratifiedDatasetValidationShuffleSplit(StratifiedDatasetCV):
    """
    Stratified shuffle-split cross-validation with train/valid/test
    subsets.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_iter : int
        Number of shuffle/split iterations.
    test_size : float, int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the entire dataset to include in the test
        split. If int, represents the absolute number of test
        samples. If None, the value is automatically set to the complement
        of train_size + valid_size.
    valid_size : float, int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the entire dataset to include in the validation
        split. If int, represents the absolute number of validation
        samples. If None, the value is automatically set to match
        test_size.
    train_size : float, int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the entire dataset to include in the training
        split. If int, represents the absolute number of training
        samples. If None, the value is automatically set to the complement
        of valid_size + test_size.
    random_state : int, RandomState, or None
        Pseudorandom number seed or generator to use for shuffling.
    kwargs : dict
        Keyword arguments for DatasetCV.
    """
    def __init__(self, dataset, n_iter=10, test_size=0.1, valid_size=None,
                 train_size=None, random_state=None, **kwargs):
        # Stratification labels come from the dataset targets.
        y = self.get_y(dataset)
        cv = StratifiedValidationShuffleSplit(y, n_iter, test_size, valid_size,
                                              train_size, random_state)
        super(StratifiedDatasetValidationShuffleSplit, self).__init__(dataset,
                                                                      cv,
                                                                      **kwargs)
| bsd-3-clause |
diharaw/EmoLib | scripts/preprocess/4_trim.py | 1 | 3138 | from __future__ import division
import pickle
import pandas as pd
import numpy as np
import librosa
import matplotlib.pyplot as plt
import thinkdsp
import thinkplot
import os
import sys
import glob
import shutil
from itertools import compress
from itertools import compress
from collections import Counter
from pydub import AudioSegment
from pydub.utils import db_to_float
from pydub.silence import detect_nonsilent
# Candidate emotion label sets; the LABEL_TYPE CLI argument selects which
# one is used for the folder layout below.
emotions0 = ["neutral", "anger", "fear", "happy", "sadness", "disgust"]
emotions1 = ["neutral", "anger", "fear", "happy", "sadness", "surprise"]
emotions = [emotions0, emotions1]
# Dataset partitions processed by the trimming pass.
sets = ["train", "validate", "test"]
# Trim silence out of every partitioned WAV file and mirror the
# <set>/<emotion> layout under the "3 - TRIMMED" output tree.
#
# usage: trim.py <dataset_folder> <threshold> <min_size> <label_type> <graph>
if len(sys.argv) < 6:
    # The original test only caught zero arguments and crashed with
    # IndexError on partial input; require all five parameters.
    print("usage: trim.py <dataset_folder> <threshold> <min_size> <label_type> <graph>")
    sys.exit()
DATASET_ROOT = sys.argv[1]            # dataset root folder
SILENCE_THRESHOLD = int(sys.argv[2])  # silence threshold magnitude in dBFS (negated below)
MIN_SIZE = int(sys.argv[3])           # minimum non-silent range to keep (milliseconds)
LABEL_TYPE = int(sys.argv[4])         # 0 -> emotions0, 1 -> emotions1
SHOW_GRAPH = int(sys.argv[5])         # 1 to display a waveform plot per file

# Start from a clean output tree.
if os.path.exists("%s/3 - TRIMMED" % DATASET_ROOT):
    shutil.rmtree("%s/3 - TRIMMED" % DATASET_ROOT)

file_count = 0
for set_type in sets:
    os.makedirs("%s/3 - TRIMMED/%s" % (DATASET_ROOT, set_type))
    # Iterate over all labels instead of the hard-coded range(0, 6).
    for index in range(len(emotions[LABEL_TYPE])):
        label = emotions[LABEL_TYPE][index]
        os.makedirs("%s/3 - TRIMMED/%s/%s" % (DATASET_ROOT, set_type, label))
        files = glob.glob("%s/2 - PARTITIONED/%s/%s/*" % (DATASET_ROOT, set_type, label))
        for f in files:
            segment = AudioSegment.from_file(f, format="wav")
            samples = np.array(segment.get_array_of_samples())
            length = len(samples) / segment.frame_rate  # duration in seconds
            print("Processing file : %s" % f)
            print("Length : %ss" % length)
            # Millisecond ranges of audio louder than the threshold.
            ranges = detect_nonsilent(audio_segment=segment, min_silence_len=5,
                                      silence_thresh=-SILENCE_THRESHOLD)
            # Time axis in seconds for the optional plot.
            t = np.arange(len(samples)) / segment.frame_rate
            fig = None
            ax = None
            if SHOW_GRAPH == 1:
                fig, ax = plt.subplots()
                ax.set_title(f)
                # The original sliced with clamped out-of-range indices,
                # which is equivalent to plotting the full signal.
                ax.plot(t, samples)
            trimmed = None
            for range_item in ranges:
                # Convert the millisecond range to seconds for plotting.
                r0 = (range_item[0] / len(segment)) * length
                r1 = (range_item[1] / len(segment)) * length
                range_size = range_item[1] - range_item[0]
                if range_size > MIN_SIZE:
                    # Concatenate every kept range ("is None", not "== None").
                    if trimmed is None:
                        trimmed = segment[range_item[0]:range_item[1]]
                    else:
                        trimmed += segment[range_item[0]:range_item[1]]
                    if SHOW_GRAPH == 1:
                        ax.axvspan(r0, r1, alpha=0.5, color='red')
            if SHOW_GRAPH == 1:
                plt.show()
            if trimmed is None:
                # No range exceeded MIN_SIZE: skip this file instead of
                # crashing on trimmed.export with trimmed still None.
                print("Warning: no audio kept for %s, skipping" % f)
                continue
            trimmed.export("%s/3 - TRIMMED/%s/%s/%s.wav" % (DATASET_ROOT, set_type, label, file_count), format="wav")
            file_count = file_count + 1
| mit |
KNMI/VERCE | verce-hpc-pe/src/test/rtxcorr/rtxcorr3.py | 2 | 28286 | from dispel4py.workflow_graph import WorkflowGraph
from dispel4py.provenance import *
from dispel4py.new.processor import *
import time
import random
import numpy
import traceback
from dispel4py.base import create_iterative_chain, GenericPE, ConsumerPE, IterativePE, SimpleFunctionPE
from dispel4py.new.simple_process import process_and_return
import socket
import json
import ujson
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
import networkx as nx
import os
from copy import deepcopy
from dateutil.parser import parse as parse_date
sns.set(style="white")
class Start(GenericPE):
    """Entry PE: forwards the requested iteration count to every Source."""

    def __init__(self):
        GenericPE.__init__(self)
        self._add_input('iterations')
        self._add_output('output')

    def _process(self, inputs):
        # Forward the iteration payload untouched, recording it in the
        # provenance metadata of the written item.
        if 'iterations' in inputs:
            payload = inputs['iterations']
            self.write('output', payload, metadata={'iterations': payload})
class Source(GenericPE):
    """Emits a stream of uniform random samples at a fixed sampling rate."""

    def __init__(self,sr,index):
        GenericPE.__init__(self)
        self._add_input('iterations')
        self._add_output('output')
        self.sr=sr            # target samples per second
        self.var_index=index  # position of this variable in the correlation matrix
        #self.prov_cluster="myne"
        self.parameters={'sampling_rate':sr}
        #Uncomment this line to associate this PE to the mycluster provenance-cluster
        #self.prov_cluster ='mycluster'
    def _process(self,inputs):
        if 'iterations' in inputs:
            iteration=inputs['iterations'][0]
            #Streams out values at 1/self.sr sampling rate, until iteration>0
            while (iteration>0):
                val=random.uniform(0,100)
                # NOTE(review): under Python 2 1/self.sr is integer division
                # and sleeps 0s for any sr > 1 — confirm intended rate.
                time.sleep(1/self.sr)
                iteration-=1
                self.write('output',(self.name,val),metadata={'val':val,'var_index':self.var_index,'iteration':iteration})
class MaxClique(GenericPE):
    """Thresholds a correlation matrix and emits the resulting graph and
    clique outputs, also rendering the thresholded adjacency graph."""

    def __init__(self,threshold):
        GenericPE.__init__(self)
        self._add_input('matrix',grouping=[2])
        self._add_output('graph')
        self._add_output('clique')
        self.threshold=threshold  # correlations below this value are zeroed
        #self.prov_cluster="myne"
        self.parameters={'threshold':threshold}
        #Uncomment this line to associate this PE to the mycluster provenance-cluster
        #self.prov_cluster ='mycluster'
    def _process(self,inputs):
        # Input item is (matrix, batch_number, producer_name).
        if 'matrix' in inputs:
            matrix=inputs['matrix'][0]
            batch=inputs['matrix'][1]
            # Zero every coefficient below the threshold before building
            # the adjacency graph.
            low_values_indices = matrix < self.threshold # Where values are low
            matrix[low_values_indices] = 0
            #self.log(matrix)
            self.log(batch)
            self.write('graph',matrix,metadata={'matrix':str(matrix),'batch':batch})
            self.write('clique',matrix,metadata={'clique':str(matrix),'batch':batch},ignore_inputs=False)
            # Render (and immediately close) a figure of the thresholded graph.
            G = nx.from_numpy_matrix(matrix)
            plt.figure(batch)
            nx.draw(G)
            fig1 = plt.gcf()
            plt.close(fig1)
            #H = nx.from_numpy_matrix(matrix)
            #plt.figure(2)
            #nx.draw(H)
            #plt.close()
    #Streams out values at 1/self.sr sampling rate, until iteration>0
class CompMatrix(GenericPE):
    """Assembles per-pair correlation coefficients into one correlation
    matrix per batch number and streams each completed matrix."""

    def __init__(self,variables_number):
        GenericPE.__init__(self)
        self._add_output('output')
        self.size=variables_number
        self.parameters={'variables_number':variables_number}
        # batch number -> {'matrix': ndarray, 'ro_count': coefficients seen}
        self.data={}
        #Uncomment this line to associate this PE to the mycluster provenance-cluster
        #self.prov_cluster ='mycluster'self.prov_cluster='mycluster'
    def _process(self,data):
        # Each input item is (ro, batch_number, (i, j), producer_name).
        for x in data:
            if data[x][1] not in self.data:
                #prepares the data to visualise the xcor matrix of a specific batch number.
                self.data[data[x][1]]={}
                self.data[data[x][1]]['matrix']=numpy.identity(self.size)
                self.data[data[x][1]]['ro_count']=0
            # Store the coefficient in the lower triangle of the matrix.
            self.data[data[x][1]]['matrix'][(data[x][2][1],data[x][2][0])]=data[x][0]
            self.update_prov_state('batch_'+str(data[x][1]),self.data[data[x][1]]['matrix'],metadata={'matrix':str(self.data[data[x][1]]['matrix'])},dep=['batch_'+str(data[x][1])],ignore_inputs=False)
            self.data[data[x][1]]['ro_count']+=1
            # Once all size*(size-1)/2 pairs arrived the matrix is complete.
            if self.data[data[x][1]]['ro_count']==(self.size*(self.size-1))/2:
                matrix=self.data[data[x][1]]['matrix']
                d = pd.DataFrame(data=matrix,
                                 columns=range(0,self.size),index=range(0,self.size))
                # Mask the upper triangle so the heatmap shows each pair once.
                mask = numpy.zeros_like(d, dtype=numpy.bool)
                mask[numpy.triu_indices_from(mask)] = True
                # Set up the matplotlib figure
                f, ax = plt.subplots(figsize=(11, 9))
                # Generate a custom diverging colormap
                cmap = sns.diverging_palette(220, 10, as_cmap=True)
                # Draw the heatmap with the mask and correct aspect ratio
                sns.heatmap(d, mask=mask, cmap=cmap, vmax=1,
                            square=True,
                            linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
                #sns.plt.show()
                #self.log(matrix)
                self.write('output',(matrix,data[x][1],self.name),metadata={'matrix':str(d),'batch':str(data[x][1])},dep=['batch_'+str(data[x][1])])
class CorrCoef(GenericPE):
    """Computes Pearson's correlation between two input streams, emitting
    one coefficient per batch of `batch_size` aligned samples."""

    def __init__(self,batch_size,index):
        GenericPE.__init__(self)
        self._add_input('input1',grouping=[0])
        self._add_input('input2',grouping=[0])
        self._add_output('output')
        self.index1=0
        self.index2=0
        self.batch1=[]        # buffered samples from input1
        self.batch2=[]        # buffered samples from input2
        self.size=batch_size
        self.parameters={'batch_size':batch_size}
        self.index=index      # (i, j) pair of source indexes served by this PE
        self.batchnum=1       # sequence number of the next batch to emit
    def _process(self, inputs):
        index=None
        val=None
        # EAFP: each invocation carries either 'input1' or 'input2';
        # the KeyError below routes to the other branch.
        try:
            val = inputs['input1'][1]
            self.batch1.append(val)
            #self.log("Variables= "+str(inputs['input1'][0]))
            #if len(self.batch1)>=self.size:
            # Batch number this sample contributes to.
            # NOTE(review): under Python 2 this is integer division; Python 3
            # would yield a float and change the provenance state keys.
            contributesto=(len(self.batch1)-1)/self.size+self.batchnum
            #Umment to record entities in the Provenance State
            self.update_prov_state('batch1_'+str(contributesto),self.batch1,metadata={'name':'batch1_'+str(contributesto),'batch1':str(self.batch1)}, ignore_inputs=False,dep=['batch1_'+str(contributesto)])
        except KeyError:
            #traceback.print_exc(file=sys.stderr)
            val = inputs['input2'][1]
            self.batch2.append(val)
            #self.log("Variables= "+str(inputs['input2'][0]))
            #if len(self.batch2)>=self.size:
            contributesto=(len(self.batch2)-1)/self.size+self.batchnum
            #Uncomment to record Element in the Provenance State
            self.update_prov_state('batch2_'+str(contributesto),self.batch2,metadata={'name':'batch2_'+str(contributesto),'batch2':str(self.batch2)}, ignore_inputs=False, dep=['batch2_'+str(contributesto)])
            #self.update_prov_state(None,,ignore_dep=False)
        # A full batch is available on both inputs: emit the coefficient.
        if len(self.batch2)>=self.size and len(self.batch1)>=self.size:
            array1=numpy.array(self.batch1[0:self.size])
            array2=numpy.array(self.batch2[0:self.size])
            ro=numpy.corrcoef([array1,array2])
            # stream out the correlation coefficient, the sequence number of the batch and the indexes of the sources.
            #Uncomment to reference entities in the Provenance State
            self.write('output',(ro[0][1],self.batchnum,self.index,self.name),metadata={'batchnum':self.batchnum,'ro':ro[0][1],'array1':str(array1),'array2':str(array2),'source_index':self.index},dep=['batch1_'+str(self.batchnum),'batch2_'+str(self.batchnum)])
            #Uncomment to reference entities in the Data Flow
            #self.write('output',(ro[0][1],self.batchnum,self.index),metadata={'batchnum':self.batchnum,'ro':str(ro[0][1]),'array1':str(array1),'array2':str(array2),'source_index':self.index})
            self.batchnum+=1
            #self.log(self.batchnum)
            # Drop the consumed samples; keep any overflow for the next batch.
            self.batch1=self.batch1[(self.size):len(self.batch1)]
            self.batch2=self.batch2[(self.size):len(self.batch2)]
# Experiment parameters.
# number of projections = iterations/batch_size at speed defined by sampling rate
variables_number=2    # number of random variables (sources) to correlate
sampling_rate=10000   # samples per second produced by each Source
batch_size=20         # samples per correlation batch
iterations=20         # total samples emitted per source
input_data = {"Start": [{"iterations": [iterations]}]}
# Instantiates the Workflow Components
# and generates the graph based on parameters
def createWf():
    """Builds the cross-correlation workflow graph.

    Wiring: Start -> every Source; for each unordered pair of sources one
    CorrCoef PE; all CorrCoef outputs feed CompMatrix, whose matrices feed
    MaxClique.
    """
    graph = WorkflowGraph()
    mat=CompMatrix(variables_number)
    mat.prov_cluster='record2'
    mc = MaxClique(-0.01)
    mc.prov_cluster='record0'
    start=Start()
    start.prov_cluster='record0'
    sources={}
    mc.numprocesses=1
    mat.numprocesses=1
    for i in range(0,variables_number):
        sources[i] = Source(sampling_rate,i)
        sources[i].prov_cluster='record0'
        #'+str(i%variables_number)
        #+str(i%7)
        sources[i].numprocesses=1
        #sources[i].name="Source"+str(i)
    for h in range(0,variables_number):
        graph.connect(start,'output',sources[h],'iterations')
        # One CorrCoef per unordered pair (h, j), h < j.
        for j in range(h+1,variables_number):
            cc=CorrCoef(batch_size,(h,j))
            cc.prov_cluster='record1'
            #+str(h%variables_number)
            mat._add_input('input'+'_'+str(h)+'_'+str(j),grouping=[3])
            graph.connect(sources[h],'output',cc,'input1')
            graph.connect(sources[j],'output',cc,'input2')
            graph.connect(cc,'output',mat,'input'+'_'+str(h)+'_'+str(j))
            cc.numprocesses=1
    graph.connect(mat,'output',mc,'matrix')
    return graph
#from dispel4py.visualisation import display
#display(graph)
print ("Preparing for: "+str(iterations/batch_size)+" projections" )
# Provenance storage configuration: service endpoint, local path, bulk size.
#Store via sensors
ProvenanceRecorder.REPOS_URL='http://127.0.0.1:8082/workflow/insert'
#Store via service
ProvenancePE.REPOS_URL='http://127.0.0.1:8082/workflow/insert'
#Store to local path
ProvenancePE.PROV_PATH=os.environ['PROV_PATH']
#Size of the provenance bulk before storage
ProvenancePE.BULK_SIZE=1
#ProvenancePE.REPOS_URL='http://climate4impact.eu/prov/workflow/insert'
class ProvenanceSpecs(ProvenancePE):
    """Provenance type that records each streamed item verbatim."""

    def __init__(self):
        ProvenancePE.__init__(self)
        self.streammeta = []
        self.count = 1

    def extractItemMetadata(self, data, port='output'):
        # Wrap the raw item so it appears under a fixed key in the trace.
        metadata = {'this': data}
        return metadata
class ProvenanceOnWriteOnly(ProvenancePE):
    """Provenance type that suppresses lineage capture for state updates,
    so provenance is only produced when data is written to an output."""

    def __init__(self):
        ProvenancePE.__init__(self)
        self.streammeta=[]
        self.count=1

    def apply_state_reset_policy(self,event,value):
        # Disable provenance generation for 'state' events, then delegate
        # to the default reset policy.
        if (event=='state'):
            #self.log(event)
            self.provon=False
        super(ProvenanceOnWriteOnly,self).apply_state_reset_policy(event,value)
        #self.provon=False
class ProvenanceRecorderToService(ProvenanceRecorder):
    """Recorder that POSTs every provenance payload to the remote
    provenance service configured in ProvenanceRecorder.REPOS_URL.

    Note: uses Python 2 httplib/urllib APIs.
    """

    def __init__(self, name='ProvenanceRecorderToService', toW3C=False):
        ProvenanceRecorder.__init__(self)
        self.name = name
        self.numprocesses=2
        self.convertToW3C = toW3C

    def _postprocess(self):
        self.connection.close()

    def _preprocess(self):
        # One HTTP connection per worker, opened before processing starts.
        self.provurl = urlparse(ProvenanceRecorder.REPOS_URL)
        self.connection = httplib.HTTPConnection(
            self.provurl.netloc)

    def sendToService(self,prov):
        # Form-encode the JSON trace and POST it to the service endpoint.
        params = urllib.urlencode({'prov': ujson.dumps(prov)})
        headers = {
            "Content-type": "application/x-www-form-urlencoded",
            "Accept": "application/json"}
        self.connection.request(
            "POST",
            self.provurl.path,
            params,
            headers)
        response = self.connection.getresponse()
        self.log("Postprocress: " +
                 str((response.status, response.reason, response)))
        self.connection.close()

    def process(self, inputs):
        try:
            for x in inputs:
                prov = inputs[x]
                # Unwrap the payload from its possible envelopes.
                if "_d4p" in prov:
                    prov = prov["_d4p"]
                elif "provenance" in prov:
                    prov = prov["provenance"]
                #self.log(prov)
                self.sendToService(prov)
        except:
            self.log(traceback.format_exc())
class ProvenanceRecorderToFile(ProvenanceRecorder):
    """Recorder that dumps each received provenance payload to a uniquely
    named JSON file under the PROV_PATH directory.

    Parameters
    ----------
    name : str
        Display name of this recorder PE.
    toW3C : bool
        Whether traces should be converted to W3C-PROV form.
    """

    def __init__(self, name='ProvenanceRecorderToFile', toW3C=False):
        ProvenanceRecorder.__init__(self)
        self.name = name
        self.numprocesses = 3
        self.convertToW3C = toW3C

    def process(self, inputs):
        try:
            for x in inputs:
                prov = inputs[x]
                # Unwrap the payload: it may arrive under '_d4p' or
                # 'provenance' depending on the producing component.
                if "_d4p" in prov:
                    prov = prov["_d4p"]
                elif "provenance" in prov:
                    prov = prov["provenance"]
                # "w" replaces the invalid "wr" open mode, and the
                # try/finally guarantees the handle is closed even when
                # serialisation fails (the original leaked it).
                filep = open(
                    os.environ['PROV_PATH'] + "/bulk_" + getUniqueId(),
                    "w")
                try:
                    ujson.dump(prov, filep)
                finally:
                    filep.close()
        except Exception:
            # Log (instead of silently dropping) any failure; bare except
            # replaced so KeyboardInterrupt/SystemExit propagate.
            self.log(traceback.format_exc())
class ProvenanceSummaryToService(ProvenanceRecorderToService):
    """Recorder that folds many provenance traces into a single summary
    document (time span, parameters, derivations, first/last stream
    contents) and pushes the summary to the provenance service."""

    def __init__(self, name='ProvenanceSummaryToService', toW3C=False):
        ProvenanceRecorderToService.__init__(self)
        self.name = name
        #self.numprocesses=3
        self.convertToW3C = toW3C
        self.doc_count = 0
        # Aggregated summary; content[0]/content[1] hold the contents of the
        # earliest and latest streams respectively.
        self.document={}
        self.streamsstart=[]
        self.streamsend=[]
        self.document.update({'streams':[{'content':[{},{}]}]})
        self.document.update({'startTime':None})
        self.document.update({'endTime':None})
        self.document.update({'derivationIds':[]})
        self.document.update({'parameters':[]})
        self.contente=[]
        self.contents=[]
        self.derivationIndex={}
        self.content=[]
        self.locationss=[]
        self.locationse=[]
        self.update=False

    def postprocess(self):
        # Push any pending summary at shutdown.
        if self.update>0:
            self.sendToService(self.document)

    def process(self, inputs):
        try:
            out = None
            for x in inputs:
                prov = inputs[x]
                # Unwrap the provenance payload from its possible envelopes;
                # '_d4p' payloads are forwarded as-is without summarising.
                if isinstance(prov, list) and "data" in prov[0]:
                    prov = prov[0]["data"]
                elif "_d4p" in prov:
                    prov = prov["_d4p"]
                    #self.log(x)
                    self.sendToService(prov)
                    return None
                elif "provenance" in prov:
                    prov = prov["provenance"]
                if isinstance(prov, list):
                    for x in prov:
                        self.doc_count+=1
                        #self.log(x)
                        # Copy scalar fields once; list-valued fields are
                        # aggregated separately below.
                        for key in x:
                            if isinstance(x[key], list):
                                continue
                            if self.doc_count==1 and (key!='startTime') and (key!='endTime'):
                                self.document.update({key:x[key]})
                        self.document.update({'_id':x['prov_cluster']+"_"+x['runId']})
                        self.document.update({'instanceId':x['prov_cluster']+"_"+x['runId']})
                        #
                        # Widen the summary's [startTime, endTime] window and
                        # remember the streams seen at each extreme.
                        if (self.document['startTime'] == None) or parse_date(self.document['startTime']) > parse_date(x['startTime']):
                            #self.log("Adj time to: "+str(x['endTime']))
                            self.document.update({'startTime':x['startTime']})
                            self.streamsstart=x['streams']
                        elif (self.document['endTime'] == None) or parse_date(self.document['endTime']) < parse_date(x['endTime']):
                            self.document.update({'endTime':x['endTime']})
                            self.streamsend=x['streams']
                        self.document.update(x['parameters'])
                        # Record one derivation per upstream prov_cluster.
                        for d in x['derivationIds']:
                            if d['prov_cluster'] not in self.derivationIndex:
                                derivation = {'DerivedFromDatasetID':
                                              'Data_'+d['prov_cluster']+"_"+self.document['runId']
                                              }
                                self.derivationIndex.update({d['prov_cluster']:derivation})
                # Collect locations and contents of the boundary streams.
                for d in self.streamsstart:
                    if 'location' in d and d['location']!='':
                        self.locationss.append(d['location'])
                    for c in d['content']:
                        self.contents.append(c)
                for d in self.streamsend:
                    if 'location' in d and d['location']!='':
                        self.locationse.append(d['location'])
                    for c in d['content']:
                        self.contente.append(c)
                if len(self.contents)>0:
                    self.update=True
                    self.document['streams'][0]['content'][0]=self.contents
                    self.document['streams'][0].update({'id':'Data_'+self.document['prov_cluster']+"_"+self.document['runId'],'location':self.locationss})
                if len(self.contente)>0:
                    self.update=True
                    self.document['streams'][0]['content'][1]=self.contente
                    self.document['streams'][0].update({'id':'Data_'+self.document['prov_cluster']+"_"+self.document['runId'],'location':self.locationse})
                # Flatten first/last contents into a single list.
                self.document['streams'][0]['content']=self.document['streams'][0]['content'][0]+self.document['streams'][0]['content'][1]
                for x in self.derivationIndex:
                    self.document['derivationIds'].append(self.derivationIndex[x])
                if self.update:
                    #Self.log(self.document)
                    # del self.document['streamsstart']
                    # del self.document['streamsend']
                    self.sendToService(self.document)
                    self.update=False
                    self.contente=[]
                    self.contents=[]
                    #for key in self.document:
                    # del key
                    #self.document.update({'streamsstart':[]})
                    #self.document.update({'streamsend':[]})
                    # self.document.update({'startTime':None})
                    # self.document.update({'endTime':None})
                    # self.document.update({'derivationIds':[]})
        except:
            self.log(traceback.format_exc())
class ProvenanceTimedSensorToService(ProvenanceRecorderToService):
    """Provenance sensor that aggregates incoming traces and pushes a
    summary document to the provenance service at most once every
    INTERVAL_S seconds.

    NOTE(review): __init__ initialises self.document['streams'] to an empty
    list, yet process() indexes self.document['streams'][0]; unless an
    entry is added elsewhere this raises IndexError, which the except
    below only logs — confirm against the dispel4py provenance API.
    NOTE(review): the default `name` value reuses the summary recorder's
    name; kept for backward compatibility.
    """

    INTERVAL_S=4   # minimum seconds between two pushes to the service
    WINDOW_S=10

    def __init__(self, name='ProvenanceSummaryToService', toW3C=False):
        ProvenanceRecorderToService.__init__(self)
        self.name = name
        #self.numprocesses=3
        self.convertToW3C = toW3C
        self.doc_count = 0
        # Aggregated summary document sent to the service.
        self.document={}
        self.streamsstart=[]
        self.streamsend=[]
        self.document.update({'streams':[]})
        self.document.update({'startTime':None})
        self.document.update({'endTime':None})
        self.document.update({'derivationIds':[]})
        self.document.update({'parameters':[]})
        self.contente=[]
        self.contents=[]
        self.derivationIndex={}
        self.content=[]
        self.locationss=[]
        self.locationse=[]
        self.current=time.time()
        self.last=time.time()
        # Force an immediate push on the first processed input (the
        # original assigned this flag twice).
        self.update=True

    def postprocess(self):
        # Flush the pending summary at shutdown.
        if self.update>0:
            self.sendToService(self.document)

    def process(self, inputs):
        try:
            for x in inputs:
                prov = inputs[x]
                # Unwrap the provenance payload; '_d4p' payloads are
                # forwarded directly without summarising.
                if isinstance(prov, list) and "data" in prov[0]:
                    prov = prov[0]["data"]
                elif "_d4p" in prov:
                    prov = prov["_d4p"]
                    self.sendToService(prov)
                    return None
                elif "provenance" in prov:
                    prov = prov["provenance"]
                self.current=time.time()
                # Only aggregate and push when forced or the interval elapsed.
                if self.update or (self.current-self.last)>ProvenanceTimedSensorToService.INTERVAL_S:
                    # print() call (was a Python 2 print statement),
                    # consistent with the function-style prints elsewhere
                    # in this module.
                    print(self.current-self.last)
                    self.last=time.time()
                    if isinstance(prov, list):
                        for x in prov:
                            self.doc_count+=1
                            # Copy scalar fields; list-valued fields are
                            # aggregated separately.
                            for key in x:
                                if isinstance(x[key], list):
                                    continue
                                else:
                                    self.document.update({key:x[key]})
                            self.document.update({'_id':x['prov_cluster']+"_"+x['runId']})
                            self.document['streams'][0]['content']=self.document['streams'][0]['content']+x['streams']
                            # One derivation per upstream prov_cluster.
                            for d in x['derivationIds']:
                                if d['prov_cluster'] not in self.derivationIndex:
                                    derivation = {'DerivedFromDatasetID':
                                                  'Data_'+d['prov_cluster']+"_"+self.document['runId']
                                                  }
                                    self.derivationIndex.update({d['prov_cluster']:derivation})
                    for x in self.derivationIndex:
                        self.document['derivationIds'].append(self.derivationIndex[x])
                    # Re-label every stream with a unique id for this run.
                    index=0
                    for x in self.document['streams']:
                        x['id']='Data_'+self.document['prov_cluster']+"_"+self.document['runId']+'_'+str(index)
                        index+=1
                    self.current = time.time()
                    self.sendToService(self.document)
                    self.update=False
        except Exception:
            self.log(traceback.format_exc())
class ProvenanceRecorderToFileBulk(ProvenanceRecorder):
    """Recorder that buffers provenance traces and writes them to disk in
    bulk: a uniquely named JSON file under PROV_PATH every _FLUSH_SIZE
    items, plus a final flush at shutdown.
    """

    # Number of buffered documents that triggers a flush to disk.
    _FLUSH_SIZE = 100

    def __init__(self, name='ProvenanceRecorderToFileBulk', toW3C=False):
        ProvenanceRecorder.__init__(self)
        self.name = name
        self.numprocesses = 3
        self.convertToW3C = toW3C
        self.bulk = []

    def _flush(self):
        # "w" replaces the invalid "wr" open mode; try/finally guarantees
        # the handle is closed even if serialisation fails.
        filep = open(
            os.environ['PROV_PATH'] + "/bulk_" + getUniqueId(),
            "w")
        try:
            ujson.dump(self.bulk, filep)
        finally:
            filep.close()
        # Clear in place so any shared reference sees the emptied buffer.
        self.bulk[:] = []

    def postprocess(self):
        # Final flush of whatever is left in the buffer.
        try:
            if self.bulk:
                self._flush()
        except Exception:
            self.log(traceback.format_exc())

    def process(self, inputs):
        try:
            for x in inputs:
                prov = inputs[x]
                # Unwrap the payload from its possible envelopes.
                if isinstance(prov, list) and "data" in prov[0]:
                    prov = prov[0]["data"]
                elif "_d4p" in prov:
                    prov = prov["_d4p"]
                self.bulk.append(prov)
                # ">=" instead of "==" so the buffer can never silently
                # grow past the flush threshold.
                if len(self.bulk) >= self._FLUSH_SIZE:
                    self._flush()
        except Exception:
            self.log(traceback.format_exc())
def createGraphWithProv():
    """Builds the workflow graph and (optionally) activates provenance.

    The commented-out profile_prov_run(...) calls document the alternative
    storage back-ends (file, service, sensors, feedback loop) that can be
    enabled; as written, no provenance profiling is applied.
    """
    graph=createWf()
    #Location of the remote repository for runtime updates of the lineage traces. Shared among ProvenanceRecorder subtypes
    # Ranomdly generated unique identifier for the current run
    rid=os.environ['RUN_ID']
    # Finally, provenance enhanced graph is prepared:
    ##Initialise provenance storage in files:
    #profile_prov_run(graph,None,provImpClass=(ProvenancePE,),componentsType={'CorrCoef':(ProvenancePE,)},username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='file')
    # skip_rules={'CorrCoef':{'ro':{'$lt':0}}})
    #Initialise provenance storage to service:
    #profile_prov_run(graph,None,provImpClass=(ProvenancePE,),username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='service')
    #skip_rules={'CorrCoef':{'ro':{'$lt':0}}})
    #clustersRecorders={'record0':ProvenanceRecorderToFileBulk,'record1':ProvenanceRecorderToFileBulk,'record2':ProvenanceRecorderToFileBulk,'record6':ProvenanceRecorderToFileBulk,'record3':ProvenanceRecorderToFileBulk,'record4':ProvenanceRecorderToFileBulk,'record5':ProvenanceRecorderToFileBulk}
    #Initialise provenance storage to sensors and Files:
    #profile_prov_run(graph,ProvenanceRecorderToFile,provImpClass=(ProvenancePE,),username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='sensor')
    #clustersRecorders=clustersRecorders)
    #Initialise provenance storage to sensors and service:
    #profile_prov_run(graph,ProvenanceRecorderToService,provImpClass=(ProvenancePE,),username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='sensor')
    #Summary view on each component
    #profile_prov_run(graph,ProvenanceTimedSensorToService,provImpClass=(ProvenancePE,),username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='sensor')
    #Configuring provenance feedback-loop
    #profile_prov_run(graph,ProvenanceTimedSensorToService,provImpClass=(ProvenancePE,),username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='sensor',feedbackPEs=['Source','MaxClique'])
    #Initialise provenance storage end associate a Provenance type with specific components:
    #profile_prov_run(graph,provImpClass=ProvenancePE,componentsType={'Source':(ProvenanceStock,)},username='aspinuso',runId=rid,w3c_prov=False,description="provState",workflowName="test_rdwd",workflowId="xx",save_mode='service')
    #
    return graph
#.. and visualised..
import argparse
from dispel4py.new.multi_process import process
# Build a minimal argparse-style namespace for the multiprocess executor.
# NOTE(review): this binds the Namespace *class* (not an instance) and then
# sets attributes on the class object; it works, but an instance
# (argparse.Namespace()) was probably intended — confirm.
args = argparse.Namespace
args.num = 424
args.simple = False
num=1
#print("PROV TO SENSOR")
print("PROV TO FILE")
#print("NO PROV")
graph = createGraphWithProv()
#graph = createWf()
#global gtime
#gtime = time.time()
from dispel4py.visualisation import *
display(graph) | mit |
Robak23/forex_deep | Final/LSTM_model.py | 1 | 2901 |
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import functions as f
import os
import sys
nb_dir = os.getcwd()
if nb_dir not in sys.path:
sys.path.append(nb_dir)
# In[2]:
# Load daily EUR/USD quotes; the bundled percentage-change column is
# dropped immediately after renaming.
data = pd.read_csv("data/EURUSD_daily.csv", index_col='Date')
data.index = pd.to_datetime(data.index)
data.columns = ['close', 'open', 'high', 'low', 'pct']
data.drop('pct', axis=1, inplace=True)
data.sort_index(inplace=True)
# In[3]:
# add log returns and moving average
data['ret'] = np.log(data.close) - np.log(data.close.shift(1))
data['ma5'] = data.ret.rolling(5).mean()
data['ma20'] = data.ret.rolling(20).mean()
# In[4]:
# remove unstationarity from data (first differences of the price columns)
data.close = data.close - data.close.shift(1)
data.open = data.open - data.open.shift(1)
data.high = data.high - data.high.shift(1)
data.low = data.low - data.low.shift(1)
# In[5]:
data.dropna(inplace=True)
# In[6]:
# NOTE(review): normalisation is applied to the full dataset *before* the
# train/test split below; if functions.normalize uses global statistics
# this leaks future information into training — verify functions.py.
for col in data.columns:
    data[col] = f.normalize(data[col])
# In[7]:
# Everything before 2015 is training data; the rest is the test set.
split = pd.Timestamp('01-01-2015')
# In[8]:
train = data.loc[:split,]
test = data.loc[split:,]
# In[9]:
for col in data.columns:
    train.loc[:,col], test.loc[:,col] = f.scale(train.loc[:,col], test.loc[:,col])
# In[34]:
# Prediction target: next day's 5-day moving average of log returns.
x_train = train[:-1]
y_train = train.ma5.shift(-1)
y_train.dropna(inplace=True)
x_test = test[:-1]
y_test = test.ma5.shift(-1)
y_test.dropna(inplace=True)
# In[35]:
y_test
# In[36]:
x_test
# ### Training model
# In[37]:
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
import keras.backend as K
from keras.callbacks import EarlyStopping
# In[38]:
# Training hyperparameters.
batch_size = 32
epochs = 1000
validation_split = 0.05
# In[39]:
x_train_np = x_train.values
y_train_np = y_train.values
x_test_np = x_test.values
y_test_np = y_test.values
# In[40]:
# Reshape to (samples, timesteps=1, features) as expected by the LSTM layer.
x_train_t = x_train_np.reshape(x_train.shape[0], 1, x_train.shape[1])
x_test_t = x_test_np.reshape(x_test.shape[0], 1, x_test.shape[1])
# In[41]:
# Stop once the training loss plateaus for 2 epochs.
early_stop = EarlyStopping(monitor='loss', patience=2, verbose=1)
# In[42]:
# Two stacked LSTM layers with dropout, regressing a single tanh output;
# trained with mean absolute error.
K.clear_session()
model = Sequential()
model.add(LSTM(100, input_shape= (x_train_t.shape[1], x_train_t.shape[2]), activation='tanh', return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(250, activation='tanh'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='tanh'))
model.compile(loss='mae', optimizer='adam', metrics=['accuracy', 'mae'])
# In[43]:
model.summary()
# In[44]:
history = model.fit(
    x_train_t,
    y_train,
    epochs=epochs,
    batch_size=batch_size,
    verbose=1,
    callbacks=[early_stop],
    validation_split=validation_split)
# In[45]:
# Collect predictions next to the ground truth for visual comparison.
eval_df = x_test
# In[46]:
eval_df['pred'] = model.predict(x_test_t, batch_size=batch_size)
# In[47]:
eval_df['real'] = y_test
# In[48]:
eval_df.head()
# In[50]:
eval_df.loc[:,['real','pred']].plot(figsize=(16,9))
plt.show() | gpl-3.0 |
china-quant/backtrader | backtrader/plot/plot.py | 1 | 24966 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import bisect
import collections
try:
from collections import OrderedDict
except ImportError:
from ..utils.ordereddict import OrderedDict
import math
import six
from six.moves import xrange
import matplotlib.dates as mdates
import matplotlib.font_manager as mfontmgr
import matplotlib.legend as mlegend
import matplotlib.pyplot as mpyplot
import matplotlib.ticker as mticker
from .. import AutoInfoClass, MetaParams, TimeFrame
from .finance import plot_candlestick, plot_ohlc, plot_volume, plot_lineonclose
from .formatters import (MyVolFormatter, MyDateFormatter, getlocator)
from .scheme import PlotScheme
from .utils import tag_box_style
from .multicursor import MultiCursor
class PInfo(object):
    """Mutable plotting state shared across all figures of one plot run."""

    def __init__(self, sch):
        self.sch = sch                  # active PlotScheme
        self.nrows = 0
        self.row = 0
        self.clock = None
        self.x = None
        self.xlen = 0
        self.sharex = None
        self.figs = list()              # all figures created so far
        self.cursors = list()           # MultiCursor instances (kept alive)
        self.daxis = OrderedDict()      # data -> axis, current figure only
        self.ldaxis = list()            # daxis dicts of all figures
        self.zorder = dict()            # axis -> current z-order value
        self.coloridx = collections.defaultdict(lambda: -1)  # axis -> color index
        self.prop = mfontmgr.FontProperties(size=self.sch.subtxtsize)

    def newfig(self, numfig):
        # Start a fresh figure and reset the per-figure bookkeeping.
        fig = mpyplot.figure(numfig)
        self.figs.append(fig)
        self.daxis = OrderedDict()
        self.ldaxis.append(self.daxis)
        self.row = 0
        self.sharex = None
        return fig

    def nextcolor(self, ax):
        # Advance the per-axis color cursor and return the new index.
        self.coloridx[ax] += 1
        return self.coloridx[ax]

    def color(self, ax):
        # Scheme color for the axis' current color index.
        return self.sch.color(self.coloridx[ax])

    def zordernext(self, ax):
        # Next z-order for the axis: slightly below (x0.9999) or above
        # (x1.0001) the current one, depending on the scheme's zdown flag.
        z = self.zorder[ax]
        if self.sch.zdown:
            return z * 0.9999
        return z * 1.0001

    def zordercur(self, ax):
        # Current z-order registered for the axis.
        return self.zorder[ax]
class Plot(six.with_metaclass(MetaParams, object)):
params = (('scheme', PlotScheme()),)
    def __init__(self, **kwargs):
        # Each keyword argument overrides the matching attribute of the
        # plotting scheme (colors, sizes, styles, ...).
        for pname, pvalue in kwargs.items():
            setattr(self.p.scheme, pname, pvalue)
def drawtag(self, ax, x, y, facecolor, edgecolor, alpha=0.9, **kwargs):
    """Draw a small boxed tag showing value *y* (2 decimals) at (x, y)."""
    box = dict(boxstyle=tag_box_style,
               facecolor=facecolor,
               edgecolor=edgecolor,
               alpha=alpha)
    # 3.0 is the minimum default for text
    ax.text(x, y, '%.2f' % y, va='center', ha='left',
            fontsize=self.pinf.sch.subtxtsize,
            bbox=box,
            zorder=self.pinf.zorder[ax] + 3.0,
            **kwargs)
def plot(self, strategy, numfigs=1):
    """Plot *strategy* over ``numfigs`` figures.

    Splits the strategy length into ``numfigs`` contiguous ranges, then
    for each figure plots: observers on top, then per data the
    above-indicators, the data itself, and the below-indicators.
    """
    if not strategy.datas:
        return  # nothing to plot

    self.pinf = PInfo(self.p.scheme)
    self.sortdataindicators(strategy)
    self.calcrows(strategy)

    # split the total length into numfigs (almost) equal stints
    slen = len(strategy)
    d, m = divmod(slen, numfigs)
    pranges = list()
    for i in xrange(numfigs):
        a = d * i
        if i == (numfigs - 1):
            d += m  # add remainder to last stint
        b = a + d
        pranges.append([a, b, d])

    for numfig in xrange(numfigs):
        # prepare a figure
        fig = self.pinf.newfig(numfig)

        self.pinf.pstart, self.pinf.pend, self.pinf.psize = pranges[numfig]
        self.pinf.xstart = self.pinf.pstart
        self.pinf.xend = self.pinf.pend

        self.pinf.clock = strategy._clock
        self.pinf.xreal = strategy._clock.datetime.plot(
            self.pinf.pstart, self.pinf.psize)
        self.pinf.xlen = len(self.pinf.xreal)
        self.pinf.x = list(xrange(self.pinf.xlen))

        # Do the plotting
        # Things that go always at the top (observers)
        for ptop in self.dplotstop:
            self.plotind(ptop, subinds=self.dplotsover[ptop])

        # Create the rest on a per data basis
        for data in strategy.datas:
            for ind in self.dplotsup[data]:
                self.plotind(
                    ind,
                    subinds=self.dplotsover[ind],
                    upinds=self.dplotsup[ind],
                    downinds=self.dplotsdown[ind])

            self.plotdata(data, self.dplotsover[data])

            for ind in self.dplotsdown[data]:
                self.plotind(
                    ind,
                    subinds=self.dplotsover[ind],
                    upinds=self.dplotsup[ind],
                    downinds=self.dplotsdown[ind])

        # one synchronized cursor over all axes of the figure
        cursor = MultiCursor(
            fig.canvas, list(self.pinf.daxis.values()),
            useblit=True,
            horizOn=True, vertOn=True,
            horizMulti=False, vertMulti=True,
            horizShared=True, vertShared=False,
            color='black', lw=1, ls=':')
        self.pinf.cursors.append(cursor)

        lastax = list(self.pinf.daxis.values())[-1]

        # Date formatting for the x axis - only the last one needs it
        # NOTE(review): the ``if False`` branch is dead code kept by the
        # original author as an alternative locator strategy
        if False:
            locator = mticker.AutoLocator()
            lastax.xaxis.set_major_locator(locator)
            # lastax.xaxis.set_major_formatter(MyDateFormatter(self.pinf.xreal))
            formatter = mdates.IndexDateFormatter(self.pinf.xreal,
                                                  fmt='%Y-%m-%d')
            lastax.xaxis.set_major_formatter(formatter)
        else:
            self.setlocators(strategy._clock)

        # Put the subplots as indicated by hspace
        fig.subplots_adjust(hspace=self.pinf.sch.plotdist,
                            top=0.98, left=0.05, bottom=0.05, right=0.95)

        # Applying fig.autofmt_xdate if the data axis is the last one
        # breaks the presentation of the date labels. why?
        # Applying the manual rotation with setp cures the problem
        # but the labels from all axis but the last have to be hidden
        if False:
            fig.autofmt_xdate(bottom=0.25, rotation=0)
        elif True:
            for ax in self.pinf.daxis.values():
                mpyplot.setp(ax.get_xticklabels(), visible=False)
                # ax.autoscale_view(tight=True)
            mpyplot.setp(lastax.get_xticklabels(),
                         visible=True,
                         rotation=self.pinf.sch.tickrotation)

        # Things must be tight along the x axis (to fill both ends)
        axtight = 'x' if not self.pinf.sch.ytight else 'both'
        mpyplot.autoscale(enable=True, axis=axtight, tight=True)
def setlocators(self, data):
    """Install date locators/formatters on the x axis.

    The bottom-most axis gets major/minor tick formatters chosen by the
    timeframe of *data*; every axis gets a ``fmt_xdata`` formatter so the
    interactive cursor readout shows a full date.
    """
    ax = list(self.pinf.daxis.values())[-1]

    tframe = getattr(data, '_timeframe', TimeFrame.Days)

    if tframe == TimeFrame.Years:
        fmtmajor = '%Y'
        fmtminor = '%Y'
        fmtdata = '%Y'
    elif tframe == TimeFrame.Months:
        fmtmajor = '%Y'
        fmtminor = '%b'
        fmtdata = '%b'
    elif tframe == TimeFrame.Weeks:
        fmtmajor = '%b'
        fmtminor = '%d'
        fmtdata = '%d'
    elif tframe == TimeFrame.Days:
        fmtmajor = '%b'
        fmtminor = '%d'
        fmtdata = '%Y-%m-%d'
    elif tframe == TimeFrame.Minutes:
        fmtmajor = '%d %b'
        fmtminor = '%H:%M'
        fmtdata = '%Y-%m-%d %H:%M'
    else:
        # FIX: previously any other timeframe (e.g. sub-minute data)
        # left fmtmajor/fmtminor/fmtdata unbound and raised
        # UnboundLocalError below. Default to the most granular format.
        fmtmajor = '%d %b'
        fmtminor = '%H:%M:%S'
        fmtdata = '%Y-%m-%d %H:%M:%S'

    fordata = mdates.IndexDateFormatter(self.pinf.xreal, fmt=fmtdata)
    for dax in self.pinf.daxis.values():
        dax.fmt_xdata = fordata  # cursor readout formatter

    locmajor = mticker.AutoLocator()
    locminor = mticker.AutoMinorLocator()
    ax.xaxis.set_minor_locator(locminor)
    ax.xaxis.set_major_locator(locmajor)

    formajor = mdates.IndexDateFormatter(self.pinf.xreal, fmt=fmtmajor)
    forminor = mdates.IndexDateFormatter(self.pinf.xreal, fmt=fmtminor)
    ax.xaxis.set_minor_formatter(forminor)
    ax.xaxis.set_major_formatter(formajor)
def calcrows(self, strategy):
# Calculate the total number of rows
rowsmajor = self.pinf.sch.rowsmajor
rowsminor = self.pinf.sch.rowsminor
nrows = 0
# Datas and volumes
nrows += len(strategy.datas) * rowsmajor
if self.pinf.sch.volume and not self.pinf.sch.voloverlay:
nrows += len(strategy.datas) * rowsminor
# top indicators/observers
nrows += len(self.dplotstop) * rowsminor
# indicators above datas
nrows += sum(len(v) for v in self.dplotsup.values())
nrows += sum(len(v) for v in self.dplotsdown.values())
self.pinf.nrows = nrows
def newaxis(self, obj, rowspan):
    """Create a subplot spanning *rowspan* grid rows, register it for
    *obj* and return it. The first axis created anchors ``sharex``."""
    grid = (self.pinf.nrows, 1)
    ax = mpyplot.subplot2grid(grid, (self.pinf.row, 0),
                              rowspan=rowspan, sharex=self.pinf.sharex)

    # update the sharex information if not available
    if self.pinf.sharex is None:
        self.pinf.sharex = ax

    # update the row index with the taken rows
    self.pinf.row += rowspan

    # save the mapping indicator - axis and return
    self.pinf.daxis[obj] = ax

    # Activate grid in all axes if requested
    ax.yaxis.tick_right()
    ax.grid(self.pinf.sch.grid, which='both')

    return ax
def plotind(self, ind,
            subinds=None, upinds=None, downinds=None,
            masterax=None):
    """Plot one indicator/observer *ind*.

    ``subinds`` are plotted over the same axis; ``upinds``/``downinds``
    get their own axes above/below. With ``masterax`` set, *ind* is drawn
    on that existing axis instead of a new one.
    """
    ind._plotinit()

    sch = self.p.scheme

    # check subind
    subinds = subinds or []
    upinds = upinds or []
    downinds = downinds or []

    # plot subindicators on self with independent axis above
    for upind in upinds:
        self.plotind(upind)

    # Get an axis for this plot
    ax = masterax or self.newaxis(ind, rowspan=self.pinf.sch.rowsminor)

    indlabel = ind.plotlabel()

    for lineidx in range(ind.size()):
        line = ind.lines[lineidx]
        linealias = ind.lines._getlinealias(lineidx)

        # per-line plotting info: _<idx> entry, then alias, then default
        lineplotinfo = getattr(ind.plotlines, '_%d' % lineidx, None)
        if not lineplotinfo:
            lineplotinfo = getattr(ind.plotlines, linealias, None)
        if not lineplotinfo:
            lineplotinfo = AutoInfoClass()

        if lineplotinfo._get('_plotskip', False):
            continue

        # Legend label only when plotting 1st line
        if masterax and not ind.plotinfo.plotlinelabels:
            label = indlabel * (lineidx == 0) or '_nolegend'
        else:
            label = lineplotinfo._get('_name', '') or linealias

        # plot data
        lplot = line.plotrange(self.pinf.xstart, self.pinf.xend)

        if not math.isnan(lplot[-1]):
            label += ' %.2f' % lplot[-1]  # append last value to label

        plotkwargs = dict()
        linekwargs = lineplotinfo._getkwargs(skip_=True)

        if linekwargs.get('color', None) is None:
            if not lineplotinfo._get('_samecolor', False):
                self.pinf.nextcolor(ax)
            plotkwargs['color'] = self.pinf.color(ax)

        plotkwargs.update(dict(aa=True, label=label))
        plotkwargs.update(**linekwargs)

        if ax in self.pinf.zorder:
            plotkwargs['zorder'] = self.pinf.zordernext(ax)

        # _method may select e.g. 'bar' instead of the default 'plot'
        pltmethod = getattr(ax, lineplotinfo._get('_method', 'plot'))
        plottedline = pltmethod(self.pinf.x, lplot, **plotkwargs)
        try:
            plottedline = plottedline[0]
        except:
            # Possibly a container of artists (when plotting bars)
            pass

        self.pinf.zorder[ax] = plottedline.get_zorder()

        if not math.isnan(lplot[-1]):
            # line has valid values, plot a tag for the last value
            self.drawtag(ax, len(self.pinf.xreal), lplot[-1],
                         facecolor='white',
                         edgecolor=self.pinf.color(ax))

    # plot subindicators that were created on self
    for subind in subinds:
        self.plotind(subind, subinds=self.dplotsover[subind], masterax=ax)

    if not masterax:
        # adjust margin if requested ... general of particular
        ymargin = ind.plotinfo._get('plotymargin', 0.0)
        ymargin = max(ymargin, self.pinf.sch.yadjust)
        if ymargin:
            ax.margins(y=ymargin)

        # Set specific or generic ticks
        yticks = ind.plotinfo._get('plotyticks', [])
        if not yticks:
            yticks = ind.plotinfo._get('plotyhlines', [])

        if yticks:
            ax.set_yticks(yticks)
        else:
            locator = mticker.MaxNLocator(nbins=4, prune='both')
            ax.yaxis.set_major_locator(locator)

        # Set specific hlines if asked to
        hlines = ind.plotinfo._get('plothlines', [])
        if not hlines:
            hlines = ind.plotinfo._get('plotyhlines', [])
        for hline in hlines:
            ax.axhline(hline, color=self.pinf.sch.hlinescolor,
                       ls=self.pinf.sch.hlinesstyle,
                       lw=self.pinf.sch.hlineswidth)

        if self.pinf.sch.legendind and \
           ind.plotinfo._get('plotlegend', True):

            handles, labels = ax.get_legend_handles_labels()
            # Ensure that we have something to show
            if labels:
                # Legend done here to ensure it includes all plots
                legend = ax.legend(loc=self.pinf.sch.legendindloc,
                                   numpoints=1, frameon=False,
                                   shadow=False, fancybox=False,
                                   prop=self.pinf.prop)

                legend.set_title(indlabel, prop=self.pinf.prop)
                # hack: if title is set. legend has a Vbox for the labels
                # which has a default "center" set
                legend._legend_box.align = 'left'

    # plot subindicators on self with independent axis below
    for downind in downinds:
        self.plotind(downind)
def plotvolume(self, data, opens, highs, lows, closes, volumes, label):
    """Plot the volume bars for *data*.

    As an overlay the pane uses the major rowspan (it shares the data's
    slot) and is vertically scaled down via ``volscaling``; standalone it
    gets a minor pane of its own. Returns the volume artist, or None when
    there is no volume at all.
    """
    if self.pinf.sch.voloverlay:
        rowspan = self.pinf.sch.rowsmajor
    else:
        rowspan = self.pinf.sch.rowsminor

    ax = self.newaxis(data.volume, rowspan=rowspan)

    if self.pinf.sch.voloverlay:
        volalpha = self.pinf.sch.voltrans  # semi-transparent behind bars
    else:
        volalpha = 1.0

    maxvol = volylim = max(volumes)
    if maxvol:

        # Plot the volume (no matter if as overlay or standalone)
        vollabel = label
        volplot, = plot_volume(ax, self.pinf.x, opens, closes, volumes,
                               colorup=self.pinf.sch.volup,
                               colordown=self.pinf.sch.voldown,
                               alpha=volalpha, label=vollabel)

        nbins = 6
        prune = 'both'
        if self.pinf.sch.voloverlay:
            # store for a potential plot over it
            nbins = int(nbins / self.pinf.sch.volscaling)
            prune = None

            # shrink the y range so the bars sit at the pane's bottom
            volylim /= self.pinf.sch.volscaling
            ax.set_ylim(0, volylim, auto=True)
        else:
            # plot a legend
            handles, labels = ax.get_legend_handles_labels()
            if handles:
                # Legend done here to ensure it includes all plots
                legend = ax.legend(loc=self.pinf.sch.legendindloc,
                                   numpoints=1, frameon=False,
                                   shadow=False, fancybox=False,
                                   prop=self.pinf.prop)

        locator = mticker.MaxNLocator(nbins=nbins, prune=prune)
        ax.yaxis.set_major_locator(locator)
        ax.yaxis.set_major_formatter(MyVolFormatter(maxvol))

    if not maxvol:
        ax.set_yticks([])
        return None

    return volplot
def setxdata(self, data):
    """Adjust x plotting info for a data driven by another clock.

    When ``data.mlen`` maps this data's bars onto the master clock,
    recompute ``xstart``/``xend``/``x`` so the data plots aligned with
    the master's index range.
    """
    mlen = data.mlen
    if not mlen:
        return  # no master clock: nothing to remap

    pinf = self.pinf

    # this data has a master, get the real length of this data
    pinf.xlen = len(mlen)

    # find start/end with regards to the master index range
    pinf.xstart = bisect.bisect_left(mlen, pinf.pstart)
    pinf.xend = bisect.bisect_right(mlen, pinf.pend)

    # extract the Xs from the subdata and rebase them to the
    # start of the main data point
    subxs = mlen[pinf.xstart:pinf.xend]
    pinf.x = [xi - pinf.pstart for xi in subxs]
def plotdata(self, data, indicators):
    """Plot one data feed: OHLC/candle/line body, optional volume and the
    ``indicators`` attached to it (including their own up/down panes).

    FIXES: the OHLC label format had ``H:%2.f`` (width 2, precision 0 --
    truncated the High to an integer) instead of ``H:%.2f``; the unused
    local ``dataname`` was removed.
    """
    for ind in indicators:
        upinds = self.dplotsup[ind]
        for upind in upinds:
            self.plotind(upind,
                         subinds=self.dplotsover[upind],
                         upinds=self.dplotsup[upind],
                         downinds=self.dplotsdown[upind])

    # set the x axis data (if needed)
    self.setxdata(data)

    opens = data.open.plotrange(self.pinf.xstart, self.pinf.xend)
    highs = data.high.plotrange(self.pinf.xstart, self.pinf.xend)
    lows = data.low.plotrange(self.pinf.xstart, self.pinf.xend)
    closes = data.close.plotrange(self.pinf.xstart, self.pinf.xend)
    volumes = data.volume.plotrange(self.pinf.xstart, self.pinf.xend)

    vollabel = 'Volume'
    if self.pinf.sch.volume and self.pinf.sch.voloverlay:
        # volume goes first on its own (twinned) axis as background
        volplot = self.plotvolume(
            data, opens, highs, lows, closes, volumes, vollabel)
        axvol = self.pinf.daxis[data.volume]
        ax = axvol.twinx()
        self.pinf.daxis[data] = ax
    else:
        ax = self.newaxis(data, rowspan=self.pinf.sch.rowsmajor)

    datalabel = ''
    if hasattr(data, '_name') and data._name:
        datalabel += data._name

    if hasattr(data, '_compression') and \
       hasattr(data, '_timeframe'):
        tfname = TimeFrame.getname(data._timeframe, data._compression)
        datalabel += ' (%d %s)' % (data._compression, tfname)

    # FIX: was ' O:%.2f H:%2.f L:%.2f C:%.2f'
    datalabel += ' O:%.2f H:%.2f L:%.2f C:%.2f' % \
        (opens[-1], highs[-1], lows[-1], closes[-1])

    if self.pinf.sch.style.startswith('line'):
        plotted = plot_lineonclose(
            ax, self.pinf.x, closes,
            color=self.pinf.sch.loc, label=datalabel)
    else:
        if self.pinf.sch.style.startswith('candle'):
            plotted = plot_candlestick(
                ax, self.pinf.x, opens, highs, lows, closes,
                colorup=self.pinf.sch.barup,
                colordown=self.pinf.sch.bardown,
                label=datalabel)
        elif self.pinf.sch.style.startswith('bar') or True:
            # final default option -- should be "else"
            plotted = plot_ohlc(
                ax, self.pinf.x, opens, highs, lows, closes,
                colorup=self.pinf.sch.barup,
                colordown=self.pinf.sch.bardown,
                label=datalabel)

    self.pinf.zorder[ax] = plotted[0].get_zorder()

    # Code to place a label at the right hand side with the last value
    self.drawtag(ax, len(self.pinf.xreal), closes[-1],
                 facecolor='white', edgecolor=self.pinf.sch.loc)

    ax.yaxis.set_major_locator(mticker.MaxNLocator(prune='both'))
    # make sure "over" indicators do not change our scale
    ax.set_ylim(ax.get_ylim())

    if self.pinf.sch.volume:
        if not self.pinf.sch.voloverlay:
            self.plotvolume(
                data, opens, highs, lows, closes, volumes, vollabel)
        else:
            # Prepare overlay scaling/pushup or manage own axis
            if self.pinf.sch.volpushup:
                # push up overlaid axis by lowering the bottom limit
                axbot, axtop = ax.get_ylim()
                axbot *= (1.0 - self.pinf.sch.volpushup)
                ax.set_ylim(axbot, axtop)

    for ind in indicators:
        self.plotind(ind, subinds=self.dplotsover[ind], masterax=ax)

    handles, labels = ax.get_legend_handles_labels()
    if handles:
        # put data and volume legend entries in the 1st positions
        # because they are "collections" they are considered after Line2D
        # for the legend entries, which is not our desire
        if self.pinf.sch.volume and self.pinf.sch.voloverlay:
            if volplot:
                # even if volume plot was requested, there may be no volume
                labels.insert(0, vollabel)
                handles.insert(0, volplot)

        didx = labels.index(datalabel)
        labels.insert(0, labels.pop(didx))
        handles.insert(0, handles.pop(didx))

        # feed handles/labels to legend to get right order
        legend = ax.legend(handles, labels,
                           loc='upper left', frameon=False, shadow=False,
                           fancybox=False,
                           prop=self.pinf.prop, numpoints=1, ncol=1)

        # hack: if title is set. legend has a Vbox for the labels
        # which has a default "center" set
        legend._legend_box.align = 'left'

    for ind in indicators:
        downinds = self.dplotsdown[ind]
        for downind in downinds:
            self.plotind(downind,
                         subinds=self.dplotsover[downind],
                         upinds=self.dplotsup[downind],
                         downinds=self.dplotsdown[downind])
def show(self):
    """Display the created figure(s) via matplotlib's blocking show."""
    mpyplot.show()
def sortdataindicators(self, strategy):
# These lists/dictionaries hold the subplots that go above each data
self.dplotstop = list()
self.dplotsup = collections.defaultdict(list)
self.dplotsdown = collections.defaultdict(list)
self.dplotsover = collections.defaultdict(list)
# Sort observers in the different lists/dictionaries
for x in strategy.getobservers():
if not x.plotinfo.plot or x.plotinfo.plotskip:
continue
if x.plotinfo.subplot:
self.dplotstop.append(x)
else:
key = getattr(x._clock, 'owner', x._clock)
self.dplotsover[key].append(x)
# Sort indicators in the different lists/dictionaries
for x in strategy.getindicators():
if not hasattr(x, 'plotinfo'):
# no plotting support - so far LineSingle derived classes
continue
if not x.plotinfo.plot or x.plotinfo.plotskip:
continue
# support LineSeriesStub which has "owner" to point to the data
key = getattr(x._clock, 'owner', x._clock)
if getattr(x.plotinfo, 'plotforce', False):
if key not in strategy.datas:
datas = strategy.datas
while True:
if key not in strategy.datas:
key = key._clock
else:
break
if x.plotinfo.subplot:
if x.plotinfo.plotabove:
self.dplotsup[key].append(x)
else:
self.dplotsdown[key].append(x)
else:
self.dplotsover[key].append(x)
| gpl-3.0 |
aborovin/trading-with-python | lib/extra.py | 77 | 2540 | '''
Created on Apr 28, 2013
Copyright: Jev Kuznetsov
License: BSD
'''
from __future__ import print_function
import sys
import urllib
import os
import xlrd # module for excel file reading
import pandas as pd
class ProgressBar:
    """Console progress bar redrawn in place with carriage returns.

    ``animate(i)`` reprints the bar for the 0-based iteration *i* out of
    the total given at construction time.
    """

    def __init__(self, iterations):
        self.iterations = iterations
        self.prog_bar = '[]'
        self.fill_char = '*'
        self.width = 50
        self.__update_amount(0)

    def animate(self, iteration):
        """Redraw the bar on the same console line for *iteration*."""
        print('\r', self, end='')
        sys.stdout.flush()
        self.update_iteration(iteration + 1)

    def update_iteration(self, elapsed_iter):
        """Rebuild the bar for *elapsed_iter* completed iterations."""
        pct = (elapsed_iter / float(self.iterations)) * 100.0
        self.__update_amount(pct)
        self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)

    def __update_amount(self, new_amount):
        # rebuild the [****   ] body and splice the NN% label into its middle
        percent_done = int(round((new_amount / 100.0) * 100.0))
        slots = self.width - 2
        filled = int(round((percent_done / 100.0) * slots))
        self.prog_bar = '[' + self.fill_char * filled + ' ' * (slots - filled) + ']'
        label = '%d%%' % percent_done
        anchor = (len(self.prog_bar) // 2) - len(str(percent_done))
        self.prog_bar = (self.prog_bar[:anchor] + label +
                         self.prog_bar[anchor + len(label):])

    def __str__(self):
        return str(self.prog_bar)
def getSpyHoldings(dataDir):
    ''' get SPY holdings from the net, uses temp data storage to save xls file

    Parameters
    ----------
    dataDir : str
        directory where the downloaded spreadsheet is cached as
        ``spy_holdings.xls``; an existing file skips the download.

    Returns
    -------
    pandas.DataFrame with columns name, symbol, weight, sector.

    NOTE(review): ``urllib.urlretrieve`` is Python 2 only (moved to
    ``urllib.request`` in Python 3); requires the third-party ``xlrd``
    package. The row range 5..505 assumes the SPDR sheet layout -- TODO
    confirm it still matches the downloaded file.
    '''
    dest = os.path.join(dataDir,"spy_holdings.xls")
    if os.path.exists(dest):
        print('File found, skipping download')
    else:
        print('saving to', dest)
        # download xls file and save it to data directory
        urllib.urlretrieve ("https://www.spdrs.com/site-content/xls/SPY_All_Holdings.xls?fund=SPY&docname=All+Holdings&onyx_code1=1286&onyx_code2=1700",
                            dest)

    # parse
    wb = xlrd.open_workbook(dest) # open xls file, create a workbook
    sh = wb.sheet_by_index(0) # select first sheet
    data = {'name':[], 'symbol':[], 'weight':[],'sector':[]}
    for rowNr in range(5,505): # cycle through the rows
        v = sh.row_values(rowNr) # get all row values
        data['name'].append(v[0])
        data['symbol'].append(v[1]) # symbol is in the second column, append it to the list
        data['weight'].append(float(v[2]))
        data['sector'].append(v[3])

    return pd.DataFrame(data)
| bsd-3-clause |
eusoubrasileiro/fatiando_seismic | fatiando/seismic/utils.py | 1 | 4028 | r"""
Tools for seismic processing
**Auxiliary functions**
* :func:`~fatiando.seismic.utils.vrms`: Calculate RMS velocity from interval
thickness and velocity (horizontally layered model)
* :func:`~fatiando.seismic.utils.nmo`: Apply nmo correction on a CMP gather
* :func:`~fatiando.seismic.utils.plot_vnmo`: Draw nmo hyperbolas over a CMP gather
**Theory**
Nmo correction based on a simple horizontal layered earth is given by:
.. math::
t^2 = (t_0)^2 + x^2/(v_rms)^2
"""
import numpy as np
from scipy import interpolate
from fatiando.vis import mpl
def nmo(cmp_gather, offsets, vnmo, dt, stretching=0.4):
    r"""
    Apply the NMO correction t^2 = t0^2 + x^2/vnmo^2 on a 2D CMP gather.

    Parameters:

    * cmp_gather : 2D-array
        traces of this gather from near to far-offset (nsamples, ntraces)
    * offsets : 1D-array
        offset of each trace, same order as the gather columns
    * vnmo : 1D-array
        NMO velocity per time sample, size (nsamples)
    * dt : float
        sample rate
    * stretching : float or None
        acceptable fractional frequency change (dt_nmo/t0) due to NMO
        stretching; regions above it are muted (left zero). ``None``
        disables muting.

    Returns:

    * cmp_nmo : 2D array
        nmo corrected cmp traces

    Note: uses linear interpolation of sample values.

    FIX: the interpolation grid was ``np.linspace(0, ns*dt, ns)`` whose
    spacing is ``ns*dt/(ns-1) != dt``, so it did not match the sample
    times ``i*dt`` -- even a zero-offset trace came back altered. The
    grid is now ``np.arange(ns)*dt`` and the out-of-range check uses the
    real last sample time; the 0/0 stretch ratio at t0 == 0 is guarded.
    """
    ns, nx = cmp_gather.shape
    cmp_nmo = np.zeros(cmp_gather.shape)
    times = np.arange(ns)*dt  # actual sample times of the traces
    for j in range(nx):  # correct each offset
        x = offsets[j]
        # function to interpolate amplitude values for this trace
        interpfunc = interpolate.interp1d(times, cmp_gather[:, j])
        for i in range(ns):  # for each (t0, vnmo) hyperbola of this trace
            t0 = times[i]
            t = np.sqrt(t0**2 + (x/vnmo[i])**2)
            if t > times[-1]:  # beyond the last recorded sample
                continue
            if stretching is not None and t0 > 0:
                # dtnmo/t0 equivalent to df/f frequency distortion Oz. Yilmaz
                if (t - t0)/t0 > stretching:
                    # will remain zero (muted)
                    continue
            cmp_nmo[i, j] = interpfunc(t)
    return cmp_nmo
def vrms(vi, ds):
    """
    RMS (Root Mean Square) velocity from interval thickness and velocity
    for a horizontally layered model.

    * vi : ndarray
        interval velocity per layer
    * ds : ndarray
        layer thickness

    Returns the RMS velocity array (one value per layer boundary).
    """
    twt = 2*(ds/vi)                  # two-way travel time per layer
    num = np.cumsum((vi**2)*twt)     # cumulative v^2-weighted time
    den = np.cumsum(twt)             # cumulative two-way time
    return np.sqrt(num/den)
def plot_vnmo(cmp_gather, offsets, vnmo, dt, inc=70,
              vmin=None, vmax=None, aspect='auto'):
    r"""
    Given nmo functions defined by t0 and vnmo, draw the corresponding
    hyperbolas over the specified gather using `seismic_image`.

    Parameters:

    * cmp_gather : 2D-array
        traces of this gather from near to far-offset (nsamples, ntraces)
    * offsets : 1D-array
        offset of each trace, same order as the gather columns
    * vnmo : 1D-array
        velocity parameter of all nmo functions for this cmp gather,
        must have same size as (nsamples)
    * dt : float
        sample rate
    * inc : int
        step in time samples between hyperbolas, to avoid overlapping
        the seismic image below completely
    * vmin, vmax : float
        min and max values for imshow
    * aspect : float
        matplotlib imshow aspect parameter, ratio between axes

    Returns:

    * None -- draws on the current matplotlib figure.

    NOTE(review): the seismic image is (re)drawn inside the loop, once
    per hyperbola; drawing it once before the loop would presumably be
    sufficient -- confirm before changing.
    """
    ns, nx = cmp_gather.shape
    for i in range(0, ns, inc):  # each (t0, vnmo) hyperbola
        t0 = i*dt
        t = np.sqrt(t0**2+(offsets/vnmo[i])**2)
        mpl.seismic_image(cmp_gather, dt, aspect=aspect, vmin=vmin, vmax=vmax)
        mpl.plot(range(nx), t, '+b')
eranchetz/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
    'return new_figure_manager, draw_if_interactive and show for pylab'
    # Import the requested backend into a generic module object.
    # 'module://pkg.mod' selects an arbitrary module; otherwise the name
    # is mapped to matplotlib.backends.backend_<name>.
    if backend.startswith('module://'):
        backend_name = backend[9:]
    else:
        backend_name = 'backend_'+backend
        backend_name = backend_name.lower() # until we banish mixed case
        backend_name = 'matplotlib.backends.%s'%backend_name.lower()
    backend_mod = __import__(backend_name,
                             globals(),locals(),[backend_name])

    # Things we pull in from all backends
    new_figure_manager = backend_mod.new_figure_manager

    # image backends like pdf, agg or svg do not need to do anything
    # for "show" or "draw_if_interactive", so if they are not defined
    # by the backend, just do nothing
    def do_nothing_show(*args, **kwargs):
        # warn only when called interactively, where the user clearly
        # expected a window to appear
        frame = inspect.currentframe()
        fname = frame.f_back.f_code.co_filename
        if fname in ('<stdin>', '<ipython console>'):
            warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
                          (backend, matplotlib.matplotlib_fname()))
    def do_nothing(*args, **kwargs): pass
    backend_version = getattr(backend_mod,'backend_version', 'unknown')
    show = getattr(backend_mod, 'show', do_nothing_show)
    draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)

    # Additional imports which only happen for certain backends. This section
    # should probably disappear once all backends are uniform.
    if backend.lower() in ['wx','wxagg']:
        Toolbar = backend_mod.Toolbar
        __all__.append('Toolbar')

    matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
    return new_figure_manager, draw_if_interactive, show
| agpl-3.0 |
"""
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, a technique
consists in repeating the classification procedure after randomizing
(permuting) the labels. The p-value is then given by the percentage of
runs for which the score obtained is greater than the classification
score obtained in the first place.
"""

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD

# NOTE(review): Python 2 script (print statement); sklearn API is the
# pre-0.18 layout (sklearn.cross_validation, zero_one_score).
print __doc__

import numpy as np
import pylab as pl

from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
from sklearn.metrics import zero_one_score

##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size

# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))

# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]

svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)

score, permutation_scores, pvalue = permutation_test_score(svm, X, y,
                                                           zero_one_score, cv=cv,
                                                           n_permutations=100, n_jobs=1)

print "Classification score %s (pvalue : %s)" % (score, pvalue)

###############################################################################
# View histogram of permutation scores
pl.hist(permutation_scores, 20, label='Permutation scores')
ylim = pl.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#pl.vlines(score, ylim[0], ylim[1], linestyle='--',
#          color='g', linewidth=3, label='Classification Score'
#          ' (pvalue %s)' % pvalue)
#pl.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
#          color='k', linewidth=3, label='Luck')
pl.plot(2 * [score], ylim, '--g', linewidth=3,
        label='Classification Score'
        ' (pvalue %s)' % pvalue)
pl.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
pl.ylim(ylim)
pl.legend()
pl.xlabel('Score')
pl.show()
| bsd-3-clause |
toddheitmann/PetroPy | examples/wolfcamp_bulk.py | 1 | 8772 | """
=========================================
Wolfcamp Example - Bulk Process las files
=========================================
This example shows the full petrophysical workflow available in PetroPy
for a folder of Wolfcamp las files courtesy of University Lands Texas.
The workflow progresses in 3 iterations to edit data, calculate the
petrophysical model, and to calculate statistics and save. To better
understand the workflow, first read through the `single well`_ example.
.. _single well: wolfcamp_single.html
The First Iteration
1. Read las file and create a :class:`petropy.Log` object
2. Create a :class:`petropy.LogViewer` show in edit_mode to fix data
3. Writes updated data to new folder via :meth:`petropy.Log.write`
The Second Iteration
1. Read las file and create a :class:`petropy.Log` object
2. Load tops from a csv file using :meth:`petropy.Log.tops_from_csv`
3. Calculate fluid properties by
1. Loading parameters via :meth:`petropy.Log.fluid_properties_parameters_from_csv`
2. Calculating over formations via :meth:`petropy.Log.formation_fluid_properties`
4. Calculate multimineral properties by
1. Loading parameters via :meth:`petropy.Log.multimineral_parameters_from_csv`
2. Calculating over formations via :meth:`petropy.Log.formation_multimineral_model`
5. Curve summations via :meth:`petropy.Log.summations`
6. Adding pay flags via :meth:`petropy.Log.add_pay_flag`
After the second iteration, the list processed_logs is used to perform
electrofacies calculations via :meth:`petropy.electrofacies`
The Third Iteration
1. Exporting log statistics via :meth:`petropy.Log.statistics`
2. Saving LogViewer to png and Log to las
Downloading the script at the bottom of this webpage will not download the required las
files or PetroPy logo. To download all files, view the `examples folder`_ on GitHub.
.. _examples folder: https://github.com/toddheitmann/PetroPy/tree/master/examples
"""
import petropy as ptr

# use glob to get file paths
import glob

# import pyplot to add logo to figure
import matplotlib.pyplot as plt

### define parameters ###

# formations to process
f = ['WFMPA', 'WFMPB', 'WFMPC']

# curves to summate over each formation
sum_curves = ['OIP', 'BVH', 'PHIE']

# curves to compute statistics for
stats_curves = ['OIP', 'BVH', 'PHIE', 'SW', 'VCLAY', 'TOC']

# pay flag cutoffs: (curve, threshold) pairs
flag_1_gtoe = [('PHIE', 0.03)]
flag_2_gtoe = [('PAY_FLAG_1', 1), ('BVH', 0.02)]
flag_3_gtoe = [('PAY_FLAG_2', 1)]
flag_3_ltoe = [('SW', 0.2)]

# get paths for all las files in folder #
# wfmp_raw_las #

raw_las_folder = r'wfmp_raw_las/'
las_files = glob.glob(raw_las_folder + r'*.las')

### First Iteration ###

for las_file in las_files:

    ### 1. read raw las file ###

    # create a Log object by reading a file path #
    log = ptr.Log(las_file)

    ### 2. graphically edit raw log ###

    # use manual mode for fixing borehole washout #
    # and other changes requiring redrawing data #
    # use bulk shift mode to linearly adjust all #
    # curve data #
    # close both windows to continue program #
    viewer = ptr.LogViewer(log, top = 6950, height = 100)
    viewer.show(edit_mode = True)

    # overwrite log variable with updated log #
    # from LogViewer edits #
    log = viewer.log

    # find way to name well, looking for well name #
    # or UWI or API #
    if len(log.well['WELL'].value) > 0:
        well_name = log.well['WELL'].value
    elif len(log.well['UWI'].value) > 0:
        well_name = str(log.well['UWI'].value)
    elif len(log.well['API'].value) > 0:
        well_name = str(log.well['API'].value)
    else:
        well_name = 'UNKNOWN'
    well_name = well_name.replace('.', '')

    # save changes to edited folder in case #
    # reprocessing after editing is needed #
    edited_log_file_name = 'wfmp_edited_las/%s_edited.las' % well_name
    # log.write(edited_log_file_name)

### Second Iteration ###

edited_file_folder = r'wfmp_edited_las/'
edited_las_files = glob.glob(edited_file_folder + r'*.las')

# create list to hold processed log objects #
processed_logs = []

for las_file in edited_las_files:

    ### 1. Read las file ###
    log = ptr.Log(las_file)

    ### 2. load tops ###
    log.tops_from_csv('wfmp_raw_las/wfmp_tops.csv')

    ### 3. fluid properties ###

    # load fluid properties from a csv file #
    # since path is not specified, load default #
    # csv file included with petropy #
    log.fluid_properties_parameters_from_csv()

    # calculate fluid properties over defined #
    # formations with parameter WFMP from #
    # previously loaded csv #
    log.formation_fluid_properties(f, parameter = 'WFMP')

    ### 4. multimineral model ###

    # load multimineral parameters from csv file #
    # since path is not specified, load default #
    # csv file included with petropy #
    log.multimineral_parameters_from_csv()

    # calculate multimineral model over defined #
    # formations with parameter WFMP from #
    # previously loaded csv #
    log.formation_multimineral_model(f, parameter = 'WFMP')

    ### 5. summations ###
    log.summations(f, curves = sum_curves)

    ### 6. pay flags ###
    log.add_pay_flag(f, greater_than_or_equal = flag_1_gtoe)
    log.add_pay_flag(f, greater_than_or_equal = flag_2_gtoe)
    log.add_pay_flag(f, greater_than_or_equal = flag_3_gtoe,
                     less_than_or_equal = flag_3_ltoe)

    # add log to processed_logs list #
    processed_logs.append(log)

### 9. electrofacies ###

# define curves to use in electrofacies module #
electro_logs = ['GR_N', 'RESDEEP_N', 'NPHI_N', 'RHOB_N', 'PE_N']

# specify to use logarithmically scaled RESDEEP_N #
scaled = ['RESDEEP_N']

# calculate electrofacies for the processed logs #
final_logs = ptr.electrofacies(processed_logs, f,
                               electro_logs, 6, log_scale = scaled)

### Third Iteration ###

pay_flags = ['PAY_FLAG_1', 'PAY_FLAG_2', 'PAY_FLAG_3']
facies_curves = ['FACIES']

for log in final_logs:

    ### 1. export statistics ###
    log.statistics_to_csv('wfmp_processed_las/wfmp_statistics.csv',
                          replace = True, formations = f,
                          curves = stats_curves, pay_flags = pay_flags,
                          facies = facies_curves)

    ### 2. export data ###

    # find way to name well, looking for well name #
    # or UWI or API #
    if len(log.well['WELL'].value) > 0:
        well_name = log.well['WELL'].value
    elif len(str(log.well['UWI'].value)) > 0:
        well_name = str(log.well['UWI'].value)
    elif len(log.well['API'].value) > 0:
        well_name = str(log.well['API'].value)
    else:
        well_name = 'UNKNOWN'
    well_name = well_name.replace('.', '')

    # scale height of viewer to top and bottom #
    # of calculated values #
    wfmpa_top = log.tops['WFMPA']
    wfmpc_base = log.next_formation_depth('WFMPC')
    top = wfmpa_top
    height = wfmpc_base - wfmpa_top

    # create LogViewer with the default full_oil #
    # template included in petropy #
    viewer = ptr.LogViewer(log, top = top, height = height,
                           template_defaults = 'full_oil')

    # set viewer to 17x11 inches size for use in #
    # PowerPoint or printing to larger paper #
    viewer.fig.set_size_inches(17, 11)

    # add well_name to title of LogViewer #
    viewer.fig.suptitle(well_name, fontweight = 'bold', fontsize = 30)

    # add logo to top left corner #
    logo_im = plt.imread('company_logo.png')
    logo_ax = viewer.fig.add_axes([0, 0.85, 0.2, 0.2])
    logo_ax.imshow(logo_im)
    logo_ax.axis('off')

    # add text to top right corner #
    if len(str(log.well['UWI'].value)) > 0:
        label = 'UWI: ' + str(log.well['UWI'].value) + '\n'
    elif len(log.well['API'].value) > 0:
        label = 'API: ' + str(log.well['API'].value) + '\n'
    else:
        label = ''
    label += 'County: Reagan\nCreated By: Todd Heitmann\n'
    label += 'Creation Date: October 23, 2017'
    viewer.axes[0].annotate(label, xy = (0.99,0.99),
                            xycoords = 'figure fraction',
                            horizontalalignment = 'right',
                            verticalalignment = 'top',
                            fontsize = 14)

    # save figure and log #
    viewer_file_name=r'wfmp_processed_las/%s_processed.png' % well_name
    las_file_name = r'wfmp_processed_las/%s_processed.las' % well_name
    viewer.fig.savefig(viewer_file_name)
    viewer.log.write(las_file_name)
| mit |
oscarxie/tushare | tushare/datayes/basics.py | 14 | 3722 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on 2015年7月4日
@author: JimmyLiu
@QQ:52799046
"""
from tushare.datayes import vars as vs
import pandas as pd
from pandas.compat import StringIO
class Basics():
    """Wrapper around the DataYes "master" basic-data API endpoints.

    Every method fills one of the URL templates from
    ``tushare.datayes.vars`` with its arguments, performs the request via
    ``self.client`` and returns the CSV payload parsed into a
    ``pandas.DataFrame`` (or ``None`` on a non-200 response; see
    ``_ret_data``).
    """
    def __init__(self , client):
        # client: DataYes API client exposing getData(url) -> (status, body)
        self.client = client

    def dy_master_secID(self, ticker='000001', partyID='',
                        cnSpell='', assetClass='', field=''):
        """
        Security IDs and basic listing information (getSecID).

        Given one or more exchange tickers, return the security ID -- the
        unique identifier used throughout the data model -- together with
        basic listing information such as exchange, listing status,
        trading currency and ISIN code.
        """
        code, result = self.client.getData(vs.SEC_ID%(ticker, partyID,
                                           cnSpell, assetClass, field))
        return _ret_data(code, result)

    def dy_master_tradeCal(self, exchangeCD='XSHG,XSHE', beginDate='',
                           endDate='', field=''):
        """
        Exchange trading calendar (getTradeCal).

        For the given exchanges and date range, report whether each
        calendar day is a trading day.
        """
        code, result = self.client.getData(vs.TRADE_DATE%(exchangeCD, beginDate,
                                           endDate, field))
        return _ret_data(code, result)

    def dy_master_equInfo(self, ticker='wx', pagesize='10',
                          pagenum='1', field=''):
        """
        Shanghai/Shenzhen equity quick lookup (getEquInfo).

        Match stock codes and names by pinyin abbreviation or by code;
        covers all currently listed A-share stocks.
        """
        code, result = self.client.getData(vs.EQU_INFO%(ticker, pagesize,
                                           pagenum, field))
        return _ret_data(code, result)

    def dy_master_region(self, field=''):
        """
        Chinese region classification, based on administrative divisions
        (getSecTypeRegion).
        """
        code, result = self.client.getData(vs.REGION%(field))
        return _ret_data(code, result)

    def dy_master_regionRel(self, ticker='', typeID='',
                            secID='', field=''):
        """
        Region classification of SH/SZ stocks, keyed by the administrative
        region of the registered company address (getSecTypeRegionRel).
        """
        code, result = self.client.getData(vs.REGION_REL%(ticker, typeID,
                                           secID, field))
        return _ret_data(code, result)

    def dy_master_secType(self, field=''):
        """
        Security classification list (getSecType).

        Top-level categories include SH/SZ equities, HK equities, funds,
        bonds, futures, options etc., each subdivided into finer types;
        the whole tree can be fetched in one call.
        """
        code, result = self.client.getData(vs.SEC_TYPE%(field))
        return _ret_data(code, result)

    def dy_master_secTypeRel(self, ticker='', typeID='101001004001001',
                             secID='', field=''):
        """
        Constituents of each security classification (getSecTypeRel);
        available classifications come from getSecType.
        """
        code, result = self.client.getData(vs.SEC_TYPE_REL%(ticker, typeID,
                                           secID, field))
        return _ret_data(code, result)
def _ret_data(code, result):
    """Turn a raw DataYes response into a ``pandas.DataFrame``.

    On HTTP 200 the CSV body is decoded (Python 3 only) and parsed; for
    any other status the raw body is printed for debugging and ``None``
    is returned.
    """
    if code != 200:
        # Non-success response: surface the server message to the caller.
        print(result)
        return None
    payload = result.decode('utf-8') if vs.PY3 else result
    return pd.read_csv(StringIO(payload))
| bsd-3-clause |
deepfield/ibis | ibis/sql/postgres/tests/test_client.py | 1 | 3736 | # Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import pandas as pd
from ibis.tests.util import assert_equal
import ibis.expr.types as ir
import ibis
# Skip this whole module if the optional backend dependencies are missing.
pytest.importorskip('sqlalchemy')
pytest.importorskip('psycopg2')

# Every test in this module requires a live PostgreSQL server.
pytestmark = pytest.mark.postgresql

# Connection settings, overridable through IBIS_TEST_POSTGRES_* env vars.
POSTGRES_TEST_DB = os.environ.get('IBIS_TEST_POSTGRES_DATABASE',
                                  'ibis_testing')
IBIS_POSTGRES_HOST = os.environ.get('IBIS_TEST_POSTGRES_HOST',
                                    'localhost')
IBIS_POSTGRES_USER = os.environ.get('IBIS_TEST_POSTGRES_USER',
                                    'postgres')
IBIS_POSTGRES_PASS = os.environ.get('IBIS_TEST_POSTGRES_PASSWORD',
                                    'postgres')
def test_table(alltypes):
    """The `alltypes` fixture exposes an ibis table expression."""
    assert isinstance(alltypes, ir.TableExpr)
def test_array_execute(alltypes):
    """Executing a column expression yields a pandas Series of the
    requested length."""
    d = alltypes.limit(10).double_col
    s = d.execute()
    assert isinstance(s, pd.Series)
    assert len(s) == 10
def test_literal_execute(con):
    """A literal expression round-trips through the backend unchanged."""
    expr = ibis.literal('1234')
    result = con.execute(expr)
    assert result == '1234'
def test_simple_aggregate_execute(alltypes):
    """A scalar aggregate executes to a plain Python float."""
    d = alltypes.double_col.sum()
    v = d.execute()
    assert isinstance(v, float)
def test_list_tables(con):
    """list_tables returns a non-empty list and supports LIKE filtering."""
    assert len(con.list_tables()) > 0
    assert len(con.list_tables(like='functional')) == 1
def test_compile_verify(alltypes):
    """verify() distinguishes expressions the backend can compile from
    those it cannot (approx_median is unsupported on postgres here)."""
    unsupported_expr = alltypes.double_col.approx_median()
    assert not unsupported_expr.verify()

    supported_expr = alltypes.double_col.sum()
    assert supported_expr.verify()
def test_database_layer(con, alltypes):
    """Database and schema objects mirror the connection's table catalog."""
    db = con.database()
    t = db.functional_alltypes
    assert_equal(t, alltypes)

    assert db.list_tables() == con.list_tables()

    # A specific schema exposes a different table set than the default one.
    db_schema = con.schema("information_schema")
    assert db_schema.list_tables() != con.list_tables()
def test_compile_toplevel():
    """Compiling a top-level aggregate produces the expected SQL without
    needing a live connection."""
    t = ibis.table([('foo', 'double')], name='t0')

    # it works!
    expr = t.foo.sum()
    result = ibis.postgres.compile(expr)
    expected = "SELECT sum(t0.foo) AS sum \nFROM t0 AS t0"  # noqa
    assert str(result) == expected
def test_list_databases(con):
    """The configured test database is visible in list_databases()."""
    assert POSTGRES_TEST_DB is not None
    assert POSTGRES_TEST_DB in con.list_databases()
def test_list_schemas(con):
    """Standard PostgreSQL schemas are reported by list_schemas()."""
    assert 'public' in con.list_schemas()
    assert 'information_schema' in con.list_schemas()
def test_metadata_is_per_table():
    """SQLAlchemy reflection happens lazily, one table at a time."""
    con = ibis.postgres.connect(
        host=IBIS_POSTGRES_HOST,
        database=POSTGRES_TEST_DB,
        user=IBIS_POSTGRES_USER,
        password=IBIS_POSTGRES_PASS,
    )
    assert len(con.meta.tables) == 0

    # assert that we reflect only when a table is requested
    t = con.table('functional_alltypes')  # noqa
    assert 'functional_alltypes' in con.meta.tables
    assert len(con.meta.tables) == 1
def test_schema_table():
    """Tables inside an explicit schema can be accessed by item lookup."""
    con = ibis.postgres.connect(
        host=IBIS_POSTGRES_HOST,
        database=POSTGRES_TEST_DB,
        user=IBIS_POSTGRES_USER,
        password=IBIS_POSTGRES_PASS,
    )

    # ensure that we can reflect the information schema (which is guaranteed
    # to exist)
    schema = con.schema('information_schema')

    assert isinstance(schema['tables'], ir.TableExpr)
| apache-2.0 |
flightgong/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 3 | 3357 | """
Testing Recursive feature elimination
"""
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.scorer import SCORERS
def test_rfe_set_params():
    """Passing estimator params via `estimator_params` gives the same
    predictions as configuring the estimator directly."""
    generator = check_random_state(0)
    iris = load_iris()
    # Append 6 columns of Gaussian noise so RFE has features to eliminate.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target
    clf = SVC(kernel="linear")
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    y_pred = rfe.fit(X, y).predict(X)

    clf = SVC()
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
              estimator_params={'kernel': 'linear'})
    y_pred2 = rfe.fit(X, y).predict(X)
    assert_array_equal(y_pred, y_pred2)
def test_rfe():
    """RFE recovers exactly the informative iris features, and the dense
    and sparse code paths agree."""
    generator = check_random_state(0)
    iris = load_iris()
    # Add 6 columns of Gaussian noise that RFE should eliminate.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    X_sparse = sparse.csr_matrix(X)
    y = iris.target

    # dense model
    clf = SVC(kernel="linear")
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    X_r = rfe.transform(X)
    clf.fit(X_r, y)
    assert_equal(len(rfe.ranking_), X.shape[1])

    # sparse model
    clf_sparse = SVC(kernel="linear")
    rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
    rfe_sparse.fit(X_sparse, y)
    X_r_sparse = rfe_sparse.transform(X_sparse)

    # The 4 surviving features must be the original iris columns.
    assert_equal(X_r.shape, iris.data.shape)
    assert_array_almost_equal(X_r[:10], iris.data[:10])

    assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
    assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
    assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfecv():
    """RFECV filters out the injected noise features under the default
    score function, a custom loss function and a named scorer."""
    generator = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target)   # regression test: list should be supported

    # Test using the score function
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.grid_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)

    # All the noisy variable were filtered out
    assert_array_equal(X_r, iris.data)

    # same in sparse
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)

    # Test using a customized loss function
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  loss_func=zero_one_loss)
    # loss_func is deprecated upstream, hence the warning suppression.
    ignore_warnings(rfecv.fit)(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)

    # Test using a scorer
    scorer = SCORERS['accuracy']
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scorer)
    rfecv.fit(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
| bsd-3-clause |
pllim/astropy | examples/io/plot_fits-image.py | 11 | 1898 | # -*- coding: utf-8 -*-
"""
=======================================
Read and plot an image from a FITS file
=======================================
This example opens an image stored in a FITS file and displays it to the screen.
This example uses `astropy.utils.data` to download the file, `astropy.io.fits` to open
the file, and `matplotlib.pyplot` to display the image.
*By: Lia R. Corrales, Adrian Price-Whelan, Kelle Cruz*
*License: BSD*
"""
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters

import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)

##############################################################################
# Download the example FITS files used by this example:

from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits

# Fetched once into the astropy download cache; later runs reuse the copy.
image_file = get_pkg_data_filename('tutorials/FITS-images/HorseHead.fits')

##############################################################################
# Use `astropy.io.fits.info()` to display the structure of the file:

fits.info(image_file)

##############################################################################
# Generally the image information is located in the Primary HDU, also known
# as extension 0. Here, we use `astropy.io.fits.getdata()` to read the image
# data from this first extension using the keyword argument ``ext=0``:

image_data = fits.getdata(image_file, ext=0)

##############################################################################
# The data is now stored as a 2D numpy array. Print the dimensions using the
# shape attribute:

print(image_data.shape)

##############################################################################
# Display the image data:

plt.figure()
plt.imshow(image_data, cmap='gray')
plt.colorbar()
| bsd-3-clause |
sho-87/python-machine-learning | Linear Regression/multiple_regression.py | 1 | 1719 | # Basic OLS regression using base python (and numpy)
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): Python 2 only (`xrange` and print statements below).

# Load data
dataset = np.genfromtxt('../data/regression_heart.csv', delimiter=",")

x = dataset[:,1:]
y = dataset[:,0]
y = np.reshape(y, (y.shape[0],1))  # Reshape to a column vector

# Scale (standardize) data for smoother gradient descent
x = (x - np.mean(x, axis=0)) / np.std(x, axis=0)
x = np.insert(x, 0, 1, axis=1)  # Add 1's for bias

# Learning parameters
alpha = 0.01  # Learning rate
iterations = 1000

# Notes
# Cost function for linear regression: 1/2m(sum((theta0 + theta1(x) - y) ^ 2))
# Partial derivative wrt theta0: 1/m(sum(theta0 + theta1 - y))
# Partial derivative wrt theta1: 1/m(sum(theta0 + theta1 - y)) * x
# Parameter update: theta = theta - alpha(partial derivative)

# Training
theta = np.ones((x.shape[1],1))  # Initial weights
m = y.shape[0]  # Number of training examples. Equivalent to X.shape[0]
cost_history = np.zeros(iterations)  # Initialize array of cost history values

for i in xrange(iterations):  # Batch gradient descent
    residuals = np.dot(x, theta) - y
    squared_error = np.dot(residuals.T, residuals)
    cost = 1.0/(2*m) * squared_error  # Quadratic loss
    gradient = 1.0/m * np.dot(residuals.T, x).T  # Calculate derivative
    theta -= (alpha * gradient)  # Update weights

    cost_history[i] = cost  # Store the cost for this iteration

    if (i+1) % 100 == 0:
        print "Iteration: %d | Cost: %f" % (i+1, cost)

# Plot training curve
plt.plot(range(1, len(cost_history)+1), cost_history)
plt.grid(True)
plt.xlim(1, len(cost_history))
plt.ylim(0, max(cost_history))
plt.title("Training Curve")
plt.xlabel("Iteration #")
plt.ylabel("Cost")
| mit |
ishank08/scikit-learn | sklearn/linear_model/bayes.py | 14 | 19671 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
    """Bayesian ridge regression

    Fit a Bayesian ridge model and optimize the regularization parameters
    lambda (precision of the weights) and alpha (precision of the noise).

    Read more in the :ref:`User Guide <bayesian_regression>`.

    Parameters
    ----------
    n_iter : int, optional
        Maximum number of iterations.  Default is 300.

    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.

    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6

    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter.
        Default is 1.e-6.

    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.

    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter.
        Default is 1.e-6

    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False

    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    verbose : boolean, optional, default False
        Verbose mode when fitting the model.

    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)

    alpha_ : float
       estimated precision of the noise.

    lambda_ : float
       estimated precision of the weights.

    sigma_ : array, shape = (n_features, n_features)
        estimated variance-covariance matrix of the weights

    scores_ : float
        if computed, value of the objective function (to be maximized)

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.BayesianRidge()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
            copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
            n_iter=300, normalize=False, tol=0.001, verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])

    Notes
    -----
    See examples/linear_model/plot_bayesian_ridge.py for an example.

    References
    ----------
    D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
    Vol. 4, No. 3, 1992.

    R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
    http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
    Their beta is our self.alpha_
    Their alpha is our self.lambda_
    """

    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 fit_intercept=True, normalize=False, copy_X=True,
                 verbose=False):
        self.n_iter = n_iter
        self.tol = tol
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the model

        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        self.X_offset_ = X_offset_
        self.X_scale_ = X_scale_
        n_samples, n_features = X.shape

        # Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)
        lambda_ = 1.

        verbose = self.verbose
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2

        self.scores_ = list()
        coef_old_ = None

        # SVD is computed once; the iterations only rescale its spectrum.
        XT_y = np.dot(X.T, y)
        U, S, Vh = linalg.svd(X, full_matrices=False)
        eigen_vals_ = S ** 2

        # Convergence loop of the bayesian ridge regression
        for iter_ in range(self.n_iter):

            # Compute mu and sigma
            # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
            # coef_ = sigma_^-1 * XT * y
            if n_samples > n_features:
                coef_ = np.dot(Vh.T,
                               Vh / (eigen_vals_ +
                                     lambda_ / alpha_)[:, np.newaxis])
                coef_ = np.dot(coef_, XT_y)
                if self.compute_score:
                    logdet_sigma_ = - np.sum(
                        np.log(lambda_ + alpha_ * eigen_vals_))
            else:
                # Wide data: work in sample space instead of feature space.
                coef_ = np.dot(X.T, np.dot(
                    U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
                coef_ = np.dot(coef_, y)
                if self.compute_score:
                    logdet_sigma_ = lambda_ * np.ones(n_features)
                    logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
                    logdet_sigma_ = - np.sum(np.log(logdet_sigma_))

            # Update alpha and lambda (evidence-maximization updates)
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = (np.sum((alpha_ * eigen_vals_) /
                      (lambda_ + alpha_ * eigen_vals_)))
            lambda_ = ((gamma_ + 2 * lambda_1) /
                       (np.sum(coef_ ** 2) + 2 * lambda_2))
            alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
                      (rmse_ + 2 * alpha_2))

            # Compute the objective function
            if self.compute_score:
                s = lambda_1 * log(lambda_) - lambda_2 * lambda_
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (n_features * log(lambda_) +
                            n_samples * log(alpha_) -
                            alpha_ * rmse_ -
                            (lambda_ * np.sum(coef_ ** 2)) -
                            logdet_sigma_ -
                            n_samples * log(2 * np.pi))
                self.scores_.append(s)

            # Check for convergence
            if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Convergence after ", str(iter_), " iterations")
                break
            coef_old_ = np.copy(coef_)

        self.alpha_ = alpha_
        self.lambda_ = lambda_
        self.coef_ = coef_
        sigma_ = np.dot(Vh.T,
                        Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])
        self.sigma_ = (1. / alpha_) * sigma_

        self._set_intercept(X_offset_, y_offset_, X_scale_)
        return self

    def predict(self, X, return_std=False):
        """Predict using the linear model.

        In addition to the mean of the predictive distribution, also its
        standard deviation can be returned.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = (n_samples, n_features)
            Samples.

        return_std : boolean, optional
            Whether to return the standard deviation of posterior prediction.

        Returns
        -------
        y_mean : array, shape = (n_samples,)
            Mean of predictive distribution of query points.

        y_std : array, shape = (n_samples,)
            Standard deviation of predictive distribution of query points.
        """
        y_mean = self._decision_function(X)
        if return_std is False:
            return y_mean
        else:
            if self.normalize:
                X = (X - self.X_offset_) / self.X_scale_
            # Predictive variance = data term + noise term (1/alpha).
            sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
            y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
            return y_mean, y_std
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
    """Bayesian ARD regression.

    Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to be in Gaussian distributions.
    Also estimate the parameters lambda (precisions of the distributions of the
    weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedures (Evidence Maximization)

    Read more in the :ref:`User Guide <bayesian_regression>`.

    Parameters
    ----------
    n_iter : int, optional
        Maximum number of iterations. Default is 300

    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.

    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6.

    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter. Default is 1.e-6.

    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.

    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter. Default is 1.e-6.

    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False.

    threshold_lambda : float, optional
        threshold for removing (pruning) weights with high precision from
        the computation. Default is 1.e+4.

    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.

    copy_X : boolean, optional, default True.
        If True, X will be copied; else, it may be overwritten.

    verbose : boolean, optional, default False
        Verbose mode when fitting the model.

    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)

    alpha_ : float
       estimated precision of the noise.

    lambda_ : array, shape = (n_features)
       estimated precisions of the weights.

    sigma_ : array, shape = (n_features, n_features)
        estimated variance-covariance matrix of the weights

    scores_ : float
        if computed, value of the objective function (to be maximized)

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.ARDRegression()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
            copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
            n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
            verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])

    Notes
    --------
    See examples/linear_model/plot_ard.py for an example.

    References
    ----------
    D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
    competition, ASHRAE Transactions, 1994.

    R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
    http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
    Their beta is our self.alpha_
    Their alpha is our self.lambda_
    ARD is a little different than the slide: only dimensions/features for
    which self.lambda_ < self.threshold_lambda are kept and the rest are
    discarded.
    """

    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
                 copy_X=True, verbose=False):
        self.n_iter = n_iter
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.threshold_lambda = threshold_lambda
        self.copy_X = copy_X
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the ARDRegression model according to the given training data
        and parameters.

        Iterative procedure to maximize the evidence

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)

        n_samples, n_features = X.shape
        coef_ = np.zeros(n_features)

        X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)

        # Launch the convergence loop
        # keep_lambda marks the features that have not been pruned yet.
        keep_lambda = np.ones(n_features, dtype=bool)

        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        verbose = self.verbose

        # Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)
        lambda_ = np.ones(n_features)

        self.scores_ = list()
        coef_old_ = None

        # Iterative procedure of ARDRegression
        for iter_ in range(self.n_iter):
            # Compute mu and sigma (using Woodbury matrix identity)
            sigma_ = pinvh(np.eye(n_samples) / alpha_ +
                           np.dot(X[:, keep_lambda] *
                                  np.reshape(1. / lambda_[keep_lambda], [1, -1]),
                                  X[:, keep_lambda].T))
            sigma_ = np.dot(sigma_, X[:, keep_lambda] *
                            np.reshape(1. / lambda_[keep_lambda], [1, -1]))
            sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
                              X[:, keep_lambda].T, sigma_)
            sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
            coef_[keep_lambda] = alpha_ * np.dot(
                sigma_, np.dot(X[:, keep_lambda].T, y))

            # Update alpha and lambda
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
            lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
                                    ((coef_[keep_lambda]) ** 2 +
                                     2. * lambda_2))
            alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
                      (rmse_ + 2. * alpha_2))

            # Prune the weights with a precision over a threshold
            keep_lambda = lambda_ < self.threshold_lambda
            coef_[~keep_lambda] = 0

            # Compute the objective function
            if self.compute_score:
                s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
                            np.sum(np.log(lambda_)))
                s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
                self.scores_.append(s)

            # Check for convergence
            if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Converged after %s iterations" % iter_)
                break
            coef_old_ = np.copy(coef_)

        self.coef_ = coef_
        self.alpha_ = alpha_
        self.sigma_ = sigma_
        self.lambda_ = lambda_
        self._set_intercept(X_offset_, y_offset_, X_scale_)
        return self

    def predict(self, X, return_std=False):
        """Predict using the linear model.

        In addition to the mean of the predictive distribution, also its
        standard deviation can be returned.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = (n_samples, n_features)
            Samples.

        return_std : boolean, optional
            Whether to return the standard deviation of posterior prediction.

        Returns
        -------
        y_mean : array, shape = (n_samples,)
            Mean of predictive distribution of query points.

        y_std : array, shape = (n_samples,)
            Standard deviation of predictive distribution of query points.
        """
        y_mean = self._decision_function(X)
        if return_std is False:
            return y_mean
        else:
            if self.normalize:
                X = (X - self.X_offset_) / self.X_scale_
            # Only the kept (non-pruned) features contribute to the variance.
            X = X[:, self.lambda_ < self.threshold_lambda]
            sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
            y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
            return y_mean, y_std
| bsd-3-clause |
satiros12/MUIA | META/GS/META2.py | 1 | 13340 | import numpy as np
import matplotlib.pyplot as plt

# NOTE(review): "%matplotlib inline" is an IPython/Jupyter magic command,
# not Python syntax -- left bare it makes this module raise SyntaxError on
# import. Kept here as a comment; run it manually in a notebook session.
# %matplotlib inline

import SA
import time
import matplotlib.pylab as pylab
import itertools

# Default figure size for every plot produced by this module.
pylab.rcParams['figure.figsize'] = (16.0, 10.0)
# Default maximization
class SimulatedAnnealing:
    """State container for a simulated-annealing run.

    All attributes start as ``None`` (or an empty/zero value) and are meant
    to be filled in by the driver routine before/while the search runs.
    """

    def __init__(self):
        # Temperature schedule state.
        # BUG FIX: the original wrote `slef.F_current`, which raised
        # NameError as soon as the class was instantiated.
        self.T_initial, self.T_final, self.K_steps, self.T_current, \
            self.step, self.alpha, self.F_current, self.F_new = [None] * 8
        # Stopping-criterion bookkeeping.
        self.Number_steps, self.error_percentage, self.best_F, \
            self.Last_k_Fs, self.accepted_percentage = [None] * 5
        self.k, self.acepted_in_last_k_steps, self.cutoff, \
            self.number_cutoff_acepted_solutions = [None] * 4
        self.TU = None    # temperature-update strategy
        self.P = 0        # acceptance-probability bookkeeping
        self.END = []     # list of active end conditions
def linearTimmeUpdate(T_initial, T_final, K_steps, T_current):
    """Lundy & Mees cooling step: T <- T / (1 + beta * T).

    beta is chosen so the schedule travels from ``T_initial`` down to
    ``T_final`` in ``K_steps`` applications of this update.
    """
    beta = (T_initial - T_final) / (float(K_steps) * T_initial * T_final)
    return T_current / (1.0 + beta * T_current)
def geometriclTimmeUpdate(T_initial, step, alpha=0.95):
    """Geometric cooling schedule: T_k = alpha**k * T_initial."""
    decay = alpha ** step
    return decay * T_initial
def probabilityOfSelection(T_current, F_current, F_new):
    """Metropolis acceptance rule (minimization form).

    An improving or equal move (F_new <= F_current) is always accepted; a
    worsening move is accepted with probability
    exp(-(F_new - F_current) / T_current), drawing one uniform sample from
    numpy's global RNG.
    """
    if not F_new > F_current:
        return True
    acceptance = min([1.0, np.exp(-(F_new * 1.0 - F_current) / T_current)])
    return acceptance > np.random.rand()
def endCondiction_error_modification(Number_steps, error_percentage, best_F, Last_k_Fs):
    """Return True while the best of the last k objective values still
    deviates from the overall best by more than ``error_percentage``
    (relative deviation); ``Number_steps`` is unused but kept for the
    common end-condition signature."""
    relative_gap = np.abs(min(Last_k_Fs) - best_F) / np.abs(best_F)
    return relative_gap > error_percentage
def endCondiction_accepted_percentage(Number_steps, accpeted_percentage, acepted_in_last_k_steps):
    """Return True while the acceptance count of the last k steps exceeds
    ``accpeted_percentage * Number_steps`` (the search is still 'hot')."""
    threshold = accpeted_percentage * Number_steps
    return threshold < acepted_in_last_k_steps
def endCondiction_cutoffs(Number_steps, cutoff, number_aceoted_solutions):
    """Return True while the number of accepted solutions exceeds the
    cutoff fraction of the step budget."""
    budget = cutoff * Number_steps
    return budget < number_aceoted_solutions
def endCondiction_MaxSteps(Number_steps, step):
    """Return True once the step counter has passed the step budget."""
    return step > Number_steps
def fo(x):
    """Demo objective function: cos(x) / x (vectorized through numpy)."""
    return np.cos(x) / x
def SelectTemperaturesRange(Initial_percentage_aceptance, Final_percentage_aceptance, Test_times, Objective_function, Restrictions, Envirnoment, verbose=False):
    """Estimate initial/final annealing temperatures from random sampling.

    Samples ``Test_times`` random points and random perturbations within
    ``Envirnoment``, measures the mean objective degradation of worsening
    moves, and inverts the Metropolis rule so that degradations would be
    accepted with the requested initial/final probabilities.

    Returns (Initial_temperature, Final_temperature, MEAN_DIF). Uses
    numpy's global RNG, so results vary between calls unless seeded.

    NOTE(review): the sampling formula below produces points in
    [Restrictions[0], Restrictions[0] + Restrictions[1]) rather than
    [Restrictions[0], Restrictions[1]) -- presumably it should be
    rand() * (Restrictions[1] - Restrictions[0]) + Restrictions[0];
    confirm against the intended domain.
    """
    Initial_points = np.random.rand(Test_times) * Restrictions[1] + Restrictions[0]
    Perturbations = np.random.rand(Test_times) * Envirnoment*2 - Envirnoment
    New_points = Initial_points + Perturbations
    # Keep only perturbed points that remain inside the feasible interval.
    Correct_perturbed_1 = np.where(New_points >= Restrictions[0])[0]
    Correct_perturbed_2 = np.where(New_points <= Restrictions[1])[0]
    Correct_perturbed = np.intersect1d(Correct_perturbed_1,Correct_perturbed_2)
    #filter(lambda x : x >= Restrictions[0] and x <= Restrictions[1] ,
    #print New_points[Correct_perturbed], len(Correct_perturbed)
    # Indices where the perturbation made the objective worse (higher).
    Non_improvements = np.where(Objective_function(Initial_points[Correct_perturbed]) < Objective_function(New_points[Correct_perturbed]))[0]
    #print Objective_function(Initial_points[Non_improvements]) < Objective_function(New_points[Non_improvements])
    #print len(Non_improvements)
    MEAN_DIF = np.mean( np.abs(Objective_function(Initial_points[Correct_perturbed][Non_improvements]) - Objective_function(New_points[Correct_perturbed][Non_improvements])) )
    # Invert p = exp(-MEAN_DIF / T)  =>  T = -MEAN_DIF / log(p).
    Initial_temperature = -MEAN_DIF/ np.log(Initial_percentage_aceptance)
    Final_temperature = -MEAN_DIF/ np.log(Final_percentage_aceptance)
    if verbose:
        # Draw a second, independent sample to sanity-check the estimate.
        RES2 = SelectTemperaturesRange(Initial_percentage_aceptance, Final_percentage_aceptance, Test_times, Objective_function, Restrictions, Envirnoment, verbose=False)
        print "In ",Test_times," tests, with envirnoment : ",Envirnoment
        print "Mean difference of non improvement perturbations : ", MEAN_DIF
        print "Intial temperature for a ", Initial_percentage_aceptance, "% of aceptance : ",Initial_temperature
        print "Intial percentage optained for other Sample :", np.exp(-RES2[2] / Initial_temperature)
        print "Final temperature for a ", Final_percentage_aceptance, "% of aceptance : ",Final_temperature
        print "Final percentage optained for other Sample:", np.exp(-RES2[2] / Final_temperature)
    return Initial_temperature,Final_temperature, MEAN_DIF
#Simulated Annealing Continuous
def SIM_AN(
        MAX_ITERATIONS=10000,
        InitialPoint = np.random.rand()*30.0,
        EnvirnomentRange = 1,
        objectiveFunction = fo,
        restrictions=[10**-100,30],
        Intial_percentage =0.999,
        Final_percentage =0.3,
        InitialTemperature=100,
        FinalTemperature = 1000,
        USE_TEMPERATURE=False,
        alpha = 0.99,
        TEMPERATURE_EVOLUTION=0,
        debug=False,
        verbose=2,
        plotose=False,
        #if probabilityFunction == None : probabilityFunction=lambda a,b,T : probabilityOfSelection(T,b,a)
        accpeted_percentage=1.0,
        error_percentage=10000.0,
        k1= 500,
        k2=500,
        cutoff =1.0,
        UP = 0.1,
        LL = 0.01,
        REC = 0.01,
        DES = 0.001,
        Ec = 0.01,
        Metodo_de_aceptacion = 0
        # Acceptance method selector:
        # 0 : probability (classic Metropolis)
        # 1 : threshold accepting
        # 2 : great deluge
        # 3 : record-to-record travel
        # 4 : microcanonic annealing method
        ):
    """Simulated annealing over a 1-D box-constrained continuous domain.

    Returns ``(Best_Solution, Step, OTHER_OUTPUT)`` where ``Best_Solution``
    is a ``[x, f(x)]`` pair, ``Step`` is the number of iterations performed
    and ``OTHER_OUTPUT`` bundles the per-step traces (layout documented at
    the bottom of the function; it is the format ``plot_results`` expects).

    NOTE(review): the mutable default ``restrictions=[...]`` and the random
    default for ``InitialPoint`` (drawn once at import time, not per call)
    look unintended — confirm.
    """
    if not USE_TEMPERATURE :
        # NOTE(review): the calibration call hard-codes the global `fo` and a
        # fixed range [10**-4, 30] instead of using objectiveFunction /
        # restrictions — looks like a bug; confirm.
        InitialTemperature,FinalTemperature, Mean_RE = SelectTemperaturesRange(Intial_percentage,Final_percentage,100,fo,[10**-4,30],1, verbose=verbose)
    CurrentTempreature = InitialTemperature
    # Solutions are stored as mutable [x, f(x)] pairs.
    Current_solution = [InitialPoint , objectiveFunction(InitialPoint)]
    Best_Solution = [InitialPoint , objectiveFunction(InitialPoint)]
    if debug:
        OutOfRangeRandoms = 0
    # Per-step traces of best / accepted ("prefered") / tested candidates.
    BEST_Acumulative_values = [Best_Solution[-1]]
    Acumulative_values_prefered = [Current_solution[-1]]
    Acumulative_values_tested = [Current_solution[-1]]
    BEST_Acumulative_sol = [Best_Solution[0]]
    Acumulative_sol_prefered = [Current_solution[0]]
    Acumulative_sol_tested = [Current_solution[0]]
    InitialTime = time.time()
    Step = 0
    # Sliding windows driving the acceptance-rate (k1) and stagnation (k2)
    # termination conditions.
    k_buffer_acceptance=[]
    k_buffer_F=[]
    Total_aceptance_count = 0
    # Traces of the per-method control parameters: microcanonic bound (Ec),
    # deluge water level (LL), record value (REC) and temperature (T).
    Ecs,LLs,RECs,Ts = [],[],[],[]
    while True:
        Step += 1
        StepTime = time.time()
        Ecs += [Ec]
        LLs += [LL]
        RECs += [REC]
        Ts += [CurrentTempreature]
        while True: # resample until the perturbed point satisfies the restrictions
            RandomPoint = np.random.rand()*2.0 - 1.0
            New_solution = Current_solution[0] + RandomPoint*EnvirnomentRange
            #print New_solution
            if restrictions[0] == None or restrictions[0] <= New_solution:
                if restrictions[1] == None or restrictions[1] >= New_solution:
                    break
            if debug : OutOfRangeRandoms+=1
        New_value = objectiveFunction(New_solution)
        if debug: print "Sol ",Step,":",New_solution,New_value, " <-> ",Current_solution[0],Current_solution[1]
        #print New_value, New_solution, Current_solution
        Update_current_solution = False
        # At most one entry can be truthy: each test is guarded by its own
        # method id, so sum(RES_TP) acts as the accept/reject decision.
        RES_TP = [(Metodo_de_aceptacion == 0 and probabilityOfSelection(CurrentTempreature, Current_solution[1], New_value))
                  ,(Metodo_de_aceptacion == 1 and np.abs(New_value - Current_solution[1]) > CurrentTempreature)
                  ,(Metodo_de_aceptacion == 2 and (New_value) > LL)
                  ,(Metodo_de_aceptacion == 3 and (New_value) > (REC-DES))
                  ,(Metodo_de_aceptacion == 4 and np.abs(New_value - Current_solution[1]) < Ec)]
        #print RES_TP
        Update_current_solution = sum(RES_TP)
        #if Metodo_de_aceptacion == 0 : Update_current_solution = probabilityOfSelection(CurrentTempreature, Current_solution[1], New_value)
        #if Metodo_de_aceptacion == 1 : Update_current_solution = np.abs(New_value - Current_solution[1]) > CurrentTempreature
        #if Metodo_de_aceptacion == 2 : Update_current_solution = (New_value) > LL
        #if Metodo_de_aceptacion == 3 : Update_current_solution = (New_value) > (REC-DES)
        #if Metodo_de_aceptacion == 4 : Update_current_solution = np.abs(New_value - Current_solution[1]) < Ec
        #if Update_current_solution == 0 : print " +++ SI +++"
        #print Metodo_de_aceptacion,Update_current_solution
        #print probabilityOfSelection(CurrentTempreature, Current_solution[1], New_value),np.abs(New_value - Current_solution[1]) > CurrentTempreature,(New_value) > LL,(New_value) > (REC-DES),np.abs(New_value - Current_solution[1]) < Ec
        if New_value > REC:
            # Track the record (best-seen) value used by method 3.
            REC = New_value
        if Update_current_solution:
            # Shrink the microcanonic energy bound by the cost of this move
            # (must use the *old* Current_solution[1], hence the ordering).
            Ec = Ec - np.abs(New_value - Current_solution[1])
            Current_solution[0] = New_solution
            Current_solution[1] = New_value
            Total_aceptance_count += 1
            LL += UP  # raise the deluge water level on every acceptance
            if Best_Solution[1] > Current_solution[1]:
                Best_Solution[0] = Current_solution[0]
                Best_Solution[1] = Current_solution[1]
        #termination conditions
        if Step > k1 :
            # Window full: shift left and overwrite the last slot with the
            # newest acceptance flag (1 iff the candidate was accepted).
            k_buffer_acceptance[:-1] = k_buffer_acceptance[1:]
            k_buffer_acceptance[-1] = 1 if New_value == Current_solution[1] else 0
        else:
            k_buffer_acceptance.append(1 if New_value == Current_solution[1] else 0)
        if Step > k2 :
            k_buffer_F[:-1] = k_buffer_F[1:]
            k_buffer_F[-1] = New_value
        else:
            k_buffer_F.append(New_value)
        BEST_Acumulative_values += [Best_Solution[1]]
        Acumulative_values_prefered += [Current_solution[1]]
        Acumulative_values_tested += [New_value]
        BEST_Acumulative_sol += [Best_Solution[0]]
        Acumulative_sol_prefered += [Current_solution[0]]
        Acumulative_sol_tested += [New_solution]
        #print "Current Temp",CurrentTempreature
        if TEMPERATURE_EVOLUTION == 0:
            CurrentTempreature = linearTimmeUpdate(InitialTemperature, FinalTemperature, MAX_ITERATIONS, CurrentTempreature)
        if TEMPERATURE_EVOLUTION == 1:
            CurrentTempreature = geometriclTimmeUpdate(InitialTemperature, Step,alpha)
        if plotose :
            # NOTE(review): `Acumulative_values` is never initialised in this
            # function, so plotose=True raises NameError here — confirm which
            # trace list was intended.
            Acumulative_values.append(Best_Solution[-1])
        if verbose == 1:
            print "Step : ",Step," Duration:",(time.time() - StepTime) , "Temperature:",CurrentTempreature," Solution F: ",Current_solution[1]," Best solution F: ",Best_Solution[1]
        if endCondiction_MaxSteps(MAX_ITERATIONS, Step):
            if verbose : print "End by Max Steps"
            break
        if Step > k1 :
            if endCondiction_accepted_percentage(MAX_ITERATIONS, accpeted_percentage, sum(k_buffer_acceptance)):
                if verbose : print "End by Accepted Percentage",MAX_ITERATIONS
                break
        if Step > k2 :
            if endCondiction_error_modification(MAX_ITERATIONS, error_percentage, Best_Solution[1], k_buffer_F):
                if verbose : print "End by Error Modification"
                break
        if endCondiction_cutoffs(MAX_ITERATIONS, cutoff, Total_aceptance_count):
            if verbose : print "End by CUTOFFS"
            break
    if plotose :
        plt.plot(Acumulative_values)
        plt.title("Best" + str(Best_Solution[1]))
        plt.show()
    if verbose > 0:
        TotalTime = (time.time()-InitialTime)
        print "Total time:" ,TotalTime, " Time per step", TotalTime*1.0/MAX_ITERATIONS,"Steps:",Step, "Best solution : ",Best_Solution[1]
    # OTHER_OUTPUT layout (consumed by plot_results):
    # [0] [best xs, best fs], [1] [accepted xs, accepted fs],
    # [2] [tested xs, tested fs], then the four flat traces Ecs, LLs, RECs, Ts.
    OTHER_OUTPUT= []
    OTHER_OUTPUT += [[BEST_Acumulative_sol,BEST_Acumulative_values]]
    OTHER_OUTPUT += [[Acumulative_sol_prefered, Acumulative_values_prefered]]
    OTHER_OUTPUT += [[Acumulative_sol_tested, Acumulative_values_tested]]
    OTHER_OUTPUT += [Ecs,LLs,RECs,Ts]
    return Best_Solution,Step, OTHER_OUTPUT
def plot_results(ALGO_RESULT, SHOW_IMG=True, SAVE_FILE=False, File_name="GS"):
    """Plot the diagnostic traces produced by SIM_AN.

    Parameters
    ----------
    ALGO_RESULT : tuple
        The ``(Best_Solution, Step, OTHER_OUTPUT)`` triple returned by
        ``SIM_AN``; only ``OTHER_OUTPUT`` (index 2) is read here.
    SHOW_IMG : bool
        Display each figure interactively.
    SAVE_FILE : bool
        Save each figure as ``<File_name>_EV*.png``.
    File_name : str
        Prefix used when saving figures.

    Bug fix: the body previously read a global ``RES`` instead of the
    ``ALGO_RESULT`` argument, so the parameter was silently ignored.
    """
    traces = ALGO_RESULT[2]
    n_steps = len(traces[0][1])

    def _finish(title, ylabel, suffix):
        # Shared axis labelling / save / show / close tail for one figure.
        plt.xlim(0, n_steps)
        plt.title(title)
        plt.xlabel("Number of steps")
        plt.ylabel(ylabel)
        if SAVE_FILE:
            plt.savefig(File_name + suffix)
        if SHOW_IMG:
            plt.show()
        plt.close()

    # Objective-value traces: best (red), accepted (blue), tested (green).
    plt.plot(traces[0][1], "r")
    plt.plot(traces[1][1], "b")
    plt.plot(traces[2][1], "g")
    _finish("Evolution of : F(x) = cos(x)/x", "f", "_EVF.png")

    # Solution (x) traces, same colour coding.
    plt.plot(traces[0][0], "r")
    plt.plot(traces[1][0], "b")
    plt.plot(traces[2][0], "g")
    _finish("Evolution of : x", "x", "_EVX.png")

    # OTHER_OUTPUT ends with [Ecs, LLs, RECs, Ts] (see SIM_AN).
    plt.plot(traces[-1], "r")  # temperature schedule
    #plt.hlines(2.89255277615,0,Step)
    _finish("Evolution of : Temperature", "Temperature", "_EVT.png")

    plt.plot(traces[-2], "r")  # record values (record-to-record travel)
    _finish("Evolution of : Records", "Records", "_EVREC.png")

    plt.plot(traces[-3], "r")  # water level ("rain") of the great deluge method
    _finish("Evolution of : Rain", "Rain", "_EVLL.png")

    plt.plot(traces[-4], "r")  # energy bound Ec of the microcanonic method
    _finish("Evolution of : Ec", "Ec", "_EVEc.png")
sappo/simindex | tests/test_engine.py | 1 | 9171 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_simindex
----------------------------------
Tests for `simindex` module.
"""
import os
import pytest
from pprint import pprint
import tempfile
import pandas as pd
from .testprofiler import profile
from .testdata import restaurant_records
import simindex.helper as hp
from simindex import SimEngine, MDySimII, MDySimIII, MDyLSH
from simindex import DisjunctiveBlockingScheme, WeakLabels, SimLearner
# @profile(follow=[SimEngine.fit,
# WeakLabels.predict,
# WeakLabels.tfidf_similarity,
# DisjunctiveBlockingScheme.feature_vector,
# DisjunctiveBlockingScheme.transform])
# @profile(follow=[MDySimII.insert, MDySimII.query,
# MDySimIII.insert, MDySimIII.query])
# def test_engine(verbose):
# engine = SimEngine("ferbl-90k", indexer=MDySimIII, verbose=verbose)
# print("Fit")
# engine.fit_csv("../../master_thesis/datasets/febrl/ferbl-90k-10k-1_train.csv",
# ["rec_id", "given_name", "surname", "state", "suburb"])
# print("Build")
# engine.build_csv("../../master_thesis/datasets/febrl/ferbl-90k-10k-1_index.csv",
# ["rec_id", "given_name", "surname", "state", "suburb"])
# print(engine.indexer.frequency_distribution())
# print("Query")
# engine.query_csv("../../master_thesis/datasets/febrl/ferbl-90k-10k-1_train_query.csv",
# ["rec_id", "given_name", "surname", "state", "suburb"])
# print("Results")
# gold_csv = "../../master_thesis/datasets/febrl/ferbl-90k-10k-1_train_gold.csv"
# engine.read_ground_truth(gold_standard=gold_csv, gold_attributes=["id_1", "id_2"])
# print("Pair completeness:", engine.pair_completeness())
# print("Reduction ratio:", engine.reduction_ratio())
# @profile(follow=[DisjunctiveBlockingScheme.transform])
def test_engine_restaurant(verbose):
    """End-to-end SimEngine run on the restaurant dataset.

    Exercises the full fit / build / query cycle twice — once with a fresh
    engine and once with the state saved by the first run — and asserts both
    produce identical index sizes and blocking/metric results.

    NOTE(review): relies on CSV files under ../../master_thesis/datasets and
    on project modules (SimEngine, MDySimIII) — not runnable in isolation.
    """
    # Expected results
    # @Warning: Testing blocking scheme and similiarates doesn't work as the
    # result vary due to randomness
    blocking_scheme_expected = {(0, 'term_id'),
                                (2, 'tokens'),
                                (1, 'term_id'),
                                (0, 'tokens')}
    sim_strings_expected = ['SimDamerau', 'SimLevenshtein', 'SimDamerau',
                            'SimDamerau', 'SimRatio']
    # for indexer in [MDyLSH, MDySimII, MDySimIII]:
    for indexer in [MDySimIII]:
        print()
        print("--------------------------------------------------------------")
        print("Testing fresh engine (%s) on restaurant dataset:" % indexer.__name__)
        # Test with fresh engine
        engine = SimEngine("restaurant", indexer=indexer, verbose=verbose,
                           max_bk_conjunction=2,
                           max_positive_labels=111, max_negative_labels=333)
        gold_csv = "../../master_thesis/datasets/restaurant/restaurant_train_gold.csv"
        engine.read_ground_truth(gold_standard=gold_csv, gold_attributes=["id_1", "id_2"])
        # Clean any leftovers from failed tests, verbose=verbose
        restaurant_cleanup(engine)
        engine.fit_csv("../../master_thesis/datasets/restaurant/restaurant_train.csv",
                       ["id","name","addr","city","phone","type"])
        print("Labels P(%d), N(%d), fP(%d), fN(%d)" %
              (engine.nP, engine.nN, engine.nfP, engine.nfN))
        # sim_strings_actual = engine.similarities
        # assert sim_strings_actual == sim_strings_expected
        # Only (count, field) pairs are compared; full strings vary randomly.
        blocking_scheme_actual = {(x[1], x[2]) for x in engine.blocking_scheme_to_strings()}
        assert blocking_scheme_actual == blocking_scheme_expected
        # Build the index
        engine.build_csv("../../master_thesis/datasets/restaurant/restaurant_index.csv",
                         ["id","name","addr","city","phone","type"])
        fresh_nrecords = engine.indexer.nrecords
        assert fresh_nrecords == 431
        # Query the index
        engine.query_csv("../../master_thesis/datasets/restaurant/restaurant_train_query.csv",
                         ["id","name","addr","city","phone","type"])
        assert engine.indexer.nrecords == 576
        # Metrics
        fresh_pc = engine.pairs_completeness()
        fresh_pq = engine.pairs_quality()
        fresh_rr = engine.reduction_ratio()
        print("Pair completeness:", fresh_pc)
        print("Pair quality:", fresh_pq)
        print("Reduction ratio:", fresh_rr)
        print("Recall:", engine.recall())
        print("Precision:", engine.precision())
        print("F1 Score:", engine.f1_score())
        del engine
        print()
        print("--------------------------------------------------------------")
        print("Testing saved engine (%s) on restaurant dataset:" % indexer.__name__)
        # Test with saved engine: state persisted by the fresh run is reloaded.
        engine = SimEngine("restaurant", indexer=indexer, verbose=verbose,
                           max_positive_labels=111, max_negative_labels=333)
        gold_csv = "../../master_thesis/datasets/restaurant/restaurant_train_gold.csv"
        engine.read_ground_truth(gold_standard=gold_csv, gold_attributes=["id_1", "id_2"])
        engine.fit_csv("../../master_thesis/datasets/restaurant/restaurant_train.csv",
                       ["id","name","addr","city","phone","type"])
        # sim_strings_actual = engine.similarities
        # assert sim_strings_actual == sim_strings_expected
        blocking_scheme_actual = {(x[1], x[2]) for x in engine.blocking_scheme_to_strings()}
        assert blocking_scheme_actual == blocking_scheme_expected
        # Build the index
        engine.build_csv("../../master_thesis/datasets/restaurant/restaurant_index.csv",
                         ["id","name","addr","city","phone","type"])
        saved_nrecords = engine.indexer.nrecords
        assert fresh_nrecords == saved_nrecords
        # Query the index
        gold_csv = "../../master_thesis/datasets/restaurant/restaurant_train_gold.csv"
        engine.read_ground_truth(gold_standard=gold_csv, gold_attributes=["id_1", "id_2"])
        engine.query_csv("../../master_thesis/datasets/restaurant/restaurant_train_query.csv",
                         ["id","name","addr","city","phone","type"])
        # Metrics: saved engine must reproduce the fresh engine exactly.
        saved_pc = engine.pairs_completeness()
        saved_pq = engine.pairs_quality()
        saved_rr = engine.reduction_ratio()
        assert fresh_pc == saved_pc
        assert fresh_pq == saved_pq
        assert fresh_rr == saved_rr
        print("Pair completeness:", saved_pc)
        print("Pair quality:", saved_pq)
        print("Reduction ratio:", saved_rr)
        print("Recall:", engine.recall())
        print("Precision:", engine.precision())
        print("F1 Score:", engine.f1_score())
        # Cleanup
        restaurant_cleanup(engine)
def restaurant_cleanup(engine):
    """Remove every on-disk artifact a SimEngine test run may have created.

    Replaces 16 copy-pasted exists/remove pairs with one data-driven loop.
    Missing files are silently skipped, exactly as before.

    Parameters
    ----------
    engine : SimEngine
        Engine whose store paths (``*store_name`` attributes) and name-derived
        auxiliary files (``.<name>_<suffix>``) are deleted.
    """
    # Store files are referenced directly via attributes on the engine.
    paths = [
        engine.configstore_name,
        engine.traindatastore_name,
        engine.indexdatastore_name,
        engine.querydatastore_name,
    ]
    # Auxiliary index/label/model files follow the ".<name>_<suffix>" scheme.
    suffixes = [
        "dnfbs.inc",
        "lsh.idx",
        "nrecords.idx",
        "RI.idx",
        "FBI.idx",
        "SI.idx",
        "ncomplete.lbl",
        "weights.lbl",
        "maxp.lbl",
        "maxn.lbl",
        "model.cls",
    ]
    paths.extend(".%s_%s" % (engine.name, suffix) for suffix in suffixes)
    for path in paths:
        if os.path.exists(path):
            os.remove(path)
def test_engine_preprocessing(verbose):
    """SimEngine.pre_process_data should normalise text and store the records.

    The expected values suggest lower-casing plus removal of common words
    ("and", "a", "for", ...) — presumably the engine's stop-word filtering;
    confirm against SimEngine.pre_process_data.
    """
    # Purely in-memory HDF store: nothing is written to disk.
    store = pd.HDFStore("preproc.th5",
                        driver="H5FD_CORE",
                        driver_core_backing_store=0)
    engine = SimEngine("preproc", indexer=None, verbose=verbose)

    with tempfile.NamedTemporaryFile() as csv_file:
        csv_file.write(b'id, attr1, attr2\n')
        csv_file.write(b'1, This is aNd a test, And for another the test in\n')
        csv_file.flush()
        csv_file.seek(0)
        engine.pre_process_data(store, csv_file.name, None)

    cleaned_attr1 = 'this is test'
    cleaned_attr2 = 'another test'
    for record in hp.hdf_record_attributes(store, 'preproc'):
        assert record[0] == cleaned_attr1
        assert record[1] == cleaned_attr2
| mpl-2.0 |
toobaz/pandas | pandas/tests/sparse/test_combine_concat.py | 1 | 18880 | import itertools
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
import pandas.util.testing as tm
class TestSparseArrayConcat:
    """Concatenation behaviour of SparseArray via ``_concat_same_type``."""

    @pytest.mark.parametrize("kind", ["integer", "block"])
    def test_basic(self, kind):
        left = pd.SparseArray([1, 0, 0, 2], kind=kind)
        right = pd.SparseArray([1, 0, 2, 2], kind=kind)

        result = pd.SparseArray._concat_same_type([left, right])
        # Nothing is asserted about the sparse index itself, since sparse
        # blocks are not merged across the concatenated arrays.
        expected = np.array([1, 2, 1, 2, 2], dtype="int64")
        tm.assert_numpy_array_equal(result.sp_values, expected)
        assert result.kind == kind

    @pytest.mark.parametrize("kind", ["integer", "block"])
    def test_uses_first_kind(self, kind):
        # When kinds differ, the result takes the first array's kind.
        other_kind = "integer" if kind == "block" else "block"
        left = pd.SparseArray([1, 0, 0, 2], kind=kind)
        right = pd.SparseArray([1, 0, 2, 2], kind=other_kind)

        result = pd.SparseArray._concat_same_type([left, right])
        expected = np.array([1, 2, 1, 2, 2], dtype="int64")
        tm.assert_numpy_array_equal(result.sp_values, expected)
        assert result.kind == kind
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestSparseSeriesConcat:
    """pd.concat behaviour for the (deprecated) SparseSeries container."""

    @pytest.mark.parametrize("kind", ["integer", "block"])
    def test_concat(self, kind):
        # Same kind on both inputs: the result keeps kind and fill_value.
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        sparse1 = pd.SparseSeries(val1, name="x", kind=kind)
        sparse2 = pd.SparseSeries(val2, name="y", kind=kind)
        res = pd.concat([sparse1, sparse2])
        exp = pd.concat([pd.Series(val1), pd.Series(val2)])
        exp = pd.SparseSeries(exp, kind=kind)
        tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
        # An explicit fill_value=0 must survive concatenation as well.
        sparse1 = pd.SparseSeries(val1, fill_value=0, name="x", kind=kind)
        sparse2 = pd.SparseSeries(val2, fill_value=0, name="y", kind=kind)
        res = pd.concat([sparse1, sparse2])
        exp = pd.concat([pd.Series(val1), pd.Series(val2)])
        exp = pd.SparseSeries(exp, fill_value=0, kind=kind)
        tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)

    def test_concat_axis1(self):
        # Column-wise concat of two sparse series yields a SparseDataFrame.
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        sparse1 = pd.SparseSeries(val1, name="x")
        sparse2 = pd.SparseSeries(val2, name="y")
        res = pd.concat([sparse1, sparse2], axis=1)
        exp = pd.concat([pd.Series(val1, name="x"), pd.Series(val2, name="y")], axis=1)
        exp = pd.SparseDataFrame(exp)
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)

    def test_concat_different_fill(self):
        # Mixed fill values: the first input's fill_value wins and a
        # PerformanceWarning is emitted for the densify/re-sparsify step.
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        for kind in ["integer", "block"]:
            sparse1 = pd.SparseSeries(val1, name="x", kind=kind)
            sparse2 = pd.SparseSeries(val2, name="y", kind=kind, fill_value=0)
            with tm.assert_produces_warning(
                PerformanceWarning, raise_on_extra_warnings=False
            ):
                res = pd.concat([sparse1, sparse2])
            exp = pd.concat([pd.Series(val1), pd.Series(val2)])
            exp = pd.SparseSeries(exp, kind=kind)
            tm.assert_sp_series_equal(res, exp)
            with tm.assert_produces_warning(
                PerformanceWarning, raise_on_extra_warnings=False
            ):
                res = pd.concat([sparse2, sparse1])
            exp = pd.concat([pd.Series(val2), pd.Series(val1)])
            exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
            tm.assert_sp_series_equal(res, exp)

    def test_concat_axis1_different_fill(self):
        # axis=1 with differing fill values: only the dense result is compared.
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        sparse1 = pd.SparseSeries(val1, name="x")
        sparse2 = pd.SparseSeries(val2, name="y", fill_value=0)
        res = pd.concat([sparse1, sparse2], axis=1)
        exp = pd.concat([pd.Series(val1, name="x"), pd.Series(val2, name="y")], axis=1)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)

    def test_concat_different_kind(self):
        # Mixed index kinds: the first input's kind wins.
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        sparse1 = pd.SparseSeries(val1, name="x", kind="integer")
        sparse2 = pd.SparseSeries(val2, name="y", kind="block")
        res = pd.concat([sparse1, sparse2])
        exp = pd.concat([pd.Series(val1), pd.Series(val2)])
        exp = pd.SparseSeries(exp, kind=sparse1.kind)
        tm.assert_sp_series_equal(res, exp)
        res = pd.concat([sparse2, sparse1])
        exp = pd.concat([pd.Series(val2), pd.Series(val1)])
        exp = pd.SparseSeries(exp, kind=sparse2.kind)
        tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)

    @pytest.mark.parametrize("kind", ["integer", "block"])
    def test_concat_sparse_dense(self, kind):
        # use first input's fill_value
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        sparse = pd.SparseSeries(val1, name="x", kind=kind)
        dense = pd.Series(val2, name="y")
        res = pd.concat([sparse, dense])
        exp = pd.SparseSeries(pd.concat([pd.Series(val1), dense]), kind=kind)
        tm.assert_sp_series_equal(res, exp)
        res = pd.concat([dense, sparse, dense])
        exp = pd.concat([dense, pd.Series(val1), dense])
        # XXX: changed from SparseSeries to Series[sparse]
        exp = pd.Series(pd.SparseArray(exp, kind=kind), index=exp.index, name=exp.name)
        tm.assert_series_equal(res, exp)
        sparse = pd.SparseSeries(val1, name="x", kind=kind, fill_value=0)
        dense = pd.Series(val2, name="y")
        res = pd.concat([sparse, dense])
        # XXX: changed from SparseSeries to Series[sparse]
        exp = pd.concat([pd.Series(val1), dense])
        exp = pd.Series(
            pd.SparseArray(exp, kind=kind, fill_value=0), index=exp.index, name=exp.name
        )
        tm.assert_series_equal(res, exp)
        res = pd.concat([dense, sparse, dense])
        exp = pd.concat([dense, pd.Series(val1), dense])
        # XXX: changed from SparseSeries to Series[sparse]
        exp = pd.Series(
            pd.SparseArray(exp, kind=kind, fill_value=0), index=exp.index, name=exp.name
        )
        tm.assert_series_equal(res, exp)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
class TestSparseDataFrameConcat:
    """pd.concat behaviour for the (deprecated) SparseDataFrame container."""

    def setup_method(self, method):
        # Fixtures: dense1/dense2 share columns A-D (with all-zero, all-NaN
        # and mixed columns); dense3 uses a disjoint column set E-H.
        self.dense1 = pd.DataFrame(
            {
                "A": [0.0, 1.0, 2.0, np.nan],
                "B": [0.0, 0.0, 0.0, 0.0],
                "C": [np.nan, np.nan, np.nan, np.nan],
                "D": [1.0, 2.0, 3.0, 4.0],
            }
        )
        self.dense2 = pd.DataFrame(
            {
                "A": [5.0, 6.0, 7.0, 8.0],
                "B": [np.nan, 0.0, 7.0, 8.0],
                "C": [5.0, 6.0, np.nan, np.nan],
                "D": [np.nan, np.nan, np.nan, np.nan],
            }
        )
        self.dense3 = pd.DataFrame(
            {
                "E": [5.0, 6.0, 7.0, 8.0],
                "F": [np.nan, 0.0, 7.0, 8.0],
                "G": [5.0, 6.0, np.nan, np.nan],
                "H": [np.nan, np.nan, np.nan, np.nan],
            }
        )

    def test_concat(self):
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse2 = self.dense2.to_sparse()
        res = pd.concat([sparse, sparse])
        exp = pd.concat([self.dense1, self.dense1]).to_sparse()
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse2, sparse2])
        exp = pd.concat([self.dense2, self.dense2]).to_sparse()
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse, sparse2])
        exp = pd.concat([self.dense1, self.dense2]).to_sparse()
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse2, sparse])
        exp = pd.concat([self.dense2, self.dense1]).to_sparse()
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        # fill_value = 0
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse2 = self.dense2.to_sparse(fill_value=0)
        res = pd.concat([sparse, sparse])
        exp = pd.concat([self.dense1, self.dense1]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse2, sparse2])
        exp = pd.concat([self.dense2, self.dense2]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse, sparse2])
        exp = pd.concat([self.dense1, self.dense2]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse2, sparse])
        exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)

    def test_concat_different_fill_value(self):
        # 1st fill_value will be used
        sparse = self.dense1.to_sparse()
        sparse2 = self.dense2.to_sparse(fill_value=0)
        with tm.assert_produces_warning(
            PerformanceWarning, raise_on_extra_warnings=False
        ):
            res = pd.concat([sparse, sparse2])
        exp = pd.concat([self.dense1, self.dense2]).to_sparse()
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        with tm.assert_produces_warning(
            PerformanceWarning, raise_on_extra_warnings=False
        ):
            res = pd.concat([sparse2, sparse])
        exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)

    def test_concat_different_columns_sort_warns(self):
        # Concatenating frames with different columns and no sort= argument
        # triggers the sorting FutureWarning (plus the sparse deprecation one).
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse()
        # stacklevel is wrong since we have two FutureWarnings,
        # one for depr, one for sorting.
        with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False, raise_on_extra_warnings=False
        ):
            res = pd.concat([sparse, sparse3])
        with tm.assert_produces_warning(
            FutureWarning, check_stacklevel=False, raise_on_extra_warnings=False
        ):
            exp = pd.concat([self.dense1, self.dense3])
        exp = exp.to_sparse()
        tm.assert_sp_frame_equal(res, exp, check_kind=False)

    def test_concat_different_columns(self):
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse()
        res = pd.concat([sparse, sparse3], sort=True)
        exp = pd.concat([self.dense1, self.dense3], sort=True).to_sparse()
        tm.assert_sp_frame_equal(res, exp, check_kind=False)
        res = pd.concat([sparse3, sparse], sort=True)
        exp = pd.concat([self.dense3, self.dense1], sort=True).to_sparse()
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, check_kind=False)

    def test_concat_bug(self):
        # Regression test: concat with an empty sparse column must preserve
        # the original column's SparseDtype.
        from pandas.core.sparse.api import SparseDtype

        x = pd.SparseDataFrame({"A": pd.SparseArray([np.nan, np.nan], fill_value=0)})
        y = pd.SparseDataFrame({"B": []})
        res = pd.concat([x, y], sort=False)[["A"]]
        exp = pd.DataFrame(
            {"A": pd.SparseArray([np.nan, np.nan], dtype=SparseDtype(float, 0))}
        )
        tm.assert_frame_equal(res, exp)

    def test_concat_different_columns_buggy(self):
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse3 = self.dense3.to_sparse(fill_value=0)
        res = pd.concat([sparse, sparse3], sort=True)
        exp = pd.concat([self.dense1, self.dense3], sort=True).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(
            res, exp, check_kind=False, consolidate_block_indices=True
        )
        res = pd.concat([sparse3, sparse], sort=True)
        exp = pd.concat([self.dense3, self.dense1], sort=True).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(
            res, exp, check_kind=False, consolidate_block_indices=True
        )
        # different fill values
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse(fill_value=0)
        # each columns keeps its fill_value, thus compare in dense
        res = pd.concat([sparse, sparse3], sort=True)
        exp = pd.concat([self.dense1, self.dense3], sort=True)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
        res = pd.concat([sparse3, sparse], sort=True)
        exp = pd.concat([self.dense3, self.dense1], sort=True)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)

    def test_concat_series(self):
        # Concatenating a SparseDataFrame with a single sparse column/series.
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse2 = self.dense2.to_sparse()
        for col in ["A", "D"]:
            res = pd.concat([sparse, sparse2[col]])
            exp = pd.concat([self.dense1, self.dense2[col]]).to_sparse()
            tm.assert_sp_frame_equal(res, exp, check_kind=False)
            res = pd.concat([sparse2[col], sparse])
            exp = pd.concat([self.dense2[col], self.dense1]).to_sparse()
            tm.assert_sp_frame_equal(res, exp, check_kind=False)
        # fill_value = 0
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse2 = self.dense2.to_sparse(fill_value=0)
        for col in ["C", "D"]:
            res = pd.concat([sparse, sparse2[col]])
            exp = pd.concat([self.dense1, self.dense2[col]]).to_sparse(fill_value=0)
            exp._default_fill_value = np.nan
            tm.assert_sp_frame_equal(
                res, exp, check_kind=False, consolidate_block_indices=True
            )
            res = pd.concat([sparse2[col], sparse])
            exp = pd.concat([self.dense2[col], self.dense1]).to_sparse(fill_value=0)
            exp["C"] = res["C"]
            exp._default_fill_value = np.nan
            tm.assert_sp_frame_equal(
                res, exp, consolidate_block_indices=True, check_kind=False
            )

    def test_concat_axis1(self):
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse()
        res = pd.concat([sparse, sparse3], axis=1)
        exp = pd.concat([self.dense1, self.dense3], axis=1).to_sparse()
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse3, sparse], axis=1)
        exp = pd.concat([self.dense3, self.dense1], axis=1).to_sparse()
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        # fill_value = 0
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse3 = self.dense3.to_sparse(fill_value=0)
        res = pd.concat([sparse, sparse3], axis=1)
        exp = pd.concat([self.dense1, self.dense3], axis=1).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse3, sparse], axis=1)
        exp = pd.concat([self.dense3, self.dense1], axis=1).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        # different fill values
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse(fill_value=0)
        # each columns keeps its fill_value, thus compare in dense
        res = pd.concat([sparse, sparse3], axis=1)
        exp = pd.concat([self.dense1, self.dense3], axis=1)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
        res = pd.concat([sparse3, sparse], axis=1)
        exp = pd.concat([self.dense3, self.dense1], axis=1)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)

    @pytest.mark.parametrize(
        "fill_value,sparse_idx,dense_idx",
        itertools.product([None, 0, 1, np.nan], [0, 1], [1, 0]),
    )
    def test_concat_sparse_dense_rows(self, fill_value, sparse_idx, dense_idx):
        frames = [self.dense1, self.dense2]
        sparse_frame = [
            frames[dense_idx],
            frames[sparse_idx].to_sparse(fill_value=fill_value),
        ]
        dense_frame = [frames[dense_idx], frames[sparse_idx]]
        # This will try both directions sparse + dense and dense + sparse
        for _ in range(2):
            res = pd.concat(sparse_frame)
            exp = pd.concat(dense_frame)
            assert isinstance(res, pd.SparseDataFrame)
            tm.assert_frame_equal(res.to_dense(), exp)
            sparse_frame = sparse_frame[::-1]
            dense_frame = dense_frame[::-1]

    @pytest.mark.parametrize(
        "fill_value,sparse_idx,dense_idx",
        itertools.product([None, 0, 1, np.nan], [0, 1], [1, 0]),
    )
    @pytest.mark.xfail(reason="The iloc fails and I can't make expected", strict=False)
    def test_concat_sparse_dense_cols(self, fill_value, sparse_idx, dense_idx):
        # See GH16874, GH18914 and #18686 for why this should be a DataFrame
        from pandas.core.dtypes.common import is_sparse

        frames = [self.dense1, self.dense3]
        sparse_frame = [
            frames[dense_idx],
            frames[sparse_idx].to_sparse(fill_value=fill_value),
        ]
        dense_frame = [frames[dense_idx], frames[sparse_idx]]
        # This will try both directions sparse + dense and dense + sparse
        for _ in range(2):
            res = pd.concat(sparse_frame, axis=1)
            exp = pd.concat(dense_frame, axis=1)
            cols = [i for (i, x) in enumerate(res.dtypes) if is_sparse(x)]
            for col in cols:
                exp.iloc[:, col] = exp.iloc[:, col].astype("Sparse")
            for column in frames[dense_idx].columns:
                if dense_idx == sparse_idx:
                    tm.assert_frame_equal(res[column], exp[column])
                else:
                    tm.assert_series_equal(res[column], exp[column])
            tm.assert_frame_equal(res, exp)
            sparse_frame = sparse_frame[::-1]
            dense_frame = dense_frame[::-1]
| bsd-3-clause |
ShapeNet/JointEmbedding | src/image_embedding_testing/prepare_testing.py | 1 | 1733 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Stitch the pool5-extraction network and the trained image-embedding head
# into a single caffemodel for testing (Python 2 script).
import os
import sys
import shutil
import argparse
import fileinput
# Non-interactive backend so matplotlib (pulled in transitively) works headless:
# https://github.com/BVLC/caffe/issues/861#issuecomment-70124809
import matplotlib
matplotlib.use('Agg')
# Make the parent directory importable for project-wide settings/helpers.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(BASE_DIR))
from global_variables import *
from utilities_caffe import stack_caffe_models
parser = argparse.ArgumentParser(description="Stitch pool5 extraction and image embedding caffemodels together.")
parser.add_argument('--iter_num', '-n', help='Use image embedding model trained after iter_num iterations', type=int, default=20000)
args = parser.parse_args()
# Instantiate the prototxt template, substituting the embedding dimension.
image_embedding_testing_in = os.path.join(BASE_DIR, 'image_embedding_'+g_network_architecture_name+'.prototxt.in')
print 'Preparing %s...'%(g_image_embedding_testing_prototxt)
shutil.copy(image_embedding_testing_in, g_image_embedding_testing_prototxt)
# fileinput with inplace=True redirects stdout back into the edited file.
for line in fileinput.input(g_image_embedding_testing_prototxt, inplace=True):
    line = line.replace('embedding_space_dim', str(g_shape_embedding_space_dimension))
    sys.stdout.write(line)
# Stack the fine-tuned base model and the trained embedding head into one
# caffemodel placed in the testing folder.
image_embedding_caffemodel = os.path.join(g_image_embedding_training_folder, 'snapshots%s_iter_%d.caffemodel'%(g_shapenet_synset_set_handle, args.iter_num))
image_embedding_caffemodel_stacked = os.path.join(g_image_embedding_testing_folder, 'snapshots%s_iter_%d.caffemodel'%(g_shapenet_synset_set_handle, args.iter_num))
stack_caffe_models(prototxt=g_image_embedding_testing_prototxt,
                   base_model=g_fine_tune_caffemodel,
                   top_model=image_embedding_caffemodel,
                   stacked_model=image_embedding_caffemodel_stacked,
                   caffe_path=g_caffe_install_path)
bmazin/SDR | Projects/BestBeammap/palDiff.py | 1 | 1891 | #!/bin/python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
def getList(file):
    """Read a whitespace-delimited beammap position file.

    Returns a tuple ``(data, names)`` where ``data`` is an (N, 3) array
    built from record columns f0, f2 and f3, and ``names`` is the array
    of column-f4 identifier strings.
    """
    records = np.recfromtxt(file)
    data = np.array([records['f0'], records['f2'], records['f3']]).T
    return data, records['f4']
# Load the left- and right-feedline resonator lists (freq, x, y + names).
left,leftNames = getList('freq_atten_x_y_palleft.txt')
#leftX,leftXNames = getList('good_x_left.txt')
#leftY,leftYNames = getList('good_y_left.txt')
#left2,left2Names = getList('freq_atten_x_y_left1.txt')
right,rightNames = getList('freq_atten_x_y_palright.txt')
#rightX,rightXNames = getList('good_x_right.txt')
#rightY,rightYNames = getList('good_y_right.txt')
#right2,right2Names = getList('freq_atten_x_y_right2.txt')
# (disabled) manual offsets previously used to align left with right
#move left to match right
#left[:,1]+=.4
#left[:,2]-=.2
#leftX[:,2]=-2
#leftY[:,1]=-2
#rightX[:,2]=-2
#rightY[:,1]=-2
#leftX[:,1]+=.4
#leftY[:,2]-=.2
#right2[:,1]+=2
#right2[:,2]-=1
# Draw a green segment between entries matched across the two lists:
# frequencies within 250e-6 (presumably GHz, i.e. 250 kHz -- confirm) and
# the same board group (3rd character of the name, integer-divided by 4).
for iR,r in enumerate(right):
    for iL,l in enumerate(left):
        if abs(r[0]-l[0]) < 250e-6 and int(rightNames[iR][2])/4 == int(leftNames[iL][2])/4:
            plt.plot([r[1],l[1]],[r[2],l[2]],'g-')
#cm = matplotlib.cm.get_cmap('jet')
# Common color scale bounds (only used by the commented-out scatters).
vmax = max([np.max(left[:,0]),np.max(right[:,0])])
vmin = min([np.min(left[:,0]),np.min(right[:,0])])
plt.scatter(left[:,1],left[:,2],marker='o',alpha=0.5,s=100,label='left',color='k')
#plt.scatter(leftX[:,1],leftX[:,2],c=leftX[:,0],cmap=cm,vmax=vmax,vmin=vmin,marker='<',alpha=0.5,s=100,label='leftX')
#plt.scatter(leftY[:,1],leftY[:,2],c=leftY[:,0],cmap=cm,vmax=vmax,vmin=vmin,marker='<',alpha=0.5,s=100,label='leftX')
#plt.scatter(rightY[:,1],rightY[:,2],c=rightY[:,0],cmap=cm,vmax=vmax,vmin=vmin,alpha=0.5,s=100,label='rightY',marker='>')
#plt.scatter(rightX[:,1],rightX[:,2],c=rightX[:,0],cmap=cm,vmax=vmax,vmin=vmin,alpha=0.5,s=100,label='rightX',marker='>')
plt.scatter(right[:,1],right[:,2],alpha=0.5,s=100,label='right',marker='>',color='r')
plt.legend()
plt.show()
| gpl-2.0 |
mwiebe/numpy | numpy/lib/npyio.py | 35 | 71412 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
    """
    BagObj(obj)

    Present item access (``obj[name]``) on a wrapped object as attribute
    access (``bag.name``).

    A failed item lookup (KeyError) is converted to AttributeError so the
    wrapper plays nicely with ``hasattr``, ``getattr`` and introspection.

    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.

    Examples
    --------
    >>> from numpy.lib.npyio import BagObj as BO
    >>> class BagDemo(object):
    ...     def __getitem__(self, key):
    ...         return "Doesn't matter what you want, you're gonna get this"
    ...
    >>> bagobj = BO(BagDemo())
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"

    """

    def __init__(self, obj):
        # A weak reference keeps the NpzFile <-> BagObj cycle collectable
        # by plain reference counting.
        self._obj = weakref.proxy(obj)

    def __getattribute__(self, key):
        wrapped = object.__getattribute__(self, '_obj')
        try:
            return wrapped[key]
        except KeyError:
            raise AttributeError(key)

    def __dir__(self):
        """
        List the wrapped object's keys.

        Enables dir(bagobj) and tab-completion in an interpreter or IPython.
        """
        return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
    """Open a ``zipfile.ZipFile`` with ZIP64 extensions forced on.

    ZIP64 support is required for ``.npz`` archives whose members exceed
    the classic 2 GiB zip limits.  All other arguments are forwarded to
    ``zipfile.ZipFile`` unchanged.
    """
    import zipfile
    zip64_kwargs = dict(kwargs, allowZip64=True)
    return zipfile.ZipFile(*args, **zip64_kwargs)
class NpzFile(object):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension,
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ``.npy`` extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute can be performed as an alternative
        to getitem access on the `NpzFile` instance itself.
    allow_pickle : bool, optional
        Allow loading pickled data. Default: True
    pickle_kwargs : dict, optional
        Additional keyword arguments to pass on to pickle.load.
        These are only useful when loading object arrays saved on
        Python 2 when using Python 3.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)
    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.io.NpzFile)
    True
    >>> npz.files
    ['y', 'x']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """

    def __init__(self, fid, own_fid=False, allow_pickle=True,
                 pickle_kwargs=None):
        # Import is postponed to here since zipfile depends on gzip, an
        # optional component of the so-called standard library.
        _zip = zipfile_factory(fid)
        self._files = _zip.namelist()
        self.files = []
        self.allow_pickle = allow_pickle
        self.pickle_kwargs = pickle_kwargs
        # Strip the '.npy' extension so keys match the names given to
        # savez(); non-.npy members keep their full name.
        for x in self._files:
            if x.endswith('.npy'):
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        self.f = BagObj(self)
        # Only take ownership of (and later close) fid when asked to.
        if own_fid:
            self.fid = fid
        else:
            self.fid = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """
        Close the file.

        """
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None
        self.f = None  # break reference cycle

    def __del__(self):
        self.close()

    def __getitem__(self, key):
        # FIXME: This seems like it will copy strings around
        #   more than is strictly necessary.  The zipfile
        #   will read the string and then
        #   the format.read_array will copy the string
        #   to another place in memory.
        #   It would be better if the zipfile could read
        #   (or at least uncompress) the data
        #   directly into the array memory.
        member = 0
        if key in self._files:
            member = 1
        elif key in self.files:
            member = 1
            key += '.npy'
        if member:
            # Peek at the magic prefix to decide between a .npy array
            # member and a plain (raw bytes) member.
            bytes = self.zip.open(key)
            magic = bytes.read(len(format.MAGIC_PREFIX))
            bytes.close()
            if magic == format.MAGIC_PREFIX:
                # Reopen: read_array needs the stream from the start.
                bytes = self.zip.open(key)
                return format.read_array(bytes,
                                         allow_pickle=self.allow_pickle,
                                         pickle_kwargs=self.pickle_kwargs)
            else:
                return self.zip.read(key)
        else:
            raise KeyError("%s is not a file in the archive" % key)

    def __iter__(self):
        return iter(self.files)

    def items(self):
        """
        Return a list of tuples, with each tuple (filename, array in file).

        """
        return [(f, self[f]) for f in self.files]

    def iteritems(self):
        """Generator that returns tuples (filename, array in file)."""
        for f in self.files:
            yield (f, self[f])

    def keys(self):
        """Return files in the archive with a ``.npy`` extension."""
        return self.files

    def iterkeys(self):
        """Return an iterator over the files in the archive."""
        return self.__iter__()

    def __contains__(self, key):
        return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
         encoding='ASCII'):
    """
    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.

    Parameters
    ----------
    file : file-like object or string
        The file to read. File-like objects must support the
        ``seek()`` and ``read()`` methods. Pickled files require that the
        file-like object support the ``readline()`` method as well.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode (see
        `numpy.memmap` for a detailed description of the modes).  A
        memory-mapped array is kept on disk. However, it can be accessed
        and sliced like any ndarray.  Memory mapping is especially useful
        for accessing small fragments of large files without reading the
        entire file into memory.
    allow_pickle : bool, optional
        Allow loading pickled object arrays stored in npy files. Reasons for
        disallowing pickles include security, as loading pickled data can
        execute arbitrary code. If pickles are disallowed, loading object
        arrays will fail.
        Default: True
    fix_imports : bool, optional
        Only useful when loading Python 2 generated pickled files on Python 3,
        which includes npy/npz files containing object arrays. If `fix_imports`
        is True, pickle will try to map the old Python 2 names to the new names
        used in Python 3.
    encoding : str, optional
        What encoding to use when reading Python 2 strings. Only useful when
        loading Python 2 generated pickled files on Python 3, which includes
        npy/npz files containing object arrays. Values other than 'latin1',
        'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
        data. Default: 'ASCII'

    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file. For ``.npz`` files, the returned instance
        of NpzFile class must be closed to avoid leaking file descriptors.

    Raises
    ------
    IOError
        If the input file does not exist or cannot be read.
    ValueError
        The file contains an object array, but allow_pickle=False given.

    See Also
    --------
    save, savez, savez_compressed, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.

    Notes
    -----
    - If the file contains pickle data, then whatever object is stored
      in the pickle is returned.
    - If the file is a ``.npy`` file, then a single array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.
    - If the file is a ``.npz`` file, the returned value supports the
      context manager protocol in a similar fashion to the open function::

        with load('foo.npz') as data:
            a = data['a']

      The underlying file descriptor is closed when exiting the 'with'
      block.

    Examples
    --------
    Store data to disk, and load it again:

    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])

    Store compressed data to disk, and load it again:

    >>> a=np.array([[1, 2, 3], [4, 5, 6]])
    >>> b=np.array([1, 2])
    >>> np.savez('/tmp/123.npz', a=a, b=b)
    >>> data = np.load('/tmp/123.npz')
    >>> data['a']
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> data['b']
    array([1, 2])
    >>> data.close()

    Mem-map the stored array, and then access the second row
    directly from disk:

    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])

    """
    import gzip

    own_fid = False
    if isinstance(file, basestring):
        # String path: we open the handle ourselves and must close it.
        fid = open(file, "rb")
        own_fid = True
    else:
        fid = file

    if encoding not in ('ASCII', 'latin1', 'bytes'):
        # The 'encoding' value for pickle also affects what encoding
        # the serialized binary data of Numpy arrays is loaded
        # in. Pickle does not pass on the encoding information to
        # Numpy. The unpickling code in numpy.core.multiarray is
        # written to assume that unicode data appearing where binary
        # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
        #
        # Other encoding values can corrupt binary data, and we
        # purposefully disallow them. For the same reason, the errors=
        # argument is not exposed, as values other than 'strict'
        # result can similarly silently corrupt numerical data.
        raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")

    if sys.version_info[0] >= 3:
        pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
    else:
        # Nothing to do on Python 2
        pickle_kwargs = {}

    try:
        # Code to distinguish from NumPy binary files and pickles.
        _ZIP_PREFIX = asbytes('PK\x03\x04')
        N = len(format.MAGIC_PREFIX)
        magic = fid.read(N)
        fid.seek(-N, 1)  # back-up
        if magic.startswith(_ZIP_PREFIX):
            # zip-file (assume .npz)
            # Transfer file ownership to NpzFile, so the finally clause
            # below does not close the handle the NpzFile still needs.
            tmp = own_fid
            own_fid = False
            return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
                           pickle_kwargs=pickle_kwargs)
        elif magic == format.MAGIC_PREFIX:
            # .npy file
            if mmap_mode:
                return format.open_memmap(file, mode=mmap_mode)
            else:
                return format.read_array(fid, allow_pickle=allow_pickle,
                                         pickle_kwargs=pickle_kwargs)
        else:
            # Try a pickle
            if not allow_pickle:
                raise ValueError("allow_pickle=False, but file does not contain "
                                 "non-pickled data")
            try:
                return pickle.load(fid, **pickle_kwargs)
            except:
                raise IOError(
                    "Failed to interpret file %s as a pickle" % repr(file))
    finally:
        # Only close handles this function opened itself.
        if own_fid:
            fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file or str
        File or filename to which the data is saved.  If file is a file-object,
        then the filename is unchanged.  If file is a string, a ``.npy``
        extension will be appended to the file name if it does not already
        have one.
    arr : array_like
        Array data to be saved.
    allow_pickle : bool, optional
        Allow saving object arrays using Python pickles. Reasons for disallowing
        pickles include security (loading pickled data can execute arbitrary
        code) and portability (pickled objects may not be loadable on different
        Python installations, for example if the stored objects require libraries
        that are not available, and not all pickled data is compatible between
        Python 2 and Python 3).
        Default: True
    fix_imports : bool, optional
        Only useful in forcing objects in object arrays on Python 3 to be
        pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
        will try to map the new Python 3 names to the old module names used in
        Python 2, so that the pickle data stream is readable with Python 2.

    See Also
    --------
    savez : Save several arrays into a ``.npz`` archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see the module docstring
    of `numpy.lib.format` or the Numpy Enhancement Proposal
    http://docs.scipy.org/doc/numpy/neps/npy-format.html

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()

    >>> x = np.arange(10)
    >>> np.save(outfile, x)

    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    own_fid = False
    if isinstance(file, basestring):
        # String path: append '.npy' (matching what np.load expects) and
        # remember we own the handle so it gets closed below.
        if not file.endswith('.npy'):
            file = file + '.npy'
        fid = open(file, "wb")
        own_fid = True
    else:
        fid = file

    if sys.version_info[0] >= 3:
        pickle_kwargs = dict(fix_imports=fix_imports)
    else:
        # Nothing to do on Python 2
        pickle_kwargs = None

    try:
        arr = np.asanyarray(arr)
        format.write_array(fid, arr, allow_pickle=allow_pickle,
                           pickle_kwargs=pickle_kwargs)
    finally:
        # Only close handles this function opened itself.
        if own_fid:
            fid.close()
def savez(file, *args, **kwds):
    """
    Save several arrays into a single file in uncompressed ``.npz`` format.

    If arguments are passed in with no keywords, the corresponding variable
    names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
    arguments are given, the corresponding variable names, in the ``.npz``
    file will match the keyword names.

    Parameters
    ----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        where the data will be saved. If file is a string, the ``.npz``
        extension will be appended to the file name if it is not already there.
    args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know the names of the arrays outside `savez`, the arrays will be saved
        with names "arr_0", "arr_1", and so on. These arguments can be any
        expression.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.

    Returns
    -------
    None

    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain.  The archive is not compressed and each file
    in the archive contains one variable in ``.npy`` format. For a
    description of the ``.npy`` format, see `numpy.lib.format` or the
    Numpy Enhancement Proposal
    http://docs.scipy.org/doc/numpy/neps/npy-format.html

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)

    Using `savez` with \\*args, the arrays are saved with default names.

    >>> np.savez(outfile, x, y)
    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['arr_1', 'arr_0']
    >>> npzfile['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    Using `savez` with \\**kwds, the arrays are saved with the keyword names.

    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['y', 'x']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Delegate to the shared implementation with compression disabled.
    _savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.

    If keyword arguments are given, then filenames are taken from the keywords.
    If arguments are passed in with no keywords, then stored file names are
    arr_0, arr_1, etc.

    Parameters
    ----------
    file : str
        File name of ``.npz`` file.
    args : Arguments
        Function arguments.
    kwds : Keyword arguments
        Keywords.

    See Also
    --------
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
    numpy.load : Load the files created by savez_compressed.

    """
    # Delegate to the shared implementation with compression enabled.
    _savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
    """Implementation shared by `savez` and `savez_compressed`.

    Writes every array in `args`/`kwds` as a ``.npy`` member of a zip
    archive; `compress` selects ZIP_DEFLATED vs ZIP_STORED.
    """
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile
    # Import deferred for startup time improvement
    import tempfile

    if isinstance(file, basestring):
        if not file.endswith('.npz'):
            file = file + '.npz'

    namedict = kwds
    # Positional arrays are named arr_0, arr_1, ...; a clash with an
    # explicit keyword of the same name is an error.
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            raise ValueError(
                "Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val

    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED

    zipf = zipfile_factory(file, mode="w", compression=compression)

    # Stage arrays in a temporary file on disk, before writing to zip.
    fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
    os.close(fd)
    try:
        for key, val in namedict.items():
            fname = key + '.npy'
            fid = open(tmpfile, 'wb')
            try:
                format.write_array(fid, np.asanyarray(val),
                                   allow_pickle=allow_pickle,
                                   pickle_kwargs=pickle_kwargs)
                # Close before zipf.write so the data is flushed to disk;
                # clear fid so the finally clause doesn't close it twice.
                fid.close()
                fid = None
                zipf.write(tmpfile, arcname=fname)
            finally:
                if fid:
                    fid.close()
    finally:
        # Always remove the staging file, even on failure.
        os.remove(tmpfile)

    zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
x.lower()
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0):
    """
    Load data from a text file.

    Each row in the text file must have the same number of values.

    Parameters
    ----------
    fname : file or str
        File, filename, or generator to read.  If the filename extension is
        ``.gz`` or ``.bz2``, the file is first decompressed. Note that
        generators should return byte strings for Python 3k.
    dtype : data-type, optional
        Data-type of the resulting array; default: float.  If this is a
        structured data-type, the resulting array will be 1-dimensional, and
        each row will be interpreted as an element of the array.  In this
        case, the number of columns used must match the number of fields in
        the data-type.
    comments : str or sequence, optional
        The characters or list of characters used to indicate the start of a
        comment;
        default: '#'.
    delimiter : str, optional
        The string used to separate values.  By default, this is any
        whitespace.
    converters : dict, optional
        A dictionary mapping column number to a function that will convert
        that column to a float.  E.g., if column 0 is a date string:
        ``converters = {0: datestr2num}``.  Converters can also be used to
        provide a default value for missing data (but see also `genfromtxt`):
        ``converters = {3: lambda s: float(s.strip() or 0)}``.  Default: None.
    skiprows : int, optional
        Skip the first `skiprows` lines; default: 0.
    usecols : sequence, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``.  When used with a structured
        data-type, arrays are returned for each field.  Default is False.
    ndmin : int, optional
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.

        .. versionadded:: 1.6.0

    Returns
    -------
    out : ndarray
        Data read from the text file.

    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files

    Notes
    -----
    This function aims to be a fast reader for simply formatted files.  The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.

    .. versionadded:: 1.10.0

    The strings produced by the Python float.hex method can be used as
    input for floats.

    Examples
    --------
    >>> from io import StringIO   # StringIO behaves like a file object
    >>> c = StringIO("0 1\\n2 3")
    >>> np.loadtxt(c)
    array([[ 0.,  1.],
           [ 2.,  3.]])

    >>> d = StringIO("M 21 72\\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ...                      'formats': ('S1', 'i4', 'f4')})
    array([('M', 21, 72.0), ('F', 35, 58.0)],
          dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])

    >>> c = StringIO("1,0,2\\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([ 1.,  3.])
    >>> y
    array([ 2.,  4.])

    """
    # Type conversions for Py3 convenience
    if comments is not None:
        if isinstance(comments, (basestring, bytes)):
            comments = [asbytes(comments)]
        else:
            comments = [asbytes(comment) for comment in comments]

        # Compile regex for comments beforehand
        comments = (re.escape(comment) for comment in comments)
        regex_comments = re.compile(asbytes('|').join(comments))
    user_converters = converters
    if delimiter is not None:
        delimiter = asbytes(delimiter)
    if usecols is not None:
        usecols = list(usecols)

    fown = False
    try:
        if _is_string_like(fname):
            # We open the file ourselves; remember to close it at the end.
            fown = True
            if fname.endswith('.gz'):
                import gzip
                fh = iter(gzip.GzipFile(fname))
            elif fname.endswith('.bz2'):
                import bz2
                fh = iter(bz2.BZ2File(fname))
            elif sys.version_info[0] == 2:
                fh = iter(open(fname, 'U'))
            else:
                fh = iter(open(fname))
        else:
            fh = iter(fname)
    except TypeError:
        raise ValueError('fname must be a string, file handle, or generator')
    X = []

    def flatten_dtype(dt):
        """Unpack a structured data-type, and produce re-packing info."""
        if dt.names is None:
            # If the dtype is flattened, return.
            # If the dtype has a shape, the dtype occurs
            # in the list more than once.
            shape = dt.shape
            if len(shape) == 0:
                return ([dt.base], None)
            else:
                packing = [(shape[-1], list)]
                if len(shape) > 1:
                    # fold outer subarray dimensions into the packing info
                    for dim in dt.shape[-2::-1]:
                        packing = [(dim*packing[0][0], packing*dim)]
                return ([dt.base] * int(np.prod(dt.shape)), packing)
        else:
            types = []
            packing = []
            for field in dt.names:
                tp, bytes = dt.fields[field]
                flat_dt, flat_packing = flatten_dtype(tp)
                types.extend(flat_dt)
                # Avoid extra nesting for subarrays
                if len(tp.shape) > 0:
                    packing.extend(flat_packing)
                else:
                    packing.append((len(flat_dt), flat_packing))
            return (types, packing)

    def pack_items(items, packing):
        """Pack items into nested lists based on re-packing info."""
        if packing is None:
            return items[0]
        elif packing is tuple:
            return tuple(items)
        elif packing is list:
            return list(items)
        else:
            # packing is a list of (length, sub-packing) pairs
            start = 0
            ret = []
            for length, subpacking in packing:
                ret.append(pack_items(items[start:start+length], subpacking))
                start += length
            return tuple(ret)

    def split_line(line):
        """Chop off comments, strip, and split at delimiter.

        Note that although the file is opened as text, this function
        returns bytes.

        """
        line = asbytes(line)
        if comments is not None:
            line = regex_comments.split(asbytes(line), maxsplit=1)[0]
        line = line.strip(asbytes('\r\n'))
        if line:
            return line.split(delimiter)
        else:
            return []

    try:
        # Make sure we're dealing with a proper dtype
        dtype = np.dtype(dtype)
        defconv = _getconv(dtype)

        # Skip the first `skiprows` lines
        for i in range(skiprows):
            next(fh)

        # Read until we find a line with some values, and use
        # it to estimate the number of columns, N.
        first_vals = None
        try:
            while not first_vals:
                first_line = next(fh)
                first_vals = split_line(first_line)
        except StopIteration:
            # End of lines reached
            first_line = ''
            first_vals = []
            warnings.warn('loadtxt: Empty input file: "%s"' % fname)
        N = len(usecols or first_vals)

        dtype_types, packing = flatten_dtype(dtype)
        if len(dtype_types) > 1:
            # We're dealing with a structured array, each field of
            # the dtype matches a column
            converters = [_getconv(dt) for dt in dtype_types]
        else:
            # All fields have the same dtype
            converters = [defconv for i in range(N)]
            if N > 1:
                packing = [(N, tuple)]

        # By preference, use the converters specified by the user
        for i, conv in (user_converters or {}).items():
            if usecols:
                try:
                    # remap the user's absolute column index to its
                    # position within the selected columns
                    i = usecols.index(i)
                except ValueError:
                    # Unused converter specified
                    continue
            converters[i] = conv

        # Parse each line, including the first
        for i, line in enumerate(itertools.chain([first_line], fh)):
            vals = split_line(line)
            if len(vals) == 0:
                continue
            if usecols:
                vals = [vals[i] for i in usecols]
            if len(vals) != N:
                line_num = i + skiprows + 1
                raise ValueError("Wrong number of columns at line %d"
                                 % line_num)

            # Convert each value according to its column and store
            items = [conv(val) for (conv, val) in zip(converters, vals)]
            # Then pack it according to the dtype's nesting
            items = pack_items(items, packing)
            X.append(items)
    finally:
        if fown:
            fh.close()

    X = np.array(X, dtype)
    # Multicolumn data are returned with shape (1, N, M), i.e.
    # (1, 1, M) for a single row - remove the singleton dimension there
    if X.ndim == 3 and X.shape[:2] == (1, 1):
        X.shape = (1, -1)

    # Verify that the array has at least dimensions `ndmin`.
    # Check correctness of the values of `ndmin`
    if ndmin not in [0, 1, 2]:
        raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
    # Tweak the size and shape of the arrays - remove extraneous dimensions
    if X.ndim > ndmin:
        X = np.squeeze(X)
    # and ensure we have the minimum number of dimensions asked for
    # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
    if X.ndim < ndmin:
        if ndmin == 1:
            X = np.atleast_1d(X)
        elif ndmin == 2:
            X = np.atleast_2d(X).T

    if unpack:
        if len(dtype_types) > 1:
            # For structured arrays, return an array for each field.
            return [X[field] for field in dtype.names]
        else:
            return X.T
    else:
        return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
            footer='', comments='# '):
    """
    Save an array to a text file.

    Parameters
    ----------
    fname : filename or file handle
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format.  `loadtxt` understands gzipped files
        transparently.
    X : array_like
        Data to be saved to a text file.
    fmt : str or sequence of strs, optional
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored. For complex `X`, the legal options
        for `fmt` are:
            a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
               like `' (%s+%sj)' % (fmt, fmt)`
            b) a full string specifying every real and imaginary part, e.g.
               `' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
            c) a list of specifiers, one per column - in this case, the real
               and imaginary part must have separate specifiers,
               e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
    delimiter : str, optional
        String or character separating columns.
    newline : str, optional
        String or character separating lines.

        .. versionadded:: 1.5.0
    header : str, optional
        String that will be written at the beginning of the file.

        .. versionadded:: 1.7.0
    footer : str, optional
        String that will be written at the end of the file.

        .. versionadded:: 1.7.0
    comments : str, optional
        String that will be prepended to the ``header`` and ``footer`` strings,
        to mark them as comments. Default: '# ', as expected by e.g.
        ``numpy.loadtxt``.

        .. versionadded:: 1.7.0

    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into an uncompressed ``.npz`` archive
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):

    flags:
        ``-`` : left justify

        ``+`` : Forces to precede result with + or -.

        ``0`` : Left pad the number with zeros instead of space (see width).

    width:
        Minimum number of characters to be printed. The value is not truncated
        if it has more characters.

    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.

    specifiers:
        ``c`` : character

        ``d`` or ``i`` : signed decimal integer

        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.

        ``f`` : decimal floating point

        ``g,G`` : use the shorter of ``e,E`` or ``f``

        ``o`` : signed octal

        ``s`` : string of characters

        ``u`` : unsigned decimal integer

        ``x,X`` : unsigned hexadecimal integer

    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.

    References
    ----------
    .. [1] `Format Specification Mini-Language
           <http://docs.python.org/library/string.html#
           format-specification-mini-language>`_, Python Documentation.

    Examples
    --------
    >>> x = y = z = np.arange(0.0,5.0,1.0)
    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation

    """
    # Py3 conversions first
    if isinstance(fmt, bytes):
        fmt = asstr(fmt)
    delimiter = asstr(delimiter)
    # own_fh tracks whether *we* opened the handle (and hence must close it).
    own_fh = False
    if _is_string_like(fname):
        own_fh = True
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname, 'wb')
        else:
            if sys.version_info[0] >= 3:
                # Binary mode on Py3: rows are encoded through asbytes below.
                fh = open(fname, 'wb')
            else:
                fh = open(fname, 'w')
    elif hasattr(fname, 'write'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')
    try:
        X = np.asarray(X)
        # Handle 1-dimensional arrays
        if X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1
            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.descr)
        else:
            ncol = X.shape[1]
        iscomplex_X = np.iscomplexobj(X)
        # `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d')
        if type(fmt) in (list, tuple):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
            format = asstr(delimiter).join(map(asstr, fmt))
        elif isinstance(fmt, str):
            # NOTE(review): counting '%' also counts literal '%%' escapes, so
            # a fmt containing '%%' may be misclassified here -- confirm.
            n_fmt_chars = fmt.count('%')
            error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
            if n_fmt_chars == 1:
                if iscomplex_X:
                    # Single specifier applied to real and imaginary parts.
                    fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
                else:
                    fmt = [fmt, ] * ncol
                format = delimiter.join(fmt)
            elif iscomplex_X and n_fmt_chars != (2 * ncol):
                raise error
            elif ((not iscomplex_X) and n_fmt_chars != ncol):
                raise error
            else:
                # Multi-specifier string: use as-is, delimiter is ignored.
                format = fmt
        else:
            raise ValueError('invalid fmt: %r' % (fmt,))
        if len(header) > 0:
            # Prefix every header line with the comment marker.
            header = header.replace('\n', '\n' + comments)
            fh.write(asbytes(comments + header + newline))
        if iscomplex_X:
            # Interleave real/imag values to match the doubled format string.
            for row in X:
                row2 = []
                for number in row:
                    row2.append(number.real)
                    row2.append(number.imag)
                fh.write(asbytes(format % tuple(row2) + newline))
        else:
            for row in X:
                try:
                    fh.write(asbytes(format % tuple(row) + newline))
                except TypeError:
                    raise TypeError("Mismatch between array dtype ('%s') and "
                                    "format specifier ('%s')"
                                    % (str(X.dtype), format))
        if len(footer) > 0:
            footer = footer.replace('\n', '\n' + comments)
            fh.write(asbytes(comments + footer + newline))
    finally:
        if own_fh:
            fh.close()
def fromregex(file, regexp, dtype):
    """
    Construct an array from a text file, using regular expression parsing.

    The returned array is always a structured array, and is constructed from
    all matches of the regular expression in the file. Groups in the regular
    expression are converted to fields of the structured array.

    Parameters
    ----------
    file : str or file
        File name or file object to read.
    regexp : str or regexp
        Regular expression used to parse the file.
        Groups in the regular expression correspond to fields in the dtype.
    dtype : dtype or list of dtypes
        Dtype for the structured array.

    Returns
    -------
    output : ndarray
        The output array, containing the part of the content of `file` that
        was matched by `regexp`. `output` is always a structured array.

    Raises
    ------
    TypeError
        When `dtype` is not a valid dtype for a structured array.

    See Also
    --------
    fromstring, loadtxt

    Notes
    -----
    Dtypes for structured arrays can be specified in several forms, but all
    forms specify at least the data type and field name. For details see
    `doc.structured_arrays`.

    Examples
    --------
    >>> f = open('test.dat', 'w')
    >>> f.write("1312 foo\\n1534  bar\\n444   qux")
    >>> f.close()

    >>> regexp = r"(\\d+)\\s+(...)"  # match [digits, whitespace, anything]
    >>> output = np.fromregex('test.dat', regexp,
    ...                       [('num', np.int64), ('key', 'S3')])
    >>> output
    array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
          dtype=[('num', '<i8'), ('key', '|S3')])
    >>> output['num']
    array([1312, 1534,  444], dtype=int64)

    """
    # Accept either an open file-like object or a path; remember whether the
    # handle is ours so we only close what we opened.
    needs_close = not hasattr(file, "read")
    if needs_close:
        file = open(file, 'rb')
    try:
        # Normalize the pattern and dtype arguments up front.
        if not hasattr(regexp, 'match'):
            regexp = re.compile(asbytes(regexp))
        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)

        matches = regexp.findall(file.read())
        if matches and not isinstance(matches[0], tuple):
            # A single-group pattern makes findall() return scalars, which
            # np.array cannot map onto a one-field structured dtype directly.
            # Build the array with the field's base dtype first, then
            # re-interpret it as the requested structured dtype.
            scalar_dtype = np.dtype(dtype[dtype.names[0]])
            result = np.array(matches, dtype=scalar_dtype)
            result.dtype = dtype
            return result
        return np.array(matches, dtype=dtype)
    finally:
        if needs_close:
            file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
               skip_header=0, skip_footer=0, converters=None,
               missing_values=None, filling_values=None, usecols=None,
               names=None, excludelist=None, deletechars=None,
               replace_space='_', autostrip=False, case_sensitive=True,
               defaultfmt="f%i", unpack=None, usemask=False, loose=True,
               invalid_raise=True, max_rows=None):
    """
    Load data from a text file, with missing values handled as specified.

    Each line past the first `skip_header` lines is split at the `delimiter`
    character, and characters following the `comments` character are discarded.

    Parameters
    ----------
    fname : file, str, list of str, generator
        File, filename, list, or generator to read.  If the filename
        extension is `.gz` or `.bz2`, the file is first decompressed.  Note
        that generators must return byte strings in Python 3k.  The strings
        in a list or produced by a generator are treated as lines.
    dtype : dtype, optional
        Data type of the resulting array.
        If None, the dtypes will be determined by the contents of each
        column, individually.
    comments : str, optional
        The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded
    delimiter : str, int, or sequence, optional
        The string used to separate values.  By default, any consecutive
        whitespaces act as delimiter.  An integer or sequence of integers
        can also be provided as width(s) of each field.
    skiprows : int, optional
        `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
    skip_header : int, optional
        The number of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The number of lines to skip at the end of the file.
    converters : variable, optional
        The set of functions that convert the data of a column to a value.
        The converters can also be used to provide a default value
        for missing data: ``converters = {3: lambda s: float(s or 0)}``.
    missing : variable, optional
        `missing` was removed in numpy 1.10. Please use `missing_values`
        instead.
    missing_values : variable, optional
        The set of strings corresponding to missing data.
    filling_values : variable, optional
        The set of values to be used as default when the data are missing.
    usecols : sequence, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
    names : {None, True, str, sequence}, optional
        If `names` is True, the field names are read from the first valid line
        after the first `skip_header` lines.
        If `names` is a sequence or a single-string of comma-separated names,
        the names will be used to define the field names in a structured dtype.
        If `names` is None, the names of the dtype fields will be used, if any.
    excludelist : sequence, optional
        A list of names to exclude.  This list is appended to the default list
        ['return','file','print']. Excluded names are appended an underscore:
        for example, `file` would become `file_`.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    defaultfmt : str, optional
        A format used to define default field names, such as "f%i" or "f_%02i".
    autostrip : bool, optional
        Whether to automatically strip white spaces from the variables.
    replace_space : char, optional
        Character(s) used in replacement of white spaces in the variables
        names. By default, use a '_'.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        If True, field names are case sensitive.
        If False or 'upper', field names are converted to upper case.
        If 'lower', field names are converted to lower case.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``
    usemask : bool, optional
        If True, return a masked array.
        If False, return a regular array.
    loose : bool, optional
        If True, do not raise errors for invalid values.
    invalid_raise : bool, optional
        If True, an exception is raised if an inconsistency is detected in the
        number of columns.
        If False, a warning is emitted and the offending lines are skipped.
    max_rows : int,  optional
        The maximum number of rows to read. Must not be used with skip_footer
        at the same time.  If given, the value must be at least 1. Default is
        to read the entire file.

        .. versionadded:: 1.10.0

    Returns
    -------
    out : ndarray
        Data read from the text file. If `usemask` is True, this is a
        masked array.

    See Also
    --------
    numpy.loadtxt : equivalent function when no data is missing.

    Notes
    -----
    * When spaces are used as delimiters, or when no delimiter has been given
      as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`,
      there must not be any header in the file (else a ValueError
      exception is raised).
    * Individual values are not stripped of spaces by default.
      When using a custom converter, make sure the function does remove spaces.

    References
    ----------
    .. [1] Numpy User Guide, section `I/O with Numpy
           <http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.

    Examples
    ---------
    >>> from io import StringIO
    >>> import numpy as np

    Comma delimited file with mixed dtype

    >>> s = StringIO("1,1.3,abcde")
    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
    ... ('mystring','S5')], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    Using dtype = None

    >>> s.seek(0) # needed for StringIO example only
    >>> data = np.genfromtxt(s, dtype=None,
    ... names = ['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    Specifying dtype and names

    >>> s.seek(0)
    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
    ... names=['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    An example with fixed-width columns

    >>> s = StringIO("11.3abcde")
    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
    ...     delimiter=[1,3,5])
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])

    """
    # `max_rows` is incompatible with `skip_footer` and must be positive.
    if max_rows is not None:
        if skip_footer:
            raise ValueError(
                    "The keywords 'skip_footer' and 'max_rows' can not be "
                    "specified at the same time.")
        if max_rows < 1:
            raise ValueError("'max_rows' must be at least 1.")

    # Py3 data conversions to bytes, for convenience
    # NOTE(review): `asbytes`, `unicode`, `basestring` come from the module's
    # py2/py3 compat layer -- confirm availability when porting this code.
    if comments is not None:
        comments = asbytes(comments)
    if isinstance(delimiter, unicode):
        delimiter = asbytes(delimiter)
    if isinstance(missing_values, (unicode, list, tuple)):
        missing_values = asbytes_nested(missing_values)

    #
    if usemask:
        from numpy.ma import MaskedArray, make_mask_descr
    # Check the input dictionary of converters
    user_converters = converters or {}
    if not isinstance(user_converters, dict):
        raise TypeError(
            "The input argument 'converter' should be a valid dictionary "
            "(got '%s' instead)" % type(user_converters))

    # Initialize the filehandle, the LineSplitter and the NameValidator
    own_fhd = False
    try:
        if isinstance(fname, basestring):
            if sys.version_info[0] == 2:
                fhd = iter(np.lib._datasource.open(fname, 'rbU'))
            else:
                fhd = iter(np.lib._datasource.open(fname, 'rb'))
            own_fhd = True
        else:
            fhd = iter(fname)
    except TypeError:
        raise TypeError(
            "fname must be a string, filehandle, list of strings, "
            "or generator. Got %s instead." % type(fname))

    split_line = LineSplitter(delimiter=delimiter, comments=comments,
                              autostrip=autostrip)._handyman
    validate_names = NameValidator(excludelist=excludelist,
                                   deletechars=deletechars,
                                   case_sensitive=case_sensitive,
                                   replace_space=replace_space)

    # Skip the first `skip_header` rows
    for i in range(skip_header):
        next(fhd)

    # Keep on until we find the first valid values
    first_values = None
    try:
        while not first_values:
            first_line = next(fhd)
            if names is True:
                # A header line may carry the names after a comment marker.
                if comments in first_line:
                    first_line = (
                        asbytes('').join(first_line.split(comments)[1:]))
            first_values = split_line(first_line)
    except StopIteration:
        # return an empty array if the datafile is empty
        first_line = asbytes('')
        first_values = []
        warnings.warn('genfromtxt: Empty input file: "%s"' % fname)

    # Should we take the first values as names ?
    if names is True:
        fval = first_values[0].strip()
        if fval in comments:
            del first_values[0]

    # Check the columns to use: make sure `usecols` is a list
    if usecols is not None:
        try:
            usecols = [_.strip() for _ in usecols.split(",")]
        except AttributeError:
            try:
                usecols = list(usecols)
            except TypeError:
                usecols = [usecols, ]
    nbcols = len(usecols or first_values)

    # Check the names and overwrite the dtype.names if needed
    if names is True:
        names = validate_names([_bytes_to_name(_.strip())
                                for _ in first_values])
        # Clear first_line so the header row is not re-parsed as data below.
        first_line = asbytes('')
    elif _is_string_like(names):
        names = validate_names([_.strip() for _ in names.split(',')])
    elif names:
        names = validate_names(names)
    # Get the dtype
    if dtype is not None:
        dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
                           excludelist=excludelist,
                           deletechars=deletechars,
                           case_sensitive=case_sensitive,
                           replace_space=replace_space)
    # Make sure the names is a list (for 2.5)
    if names is not None:
        names = list(names)

    if usecols:
        for (i, current) in enumerate(usecols):
            # if usecols is a list of names, convert to a list of indices
            if _is_string_like(current):
                usecols[i] = names.index(current)
            elif current < 0:
                usecols[i] = current + len(first_values)
        # If the dtype is not None, make sure we update it
        if (dtype is not None) and (len(dtype) > nbcols):
            descr = dtype.descr
            dtype = np.dtype([descr[_] for _ in usecols])
            names = list(dtype.names)
        # If `names` is not None, update the names
        elif (names is not None) and (len(names) > nbcols):
            names = [names[_] for _ in usecols]
    elif (names is not None) and (dtype is not None):
        names = list(dtype.names)

    # Process the missing values ...............................
    # Rename missing_values for convenience
    user_missing_values = missing_values or ()

    # Define the list of missing_values (one column: one list)
    missing_values = [list([asbytes('')]) for _ in range(nbcols)]

    # We have a dictionary: process it field by field
    if isinstance(user_missing_values, dict):
        # Loop on the items
        for (key, val) in user_missing_values.items():
            # Is the key a string ?
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped
                    continue
            # Redefine the key as needed if it's a column number
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Transform the value as a list of string
            if isinstance(val, (list, tuple)):
                val = [str(_) for _ in val]
            else:
                val = [str(val), ]
            # Add the value(s) to the current list of missing
            if key is None:
                # None acts as default
                for miss in missing_values:
                    miss.extend(val)
            else:
                missing_values[key].extend(val)
    # We have a sequence : each item matches a column
    elif isinstance(user_missing_values, (list, tuple)):
        for (value, entry) in zip(user_missing_values, missing_values):
            value = str(value)
            if value not in entry:
                entry.append(value)
    # We have a string : apply it to all entries
    elif isinstance(user_missing_values, bytes):
        user_value = user_missing_values.split(asbytes(","))
        for entry in missing_values:
            entry.extend(user_value)
    # We have something else: apply it to all entries
    else:
        for entry in missing_values:
            entry.extend([str(user_missing_values)])

    # Process the filling_values ...............................
    # Rename the input for convenience
    user_filling_values = filling_values
    if user_filling_values is None:
        user_filling_values = []
    # Define the default
    filling_values = [None] * nbcols
    # We have a dictionary : update each entry individually
    if isinstance(user_filling_values, dict):
        for (key, val) in user_filling_values.items():
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped,
                    continue
            # Redefine the key if it's a column number and usecols is defined
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Add the value to the list
            filling_values[key] = val
    # We have a sequence : update on a one-to-one basis
    elif isinstance(user_filling_values, (list, tuple)):
        n = len(user_filling_values)
        if (n <= nbcols):
            filling_values[:n] = user_filling_values
        else:
            filling_values = user_filling_values[:nbcols]
    # We have something else : use it for all entries
    else:
        filling_values = [user_filling_values] * nbcols

    # Initialize the converters ................................
    if dtype is None:
        # Note: we can't use a [...]*nbcols, as we would have 3 times the same
        # ... converter, instead of 3 different converters.
        converters = [StringConverter(None, missing_values=miss, default=fill)
                      for (miss, fill) in zip(missing_values, filling_values)]
    else:
        dtype_flat = flatten_dtype(dtype, flatten_base=True)
        # Initialize the converters
        if len(dtype_flat) > 1:
            # Flexible type : get a converter from each dtype
            zipit = zip(dtype_flat, missing_values, filling_values)
            converters = [StringConverter(dt, locked=True,
                                          missing_values=miss, default=fill)
                          for (dt, miss, fill) in zipit]
        else:
            # Set to a default converter (but w/ different missing values)
            zipit = zip(missing_values, filling_values)
            converters = [StringConverter(dtype, locked=True,
                                          missing_values=miss, default=fill)
                          for (miss, fill) in zipit]
    # Update the converters to use the user-defined ones
    uc_update = []
    for (j, conv) in user_converters.items():
        # If the converter is specified by column names, use the index instead
        if _is_string_like(j):
            try:
                j = names.index(j)
                i = j
            except ValueError:
                continue
        elif usecols:
            try:
                i = usecols.index(j)
            except ValueError:
                # Unused converter specified
                continue
        else:
            i = j
        # Find the value to test - first_line is not filtered by usecols:
        if len(first_line):
            testing_value = first_values[j]
        else:
            testing_value = None
        converters[i].update(conv, locked=True,
                             testing_value=testing_value,
                             default=filling_values[i],
                             missing_values=missing_values[i],)
        uc_update.append((i, conv))
    # Make sure we have the corrected keys in user_converters...
    user_converters.update(uc_update)

    # Fixme: possible error as following variable never used.
    #miss_chars = [_.missing_values for _ in converters]

    # Initialize the output lists ...
    # ... rows
    rows = []
    append_to_rows = rows.append
    # ... masks
    if usemask:
        masks = []
        append_to_masks = masks.append
    # ... invalid
    invalid = []
    append_to_invalid = invalid.append

    # Parse each line
    # first_line is replayed here so the read used for header detection above
    # is not lost.
    for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
        values = split_line(line)
        nbvalues = len(values)
        # Skip an empty line
        if nbvalues == 0:
            continue
        if usecols:
            # Select only the columns we need
            try:
                values = [values[_] for _ in usecols]
            except IndexError:
                append_to_invalid((i + skip_header + 1, nbvalues))
                continue
        elif nbvalues != nbcols:
            append_to_invalid((i + skip_header + 1, nbvalues))
            continue
        # Store the values
        append_to_rows(tuple(values))
        if usemask:
            append_to_masks(tuple([v.strip() in m
                                   for (v, m) in zip(values,
                                                     missing_values)]))
        # Stop once max_rows rows have been stored.
        if len(rows) == max_rows:
            break

    if own_fhd:
        fhd.close()

    # Upgrade the converters (if needed)
    if dtype is None:
        for (i, converter) in enumerate(converters):
            current_column = [itemgetter(i)(_m) for _m in rows]
            try:
                converter.iterupgrade(current_column)
            except ConverterLockError:
                errmsg = "Converter #%i is locked and cannot be upgraded: " % i
                current_column = map(itemgetter(i), rows)
                for (j, value) in enumerate(current_column):
                    try:
                        converter.upgrade(value)
                    except (ConverterError, ValueError):
                        errmsg += "(occurred line #%i for value '%s')"
                        errmsg %= (j + 1 + skip_header, value)
                        raise ConverterError(errmsg)

    # Check that we don't have invalid values
    nbinvalid = len(invalid)
    if nbinvalid > 0:
        nbrows = len(rows) + nbinvalid - skip_footer
        # Construct the error message
        template = " Line #%%i (got %%i columns instead of %i)" % nbcols
        if skip_footer > 0:
            nbinvalid_skipped = len([_ for _ in invalid
                                     if _[0] > nbrows + skip_header])
            invalid = invalid[:nbinvalid - nbinvalid_skipped]
            skip_footer -= nbinvalid_skipped
#
#            nbrows -= skip_footer
#            errmsg = [template % (i, nb)
#                      for (i, nb) in invalid if i < nbrows]
#        else:
        errmsg = [template % (i, nb)
                  for (i, nb) in invalid]
        if len(errmsg):
            errmsg.insert(0, "Some errors were detected !")
            errmsg = "\n".join(errmsg)
            # Raise an exception ?
            if invalid_raise:
                raise ValueError(errmsg)
            # Issue a warning ?
            else:
                warnings.warn(errmsg, ConversionWarning)

    # Strip the last skip_footer data
    if skip_footer > 0:
        rows = rows[:-skip_footer]
        if usemask:
            masks = masks[:-skip_footer]

    # Convert each value according to the converter:
    # We want to modify the list in place to avoid creating a new one...
    if loose:
        rows = list(
            zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))
    else:
        rows = list(
            zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))

    # Reset the dtype
    data = rows
    if dtype is None:
        # Get the dtypes from the types of the converters
        column_types = [conv.type for conv in converters]
        # Find the columns with strings...
        strcolidx = [i for (i, v) in enumerate(column_types)
                     if v in (type('S'), np.string_)]
        # ... and take the largest number of chars.
        for i in strcolidx:
            column_types[i] = "|S%i" % max(len(row[i]) for row in data)
        #
        if names is None:
            # If the dtype is uniform, don't define names, else use ''
            base = set([c.type for c in converters if c._checked])
            if len(base) == 1:
                (ddtype, mdtype) = (list(base)[0], np.bool)
            else:
                ddtype = [(defaultfmt % i, dt)
                          for (i, dt) in enumerate(column_types)]
                if usemask:
                    mdtype = [(defaultfmt % i, np.bool)
                              for (i, dt) in enumerate(column_types)]
        else:
            ddtype = list(zip(names, column_types))
            mdtype = list(zip(names, [np.bool] * len(column_types)))
        output = np.array(data, dtype=ddtype)
        if usemask:
            outputmask = np.array(masks, dtype=mdtype)
    else:
        # Overwrite the initial dtype names if needed
        if names and dtype.names:
            dtype.names = names
        # Case 1. We have a structured type
        if len(dtype_flat) > 1:
            # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
            # First, create the array using a flattened dtype:
            # [('a', int), ('b1', int), ('b2', float)]
            # Then, view the array using the specified dtype.
            if 'O' in (_.char for _ in dtype_flat):
                if has_nested_fields(dtype):
                    raise NotImplementedError(
                        "Nested fields involving objects are not supported...")
                else:
                    output = np.array(data, dtype=dtype)
            else:
                rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
                output = rows.view(dtype)
            # Now, process the rowmasks the same way
            if usemask:
                rowmasks = np.array(
                    masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
                # Construct the new dtype
                mdtype = make_mask_descr(dtype)
                outputmask = rowmasks.view(mdtype)
        # Case #2. We have a basic dtype
        else:
            # We used some user-defined converters
            if user_converters:
                ishomogeneous = True
                descr = []
                for i, ttype in enumerate([conv.type for conv in converters]):
                    # Keep the dtype of the current converter
                    if i in user_converters:
                        ishomogeneous &= (ttype == dtype.type)
                        if ttype == np.string_:
                            ttype = "|S%i" % max(len(row[i]) for row in data)
                        descr.append(('', ttype))
                    else:
                        descr.append(('', dtype))
                # So we changed the dtype ?
                if not ishomogeneous:
                    # We have more than one field
                    if len(descr) > 1:
                        dtype = np.dtype(descr)
                    # We have only one field: drop the name if not needed.
                    else:
                        dtype = np.dtype(ttype)
            #
            output = np.array(data, dtype)
            if usemask:
                if dtype.names:
                    mdtype = [(_, np.bool) for _ in dtype.names]
                else:
                    mdtype = np.bool
                outputmask = np.array(masks, dtype=mdtype)
    # Try to take care of the missing data we missed
    names = output.dtype.names
    if usemask and names:
        for (name, conv) in zip(names or (), converters):
            missing_values = [conv(_) for _ in conv.missing_values
                              if _ != asbytes('')]
            for mval in missing_values:
                outputmask[name] |= (output[name] == mval)
    # Construct the final array
    if usemask:
        output = output.view(MaskedArray)
        output._mask = outputmask
    if unpack:
        return output.squeeze().T
    return output.squeeze()
def ndfromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a file and return it as a single array.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function.

    """
    # Force a plain ndarray result, overriding any caller-supplied `usemask`.
    return genfromtxt(fname, **dict(kwargs, usemask=False))
def mafromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a text file and return a masked array.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    """
    # Force a masked-array result, overriding any caller-supplied `usemask`.
    return genfromtxt(fname, **dict(kwargs, usemask=True))
def recfromtxt(fname, **kwargs):
    """
    Load ASCII data from a file and return it in a record array.

    If ``usemask=False`` a standard `recarray` is returned,
    if ``usemask=True`` a MaskedRecords array is returned.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the output
    array will be determined from the data.

    """
    # Infer the dtype from the data unless the caller pinned one down.
    if "dtype" not in kwargs:
        kwargs["dtype"] = None
    data = genfromtxt(fname, **kwargs)
    if kwargs.get("usemask", False):
        from numpy.ma.mrecords import MaskedRecords
        return data.view(MaskedRecords)
    return data.view(np.recarray)
def recfromcsv(fname, **kwargs):
    """
    Load ASCII data stored in a comma-separated file.

    The returned array is a record array (if ``usemask=False``, see
    `recarray`) or a masked record array (if ``usemask=True``,
    see `ma.mrecords.MaskedRecords`).

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the output
    array will be determined from the data.

    """
    # Defaults tuned for CSV: lower-cased names taken from the header row,
    # comma delimiter, and dtype inferred from the data.
    csv_defaults = {
        "case_sensitive": "lower",
        "names": True,
        "delimiter": ",",
        "dtype": None,
    }
    for key, value in csv_defaults.items():
        kwargs.setdefault(key, value)
    data = genfromtxt(fname, **kwargs)
    if kwargs.get("usemask", False):
        from numpy.ma.mrecords import MaskedRecords
        return data.view(MaskedRecords)
    return data.view(np.recarray)
| bsd-3-clause |
sunk/spacetime | nips_exp.py | 1 | 11758 | #!/usr/bin/env python
from __future__ import print_function
import dist2, spacetime
import numpy as np
import scipy.io
import os, sys, itertools
EPS = np.finfo( float ).eps
def __rebuild( data_file, bin_file, num_vols, dtype, binary ):
    '''
    Parse the raw NIPS data in `data_file` (volumes 1-`num_vols`) and cache
    the results in `bin_file` (.npz): the co-authorship count matrix C, the
    normalized joint matrix P, the author names and per-author paper counts.
    '''
    print( 'rebuilding P matrix' )
    raw = scipy.io.loadmat( data_file )
    # The two releases of the dataset use different .mat variable names.
    if num_vols == 17:
        documents = np.array( raw['docs_authors'].todense(), dtype=np.uint8 )
        authors = raw['authors_names'][0]
    elif num_vols == 22:
        documents = np.array( raw['documents'].todense(), dtype=np.uint8 )
        authors = raw['authors'][0]
    else:
        raise RuntimeError( 'num_vols=%d' % num_vols )
    # documents: (papers x authors) incidence matrix; authors: name array.
    N = authors.shape[0]
    assert( N == documents.shape[1] )
    print( 'originally, %d authors, %d papers in total' % \
        ( N, documents.shape[0] ) )
    # building the co-authorship matrix within authors with 2 papers
    have_two_papers = ( documents.sum(0) >= 2 )
    C = np.zeros( [N, N], dtype=dtype )
    for doc in documents:
        # Count each co-author pair of this paper, symmetrically.
        for a1, a2 in itertools.combinations( np.nonzero( doc )[0], 2 ):
            if have_two_papers[a1] and have_two_papers[a2]:
                C[a1, a2] += 1
                C[a2, a1] += 1
    # Drop authors left with no co-authorship links at all.
    idx = ( C.sum(0) >= 1 )
    print( 'removing %d young authors' % ( C.shape[0] - idx.sum() ) )
    C = C[idx][:,idx]
    if binary:
        # Optionally keep only the existence of a link, not its multiplicity.
        C = ( C > 0 ).astype( dtype )
        print( 'binarized with density %.2f%%' % (C.sum()*100./C.size) )
    assert( np.allclose( C, C.T ) )
    authors = authors[idx]
    documents = documents[:, idx]
    print( '%d authors left' % C.shape[0] )
    print( "they co-authored %d papers" % ( documents.sum(1) >= 2 ).sum() )
    # normalize P
    P = C.copy()
    P /= P.sum(0)      # normalize each column to sum 1
    P = P + P.T        # symmetrize
    P /= P.sum()       # rescale so the whole matrix sums to 1
    # Floor at machine eps -- presumably to keep log/ratio terms finite
    # downstream; confirm against the consumers of P.
    P = np.maximum( P, EPS )
    print( "saving to '%s'" % bin_file )
    np.savez( bin_file, C=C, P=P, authors=authors, no_papers=documents.sum( 0 ) )
def load_nips( num_vols=22, dtype=np.float32, binary=False ):
    '''load the NIPS co-authorship dataset

    Returns (C, P, authors, no_papers) loaded from a cached .npz file,
    rebuilding the cache from the raw .mat file when necessary.
    '''
    if num_vols not in (17, 22):
        raise RuntimeError( 'num_vols=%d' % num_vols )
    data_file = 'data/nips_1-%d.mat' % num_vols
    bin_file = os.path.splitext( data_file )[0] + '.npz'
    # The raw .mat file is mandatory; the .npz cache is derived from it.
    if not os.access( data_file, os.R_OK ):
        raise RuntimeError( "'%s' missing" % data_file )
    if not os.access( bin_file, os.R_OK ):
        __rebuild( data_file, bin_file, num_vols, dtype, binary )
    print( 'loading nips data from %s' % bin_file )
    archive = np.load( bin_file )
    return ( archive['C'], archive['P'],
             archive['authors'], archive['no_papers'] )
def overlap_ratio( past_bb, bb ):
    '''fraction of bb's area covered by the boxes in past_bb

    Each box is a 2x2 array [[x_min, y_min], [x_max, y_max]]; the
    per-box overlap areas are summed (overlaps between past boxes are
    therefore counted more than once) and divided by the area of bb.
    '''
    covered = 0
    for prev in past_bb:
        dx = min( prev[1, 0], bb[1, 0] ) - max( prev[0, 0], bb[0, 0] )
        dy = min( prev[1, 1], bb[1, 1] ) - max( prev[0, 1], bb[0, 1] )
        # Only genuinely intersecting boxes contribute.
        if dx > 0 and dy > 0:
            covered += dx * dy
    area = ( bb[1, 0] - bb[0, 0] ) * ( bb[1, 1] - bb[0, 1] )
    return covered * 1.0 / area
def find_renderer( fig ):
    '''return a renderer for fig, usable for text-extent measurements

    Backends without get_renderer() are forced to draw once into a
    throwaway PDF buffer so that fig._cachedRenderer gets populated.
    '''
    canvas = fig.canvas
    if hasattr(canvas, "get_renderer"):
        return canvas.get_renderer()
    import io
    canvas.print_pdf(io.BytesIO())
    return fig._cachedRenderer
def __visualize( Y, Z, authors, no_papers, ofile ):
    '''
    Render the 2-D space coordinates Y colored by the 1-D time
    coordinates Z and save the figure to ofile (PDF).

    Y         : (n, 2) space coordinates of the embedding
    Z         : (n, 1) time coordinates (used for color and label size)
    authors   : author name array (parallel to Y/Z rows)
    no_papers : per-author paper counts (parallel to Y/Z rows)
    ofile     : output file path for the saved figure
    '''
    import matplotlib.pyplot as plt
    import matplotlib.colors as colors
    import matplotlib.lines as lines
    import matplotlib.cm as cmx
    from matplotlib import rc
    # fonttype 42 embeds TrueType fonts so PDF/PS text stays editable.
    rc( 'pdf', fonttype=42 )
    rc( 'ps', fonttype=42 )
    fig = plt.figure( figsize=[8,8], frameon=True, dpi=600 )
    # Axes spanning the whole figure (no margins).
    ax = plt.Axes( fig, [0., 0., 1., 1.] )
    ax.set_aspect( 'equal' )
    fig.add_axes( ax )
    # "guys" are the authors prominent enough to get a text label:
    # many papers, or an extreme time coordinate.
    guys = np.logical_or( ( no_papers >= 10 ),
                          ( np.abs( Z[:,0] ) > 1 ) )
    others = np.logical_not( guys )
    cNorm = colors.Normalize( vmin=-1, vmax=1 )
    scalarMap = cmx.ScalarMappable( norm=cNorm, cmap='RdYlBu_r' )
    # Non-labelled authors are drawn as small translucent dots.
    ax.scatter( Y[others,0], Y[others,1],
                s=15,
                c=Z[others,0],
                cmap='RdYlBu_r',
                norm=cNorm,
                alpha=.4,
                edgecolors='none' )
    # Frame the plot around the labelled authors with a 2% margin.
    x_min = Y[guys,0].min()
    x_max = Y[guys,0].max()
    x_gap = .02 * (x_max-x_min)
    y_min = Y[guys,1].min()
    y_max = Y[guys,1].max()
    y_gap = .02 * (y_max-y_min)
    plt.xlim( x_min-x_gap, x_max+x_gap )
    plt.ylim( y_min-y_gap, y_max+y_gap )
    # Disabled diagnostic: draw links between frequent co-authors placed
    # far apart. NOTE(review): this dead branch references the module
    # globals C and y, not the function arguments -- confirm before
    # re-enabling.
    if False:
        scale_plot = .5 * ( (x_max-x_min) + (y_max-y_min) )
        connections = np.transpose( np.nonzero( C > 2 ) )
        violate = 0
        for a1, a2 in connections:
            if np.sqrt( ((y[a1]-y[a2])**2).sum() ) > .3 * scale_plot:
                ax.add_line( lines.Line2D( [y[a1,0], y[a2,0]], [y[a1,1], y[a2,1]],
                                           linewidth=1, color='r', alpha=.5 ) )
                violate += 1
        print( violate )
    # Step size (1% of the x-range) for the label de-overlap search.
    offset = .01 * ( plt.xlim()[1]-plt.xlim()[0] )
    # Label font size scales with |time coordinate|.
    font_s = np.abs(Z) * 9
    #alpha = np.minimum( np.maximum( (no_papers-10) / 10., 0 ), 1 )
    #alpha = alpha * .6 + .35
    text_positions = Y
    past_bb = []
    # Place a text label for each prominent author, nudging it to reduce
    # overlap with the labels already placed.
    for i in np.nonzero( guys )[0]:
        _x = text_positions[i][0]
        _y = text_positions[i][1]
        # Use only the surname part of 'surname_firstname'.
        _a = authors[i][0].split('_')[0]
        tt = ax.text( _x, _y, _a,
                      size=font_s[i],
                      rotation=0,
                      color=scalarMap.to_rgba( Z[i,0] ),
                      alpha = .9,
                      verticalalignment='center',
                      horizontalalignment='center' )
        transf = ax.transData.inverted()
        # Label bounding box in data coordinates.
        bb = tt.get_window_extent( renderer = find_renderer( fig ) ).transformed( transf ).get_points()
        if overlap_ratio( past_bb, bb ) > .02:
            # Grid search over +-5*offset shifts (interleaved towards both
            # directions) for the placement with the least overlap.
            best_o = np.inf
            best_adjust = None
            search_range = np.vstack( [ np.linspace( 0, 5*offset, 20 ),
                                        np.linspace( 0, -5*offset, 20 ) ]
                                      ).flatten('F')
            for x_adjust in search_range:
                for y_adjust in search_range:
                    oratio = overlap_ratio( past_bb, bb + np.array([x_adjust,y_adjust]) )
                    # Require a 5% improvement before accepting a shift.
                    if oratio < best_o * .95:
                        best_o = oratio
                        best_adjust = np.array( [x_adjust,y_adjust] )
            #if best_o > .02 and no_papers[i] < 15 and not (_a in whitelist):
            #    tt.set_alpha( 0 )
            #    print( 'sorry %20s %5.2f (%2d NIPS papers)' % (_a, best_o, no_papers[i] ) )
            #else:
            bb += best_adjust
            tt.set_x( .5*(bb[0,0]+bb[1,0]) )
            tt.set_y( .5*(bb[0,1]+bb[1,1]) )
        past_bb.append( bb )
    # histogram of Z (small inset in the lower-right corner)
    ax_inset = plt.axes( (0.77, 0.03, 0.2, 0.2), frameon=False )
    counts, bins, patches = ax_inset.hist( Z, 9, fc='0.5', ec='gray' )
    ax_inset.xaxis.set_ticks_position( "none" )
    ax_inset.yaxis.set_ticks_position( "left" )
    plt.xticks( [-1.5, 0, 1.5], size=8 )
    plt.yticks( [50, 100, 150, 200, 250], size=8 )
    # Color each histogram bar with the same colormap as the scatter.
    for _bin, _patch in zip( bins, patches ):
        _patch.set_facecolor( scalarMap.to_rgba( _bin ) )
    ax_inset.set_title( 'histogram of time coordinates', size=9 )
    # colorbar
    scalarMap._A = []
    cax = fig.add_axes( [1.01, 0, 0.04, 1] )
    cbar = fig.colorbar( scalarMap, ticks=[-1, -.5, 0, .5, 1], cax=cax )
    cax.text( .5, .5, '---time-->', size=14, rotation=90, verticalalignment='center', horizontalalignment='center' )
    cbar.ax.yaxis.set_ticks_position( 'right' )
    cbar.ax.set_yticklabels( ['<-1.0', '-0.5', '0', '0.5', '>1.0'] )
    # axes
    ax.tick_params( right='off', top='off' )
    ax.set_xticks( [ -250, -150, 0, 150, 250] )
    ax.set_yticks( [ -250, -150, 0, 150, 250] )
    fig.savefig( ofile,
                 bbox_inches='tight',
                 pad_inches=0,
                 transparent=True )
def __embed( P, result_file, methods, repeat ):
    '''
    (optionally) compute the embedding and save to disk
    then load the embedding from disk

    P           : normalized co-authorship probability matrix
    result_file : .npz cache path; embeddings are recomputed only when
                  this file is missing or unreadable
    methods     : subset of {'sne', 'tsne', 'st'} selecting which
                  embeddings to compute
    repeat      : number of restarts forwarded to spacetime.st_snep

    Returns (sne_Y, sne_E, tsne_Y, tsne_E,
             spacetime_Y, spacetime_Z, spacetime_E) read from the cache.
    '''
    if not os.access( result_file, os.R_OK ):
        # some good configurations for NIPS22
        spacetime.conv_threshold = 1e-9
        spacetime.min_epochs = 500
        spacetime.lrate_s = 500
        spacetime.lrate_t = 1
        sne_Y = None
        sne_E = 0
        if 'sne' in methods:
            # Gaussian SNE in 3 space dimensions, no time dimension.
            spacetime.distribution = 'gaussian'
            sne_Y,_tmp,sne_E = spacetime.st_snep( P, 3, 0, repeat=repeat )
        tsne_Y = None
        tsne_E = 0
        if 'tsne' in methods:
            # t-SNE (Student-t) in 3 space dimensions, no time dimension.
            spacetime.distribution = 'student'
            tsne_Y,_tmp,tsne_E = spacetime.st_snep( P, 3, 0, repeat=repeat )
        spacetime_Y = spacetime_Z = None
        spacetime_E = 0
        if 'st' in methods:
            # Space-time embedding: 2 space dimensions + 1 time dimension.
            spacetime.distribution = 'student'
            spacetime_Y,spacetime_Z,spacetime_E = \
                spacetime.st_snep( P, 2, 1, repeat=repeat )
        # Cache everything (unselected methods are stored as None/0).
        np.savez( result_file,
                  sne_Y=sne_Y,
                  sne_E=sne_E,
                  tsne_Y=tsne_Y,
                  tsne_E=tsne_E,
                  spacetime_Y=spacetime_Y,
                  spacetime_Z=spacetime_Z,
                  spacetime_E=spacetime_E,
                )
    print( 'loading results from %s' % result_file )
    tmp = np.load( result_file )
    return ( tmp['sne_Y'], tmp['sne_E'],
             tmp['tsne_Y'], tmp['tsne_E'],
             tmp['spacetime_Y'], tmp['spacetime_Z'], tmp['spacetime_E'] )
if __name__ == '__main__':
    # Script entry point: load the NIPS co-authorship data, compute (or
    # load cached) embeddings, report per-author statistics, and render
    # the space-time visualization to a PDF next to the result file.
    REPEAT = 50
    METHODS = ['st'] #[ 'sne', 'tsne', 'st' ]
    C, P, authors, no_papers = load_nips()
    print( "%d authors" % C.shape[0] )
    big_guys = np.nonzero( no_papers >= 10 )[0]
    print( "%d authors have >=10 NIPS papers" % big_guys.size )
    # Optional CLI argument selects a named result cache file.
    if len( sys.argv ) > 1:
        result_file = 'results/nips_result_%s.npz' % sys.argv[1]
    else:
        result_file = 'results/nips_result.npz'
    sne_Y, sne_E, tsne_Y, tsne_E, \
    spacetime_Y, spacetime_Z, spacetime_E \
    = __embed( P, result_file, METHODS, REPEAT )
    # for single space time embedding
    scale_Z = np.sqrt( (spacetime_Z**2).sum(1) )
    scale_Y = np.sqrt( (spacetime_Y**2).sum(1) )
    # Rank authors by the magnitude of their time coordinate.
    rank = np.argsort( scale_Z )[::-1]
    print( 'top 25 authors by z:' )
    for i in rank[:25]:
        print( '%20s z=%7.3f papers=%2d' % \
               ( authors[i][0], spacetime_Z[i,0], no_papers[i] ) )
    # Rank authors by raw paper count for comparison.
    rank_paper = np.argsort( no_papers )[::-1]
    print( 'top 25 authors by #papers:' )
    for i in rank_paper[:25]:
        print( '%20s z=%7.3f papers=%2d' % \
               ( authors[i][0], spacetime_Z[i,0], no_papers[i] ) )
    print( 'E[sne]=', sne_E )
    print( 'E[tsne]=', tsne_E )
    print( 'E[spacetime] =', spacetime_E )
    #for alg,Y in [ ('sne',sne_Y), ('tsne',tsne_Y), ('spacetime',spacetime_Y) ]:
    #    d2 = dist2.dist2( Y )
    #    radius = np.sqrt( d2.max(1) ).mean()
    #    print( '[%s] co-author distance=%.3f' % ( alg,
    #           np.sqrt( d2 * (C > 0) ).mean() / radius ) )
    #scale_no = ( no_papers - no_papers.min() ) / ( no_papers.max() - no_papers.min() )
    #visualize( tsne_y, authors, C, scale_no, big_guys, 't-SNE' )
    #visualize( tsne_multi_y[0], authors, C, no_papers, big_guys, 't-SNE_0' )
    #visualize( tsne_multi_y[1], authors, C, no_papers, big_guys, 't-SNE_1' )
    #visualize( spacetime_multi_y[0], authors, C, spacetime_multi_w[0], big_guys, 'spacetime_0' )
    #visualize( spacetime_multi_y[1], authors, C, spacetime_multi_w[1], big_guys, 'spacetime_1' )
    # Figure is written next to the result cache, with a .pdf extension.
    fig_file = os.path.splitext( result_file )[0] + '.pdf'
    __visualize( spacetime_Y, spacetime_Z, authors, no_papers, fig_file )
| bsd-2-clause |
enigmampc/catalyst | catalyst/data/treasuries_can.py | 15 | 5257 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import six
from toolz import curry
from toolz.curried.operator import add as prepend
# Bank of Canada series IDs mapped to human-readable tenor column names.
COLUMN_NAMES = {
    "V39063": '1month',
    "V39065": '3month',
    "V39066": '6month',
    "V39067": '1year',
    "V39051": '2year',
    "V39052": '3year',
    "V39053": '5year',
    "V39054": '7year',
    "V39055": '10year',
    # Bank of Canada refers to this as 'Long' Rate, approximately 30 years.
    "V39056": '30year',
}
# Series IDs queried from the treasury-bill endpoint vs the bond endpoint.
BILL_IDS = ['V39063', 'V39065', 'V39066', 'V39067']
BOND_IDS = ['V39051', 'V39052', 'V39053', 'V39054', 'V39055', 'V39056']
@curry
def _format_url(instrument_type,
                instrument_ids,
                start_date,
                end_date,
                earliest_allowed_date):
    """
    Format a URL for loading data from Bank of Canada.

    The ``@curry`` decorator lets callers pre-bind the instrument type
    and series IDs (see ``format_bill_url``/``format_bond_url``) and
    supply the dates later.
    """
    date_fmt = "%Y-%m-%d"
    # Series IDs are sent as 'L_<id>' tokens joined with '-'.
    joined_ids = '-'.join(map(prepend("L_"), instrument_ids))
    return (
        "http://www.bankofcanada.ca/stats/results/csv"
        "?lP=lookup_{instrument_type}_yields.php"
        "&sR={restrict}"
        "&se={instrument_ids}"
        "&dF={start}"
        "&dT={end}".format(
            instrument_type=instrument_type,
            instrument_ids=joined_ids,
            restrict=earliest_allowed_date.strftime(date_fmt),
            start=start_date.strftime(date_fmt),
            end=end_date.strftime(date_fmt),
        )
    )
# Partially-applied URL builders: each still expects
# (start_date, end_date, earliest_allowed_date).
format_bill_url = _format_url('tbill', BILL_IDS)
format_bond_url = _format_url('bond', BOND_IDS)
def load_frame(url, skiprows):
    """
    Load a DataFrame of data from a Bank of Canada site.

    Drops fully-empty rows, localizes the date index to UTC, and renames
    the raw series-ID columns to tenor names via COLUMN_NAMES.
    """
    frame = pd.read_csv(
        url,
        skiprows=skiprows,
        skipinitialspace=True,
        na_values=["Bank holiday", "Not available"],
        parse_dates=["Date"],
        index_col="Date",
    )
    frame = frame.dropna(how='all')
    frame = frame.tz_localize('UTC')
    return frame.rename(columns=COLUMN_NAMES)
def check_known_inconsistencies(bill_data, bond_data):
    """
    There are a couple quirks in the data provided by Bank of Canada.
    Check that no new quirks have been introduced in the latest download.

    Raises ValueError if the two frames' indices differ on any date other
    than the known, documented inconsistencies below.
    """
    # ``Index.sym_diff`` was deprecated and later removed from pandas;
    # ``symmetric_difference`` is the supported spelling.
    inconsistent_dates = bill_data.index.symmetric_difference(bond_data.index)
    known_inconsistencies = [
        # bill_data has an entry for 2010-02-15, which bond_data doesn't.
        # bond_data has an entry for 2006-09-04, which bill_data doesn't.
        # Both of these dates are bank holidays (Flag Day and Labor Day,
        # respectively).
        pd.Timestamp('2006-09-04', tz='UTC'),
        pd.Timestamp('2010-02-15', tz='UTC'),
        # 2013-07-25 comes back as "Not available" from the bills endpoint.
        # This date doesn't seem to be a bank holiday, but the previous
        # calendar implementation dropped this entry, so we drop it as well.
        # If someone cares deeply about the integrity of the Canadian trading
        # calendar, they may want to consider forward-filling here rather than
        # dropping the row.
        pd.Timestamp('2013-07-25', tz='UTC'),
    ]
    unexpected_inconsistencies = inconsistent_dates.drop(known_inconsistencies)
    if len(unexpected_inconsistencies):
        # Report the offending dates separately for each side.
        in_bills = bill_data.index.difference(bond_data.index).difference(
            known_inconsistencies
        )
        in_bonds = bond_data.index.difference(bill_data.index).difference(
            known_inconsistencies
        )
        raise ValueError(
            "Inconsistent dates for Canadian treasury bills vs bonds. \n"
            "Dates with bills but not bonds: {in_bills}.\n"
            "Dates with bonds but not bills: {in_bonds}.".format(
                in_bills=in_bills,
                in_bonds=in_bonds,
            )
        )
def earliest_possible_date():
    """
    The earliest date for which we can load data from this module.

    Returns a tz-aware (UTC), midnight-normalized Timestamp 10 years
    before today.
    """
    today = pd.Timestamp('now', tz='UTC').normalize()
    # Bank of Canada only has the last 10 years of data at any given time.
    # Subtract a DateOffset instead of calling ``today.replace(year=...)``:
    # ``replace`` raises ValueError when today is Feb 29 and the target
    # year is not a leap year, while DateOffset rolls back to Feb 28.
    return today - pd.DateOffset(years=10)
def get_treasury_data(start_date, end_date):
    """
    Download Canadian treasury bill and bond yields between start_date and
    end_date and return them as a single DataFrame of decimal rates
    (columns named per COLUMN_NAMES, UTC-localized date index).
    """
    bill_data = load_frame(
        format_bill_url(start_date, end_date, start_date),
        # We skip fewer rows here because we query for fewer bill fields,
        # which makes the header smaller.
        skiprows=18,
    )
    bond_data = load_frame(
        format_bond_url(start_date, end_date, start_date),
        skiprows=22,
    )
    # Fail loudly if the two endpoints disagree on dates in a new way.
    check_known_inconsistencies(bill_data, bond_data)
    # dropna('any') removes the rows for which we only had data for one of
    # bills/bonds.
    out = pd.concat([bond_data, bill_data], axis=1).dropna(how='any')
    assert set(out.columns) == set(six.itervalues(COLUMN_NAMES))
    # Multiply by 0.01 to convert from percentages to expected output format.
    return out * 0.01
| apache-2.0 |
quiltdata/quilt | api/python/quilt3/bucket.py | 1 | 6293 | """
bucket.py
Contains the Bucket class, which provides several useful functions
over an s3 bucket.
"""
import pathlib
from .data_transfer import (
copy_file,
delete_object,
list_object_versions,
list_objects,
select,
)
from .search_util import search_api
from .util import PhysicalKey, QuiltException, fix_url
class Bucket:
    """Bucket interface for Quilt.

    Thin convenience wrapper around a single S3 bucket: upload/download,
    listing, deletion, search, and S3 Select. All keys are interpreted
    relative to the bucket root.
    """
    def __init__(self, bucket_uri):
        """
        Creates a Bucket object.

        Args:
            bucket_uri(str): URI of bucket to target. Must start with 's3://'

        Returns:
            A new Bucket
        """
        self._pk = PhysicalKey.from_url(bucket_uri)
        if self._pk.is_local():
            raise QuiltException("Bucket URI must be an S3 URI")
        # A Bucket addresses the bucket itself, never an object inside it.
        if self._pk.path or self._pk.version_id is not None:
            raise QuiltException("Bucket URI shouldn't contain a path or a version ID")

    def search(self, query, limit=10):
        """
        Execute a search against the configured search endpoint.

        Args:
            query (str): query string to search
            limit (number): maximum number of results to return. Defaults to 10

        Query Syntax:
            By default, a normal plaintext search will be executed over the query string.
            You can use field-match syntax to filter on exact matches for fields in
            your metadata.
            The syntax for field match is `user_meta.$field_name:"exact_match"`.

        Returns:
            a list of objects with the following structure:
            ```
            [{
                "key": <key of the object>,
                "version_id": <version_id of object version>,
                "operation": <"Create" or "Delete">,
                "meta": <metadata attached to object>,
                "size": <size of object in bytes>,
                "text": <indexed text of object>,
                "source": <source document for object (what is actually stored in ElasticSeach)>,
                "time": <timestamp for operation>,
            }...]
            ```
        """
        # The search index is named after the bucket.
        return search_api(query, index=self._pk.bucket, limit=limit)

    def put_file(self, key, path):
        """
        Stores file at path to key in bucket.

        Args:
            key(str): key in bucket to store file at
            path(str): string representing local path to file

        Returns:
            None

        Raises:
            * if no file exists at path
            * if copy fails
        """
        dest = self._pk.join(key)
        copy_file(PhysicalKey.from_url(fix_url(path)), dest)

    def put_dir(self, key, directory):
        """
        Stores all files in the `directory` under the prefix `key`.

        Args:
            key(str): prefix to store files under in bucket
            directory(str): path to directory to grab files from

        Returns:
            None

        Raises:
            * if writing to bucket fails
        """
        # Ensure key ends in '/'.
        if key and key[-1] != '/':
            key += '/'
        src_path = pathlib.Path(directory)
        if not src_path.is_dir():
            raise QuiltException("Provided directory does not exist")
        # Trailing '/' on the source makes copy_file treat it as a tree copy.
        src = PhysicalKey.from_path(str(src_path) + '/')
        dest = self._pk.join(key)
        copy_file(src, dest)

    def keys(self):
        """
        Lists all keys in the bucket.

        Returns:
            List of strings
        """
        return [x.get('Key') for x in list_objects(self._pk.bucket, '')]

    def delete(self, key):
        """
        Deletes a key from the bucket.

        Args:
            key(str): key to delete

        Returns:
            None

        Raises:
            * if delete fails
        """
        if not key:
            raise QuiltException("Must specify the key to delete")
        if key[-1] == '/':
            raise QuiltException("Must use delete_dir to delete directories")
        delete_object(self._pk.bucket, key)

    def delete_dir(self, path):
        """Delete a directory and all of its contents from the bucket.

        Parameters:
            path (str): path to the directory to delete
        """
        # Deletes every object that shares the prefix, one at a time.
        results = list_objects(self._pk.bucket, path)
        for result in results:
            self.delete(result['Key'])

    def ls(self, path=None, recursive=False):
        """List data from the specified path.

        Parameters:
            path (str): bucket path to list
            recursive (bool): show subdirectories and their contents as well

        Returns:
            ``list``: Return value structure has not yet been permanently decided
            Currently, it's a ``tuple`` of ``list`` objects, containing the
            following: (directory info, file/object info, delete markers).
        """
        # Normalize to a prefix ending in '/' (or '' for the bucket root).
        if path and not path.endswith('/'):
            path += '/'
        elif not path:
            path = ""  # enumerate top-of-bucket
        results = list_object_versions(self._pk.bucket, path, recursive=recursive)
        return results

    def fetch(self, key, path):
        """
        Fetches file (or files) at `key` to `path`.

        If `key` ends in '/', then all files with the prefix `key` will match and
        will be stored in a directory at `path`.
        Otherwise, only one file will be fetched and it will be stored at `path`.

        Args:
            key(str): key in bucket to fetch
            path(str): path in local filesystem to store file or files fetched

        Returns:
            None

        Raises:
            * if path doesn't exist
            * if download fails
        """
        source = self._pk.join(key)
        dest = PhysicalKey.from_url(fix_url(path))
        copy_file(source, dest)

    def select(self, key, query, raw=False):
        """
        Selects data from an S3 object.

        Args:
            key(str): key to query in bucket
            query(str): query to execute (SQL by default)
            raw(bool): return the raw (but parsed) response

        Returns:
            pandas.DataFrame: results of query
        """
        source = self._pk.join(key)
        return select(source, query, raw=raw)
| apache-2.0 |
zohannn/motion_manager | scripts/old/task_2_reaching/var_1/predicting_task_2_var_1_10k.py | 2 | 138480 | #!/usr/bin/env python3
import sys
import pandas as pd
from sklearn import decomposition
from sklearn import metrics
from sklearn.externals import joblib
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import math
from random import randint
# HUPL
from HUPL.learner import preprocess_features
from HUPL.learner import preprocess_targets
from HUPL.learner import normalize_linear_scale
from HUPL.learner import denormalize_linear_scale
from HUPL.learner import my_input_fn
from HUPL.learner import construct_feature_columns
if len(sys.argv) <= 3:
sys.exit("Not enough args")
data_file = str(sys.argv[1])
models_dir = str(sys.argv[2])
pred_file_path = str(sys.argv[3])
data_pred = sys.argv[4].split(',')
data_pred_mod = np.array(data_pred)
# Target info
#target_x = float(data_pred[0])
#target_y = float(data_pred[1])
#target_z = float(data_pred[2])
#target_roll = float(data_pred[3])
#target_pitch = float(data_pred[4])
#target_yaw = float(data_pred[5])
# Obstacle 1 info
#obstacle_1_x = float(data_pred[6])
#obstacle_1_y = float(data_pred[7])
#obstacle_1_z = float(data_pred[8])
#obstacle_1_roll = float(data_pred[9])
#obstacle_1_pitch = float(data_pred[10])
#obstacle_1_yaw = float(data_pred[11])
# Settings
pd.set_option('display.max_columns', 10)
print_en = False
print_en_xf_plan = False
predict_xf_plan = True
dir_path_xf_plan = models_dir+"/xf_plan"
xf_plan_prediction = pd.DataFrame()
print_en_zf_L_plan = False
predict_zf_L_plan = True
dir_path_zf_L_plan = models_dir+"/zf_L_plan"
zf_L_plan_prediction = pd.DataFrame()
print_en_zf_U_plan = False
predict_zf_U_plan = True
dir_path_zf_U_plan = models_dir+"/zf_U_plan"
zf_U_plan_prediction = pd.DataFrame()
print_en_dual_f_plan = False
predict_dual_f_plan = True
dir_path_dual_f_plan = models_dir+"/dual_f_plan"
dual_f_plan_prediction = pd.DataFrame()
print_en_x_bounce = False
predict_x_bounce = True
dir_path_x_bounce = models_dir+"/x_bounce"
x_bounce_prediction = pd.DataFrame()
print_en_zb_L= False
predict_zb_L = True
dir_path_zb_L = models_dir+"/zb_L"
zb_L_prediction = pd.DataFrame()
print_en_zb_U = False
predict_zb_U = True
dir_path_zb_U = models_dir+"/zb_U"
zb_U_prediction = pd.DataFrame()
print_en_dual_bounce = False
predict_dual_bounce = True
dir_path_dual_bounce = models_dir+"/dual_bounce"
dual_bounce_prediction = pd.DataFrame()
learning_rate=0.009
learning_rate_class=0.009
n_pca_comps_xf_plan = 7
n_clusters_xf_plan = 6
min_cluster_size_xf_plan = 10
th_xf_plan = 0.001
periods_xf_plan = 20
steps_xf_plan = 1000
batch_size_xf_plan = 100
units_xf_plan = [10,10]
units_xf_plan_class = [10,10,10]
n_clusters_zf_L_plan = 1
min_cluster_size_zf_L_plan = 10
th_zf_L_plan = 0.001
periods_zf_L_plan = 15
steps_zf_L_plan = 500
batch_size_zf_L_plan = 100
units_zf_L_plan = [10,10]
units_zf_L_plan_class = [10,10,10]
n_clusters_zf_U_plan = 1
min_cluster_size_zf_U_plan = 10
th_zf_U_plan = 0.001
periods_zf_U_plan = 10
steps_zf_U_plan = 1000
batch_size_zf_U_plan = 100
units_zf_U_plan = [10,10]
units_zf_U_plan_class = [10,10,10]
n_pca_comps_dual_f_plan = 10
n_clusters_dual_f_plan = 4
min_cluster_size_dual_f_plan = 10
th_dual_f_plan = 0.0001
periods_dual_f_plan = 10
steps_dual_f_plan = 1000
batch_size_dual_f_plan = 100
units_dual_f_plan = [10,10]
units_dual_f_plan_class = [10,10,10]
n_pca_comps_x_bounce = 9
n_clusters_x_bounce = 6
min_cluster_size_x_bounce = 10
th_x_bounce = 0.001
periods_x_bounce = 20
steps_x_bounce = 1000
batch_size_x_bounce = 100
units_x_bounce = [10,10]
units_x_bounce_class = [10,10,10]
n_clusters_zb_L = 2
min_cluster_size_zb_L = 10
th_zb_L = 0.001
periods_zb_L = 10
steps_zb_L = 500
batch_size_zb_L = 100
units_zb_L = [10,10]
units_zb_L_class = [10,10,10]
n_clusters_zb_U = 2
min_cluster_size_zb_U = 10
th_zb_U = 0.001
periods_zb_U = 10
steps_zb_U = 500
batch_size_zb_U = 100
units_zb_U = [10,10]
units_zb_U_class = [10,10,10]
n_pca_comps_dual_bounce = 10
n_clusters_dual_bounce = 6
min_cluster_size_dual_bounce = 10
th_dual_bounce = 0.001
periods_dual_bounce = 20
steps_dual_bounce = 1000
batch_size_dual_bounce = 100
units_dual_bounce = [10,10]
units_dual_bounce_class = [10,10,10]
task_1_dataframe = pd.read_csv(data_file,sep=",")
task_1_dataframe = task_1_dataframe.reindex(np.random.permutation(task_1_dataframe.index))
(inputs_dataframe,inputs_cols,null_in_cols,id_null_cols) = preprocess_features(task_1_dataframe)
#print("Input columns:")
#print(inputs_cols)
#print("Null columns:")
#print(null_in_cols)
#print("ID of null columns:")
#print(id_null_cols)
data_pred_mod_new = np.delete(data_pred_mod,id_null_cols)
#print("Modified data:")
#print(data_pred_mod_new)
r = randint(0,len(task_1_dataframe.index))
task_1_sample = task_1_dataframe.iloc[[r]]
# Column groups of the full dataset, selected by name prefix: the planned
# final posture (xf_plan) with its lower/upper bounds and dual variables,
# and the bounce posture (x_bounce) with its bounds and duals.
cols_xf_plan_tot = [col for col in task_1_dataframe if col.startswith('xf_plan')]
cols_zf_L_plan_tot = [col for col in task_1_dataframe if col.startswith('zf_L_plan')]
cols_zf_U_plan_tot = [col for col in task_1_dataframe if col.startswith('zf_U_plan')]
cols_dual_f_plan_tot = [col for col in task_1_dataframe if col.startswith('dual_f_plan')]
cols_x_bounce_tot = [col for col in task_1_dataframe if col.startswith('x_bounce')]
cols_zb_L_tot = [col for col in task_1_dataframe if col.startswith('zb_L')]
cols_zb_U_tot = [col for col in task_1_dataframe if col.startswith('zb_U')]
cols_dual_bounce_tot = [col for col in task_1_dataframe if col.startswith('dual_bounce')]
# Normalize the training inputs, keeping the per-column max/min used for the
# scaling so the query point can be mapped into the same space below.
normalized_inputs,normalized_inputs_max,normalized_inputs_min = normalize_linear_scale(inputs_dataframe)
# Split out the target columns (and the list of all-null output columns).
(outputs_dataframe, null_outputs) = preprocess_targets(task_1_dataframe)
# Single-row frames holding the query point, raw and (below) normalized.
inputs_test_df= pd.DataFrame([data_pred_mod_new],columns=inputs_cols)
norm_inputs_test_df = pd.DataFrame([data_pred_mod_new],columns=inputs_cols)
#print(inputs_test_df)
# Map each query-input column linearly from [min_val, max_val] to [-1, 1]
# using the training-set extrema: ((x - min) / ((max - min)/2)) - 1.
# NOTE(review): float(Series) assumes exactly one row, and `scale` is zero
# when max_val == min_val (division by zero) — confirm constant columns
# cannot reach this point.
for col in inputs_cols:
    min_val = normalized_inputs_min[col]
    max_val = normalized_inputs_max[col]
    scale = (max_val - min_val) / 2.0
    norm_inputs_test_df[col] = (((float(inputs_test_df[col]) - min_val) / scale) - 1.0)
#print(norm_inputs_test_df)
# plan final posture columns
cols_x_f_plan = [col for col in outputs_dataframe if col.startswith('xf_plan')]
cols_zf_L_plan = [col for col in outputs_dataframe if col.startswith('zf_L_plan')]
cols_zf_U_plan = [col for col in outputs_dataframe if col.startswith('zf_U_plan')]
cols_dual_f_plan = [col for col in outputs_dataframe if col.startswith('dual_f_plan')]
# bounce posture columns
cols_x_bounce = [col for col in outputs_dataframe if col.startswith('x_bounce')]
cols_zb_L = [col for col in outputs_dataframe if col.startswith('zb_L')]
cols_zb_U = [col for col in outputs_dataframe if col.startswith('zb_U')]
cols_dual_bounce = [col for col in outputs_dataframe if col.startswith('dual_bounce')]
# One sub-frame per output group; these are the (preprocessed) targets.
outputs_xf_plan_df = outputs_dataframe[cols_x_f_plan]
outputs_zf_L_plan_df = outputs_dataframe[cols_zf_L_plan]
outputs_zf_U_plan_df = outputs_dataframe[cols_zf_U_plan]
outputs_dual_f_plan_df = outputs_dataframe[cols_dual_f_plan]
outputs_x_bounce_df = outputs_dataframe[cols_x_bounce]
outputs_zb_L_df = outputs_dataframe[cols_zb_L]
outputs_zb_U_df = outputs_dataframe[cols_zb_U]
outputs_dual_bounce_df = outputs_dataframe[cols_dual_bounce]
# Clamp the bounce duals into a strictly positive, bounded range.
outputs_dual_bounce_df = outputs_dual_bounce_df.clip(lower=0.0001,upper=50)
if(print_en):
    print("X_f_plan:")
    print(outputs_xf_plan_df.head())
    print("zf_L_plan:")
    print(outputs_zf_L_plan_df.head())
    print("zf_U_plan:")
    print(outputs_zf_U_plan_df.head())
    print("dual_f_plan:")
    print(outputs_dual_f_plan_df.head())
    print("X_bounce:")
    print(outputs_x_bounce_df.head())
    print("zb_L:")
    print(outputs_zb_L_df.head())
    print("zb_U:")
    print(outputs_zb_U_df.head())
    print("dual_bounce:")
    print(outputs_dual_bounce_df.head())
if predict_xf_plan:
    # ----- FINAL POSTURE SELECTION: FINAL POSTURE --------------------------------------------- #
    if not outputs_xf_plan_df.empty:
        # Per-column extrema saved at training time, used below to
        # de-normalize model outputs back to physical values.
        # NOTE(review): pd.Series.from_csv is deprecated (removed in
        # pandas 1.0) — this script requires an older pandas.
        outputs_xf_plan_df_max = pd.Series.from_csv(dir_path_xf_plan+"/xf_plan_max.csv",sep=',')
        outputs_xf_plan_df_min = pd.Series.from_csv(dir_path_xf_plan + "/xf_plan_min.csv",sep=',')
        # ------------------------- Random ---------------------------------------- #
        # Baseline prediction: the xf_plan columns of the random sample.
        xf_plan_rdm_prediction = task_1_sample[cols_xf_plan_tot]
        if (print_en_xf_plan):
            print("Random xf_plan: ")
            print(xf_plan_rdm_prediction)
        # ------------------------- Neural Network ---------------------------------------- #
        # Step 1: classify the normalized query point into one of the
        # clusters learned at training time (model restored from model_dir).
        nn_classifier = tf.estimator.DNNClassifier(
            feature_columns=construct_feature_columns(norm_inputs_test_df),
            optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
            n_classes=n_clusters_xf_plan,
            hidden_units=units_xf_plan_class,
            model_dir=dir_path_xf_plan+"/classification/nn"
        )
        targets_df = pd.DataFrame([[0.0]])  # dummy targets: prediction only
        predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                    targets_df,
                                                    num_epochs=1,
                                                    shuffle=False)
        test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
        test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
        n_cluster = test_pred[0] # the input belongs to this cluster
        # Step 2: load the selected cluster's training data and project its
        # outputs onto the first n_pca_comps_xf_plan principal components.
        selected_cl_in_xf_plan_df = pd.read_csv(dir_path_xf_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_xf_plan_df = pd.read_csv(dir_path_xf_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        X_f_plan = selected_cl_out_xf_plan_df.values
        pca_xf_plan = decomposition.PCA(n_components=n_pca_comps_xf_plan)
        pc = pca_xf_plan.fit_transform(X_f_plan)
        pc_df = pd.DataFrame(data=pc, columns=cols_x_f_plan[0:n_pca_comps_xf_plan])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim  # number of components the regressor still has to predict
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        # Step 3: components whose inter-quartile range is below th_xf_plan
        # are treated as (near-)constant: predict them with the cluster mean
        # and remove them from the regression targets.
        for j in range(0, dim):
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_xf_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        # NOTE(review): the loop variable shadows the builtin `str` here and
        # in the loops below (works, but poor practice).
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        # Step 4: regress the remaining (non-constant) components with the
        # cluster's trained DNN regressor (restored from model_dir).
        if (ldim != 0):
            nn_regressor = tf.estimator.DNNRegressor(
                feature_columns=construct_feature_columns(norm_inputs_test_df),
                hidden_units=units_xf_plan,
                optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
                label_dimension=ldim,
                model_dir=dir_path_xf_plan + "/cluster" + repr(n_cluster)+"/nn"
            )
            tar_zeros = np.zeros(shape=(1,len(col_names_1)))
            targets_df = pd.DataFrame(tar_zeros,columns=col_names_1)
            predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                        targets_df[col_names_1],
                                                        num_epochs=1,
                                                        shuffle=False)
            test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
            test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # Step 5: re-assemble mean-predicted and regressed components back
        # into the original PCA column order.
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        # Step 6: back-project from PCA space to the full output space, then
        # de-normalize and scatter into the complete xf_plan column set
        # (columns absent from the prediction stay 0).
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_xf_plan.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_x_f_plan)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_xf_plan_df_max, outputs_xf_plan_df_min)
        zero_data_xf_plan_tot = np.zeros(shape=(1, len(cols_xf_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_xf_plan_tot, columns=cols_xf_plan_tot)
        for str in cols_xf_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        xf_plan_nn_prediction = denorm_test_predictions_tot_df.copy()
        if (print_en_xf_plan):
            print("Predicted NN xf_plan: ")
            print(denorm_test_predictions_tot_df)
        # Plain nested-list copy of the normalized inputs, as expected by the
        # sklearn classifiers below.
        norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
        # ------------------------- Support Vector Machines ---------------------------------------- #
        # Same pipeline as the NN branch, but cluster selection and the
        # per-cluster regression use pre-trained sklearn SVM models.
        svm_classifier = joblib.load(dir_path_xf_plan + "/classification/svm/svm_clf.joblib")
        test_pred = svm_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        # Re-fit PCA on the outputs of the SVM-selected cluster.
        selected_cl_in_xf_plan_df = pd.read_csv(dir_path_xf_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_xf_plan_df = pd.read_csv(dir_path_xf_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        X_f_plan = selected_cl_out_xf_plan_df.values
        pca_xf_plan = decomposition.PCA(n_components=n_pca_comps_xf_plan)
        pc = pca_xf_plan.fit_transform(X_f_plan)
        pc_df = pd.DataFrame(data=pc, columns=cols_x_f_plan[0:n_pca_comps_xf_plan])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim  # components left for the regressor after IQR pruning
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        # Near-constant components (IQR <= th_xf_plan) are predicted with
        # the cluster mean and excluded from the regression.
        for j in range(0, dim):
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_xf_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:  # NOTE(review): shadows builtin str
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim!=0):
            svm_regressor = joblib.load(dir_path_xf_plan + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
            # NOTE(review): the input frame is sliced to its first ldim
            # columns (ldim counts remaining OUTPUT components, not input
            # features), and the [0:, 0:] indexing assumes predict() returns
            # a 2-D array — presumably the regressor was trained the same
            # way; confirm against the training script.
            test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # Re-assemble components in original PCA order, back-project,
        # de-normalize and scatter into the full xf_plan column set.
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_xf_plan.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_x_f_plan)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_xf_plan_df_max, outputs_xf_plan_df_min)
        zero_data_xf_plan_tot = np.zeros(shape=(1, len(cols_xf_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_xf_plan_tot, columns=cols_xf_plan_tot)
        for str in cols_xf_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        xf_plan_svm_prediction = denorm_test_predictions_tot_df.copy()
        if (print_en_xf_plan):
            print("Predicted SVM xf_plan: ")
            print(denorm_test_predictions_tot_df)
        # ------------------------- K-Nearest Neighbors ---------------------------------------- #
        # Same pipeline again with pre-trained KNN cluster classifier and
        # per-cluster KNN regressor.
        knn_classifier = joblib.load(dir_path_xf_plan + "/classification/knn/knn_clf.joblib")
        test_pred = knn_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        # Re-fit PCA on the outputs of the KNN-selected cluster.
        selected_cl_in_xf_plan_df = pd.read_csv(dir_path_xf_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_xf_plan_df = pd.read_csv(dir_path_xf_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        X_f_plan = selected_cl_out_xf_plan_df.values
        pca_xf_plan = decomposition.PCA(n_components=n_pca_comps_xf_plan)
        pc = pca_xf_plan.fit_transform(X_f_plan)
        pc_df = pd.DataFrame(data=pc, columns=cols_x_f_plan[0:n_pca_comps_xf_plan])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim  # components left for the regressor after IQR pruning
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        # Near-constant components (IQR <= th_xf_plan) are predicted with
        # the cluster mean and excluded from the regression.
        for j in range(0, dim):
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_xf_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:  # NOTE(review): shadows builtin str
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim!=0):
            knn_regressor = joblib.load(dir_path_xf_plan + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
            # NOTE(review): inputs sliced to the first ldim columns (ldim is
            # an OUTPUT count) and predict() assumed 2-D — see the SVM
            # branch; confirm against the training script.
            test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # Re-assemble components in original PCA order, back-project,
        # de-normalize and scatter into the full xf_plan column set.
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_xf_plan.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_x_f_plan)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_xf_plan_df_max, outputs_xf_plan_df_min)
        zero_data_xf_plan_tot = np.zeros(shape=(1, len(cols_xf_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_xf_plan_tot, columns=cols_xf_plan_tot)
        for str in cols_xf_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        xf_plan_knn_prediction = denorm_test_predictions_tot_df.copy()
        if (print_en_xf_plan):
            print("Predicted KNN xf_plan: ")
            print(denorm_test_predictions_tot_df)
if predict_zf_L_plan:
    # ----- FINAL POSTURE SELECTION: LOWER BOUNDS --------------------------------------------- #
    if not outputs_zf_L_plan_df.empty:
        # Training-time extrema used to de-normalize the zf_L_plan outputs.
        # NOTE(review): pd.Series.from_csv is deprecated (removed in
        # pandas 1.0) — this script requires an older pandas.
        outputs_zf_L_plan_df_max = pd.Series.from_csv(dir_path_zf_L_plan + "/zf_L_plan_max.csv", sep=',')
        outputs_zf_L_plan_df_min = pd.Series.from_csv(dir_path_zf_L_plan + "/zf_L_plan_min.csv", sep=',')
        # ------------------------- Random ---------------------------------------- #
        # Baseline prediction: the zf_L_plan columns of the random sample.
        zf_L_plan_rdm_prediction = task_1_sample[cols_zf_L_plan_tot]
        if (print_en_zf_L_plan):
            print("Random zf_L_plan: ")
            print(zf_L_plan_rdm_prediction)
        # ------------------------- Neural Network ---------------------------------------- #
        # Cluster classification is disabled for zf_L_plan (classifier kept
        # below, commented out); cluster 0 is always used.  Unlike xf_plan,
        # this section works directly on the output columns (no PCA).
        '''
        nn_classifier = tf.estimator.DNNClassifier(
            feature_columns=construct_feature_columns(norm_inputs_test_df),
            optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
            n_classes=n_clusters_zf_L_plan,
            hidden_units=units_zf_L_plan_class,
            model_dir=dir_path_zf_L_plan+"/classification/nn"
        )
        targets_df = pd.DataFrame([[0.0]])
        predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                    targets_df,
                                                    num_epochs=1,
                                                    shuffle=False)
        test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
        test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
        n_cluster = test_pred[0] # the input belongs to this cluster
        '''
        n_cluster = 0
        selected_cl_in_zf_L_plan_df = pd.read_csv(dir_path_zf_L_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_zf_L_plan_df = pd.read_csv(dir_path_zf_L_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        col_names = list(selected_cl_out_zf_L_plan_df.columns.values)
        dim = len(selected_cl_out_zf_L_plan_df.columns.values)
        ldim = dim  # outputs the regressor still has to predict
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(selected_cl_out_zf_L_plan_df.columns.values)
        # Outputs whose inter-quartile range is below th_zf_L_plan are
        # treated as constant: predict them with the cluster mean and drop
        # them from the regression targets.
        for j in range(0, dim):
            if (math.sqrt(math.pow((selected_cl_out_zf_L_plan_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zf_L_plan_df.iloc[0:, j].quantile(0.75)),2)) <= th_zf_L_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zf_L_plan_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zf_L_plan_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zf_L_plan_df.columns[j])
        for str in test_pred_col_names_1:  # NOTE(review): shadows builtin str
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        # Regress the remaining outputs with the cluster's trained DNN
        # regressor (restored from model_dir).
        if (ldim != 0):
            nn_regressor = tf.estimator.DNNRegressor(
                feature_columns=construct_feature_columns(norm_inputs_test_df),
                hidden_units=units_zf_L_plan,
                optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
                label_dimension=ldim,
                model_dir=dir_path_zf_L_plan + "/cluster" + repr(n_cluster)+"/nn"
            )
            predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                        targets_df[col_names_1],
                                                        num_epochs=1,
                                                        shuffle=False)
            test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
            test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # Re-assemble mean-predicted and regressed outputs in the original
        # column order, then de-normalize and scatter into the complete
        # zf_L_plan column set (missing columns stay 0).
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zf_L_plan_df_max, outputs_zf_L_plan_df_min)
        zero_data_zf_L_tot = np.zeros(shape=(1, len(cols_zf_L_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zf_L_tot, columns=cols_zf_L_plan_tot)
        for str in cols_zf_L_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zf_L_plan_nn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zf_L_plan):
            print("Predicted NN zf_L_plan:")
            print(denorm_test_predictions_tot_df)
        # Plain nested-list copy of the normalized inputs for sklearn models.
        norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
        # ------------------------- Support Vector Machines ---------------------------------------- #
        # SVM cluster classification is disabled (kept below, commented
        # out); cluster 0 is always used.
        '''
        svm_classifier = joblib.load(dir_path_zf_L_plan + "/classification/svm/svm_clf.joblib")
        test_pred = svm_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        '''
        n_cluster = 0
        selected_cl_in_zf_L_plan_df = pd.read_csv(dir_path_zf_L_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_zf_L_plan_df = pd.read_csv(dir_path_zf_L_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        col_names = list(selected_cl_out_zf_L_plan_df.columns.values)
        dim = len(selected_cl_out_zf_L_plan_df.columns.values)
        ldim = dim  # outputs left for the regressor after IQR pruning
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(selected_cl_out_zf_L_plan_df.columns.values)
        # Near-constant outputs (IQR <= th_zf_L_plan) are predicted with the
        # cluster mean and excluded from the regression.
        for j in range(0, dim):
            if (math.sqrt(math.pow((selected_cl_out_zf_L_plan_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zf_L_plan_df.iloc[0:, j].quantile(0.75)),2)) <= th_zf_L_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zf_L_plan_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zf_L_plan_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zf_L_plan_df.columns[j])
        for str in test_pred_col_names_1:  # NOTE(review): shadows builtin str
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            svm_regressor = joblib.load(dir_path_zf_L_plan + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
            # NOTE(review): inputs sliced to the first ldim columns (ldim is
            # an OUTPUT count) and predict() assumed 2-D — presumably this
            # mirrors how the regressor was trained; confirm.
            test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # Re-assemble, de-normalize and scatter into the full zf_L_plan
        # column set (missing columns stay 0).
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zf_L_plan_df_max, outputs_zf_L_plan_df_min)
        zero_data_zf_L_tot = np.zeros(shape=(1, len(cols_zf_L_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zf_L_tot, columns=cols_zf_L_plan_tot)
        for str in cols_zf_L_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zf_L_plan_svm_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zf_L_plan):
            print("Predicted SVM zf_L_plan:")
            print(denorm_test_predictions_tot_df)
        # ------------------------- K-Nearest Neighbors ---------------------------------------- #
        # KNN cluster classification is disabled (kept below, commented
        # out); cluster 0 is always used.
        '''
        knn_classifier = joblib.load(dir_path_zf_L_plan + "/classification/knn/knn_clf.joblib")
        test_pred = knn_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        '''
        n_cluster = 0
        selected_cl_in_zf_L_plan_df = pd.read_csv(dir_path_zf_L_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_zf_L_plan_df = pd.read_csv(dir_path_zf_L_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        col_names = list(selected_cl_out_zf_L_plan_df.columns.values)
        dim = len(selected_cl_out_zf_L_plan_df.columns.values)
        ldim = dim  # outputs left for the regressor after IQR pruning
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(selected_cl_out_zf_L_plan_df.columns.values)
        # Near-constant outputs (IQR <= th_zf_L_plan) are predicted with the
        # cluster mean and excluded from the regression.
        for j in range(0, dim):
            if (math.sqrt(math.pow((selected_cl_out_zf_L_plan_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zf_L_plan_df.iloc[0:, j].quantile(0.75)),2)) <= th_zf_L_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zf_L_plan_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zf_L_plan_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zf_L_plan_df.columns[j])
        for str in test_pred_col_names_1:  # NOTE(review): shadows builtin str
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            knn_regressor = joblib.load(dir_path_zf_L_plan + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
            # NOTE(review): inputs sliced to the first ldim columns (ldim is
            # an OUTPUT count) and predict() assumed 2-D — see SVM branch;
            # confirm against the training script.
            test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # Re-assemble, de-normalize and scatter into the full zf_L_plan
        # column set (missing columns stay 0).
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zf_L_plan_df_max, outputs_zf_L_plan_df_min)
        zero_data_zf_L_tot = np.zeros(shape=(1, len(cols_zf_L_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zf_L_tot, columns=cols_zf_L_plan_tot)
        for str in cols_zf_L_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zf_L_plan_knn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zf_L_plan):
            print("Predicted KNN zf_L_plan:")
            print(denorm_test_predictions_tot_df)
    else:
        # All zf_L outputs were null in the dataset: every predictor
        # (random, NN, SVM, KNN) degenerates to an all-zeros frame over the
        # zf_L columns listed in null_outputs.
        col_names = [col for col in null_outputs if col.startswith('zf_L')]
        zeros = np.zeros(shape=(1,len(col_names)))
        test_pred_df = pd.DataFrame(zeros,columns=col_names)
        zf_L_plan_rdm_prediction = test_pred_df.copy()
        zf_L_plan_nn_prediction = test_pred_df.copy()
        zf_L_plan_svm_prediction = test_pred_df.copy()
        zf_L_plan_knn_prediction = test_pred_df.copy()
        if(print_en_zf_L_plan):
            print("Random zf_L:")
            print(test_pred_df)
            print("Predicted NN zf_L:")
            print(test_pred_df)
            print("Predicted SVM zf_L:")
            print(test_pred_df)
            print("Predicted KNN zf_L:")
            print(test_pred_df)
if predict_zf_U_plan:
# ----- FINAL POSTURE SELECTION: UPPER BOUNDS --------------------------------------------- #
if not outputs_zf_U_plan_df.empty:
outputs_zf_U_plan_df_max = pd.Series.from_csv(dir_path_zf_U_plan + "/zf_U_plan_max.csv", sep=',')
outputs_zf_U_plan_df_min = pd.Series.from_csv(dir_path_zf_U_plan + "/zf_U_plan_min.csv", sep=',')
# ------------------------- Random ---------------------------------------- #
zf_U_plan_rdm_prediction = task_1_sample[cols_zf_U_plan_tot]
if (print_en_zf_U_plan):
print("Random zf_U_plan: ")
print(zf_U_plan_rdm_prediction)
# ------------------------- Neural Network ---------------------------------------- #
'''
nn_classifier = tf.estimator.DNNClassifier(
feature_columns=construct_feature_columns(norm_inputs_test_df),
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
n_classes=n_clusters_zf_U_plan,
hidden_units=units_zf_U_plan_class,
model_dir=dir_path_zf_U_plan+"/classification/nn"
)
targets_df = pd.DataFrame([[0.0]])
predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
targets_df,
num_epochs=1,
shuffle=False)
test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
n_cluster = test_pred[0] # the input belongs to this cluster
'''
n_cluster = 0
selected_cl_in_zf_U_plan_df = pd.read_csv(dir_path_zf_U_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_zf_U_plan_df = pd.read_csv(dir_path_zf_U_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
col_names = list(selected_cl_out_zf_U_plan_df.columns.values)
dim = len(selected_cl_out_zf_U_plan_df.columns.values)
ldim = dim
test_predictions_1 = np.array([])
test_predictions_2 = []
test_predictions_df = pd.DataFrame()
test_predictions_df_1 = pd.DataFrame()
test_predictions_df_2 = pd.DataFrame()
tar_zeros = np.zeros(shape=(1,len(col_names)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names)
test_pred_col_names_1 = []
col_names_1 = list(selected_cl_out_zf_U_plan_df.columns.values)
for j in range(0, dim):
if (math.sqrt(math.pow((selected_cl_out_zf_U_plan_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zf_U_plan_df.iloc[0:, j].quantile(0.75)),2)) <= th_zf_U_plan):
if (test_predictions_1.size == 0):
test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zf_U_plan_df.iloc[0:, j].mean())
else:
test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zf_U_plan_df.iloc[0:, j].mean())],axis=1)
ldim = ldim - 1
test_pred_col_names_1.append(selected_cl_out_zf_U_plan_df.columns[j])
for str in test_pred_col_names_1:
col_names_1.remove(str)
if (test_predictions_1.size != 0):
test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=test_pred_col_names_1)
if (ldim != 0):
nn_regressor = tf.estimator.DNNRegressor(
feature_columns=construct_feature_columns(norm_inputs_test_df),
hidden_units=units_zf_U_plan,
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
label_dimension=ldim,
model_dir=dir_path_zf_U_plan + "/cluster" + repr(n_cluster)+"/nn"
)
predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
targets_df[col_names_1],
num_epochs=1,
shuffle=False)
test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=col_names_1)
if (test_predictions_df_1.empty):
test_predictions_df = test_predictions_df_2
elif (test_predictions_df_2.empty):
test_predictions_df = test_predictions_df_1
else:
for str in col_names:
if str in test_predictions_df_1:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
elif str in test_predictions_df_2:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zf_U_plan_df_max, outputs_zf_U_plan_df_min)
zero_data_zf_U_tot = np.zeros(shape=(1, len(cols_zf_U_plan_tot)))
denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zf_U_tot, columns=cols_zf_U_plan_tot)
for str in cols_zf_U_plan_tot:
if str in denorm_test_predictions_df:
denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
zf_U_plan_nn_prediction = denorm_test_predictions_tot_df.copy()
if(print_en_zf_U_plan):
print("Predicted NN zf_U_plan:")
print(denorm_test_predictions_tot_df)
norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
# ------------------------- Support Vector Machines ---------------------------------------- #
'''
svm_classifier = joblib.load(dir_path_zf_U_plan + "/classification/svm/svm_clf.joblib")
test_pred = svm_classifier.predict(norm_inputs_test_list)
n_cluster = test_pred[0]
'''
n_cluster = 0
selected_cl_in_zf_U_plan_df = pd.read_csv(dir_path_zf_U_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        # ---- zf_U_plan / SVM branch: predict outputs for the selected cluster ----
        # Two-stage scheme used throughout this script:
        #   1) output columns whose |q25 - q75| (IQR magnitude) is <= th_zf_U_plan are
        #      treated as quasi-constant and predicted by the cluster column mean;
        #   2) the remaining `ldim` columns are predicted by the cluster's trained
        #      regressor (SVM here, KNN below).
        selected_cl_out_zf_U_plan_df = pd.read_csv(dir_path_zf_U_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        col_names = list(selected_cl_out_zf_U_plan_df.columns.values)
        dim = len(selected_cl_out_zf_U_plan_df.columns.values)
        ldim = dim  # columns left to the regressor after removing quasi-constant ones
        test_predictions_1 = np.array([])  # mean-based predictions (quasi-constant columns)
        test_predictions_2 = []            # regressor-based predictions
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)  # dummy frame; only its row count is used below
        test_pred_col_names_1 = []  # names of the mean-predicted columns
        col_names_1 = list(selected_cl_out_zf_U_plan_df.columns.values)  # names left to the regressor
        for j in range(0, dim):
            # math.sqrt(math.pow(x, 2)) is simply abs(x): |q25 - q75| of column j
            if (math.sqrt(math.pow((selected_cl_out_zf_U_plan_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zf_U_plan_df.iloc[0:, j].quantile(0.75)),2)) <= th_zf_U_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zf_U_plan_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zf_U_plan_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zf_U_plan_df.columns[j])
        # NOTE(review): the loop variable shadows the builtin `str` (pattern repeated
        # throughout this script) — works, but worth renaming in a future cleanup.
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            # assumes the SVM regressor was trained on the first `ldim` input columns
            # in this exact order — TODO confirm against the training script
            svm_regressor = joblib.load(dir_path_zf_U_plan + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
            test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # Merge the two partial frames back into the original column order.
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zf_U_plan_df_max, outputs_zf_U_plan_df_min)
        # Scatter the predicted columns into the full (zero-initialised) column set.
        zero_data_zf_U_tot = np.zeros(shape=(1, len(cols_zf_U_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zf_U_tot, columns=cols_zf_U_plan_tot)
        for str in cols_zf_U_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zf_U_plan_svm_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zf_U_plan):
            print("Predicted SVM zf_U_plan:")
            print(denorm_test_predictions_tot_df)
        # ------------------------- K-Nearest Neighbors ---------------------------------------- #
        # NOTE(review): the KNN cluster classifier is disabled (string-literal no-op
        # below) and cluster 0 is used unconditionally — confirm this is intentional.
        '''
        knn_classifier = joblib.load(dir_path_zf_U_plan + "/classification/knn/knn_clf.joblib")
        test_pred = knn_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        '''
        n_cluster = 0
        selected_cl_in_zf_U_plan_df = pd.read_csv(dir_path_zf_U_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_zf_U_plan_df = pd.read_csv(dir_path_zf_U_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        # Same two-stage mean/regressor scheme as the SVM branch above, with the
        # cluster's KNN regressor.
        col_names = list(selected_cl_out_zf_U_plan_df.columns.values)
        dim = len(selected_cl_out_zf_U_plan_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(selected_cl_out_zf_U_plan_df.columns.values)
        for j in range(0, dim):
            if (math.sqrt(math.pow((selected_cl_out_zf_U_plan_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zf_U_plan_df.iloc[0:, j].quantile(0.75)),2)) <= th_zf_U_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zf_U_plan_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zf_U_plan_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(selected_cl_out_zf_U_plan_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            knn_regressor = joblib.load(dir_path_zf_U_plan + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
            test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zf_U_plan_df_max, outputs_zf_U_plan_df_min)
        zero_data_zf_U_tot = np.zeros(shape=(1, len(cols_zf_U_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zf_U_tot, columns=cols_zf_U_plan_tot)
        for str in cols_zf_U_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        zf_U_plan_knn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_zf_U_plan):
            print("Predicted KNN zf_U_plan:")
            print(denorm_test_predictions_tot_df)
    else:
        # No zf_U_plan outputs in the dataset: fall back to all-zero predictions
        # for every method so downstream consumers always see these variables bound.
        col_names = [col for col in null_outputs if col.startswith('zf_U')]
        zeros = np.zeros(shape=(1,len(col_names)))
        test_pred_df = pd.DataFrame(zeros,columns=col_names)
        zf_U_plan_rdm_prediction = test_pred_df.copy()
        zf_U_plan_nn_prediction = test_pred_df.copy()
        zf_U_plan_svm_prediction = test_pred_df.copy()
        zf_U_plan_knn_prediction = test_pred_df.copy()
        if(print_en_zf_U_plan):
            print("Random zf_U:")
            print(test_pred_df)
            print("Predicted NN zf_U:")
            print(test_pred_df)
            print("Predicted SVM zf_U:")
            print(test_pred_df)
            print("Predicted KNN zf_U:")
            print(test_pred_df)
if predict_dual_f_plan:
    # ----- FINAL POSTURE SELECTION: DUAL VARIABLES --------------------------------------------- #
    # Predicts the dual_f_plan outputs with four methods (Random / NN / SVM / KNN).
    # Each method: classify the test input into a cluster, then apply the
    # mean-or-regressor two-stage scheme on that cluster's output columns.
    if not outputs_dual_f_plan_df.empty:
        # NOTE(review): pd.Series.from_csv is deprecated (removed in pandas 1.0);
        # pd.read_csv(..., squeeze) is the modern equivalent — confirm pandas version.
        outputs_dual_f_plan_df_max = pd.Series.from_csv(dir_path_dual_f_plan + "/dual_f_plan_max.csv", sep=',')
        outputs_dual_f_plan_df_min = pd.Series.from_csv(dir_path_dual_f_plan + "/dual_f_plan_min.csv", sep=',')
        # ------------------------- Random ---------------------------------------- #
        # "Random" baseline: the dual variables of the sampled task.
        dual_f_plan_rdm_prediction = task_1_sample[cols_dual_f_plan_tot]
        if (print_en_dual_f_plan):
            print("Random dual_f_plan: ")
            print(dual_f_plan_rdm_prediction)
        # ------------------------- Neural Network ---------------------------------------- #
        # Cluster classification via a pre-trained DNN (weights restored from model_dir).
        nn_classifier = tf.estimator.DNNClassifier(
            feature_columns=construct_feature_columns(norm_inputs_test_df),
            optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
            n_classes=n_clusters_dual_f_plan,
            hidden_units=units_dual_f_plan_class,
            model_dir=dir_path_dual_f_plan+"/classification/nn"
        )
        targets_df = pd.DataFrame([[0.0]])  # dummy targets for prediction-only input_fn
        predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                    targets_df,
                                                    num_epochs=1,
                                                    shuffle=False)
        test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
        test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
        n_cluster = test_pred[0] # the input belongs to this cluster
        selected_cl_in_dual_f_plan_df = pd.read_csv(dir_path_dual_f_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_dual_f_plan_df = pd.read_csv(dir_path_dual_f_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        # PCA is disabled for dual_f_plan: the raw cluster outputs are used directly.
        #dual_f_plan = selected_cl_out_dual_f_plan_df.values
        #pca_dual_f_plan = decomposition.PCA(n_components=n_pca_comps_dual_f_plan)
        #pc = pca_dual_f_plan.fit_transform(dual_f_plan)
        #pc_df = pd.DataFrame(data=pc, columns=cols_dual_f_plan[0:n_pca_comps_dual_f_plan])
        pc_df = selected_cl_out_dual_f_plan_df
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim  # columns left to the regressor after removing quasi-constant ones
        test_predictions_1 = np.array([])  # mean-based predictions
        test_predictions_2 = []            # regressor-based predictions
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1, len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            # sqrt(pow(x, 2)) == abs(x): |IQR| of column j against the threshold
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_dual_f_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        # NOTE(review): loop variable shadows the builtin `str` (here and below).
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            # Regressor restored from model_dir; label_dimension must match the
            # number of non-constant columns it was trained on.
            nn_regressor = tf.estimator.DNNRegressor(
                feature_columns=construct_feature_columns(norm_inputs_test_df),
                hidden_units=units_dual_f_plan,
                optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
                label_dimension=ldim,
                model_dir=dir_path_dual_f_plan + "/cluster" + repr(n_cluster)+"/nn"
            )
            predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                        targets_df[col_names_1],
                                                        num_epochs=1,
                                                        shuffle=False)
            test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
            test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # Merge mean-based and regressor-based frames back into original column order.
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        #test_predictions = test_predictions_df.values
        #test_predictions_proj = pca_dual_f_plan.inverse_transform(test_predictions)
        #test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_dual_f_plan)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_dual_f_plan_df_max, outputs_dual_f_plan_df_min)
        # Scatter predicted columns into the full zero-initialised column set.
        zero_data_dual_f_tot = np.zeros(shape=(1, len(cols_dual_f_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_dual_f_tot, columns=cols_dual_f_plan_tot)
        for str in cols_dual_f_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        dual_f_plan_nn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_dual_f_plan):
            print("Predicted NN dual_f_plan:")
            print(denorm_test_predictions_tot_df)
        norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
        # ------------------------- Support Vector Machines ---------------------------------------- #
        svm_classifier = joblib.load(dir_path_dual_f_plan + "/classification/svm/svm_clf.joblib")
        test_pred = svm_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_dual_f_plan_df = pd.read_csv(dir_path_dual_f_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_dual_f_plan_df = pd.read_csv(dir_path_dual_f_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        #dual_f_plan = selected_cl_out_dual_f_plan_df.values
        #pca_dual_f_plan = decomposition.PCA(n_components=n_pca_comps_dual_f_plan)
        #pc = pca_dual_f_plan.fit_transform(dual_f_plan)
        #pc_df = pd.DataFrame(data=pc, columns=cols_dual_f_plan[0:n_pca_comps_dual_f_plan])
        pc_df = selected_cl_out_dual_f_plan_df
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1, len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_dual_f_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            svm_regressor = joblib.load(dir_path_dual_f_plan + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
            test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        #test_predictions = test_predictions_df.values
        #test_predictions_proj = pca_dual_f_plan.inverse_transform(test_predictions)
        #test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_dual_f_plan)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_dual_f_plan_df_max, outputs_dual_f_plan_df_min)
        zero_data_dual_f_tot = np.zeros(shape=(1, len(cols_dual_f_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_dual_f_tot, columns=cols_dual_f_plan_tot)
        for str in cols_dual_f_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        dual_f_plan_svm_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_dual_f_plan):
            print("Predicted SVM dual_f_plan:")
            print(denorm_test_predictions_tot_df)
        # ------------------------- K-Nearest Neighbors ---------------------------------------- #
        knn_classifier = joblib.load(dir_path_dual_f_plan + "/classification/knn/knn_clf.joblib")
        test_pred = knn_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_dual_f_plan_df = pd.read_csv(dir_path_dual_f_plan+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_dual_f_plan_df = pd.read_csv(dir_path_dual_f_plan+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        #dual_f_plan = selected_cl_out_dual_f_plan_df.values
        #pca_dual_f_plan = decomposition.PCA(n_components=n_pca_comps_dual_f_plan)
        #pc = pca_dual_f_plan.fit_transform(dual_f_plan)
        #pc_df = pd.DataFrame(data=pc, columns=cols_dual_f_plan[0:n_pca_comps_dual_f_plan])
        pc_df = selected_cl_out_dual_f_plan_df
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1, len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_dual_f_plan):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            knn_regressor = joblib.load(dir_path_dual_f_plan + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
            test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        #test_predictions = test_predictions_df.values
        #test_predictions_proj = pca_dual_f_plan.inverse_transform(test_predictions)
        #test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_dual_f_plan)
        denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_dual_f_plan_df_max, outputs_dual_f_plan_df_min)
        zero_data_dual_f_tot = np.zeros(shape=(1, len(cols_dual_f_plan_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_dual_f_tot, columns=cols_dual_f_plan_tot)
        for str in cols_dual_f_plan_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        dual_f_plan_knn_prediction = denorm_test_predictions_tot_df.copy()
        if(print_en_dual_f_plan):
            print("Predicted KNN dual_f_plan:")
            print(denorm_test_predictions_tot_df)
if predict_x_bounce:
    # ----- BOUNCE POSTURE SELECTION: BOUNCE POSTURE --------------------------------------------- #
    # Unlike the dual-variable sections, x_bounce predictions are made in PCA space:
    # cluster outputs are projected onto n_pca_comps_x_bounce components, predicted
    # there, then mapped back with inverse_transform before denormalisation.
    if not outputs_x_bounce_df.empty:
        # NOTE(review): pd.Series.from_csv is deprecated (removed in pandas 1.0).
        outputs_x_bounce_df_max = pd.Series.from_csv(dir_path_x_bounce+"/x_bounce_max.csv",sep=',')
        outputs_x_bounce_df_min = pd.Series.from_csv(dir_path_x_bounce + "/x_bounce_min.csv",sep=',')
        # ------------------------- Random ---------------------------------------- #
        x_bounce_rdm_prediction = task_1_sample[cols_x_bounce_tot]
        if (print_en_x_bounce):
            print("Random x_bounce: ")
            print(x_bounce_rdm_prediction)
        # ------------------------- Neural Network ---------------------------------------- #
        # Cluster classification via a pre-trained DNN (weights restored from model_dir).
        nn_classifier = tf.estimator.DNNClassifier(
            feature_columns=construct_feature_columns(norm_inputs_test_df),
            optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
            n_classes=n_clusters_x_bounce,
            hidden_units=units_x_bounce_class,
            model_dir=dir_path_x_bounce+"/classification/nn"
        )
        targets_df = pd.DataFrame([[0.0]])  # dummy targets for prediction-only input_fn
        predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                    targets_df,
                                                    num_epochs=1,
                                                    shuffle=False)
        test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
        test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
        n_cluster = test_pred[0] # the input belongs to this cluster
        selected_cl_in_x_bounce_df = pd.read_csv(dir_path_x_bounce+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_x_bounce_df = pd.read_csv(dir_path_x_bounce+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        #print("Cluster:")
        #print(n_cluster)
        # TO DO
        #n_comps = n_pca_comps_x_bounce
        #if (n_cluster==2 or n_cluster==5):
        #    n_comps = n_pca_comps_x_bounce - 3
        #elif(n_cluster==0 or n_cluster==3 or n_cluster==4):
        #    n_comps = n_pca_comps_x_bounce - 2
        # NOTE(review): PCA is re-fitted here at prediction time on the cluster data;
        # this assumes the fit is deterministic and matches the training-time
        # projection — TODO confirm.
        X_bounce = selected_cl_out_x_bounce_df.values
        pca_x_bounce = decomposition.PCA(n_components=n_pca_comps_x_bounce)
        pc = pca_x_bounce.fit_transform(X_bounce)
        pc_df = pd.DataFrame(data=pc, columns=cols_x_bounce[0:n_pca_comps_x_bounce])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim  # PCA components left to the regressor
        test_predictions_1 = np.array([])  # mean-based predictions (low-variance components)
        test_predictions_2 = []            # regressor-based predictions
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            # sqrt(pow(x, 2)) == abs(x): |IQR| of PCA component j
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_x_bounce):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        # NOTE(review): loop variable shadows the builtin `str` (here and below).
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            nn_regressor = tf.estimator.DNNRegressor(
                feature_columns=construct_feature_columns(norm_inputs_test_df),
                hidden_units=units_x_bounce,
                optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
                label_dimension=ldim,
                model_dir=dir_path_x_bounce + "/cluster" + repr(n_cluster)+"/nn"
            )
            tar_zeros = np.zeros(shape=(1,len(col_names_1)))
            targets_df = pd.DataFrame(tar_zeros,columns=col_names_1)
            predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
                                                        targets_df[col_names_1],
                                                        num_epochs=1,
                                                        shuffle=False)
            test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
            test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        # Merge mean-based and regressor-based frames back into original component order.
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        # Map the PCA-space predictions back to the original output space.
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_x_bounce.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_x_bounce)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_x_bounce_df_max, outputs_x_bounce_df_min)
        zero_data_x_bounce_tot = np.zeros(shape=(1, len(cols_x_bounce_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_x_bounce_tot, columns=cols_x_bounce_tot)
        for str in cols_x_bounce_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        # NOTE(review): sibling sections copy denorm_test_predictions_tot_df here;
        # this one copies the non-"tot" frame — confirm whether that is intentional.
        x_bounce_nn_prediction = denorm_test_predictions_df.copy()
        if(print_en_x_bounce):
            print("Predicted NN x_bounce:")
            print(denorm_test_predictions_df)
        norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
        # ------------------------- Support Vector Machines ---------------------------------------- #
        svm_classifier = joblib.load(dir_path_x_bounce + "/classification/svm/svm_clf.joblib")
        test_pred = svm_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_x_bounce_df = pd.read_csv(dir_path_x_bounce+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_x_bounce_df = pd.read_csv(dir_path_x_bounce+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        X_bounce = selected_cl_out_x_bounce_df.values
        pca_x_bounce = decomposition.PCA(n_components=n_pca_comps_x_bounce)
        pc = pca_x_bounce.fit_transform(X_bounce)
        pc_df = pd.DataFrame(data=pc, columns=cols_x_bounce[0:n_pca_comps_x_bounce])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_x_bounce):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            svm_regressor = joblib.load(dir_path_x_bounce + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
            test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_x_bounce.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_x_bounce)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_x_bounce_df_max, outputs_x_bounce_df_min)
        zero_data_x_bounce_tot = np.zeros(shape=(1, len(cols_x_bounce_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_x_bounce_tot, columns=cols_x_bounce_tot)
        for str in cols_x_bounce_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        # NOTE(review): copies the non-"tot" frame, unlike sibling sections — confirm.
        x_bounce_svm_prediction = denorm_test_predictions_df.copy()
        if(print_en_x_bounce):
            print("Predicted SVM x_bounce:")
            print(denorm_test_predictions_df)
        # ------------------------- K-Nearest Neighbors ---------------------------------------- #
        knn_classifier = joblib.load(dir_path_x_bounce + "/classification/knn/knn_clf.joblib")
        test_pred = knn_classifier.predict(norm_inputs_test_list)
        n_cluster = test_pred[0]
        selected_cl_in_x_bounce_df = pd.read_csv(dir_path_x_bounce+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
        selected_cl_out_x_bounce_df = pd.read_csv(dir_path_x_bounce+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
        X_bounce = selected_cl_out_x_bounce_df.values
        pca_x_bounce = decomposition.PCA(n_components=n_pca_comps_x_bounce)
        pc = pca_x_bounce.fit_transform(X_bounce)
        pc_df = pd.DataFrame(data=pc, columns=cols_x_bounce[0:n_pca_comps_x_bounce])
        col_names = list(pc_df.columns.values)
        dim = len(pc_df.columns.values)
        ldim = dim
        test_predictions_1 = np.array([])
        test_predictions_2 = []
        test_predictions_df = pd.DataFrame()
        test_predictions_df_1 = pd.DataFrame()
        test_predictions_df_2 = pd.DataFrame()
        tar_zeros = np.zeros(shape=(1,len(col_names)))
        targets_df = pd.DataFrame(tar_zeros, columns=col_names)
        test_pred_col_names_1 = []
        col_names_1 = list(pc_df.columns.values)
        for j in range(0, dim):
            if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_x_bounce):
                if (test_predictions_1.size == 0):
                    test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
                else:
                    test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
                ldim = ldim - 1
                test_pred_col_names_1.append(pc_df.columns[j])
        for str in test_pred_col_names_1:
            col_names_1.remove(str)
        if (test_predictions_1.size != 0):
            test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=test_pred_col_names_1)
        if (ldim != 0):
            knn_regressor = joblib.load(dir_path_x_bounce + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
            test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
            test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
                                                 index=norm_inputs_test_df.index,
                                                 columns=col_names_1)
        if (test_predictions_df_1.empty):
            test_predictions_df = test_predictions_df_2
        elif (test_predictions_df_2.empty):
            test_predictions_df = test_predictions_df_1
        else:
            for str in col_names:
                if str in test_predictions_df_1:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
                elif str in test_predictions_df_2:
                    test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
        test_predictions = test_predictions_df.values
        test_predictions_proj = pca_x_bounce.inverse_transform(test_predictions)
        test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_x_bounce)
        denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_x_bounce_df_max, outputs_x_bounce_df_min)
        zero_data_x_bounce_tot = np.zeros(shape=(1, len(cols_x_bounce_tot)))
        denorm_test_predictions_tot_df = pd.DataFrame(zero_data_x_bounce_tot, columns=cols_x_bounce_tot)
        for str in cols_x_bounce_tot:
            if str in denorm_test_predictions_df:
                denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
        # NOTE(review): copies the non-"tot" frame, unlike sibling sections — confirm.
        x_bounce_knn_prediction = denorm_test_predictions_df.copy()
        if(print_en_x_bounce):
            print("Predicted KNN x_bounce:")
            print(denorm_test_predictions_df)
if predict_zb_L:
# ---------------- BOUNCE POSTURE SELECTION: LOWER BOUNDS --------------------------------------------- #
if not outputs_zb_L_df.empty:
outputs_zb_L_df_max = pd.Series.from_csv(dir_path_zb_L + "/zb_L_max.csv", sep=',')
outputs_zb_L_df_min = pd.Series.from_csv(dir_path_zb_L + "/zb_L_min.csv", sep=',')
# ------------------------- Random ---------------------------------------- #
zb_L_rdm_prediction = task_1_sample[cols_zb_L_tot]
if (print_en_zb_L):
print("Random zb_L: ")
print(zb_L_rdm_prediction)
# ------------------------- Neural Network ---------------------------------------- #
nn_classifier = tf.estimator.DNNClassifier(
feature_columns=construct_feature_columns(norm_inputs_test_df),
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
n_classes=n_clusters_zb_L,
hidden_units=units_zb_L_class,
model_dir=dir_path_zb_L+"/classification/nn"
)
targets_df = pd.DataFrame([[0.0]])
predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
targets_df,
num_epochs=1,
shuffle=False)
test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
n_cluster = test_pred[0] # the input belongs to this cluster
selected_cl_in_zb_L_df = pd.read_csv(dir_path_zb_L+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_zb_L_df = pd.read_csv(dir_path_zb_L+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
col_names = list(selected_cl_out_zb_L_df.columns.values)
dim = len(selected_cl_out_zb_L_df.columns.values)
ldim = dim
test_predictions_1 = np.array([])
test_predictions_2 = []
# ---- zb_L (bounce-posture lower bounds), Neural Network branch: assemble prediction ---- #
# Two complementary pieces are built and later merged:
#   * test_predictions_df_1 : output columns whose spread within the selected cluster
#     is small — predicted directly by the cluster mean (no regressor needed);
#   * test_predictions_df_2 : the remaining `ldim` columns — predicted by the
#     per-cluster DNN regressor restored from disk.
test_predictions_df = pd.DataFrame()
test_predictions_df_1 = pd.DataFrame()
test_predictions_df_2 = pd.DataFrame()
# All-zero dummy targets: only needed so my_input_fn has a labels argument at predict time.
tar_zeros = np.zeros(shape=(1, len(col_names)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names)
test_pred_col_names_1 = []
col_names_1 = list(selected_cl_out_zb_L_df.columns.values)
for j in range(0, dim):
# sqrt(pow(q25 - q75, 2)) is simply |IQR|; a column whose IQR is at most th_zb_L is
# treated as (near-)constant over the cluster and predicted by its mean.
if (math.sqrt(math.pow((selected_cl_out_zb_L_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zb_L_df.iloc[0:, j].quantile(0.75)), 2)) <= th_zb_L):
if (test_predictions_1.size == 0):
test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zb_L_df.iloc[0:, j].mean())
else:
test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zb_L_df.iloc[0:, j].mean())], axis=1)
ldim = ldim - 1  # one output dimension fewer for the regressor to predict
test_pred_col_names_1.append(selected_cl_out_zb_L_df.columns[j])
# Drop the mean-predicted columns so col_names_1 keeps only the regressor's columns.
# NOTE(review): the loop variable shadows the builtin 'str' (a file-wide pattern).
for str in test_pred_col_names_1:
col_names_1.remove(str)
if (test_predictions_1.size != 0):
test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=test_pred_col_names_1)
if (ldim != 0):
# Per-cluster DNN regressor, restored via model_dir (config must match training).
nn_regressor = tf.estimator.DNNRegressor(
feature_columns=construct_feature_columns(norm_inputs_test_df),
hidden_units=units_zb_L,
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
label_dimension=ldim,
model_dir=dir_path_zb_L + "/cluster" + repr(n_cluster)+"/nn"
)
predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
targets_df[col_names_1],
num_epochs=1,
shuffle=False)
test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=col_names_1)
# Merge the mean-predicted and regressor-predicted pieces, preserving col_names order.
if (test_predictions_df_1.empty):
test_predictions_df = test_predictions_df_2
elif (test_predictions_df_2.empty):
test_predictions_df = test_predictions_df_1
else:
for str in col_names:
if str in test_predictions_df_1:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
elif str in test_predictions_df_2:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
# Map predictions back to the original scale (denormalize_linear_scale is defined
# elsewhere in this file), then embed them into a zero-filled frame that carries
# every zb_L output column, so downstream writers see a fixed schema.
denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zb_L_df_max, outputs_zb_L_df_min)
zero_data_zb_L_tot = np.zeros(shape=(1, len(cols_zb_L_tot)))
denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zb_L_tot, columns=cols_zb_L_tot)
for str in cols_zb_L_tot:
if str in denorm_test_predictions_df:
denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
zb_L_nn_prediction = denorm_test_predictions_tot_df.copy()
if(print_en_zb_L):
print("Predicted NN zb_L:")
print(denorm_test_predictions_tot_df)
# scikit-learn models are fed a plain list of feature rows rather than a DataFrame.
norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
# ------------------------- Support Vector Machines ---------------------------------------- #
# Same scheme as the NN branch: classify the input into a cluster, predict low-spread
# output columns by the cluster mean, and the rest with that cluster's SVM regressor.
svm_classifier = joblib.load(dir_path_zb_L + "/classification/svm/svm_clf.joblib")
test_pred = svm_classifier.predict(norm_inputs_test_list)
n_cluster = test_pred[0]
selected_cl_in_zb_L_df = pd.read_csv(dir_path_zb_L+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_zb_L_df = pd.read_csv(dir_path_zb_L+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
col_names = list(selected_cl_out_zb_L_df.columns.values)
dim = len(selected_cl_out_zb_L_df.columns.values)
ldim = dim  # ldim counts columns left for the regressor; decremented per mean-predicted column
test_predictions_1 = np.array([])
test_predictions_2 = []
test_predictions_df = pd.DataFrame()
test_predictions_df_1 = pd.DataFrame()
test_predictions_df_2 = pd.DataFrame()
tar_zeros = np.zeros(shape=(1, len(col_names)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names)
test_pred_col_names_1 = []
col_names_1 = list(selected_cl_out_zb_L_df.columns.values)
for j in range(0, dim):
# sqrt(pow(q25 - q75, 2)) == |IQR|: columns with IQR <= th_zb_L get the cluster mean.
if (math.sqrt(math.pow((selected_cl_out_zb_L_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zb_L_df.iloc[0:, j].quantile(0.75)), 2)) <= th_zb_L):
if (test_predictions_1.size == 0):
test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zb_L_df.iloc[0:, j].mean())
else:
test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zb_L_df.iloc[0:, j].mean())], axis=1)
ldim = ldim - 1
test_pred_col_names_1.append(selected_cl_out_zb_L_df.columns[j])
for str in test_pred_col_names_1:
col_names_1.remove(str)
if (test_predictions_1.size != 0):
test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=test_pred_col_names_1)
if (ldim != 0):
svm_regressor = joblib.load(dir_path_zb_L + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
# NOTE(review): only the first ldim INPUT columns are fed to the regressor, but ldim
# counts remaining OUTPUT dimensions — verify this matches how svm_reg was trained.
test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=col_names_1)
# Merge mean-predicted and regressor-predicted columns in col_names order.
if (test_predictions_df_1.empty):
test_predictions_df = test_predictions_df_2
elif (test_predictions_df_2.empty):
test_predictions_df = test_predictions_df_1
else:
for str in col_names:
if str in test_predictions_df_1:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
elif str in test_predictions_df_2:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
# Denormalize and embed into the full zero-padded zb_L column set.
denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zb_L_df_max, outputs_zb_L_df_min)
zero_data_zb_L_tot = np.zeros(shape=(1, len(cols_zb_L_tot)))
denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zb_L_tot, columns=cols_zb_L_tot)
for str in cols_zb_L_tot:
if str in denorm_test_predictions_df:
denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
zb_L_svm_prediction = denorm_test_predictions_tot_df.copy()
if(print_en_zb_L):
print("Predicted SVM zb_L:")
print(denorm_test_predictions_tot_df)
# ------------------------- K-Nearest Neighbors ---------------------------------------- #
# Same cluster-then-predict scheme: KNN classifier picks the cluster, low-spread output
# columns take the cluster mean, the rest come from that cluster's KNN regressor.
knn_classifier = joblib.load(dir_path_zb_L + "/classification/knn/knn_clf.joblib")
test_pred = knn_classifier.predict(norm_inputs_test_list)
n_cluster = test_pred[0]
selected_cl_in_zb_L_df = pd.read_csv(dir_path_zb_L+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_zb_L_df = pd.read_csv(dir_path_zb_L+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
col_names = list(selected_cl_out_zb_L_df.columns.values)
dim = len(selected_cl_out_zb_L_df.columns.values)
ldim = dim  # remaining output dims for the regressor
test_predictions_1 = np.array([])
test_predictions_2 = []
test_predictions_df = pd.DataFrame()
test_predictions_df_1 = pd.DataFrame()
test_predictions_df_2 = pd.DataFrame()
tar_zeros = np.zeros(shape=(1, len(col_names)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names)
test_pred_col_names_1 = []
col_names_1 = list(selected_cl_out_zb_L_df.columns.values)
for j in range(0, dim):
# |IQR| <= th_zb_L: treat the column as constant and use the cluster mean.
if (math.sqrt(math.pow((selected_cl_out_zb_L_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zb_L_df.iloc[0:, j].quantile(0.75)), 2)) <= th_zb_L):
if (test_predictions_1.size == 0):
test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zb_L_df.iloc[0:, j].mean())
else:
test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zb_L_df.iloc[0:, j].mean())], axis=1)
ldim = ldim - 1
test_pred_col_names_1.append(selected_cl_out_zb_L_df.columns[j])
for str in test_pred_col_names_1:
col_names_1.remove(str)
if (test_predictions_1.size != 0):
test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=test_pred_col_names_1)
if (ldim != 0):
knn_regressor = joblib.load(dir_path_zb_L + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
# NOTE(review): inputs are sliced by ldim (an OUTPUT-dimension count) — confirm
# this matches the feature set knn_reg was trained on.
test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=col_names_1)
# Merge the two pieces in col_names order.
if (test_predictions_df_1.empty):
test_predictions_df = test_predictions_df_2
elif (test_predictions_df_2.empty):
test_predictions_df = test_predictions_df_1
else:
for str in col_names:
if str in test_predictions_df_1:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
elif str in test_predictions_df_2:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
# Denormalize and embed into the fixed, zero-padded zb_L schema.
denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zb_L_df_max, outputs_zb_L_df_min)
zero_data_zb_L_tot = np.zeros(shape=(1, len(cols_zb_L_tot)))
denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zb_L_tot, columns=cols_zb_L_tot)
for str in cols_zb_L_tot:
if str in denorm_test_predictions_df:
denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
zb_L_knn_prediction = denorm_test_predictions_tot_df.copy()
if(print_en_zb_L):
print("Predicted KNN zb_L:")
print(denorm_test_predictions_tot_df)
# Predict the UPPER bounds (zb_U) of the bounce-posture variables with four warm-start
# strategies: Random (sampled), Neural Network, SVM and KNN. Each strategy classifies
# the normalized test input into a cluster, predicts low-spread output columns by the
# cluster mean, predicts the remaining `ldim` columns with that cluster's regressor,
# then denormalizes and zero-pads the result to the full cols_zb_U_tot schema.
# If no zb_U outputs exist in the training data, all four predictions fall back to zeros.
if predict_zb_U:
# ----- BOUNCE POSTURE SELECTION: UPPER BOUNDS --------------------------------------------- #
if not outputs_zb_U_df.empty:
# Per-column min/max used by denormalize_linear_scale below.
# NOTE(review): pd.Series.from_csv is deprecated in modern pandas (use pd.read_csv).
outputs_zb_U_df_max = pd.Series.from_csv(dir_path_zb_U + "/zb_U_max.csv", sep=',')
outputs_zb_U_df_min = pd.Series.from_csv(dir_path_zb_U + "/zb_U_min.csv", sep=',')
# ------------------------- Random ---------------------------------------- #
# Baseline "prediction": the randomly drawn sample's own zb_U values.
zb_U_rdm_prediction = task_1_sample[cols_zb_U_tot]
if (print_en_zb_U):
print("Random zb_U: ")
print(zb_U_rdm_prediction)
# ------------------------- Neural Network ---------------------------------------- #
# DNN classifier (restored from model_dir) picks the cluster for the test input.
nn_classifier = tf.estimator.DNNClassifier(
feature_columns=construct_feature_columns(norm_inputs_test_df),
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
n_classes=n_clusters_zb_U,
hidden_units=units_zb_U_class,
model_dir=dir_path_zb_U+"/classification/nn"
)
# Dummy labels — my_input_fn requires targets even at predict time.
targets_df = pd.DataFrame([[0.0]])
predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
targets_df,
num_epochs=1,
shuffle=False)
test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
n_cluster = test_pred[0] # the input belongs to this cluster
selected_cl_in_zb_U_df = pd.read_csv(dir_path_zb_U+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_zb_U_df = pd.read_csv(dir_path_zb_U+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
col_names = list(selected_cl_out_zb_U_df.columns.values)
dim = len(selected_cl_out_zb_U_df.columns.values)
ldim = dim  # output dims left for the regressor after mean-predicted ones are removed
test_predictions_1 = np.array([])
test_predictions_2 = []
test_predictions_df = pd.DataFrame()
test_predictions_df_1 = pd.DataFrame()
test_predictions_df_2 = pd.DataFrame()
tar_zeros = np.zeros(shape=(1, len(col_names)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names)
test_pred_col_names_1 = []
col_names_1 = list(selected_cl_out_zb_U_df.columns.values)
for j in range(0, dim):
# sqrt(pow(q25 - q75, 2)) == |IQR|: near-constant columns get the cluster mean.
if (math.sqrt(math.pow((selected_cl_out_zb_U_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zb_U_df.iloc[0:, j].quantile(0.75)), 2)) <= th_zb_U):
if (test_predictions_1.size == 0):
test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zb_U_df.iloc[0:, j].mean())
else:
test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zb_U_df.iloc[0:, j].mean())], axis=1)
ldim = ldim - 1
test_pred_col_names_1.append(selected_cl_out_zb_U_df.columns[j])
for str in test_pred_col_names_1:
col_names_1.remove(str)
if (test_predictions_1.size != 0):
test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=test_pred_col_names_1)
if (ldim != 0):
# Per-cluster DNN regressor for the remaining columns.
nn_regressor = tf.estimator.DNNRegressor(
feature_columns=construct_feature_columns(norm_inputs_test_df),
hidden_units=units_zb_U,
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
label_dimension=ldim,
model_dir=dir_path_zb_U + "/cluster" + repr(n_cluster)+"/nn"
)
predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
targets_df[col_names_1],
num_epochs=1,
shuffle=False)
test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=col_names_1)
# Merge mean-predicted and regressor-predicted pieces in col_names order.
if (test_predictions_df_1.empty):
test_predictions_df = test_predictions_df_2
elif (test_predictions_df_2.empty):
test_predictions_df = test_predictions_df_1
else:
for str in col_names:
if str in test_predictions_df_1:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
elif str in test_predictions_df_2:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
# Denormalize and zero-pad into the full zb_U schema.
denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zb_U_df_max, outputs_zb_U_df_min)
zero_data_zb_U_tot = np.zeros(shape=(1, len(cols_zb_U_tot)))
denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zb_U_tot, columns=cols_zb_U_tot)
for str in cols_zb_U_tot:
if str in denorm_test_predictions_df:
denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
zb_U_nn_prediction = denorm_test_predictions_tot_df.copy()
if(print_en_zb_U):
print("Predicted NN zb_U:")
print(denorm_test_predictions_tot_df)
# scikit-learn models consume a plain list of feature rows.
norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
# ------------------------- Support Vector Machines ---------------------------------------- #
# Identical scheme to the NN branch above, with SVM classifier + per-cluster SVM regressor.
svm_classifier = joblib.load(dir_path_zb_U + "/classification/svm/svm_clf.joblib")
test_pred = svm_classifier.predict(norm_inputs_test_list)
n_cluster = test_pred[0]
selected_cl_in_zb_U_df = pd.read_csv(dir_path_zb_U+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_zb_U_df = pd.read_csv(dir_path_zb_U+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
col_names = list(selected_cl_out_zb_U_df.columns.values)
dim = len(selected_cl_out_zb_U_df.columns.values)
ldim = dim
test_predictions_1 = np.array([])
test_predictions_2 = []
test_predictions_df = pd.DataFrame()
test_predictions_df_1 = pd.DataFrame()
test_predictions_df_2 = pd.DataFrame()
tar_zeros = np.zeros(shape=(1, len(col_names)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names)
test_pred_col_names_1 = []
col_names_1 = list(selected_cl_out_zb_U_df.columns.values)
for j in range(0, dim):
if (math.sqrt(math.pow((selected_cl_out_zb_U_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zb_U_df.iloc[0:, j].quantile(0.75)), 2)) <= th_zb_U):
if (test_predictions_1.size == 0):
test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zb_U_df.iloc[0:, j].mean())
else:
test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zb_U_df.iloc[0:, j].mean())], axis=1)
ldim = ldim - 1
test_pred_col_names_1.append(selected_cl_out_zb_U_df.columns[j])
for str in test_pred_col_names_1:
col_names_1.remove(str)
if (test_predictions_1.size != 0):
test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=test_pred_col_names_1)
if (ldim != 0):
svm_regressor = joblib.load(dir_path_zb_U + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
# NOTE(review): inputs sliced by ldim (an OUTPUT-dim count) — confirm against training.
test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=col_names_1)
if (test_predictions_df_1.empty):
test_predictions_df = test_predictions_df_2
elif (test_predictions_df_2.empty):
test_predictions_df = test_predictions_df_1
else:
for str in col_names:
if str in test_predictions_df_1:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
elif str in test_predictions_df_2:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zb_U_df_max, outputs_zb_U_df_min)
zero_data_zb_U_tot = np.zeros(shape=(1, len(cols_zb_U_tot)))
denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zb_U_tot, columns=cols_zb_U_tot)
for str in cols_zb_U_tot:
if str in denorm_test_predictions_df:
denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
zb_U_svm_prediction = denorm_test_predictions_tot_df.copy()
if(print_en_zb_U):
print("Predicted SVM zb_U:")
print(denorm_test_predictions_tot_df)
# ------------------------- K-Nearest Neighbors ---------------------------------------- #
# Identical scheme again, with KNN classifier + per-cluster KNN regressor.
knn_classifier = joblib.load(dir_path_zb_U + "/classification/knn/knn_clf.joblib")
test_pred = knn_classifier.predict(norm_inputs_test_list)
n_cluster = test_pred[0]
selected_cl_in_zb_U_df = pd.read_csv(dir_path_zb_U+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_zb_U_df = pd.read_csv(dir_path_zb_U+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
col_names = list(selected_cl_out_zb_U_df.columns.values)
dim = len(selected_cl_out_zb_U_df.columns.values)
ldim = dim
test_predictions_1 = np.array([])
test_predictions_2 = []
test_predictions_df = pd.DataFrame()
test_predictions_df_1 = pd.DataFrame()
test_predictions_df_2 = pd.DataFrame()
tar_zeros = np.zeros(shape=(1, len(col_names)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names)
test_pred_col_names_1 = []
col_names_1 = list(selected_cl_out_zb_U_df.columns.values)
for j in range(0, dim):
if (math.sqrt(math.pow((selected_cl_out_zb_U_df.iloc[0:, j].quantile(0.25) - selected_cl_out_zb_U_df.iloc[0:, j].quantile(0.75)), 2)) <= th_zb_U):
if (test_predictions_1.size == 0):
test_predictions_1 = np.full((targets_df.shape[0], 1), selected_cl_out_zb_U_df.iloc[0:, j].mean())
else:
test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), selected_cl_out_zb_U_df.iloc[0:, j].mean())], axis=1)
ldim = ldim - 1
test_pred_col_names_1.append(selected_cl_out_zb_U_df.columns[j])
for str in test_pred_col_names_1:
col_names_1.remove(str)
if (test_predictions_1.size != 0):
test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=test_pred_col_names_1)
if (ldim != 0):
knn_regressor = joblib.load(dir_path_zb_U + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=col_names_1)
if (test_predictions_df_1.empty):
test_predictions_df = test_predictions_df_2
elif (test_predictions_df_2.empty):
test_predictions_df = test_predictions_df_1
else:
for str in col_names:
if str in test_predictions_df_1:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
elif str in test_predictions_df_2:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
denorm_test_predictions_df = denormalize_linear_scale(test_predictions_df, outputs_zb_U_df_max, outputs_zb_U_df_min)
zero_data_zb_U_tot = np.zeros(shape=(1, len(cols_zb_U_tot)))
denorm_test_predictions_tot_df = pd.DataFrame(zero_data_zb_U_tot, columns=cols_zb_U_tot)
for str in cols_zb_U_tot:
if str in denorm_test_predictions_df:
denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
zb_U_knn_prediction = denorm_test_predictions_tot_df.copy()
if(print_en_zb_U):
print("Predicted KNN zb_U:")
print(denorm_test_predictions_tot_df)
else:
# No zb_U outputs in the training data: every strategy degenerates to all-zero
# predictions over the null zb_U columns, so downstream writers still find the vars.
col_names = [col for col in null_outputs if col.startswith('zb_U')]
zeros = np.zeros(shape=(1,len(col_names)))
test_pred_df = pd.DataFrame(zeros,columns=col_names)
zb_U_rdm_prediction = test_pred_df.copy()
zb_U_nn_prediction = test_pred_df.copy()
zb_U_svm_prediction = test_pred_df.copy()
zb_U_knn_prediction = test_pred_df.copy()
if(print_en_zb_U):
print("Random zb_U:")
print(test_pred_df)
print("Predicted NN zb_U:")
print(test_pred_df)
print("Predicted SVM zb_U:")
print(test_pred_df)
print("Predicted KNN zb_U:")
print(test_pred_df)
# Predict the dual variables of the bounce-posture optimization with Random/NN/SVM/KNN.
# Unlike the zb_* sections, the (high-dimensional) dual outputs are first compressed
# with PCA fitted on the selected cluster's outputs; regression happens in PCA space
# and predictions are mapped back via pca.inverse_transform before denormalization.
if predict_dual_bounce:
# ----- BOUNCE POSTURE SELECTION: DUAL VARIABLES --------------------------------------------- #
if not outputs_dual_bounce_df.empty:
# Per-column min/max for denormalization (pd.Series.from_csv is deprecated upstream).
outputs_dual_bounce_df_max = pd.Series.from_csv(dir_path_dual_bounce+"/dual_bounce_max.csv",sep=',')
outputs_dual_bounce_df_min = pd.Series.from_csv(dir_path_dual_bounce + "/dual_bounce_min.csv",sep=',')
# ------------------------- Random ---------------------------------------- #
# Baseline: the randomly drawn sample's own dual values.
dual_bounce_rdm_prediction = task_1_sample[cols_dual_bounce_tot]
if (print_en_dual_bounce):
print("Random dual_bounce: ")
print(dual_bounce_rdm_prediction)
# ------------------------- Neural Network ---------------------------------------- #
# DNN classifier (restored from model_dir) assigns the test input to a cluster.
nn_classifier = tf.estimator.DNNClassifier(
feature_columns=construct_feature_columns(norm_inputs_test_df),
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate_class),
n_classes=n_clusters_dual_bounce,
hidden_units=units_dual_bounce_class,
model_dir=dir_path_dual_bounce+"/classification/nn"
)
targets_df = pd.DataFrame([[0.0]])
predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
targets_df,
num_epochs=1,
shuffle=False)
test_probabilities = nn_classifier.predict(input_fn=predict_test_input_fn)
test_pred = np.array([item['class_ids'][0] for item in test_probabilities])
n_cluster = test_pred[0] # the input belongs to this cluster
selected_cl_in_dual_bounce_df = pd.read_csv(dir_path_dual_bounce+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_dual_bounce_df = pd.read_csv(dir_path_dual_bounce+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
# Historical per-cluster component counts, kept for reference:
#print("Cluster number:")
#print(n_cluster)
#n_comps = n_pca_comps_dual_bounce
#if(n_cluster==0):
# n_comps = n_pca_comps_dual_bounce - 5
#elif (n_cluster==2 or n_cluster==3):
# n_comps = n_pca_comps_dual_bounce - 7
#elif(n_cluster==4):
# n_comps = n_pca_comps_dual_bounce - 6
#elif(n_cluster==5):
# n_comps = n_pca_comps_dual_bounce - 4
# Fit PCA on this cluster's dual outputs; regression targets live in PCA space.
# NOTE(review): PCA is refit at prediction time from the cluster CSV — it must
# reproduce the transform used when the regressor was trained.
Dual_bounce = selected_cl_out_dual_bounce_df.values
pca_dual_bounce = decomposition.PCA(n_components=n_pca_comps_dual_bounce)
pc = pca_dual_bounce.fit_transform(Dual_bounce)
pc_df = pd.DataFrame(data=pc, columns=cols_dual_bounce[0:n_pca_comps_dual_bounce])
col_names = list(pc_df.columns.values)
dim = len(pc_df.columns.values)
ldim = dim  # PCA components left for the regressor after low-spread ones are mean-predicted
test_predictions_1 = np.array([])
test_predictions_2 = []
test_predictions_df = pd.DataFrame()
test_predictions_df_1 = pd.DataFrame()
test_predictions_df_2 = pd.DataFrame()
tar_zeros = np.zeros(shape=(1,len(col_names)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names)
test_pred_col_names_1 = []
col_names_1 = list(pc_df.columns.values)
for j in range(0, dim):
# sqrt(pow(q25 - q75, 2)) == |IQR|: near-constant components take the cluster mean.
if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_dual_bounce):
if (test_predictions_1.size == 0):
test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
else:
test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
ldim = ldim - 1
test_pred_col_names_1.append(pc_df.columns[j])
for str in test_pred_col_names_1:
col_names_1.remove(str)
if (test_predictions_1.size != 0):
test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=test_pred_col_names_1)
if (ldim!=0):
nn_regressor = tf.estimator.DNNRegressor(feature_columns=construct_feature_columns(norm_inputs_test_df),
hidden_units=units_dual_bounce,
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
label_dimension=ldim,
model_dir=dir_path_dual_bounce + "/cluster" + repr(n_cluster)+"/nn"
)
# Rebuild dummy targets for only the regressor's remaining components.
tar_zeros = np.zeros(shape=(1, len(col_names_1)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names_1)
predict_test_input_fn = lambda: my_input_fn(norm_inputs_test_df,
targets_df[col_names_1],
num_epochs=1,
shuffle=False)
test_predictions_2 = nn_regressor.predict(input_fn=predict_test_input_fn)
test_predictions_2 = np.array([item['predictions'][0:ldim] for item in test_predictions_2])
test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=col_names_1)
# Merge mean-predicted and regressor-predicted PCA components in col_names order.
if (test_predictions_df_1.empty):
test_predictions_df = test_predictions_df_2
elif (test_predictions_df_2.empty):
test_predictions_df = test_predictions_df_1
else:
for str in col_names:
if str in test_predictions_df_1:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
elif str in test_predictions_df_2:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
# Back-project from PCA space to the full dual-variable space, then denormalize
# and zero-pad into the complete cols_dual_bounce_tot schema.
test_predictions = test_predictions_df.values
test_predictions_proj = pca_dual_bounce.inverse_transform(test_predictions)
test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_dual_bounce)
denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_dual_bounce_df_max, outputs_dual_bounce_df_min)
zero_data_dual_bounce_tot = np.zeros(shape=(1, len(cols_dual_bounce_tot)))
denorm_test_predictions_tot_df = pd.DataFrame(zero_data_dual_bounce_tot, columns=cols_dual_bounce_tot)
for str in cols_dual_bounce_tot:
if str in denorm_test_predictions_df:
denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
dual_bounce_nn_prediction = denorm_test_predictions_tot_df.copy()
if(print_en_dual_bounce):
print("Predicted NN dual_bounce:")
print(denorm_test_predictions_tot_df)
# scikit-learn models take a plain list of feature rows.
norm_inputs_test_list = np.array(norm_inputs_test_df.values).tolist()
# ------------------------- Support Vector Machines ---------------------------------------- #
# Same PCA-space scheme with SVM classifier + per-cluster SVM regressor.
svm_classifier = joblib.load(dir_path_dual_bounce + "/classification/svm/svm_clf.joblib")
test_pred = svm_classifier.predict(norm_inputs_test_list)
n_cluster = test_pred[0]
selected_cl_in_dual_bounce_df = pd.read_csv(dir_path_dual_bounce+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_dual_bounce_df = pd.read_csv(dir_path_dual_bounce+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
Dual_bounce = selected_cl_out_dual_bounce_df.values
pca_dual_bounce = decomposition.PCA(n_components=n_pca_comps_dual_bounce)
pc = pca_dual_bounce.fit_transform(Dual_bounce)
pc_df = pd.DataFrame(data=pc, columns=cols_dual_bounce[0:n_pca_comps_dual_bounce])
col_names = list(pc_df.columns.values)
dim = len(pc_df.columns.values)
ldim = dim
test_predictions_1 = np.array([])
test_predictions_2 = []
test_predictions_df = pd.DataFrame()
test_predictions_df_1 = pd.DataFrame()
test_predictions_df_2 = pd.DataFrame()
tar_zeros = np.zeros(shape=(1,len(col_names)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names)
test_pred_col_names_1 = []
col_names_1 = list(pc_df.columns.values)
for j in range(0, dim):
if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_dual_bounce):
if (test_predictions_1.size == 0):
test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
else:
test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
ldim = ldim - 1
test_pred_col_names_1.append(pc_df.columns[j])
for str in test_pred_col_names_1:
col_names_1.remove(str)
if (test_predictions_1.size != 0):
test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=test_pred_col_names_1)
if (ldim!=0):
svm_regressor = joblib.load(dir_path_dual_bounce + "/cluster"+repr(n_cluster)+"/svm/svm_reg.joblib")
# NOTE(review): inputs sliced by ldim (a PCA-component count) — confirm against training.
test_predictions_2 = svm_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=col_names_1)
if (test_predictions_df_1.empty):
test_predictions_df = test_predictions_df_2
elif (test_predictions_df_2.empty):
test_predictions_df = test_predictions_df_1
else:
for str in col_names:
if str in test_predictions_df_1:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
elif str in test_predictions_df_2:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
test_predictions = test_predictions_df.values
test_predictions_proj = pca_dual_bounce.inverse_transform(test_predictions)
test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_dual_bounce)
denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_dual_bounce_df_max, outputs_dual_bounce_df_min)
zero_data_dual_bounce_tot = np.zeros(shape=(1, len(cols_dual_bounce_tot)))
denorm_test_predictions_tot_df = pd.DataFrame(zero_data_dual_bounce_tot, columns=cols_dual_bounce_tot)
for str in cols_dual_bounce_tot:
if str in denorm_test_predictions_df:
denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
dual_bounce_svm_prediction = denorm_test_predictions_tot_df.copy()
if(print_en_dual_bounce):
print("Predicted SVM dual_bounce:")
print(denorm_test_predictions_tot_df)
# ------------------------- K-Nearest Neighbors ---------------------------------------- #
# Same PCA-space scheme again, with KNN classifier + per-cluster KNN regressor.
knn_classifier = joblib.load(dir_path_dual_bounce + "/classification/knn/knn_clf.joblib")
test_pred = knn_classifier.predict(norm_inputs_test_list)
n_cluster = test_pred[0]
selected_cl_in_dual_bounce_df = pd.read_csv(dir_path_dual_bounce+"/cluster"+repr(n_cluster)+"/inputs.csv",sep=',')
selected_cl_out_dual_bounce_df = pd.read_csv(dir_path_dual_bounce+"/cluster"+repr(n_cluster)+"/outputs.csv",sep=',')
Dual_bounce = selected_cl_out_dual_bounce_df.values
pca_dual_bounce = decomposition.PCA(n_components=n_pca_comps_dual_bounce)
pc = pca_dual_bounce.fit_transform(Dual_bounce)
pc_df = pd.DataFrame(data=pc, columns=cols_dual_bounce[0:n_pca_comps_dual_bounce])
col_names = list(pc_df.columns.values)
dim = len(pc_df.columns.values)
ldim = dim
test_predictions_1 = np.array([])
test_predictions_2 = []
test_predictions_df = pd.DataFrame()
test_predictions_df_1 = pd.DataFrame()
test_predictions_df_2 = pd.DataFrame()
tar_zeros = np.zeros(shape=(1,len(col_names)))
targets_df = pd.DataFrame(tar_zeros, columns=col_names)
test_pred_col_names_1 = []
col_names_1 = list(pc_df.columns.values)
for j in range(0, dim):
if (math.sqrt(math.pow((pc_df.iloc[0:, j].quantile(0.25) - pc_df.iloc[0:, j].quantile(0.75)),2)) <= th_dual_bounce):
if (test_predictions_1.size == 0):
test_predictions_1 = np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())
else:
test_predictions_1 = np.concatenate([test_predictions_1, np.full((targets_df.shape[0], 1), pc_df.iloc[0:, j].mean())],axis=1)
ldim = ldim - 1
test_pred_col_names_1.append(pc_df.columns[j])
for str in test_pred_col_names_1:
col_names_1.remove(str)
if (test_predictions_1.size != 0):
test_predictions_df_1 = pd.DataFrame(data=test_predictions_1[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=test_pred_col_names_1)
if (ldim!=0):
knn_regressor = joblib.load(dir_path_dual_bounce + "/cluster"+repr(n_cluster)+"/knn/knn_reg.joblib")
test_predictions_2 = knn_regressor.predict(norm_inputs_test_df.iloc[:,0:ldim])
test_predictions_df_2 = pd.DataFrame(data=test_predictions_2[0:, 0:], # values
index=norm_inputs_test_df.index,
columns=col_names_1)
if (test_predictions_df_1.empty):
test_predictions_df = test_predictions_df_2
elif (test_predictions_df_2.empty):
test_predictions_df = test_predictions_df_1
else:
for str in col_names:
if str in test_predictions_df_1:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_1[str]], axis=1)
elif str in test_predictions_df_2:
test_predictions_df = pd.concat([test_predictions_df, test_predictions_df_2[str]], axis=1)
test_predictions = test_predictions_df.values
test_predictions_proj = pca_dual_bounce.inverse_transform(test_predictions)
test_proj_df = pd.DataFrame(data=test_predictions_proj, columns=cols_dual_bounce)
denorm_test_predictions_df = denormalize_linear_scale(test_proj_df, outputs_dual_bounce_df_max, outputs_dual_bounce_df_min)
zero_data_dual_bounce_tot = np.zeros(shape=(1, len(cols_dual_bounce_tot)))
denorm_test_predictions_tot_df = pd.DataFrame(zero_data_dual_bounce_tot, columns=cols_dual_bounce_tot)
for str in cols_dual_bounce_tot:
if str in denorm_test_predictions_df:
denorm_test_predictions_tot_df[str] = denorm_test_predictions_df[str].values
dual_bounce_knn_prediction = denorm_test_predictions_tot_df.copy()
if(print_en_dual_bounce):
print("Predicted KNN dual_bounce:")
print(denorm_test_predictions_tot_df)
# ------------------- Write down the prediction of the results ----------------------------------- #

def _write_prediction_row(out, label, frame):
    # One "label=v0|v1|...|vn" line built from the first (only) row of the
    # prediction dataframe, 6-decimal fixed point exactly as before.
    values = "|".join("%.6f" % frame.iloc[0, i] for i in range(len(frame.columns)))
    out.write(label + "=" + values + "\n")

# (section title, label tag, plan-stage frames, bounce-stage frames);
# frame order matches the X/ZL/ZU/Dual label prefixes used below.
_warm_starts = (
    ("Random", "rdm",
     (xf_plan_rdm_prediction, zf_L_plan_rdm_prediction,
      zf_U_plan_rdm_prediction, dual_f_plan_rdm_prediction),
     (x_bounce_rdm_prediction, zb_L_rdm_prediction,
      zb_U_rdm_prediction, dual_bounce_rdm_prediction)),
    ("Neural Network", "nn",
     (xf_plan_nn_prediction, zf_L_plan_nn_prediction,
      zf_U_plan_nn_prediction, dual_f_plan_nn_prediction),
     (x_bounce_nn_prediction, zb_L_nn_prediction,
      zb_U_nn_prediction, dual_bounce_nn_prediction)),
    ("Support Vector Machines", "svm",
     (xf_plan_svm_prediction, zf_L_plan_svm_prediction,
      zf_U_plan_svm_prediction, dual_f_plan_svm_prediction),
     (x_bounce_svm_prediction, zb_L_svm_prediction,
      zb_U_svm_prediction, dual_bounce_svm_prediction)),
    ("K-Nearest Neighbors", "knn",
     (xf_plan_knn_prediction, zf_L_plan_knn_prediction,
      zf_U_plan_knn_prediction, dual_f_plan_knn_prediction),
     (x_bounce_knn_prediction, zb_L_knn_prediction,
      zb_U_knn_prediction, dual_bounce_knn_prediction)),
)

# "with" guarantees the handle is closed even if a write fails mid-way
# (the original open()/close() pair leaked it on error).
with open(pred_file_path, "w") as pred_file:
    pred_file.write("#### Dual variables and solutions of the optimization problems ####\n")
    for _title, _tag, _plan_frames, _bounce_frames in _warm_starts:
        pred_file.write("### Warm start with %s ###\n" % _title)
        pred_file.write("## Plan target posture selection data ##\n")
        for _prefix, _frame in zip(("X", "ZL", "ZU", "Dual"), _plan_frames):
            _write_prediction_row(pred_file, "%s_%s_plan" % (_prefix, _tag), _frame)
        pred_file.write("## Bounce posture selection data ##\n")
        for _prefix, _frame in zip(("X", "ZL", "ZU", "Dual"), _bounce_frames):
            _write_prediction_row(pred_file, "%s_%s_bounce" % (_prefix, _tag), _frame)
| mit |
Adai0808/BuildingMachineLearningSystemsWithPython | ch09/01_fft_based_classifier.py | 24 | 3740 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from collections import defaultdict
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from sklearn.cross_validation import ShuffleSplit
from sklearn.metrics import confusion_matrix
from utils import plot_pr, plot_roc, plot_confusion_matrix, GENRE_LIST
from fft import read_fft
# Module-wide list of genre names shared by training and the plotting helpers.
genre_list = GENRE_LIST
def train_model(clf_factory, X, Y, name, plot=False):
    """Train a classifier on shuffled splits of (X, Y) and collect metrics.

    clf_factory: zero-argument callable returning a fresh classifier.
    Returns (mean train error, mean test error, stacked confusion matrices).
    Prints a mean/std summary line; if plot is True also draws per-genre
    precision-recall and ROC curves for the median-ROC fold.
    """
    labels = np.unique(Y)

    cv = ShuffleSplit(
        n=len(X), n_iter=1, test_size=0.3, indices=True, random_state=0)

    train_errors = []
    test_errors = []

    scores = []
    pr_scores = defaultdict(list)
    precisions, recalls, thresholds = defaultdict(
        list), defaultdict(list), defaultdict(list)

    roc_scores = defaultdict(list)
    tprs = defaultdict(list)
    fprs = defaultdict(list)

    clfs = []  # just to later get the median
    cms = []

    for train, test in cv:
        X_train, y_train = X[train], Y[train]
        X_test, y_test = X[test], Y[test]

        clf = clf_factory()
        clf.fit(X_train, y_train)
        clfs.append(clf)

        train_score = clf.score(X_train, y_train)
        test_score = clf.score(X_test, y_test)
        scores.append(test_score)

        train_errors.append(1 - train_score)
        test_errors.append(1 - test_score)

        y_pred = clf.predict(X_test)
        cm = confusion_matrix(y_test, y_pred)
        cms.append(cm)

        # One-vs-rest curves per genre label.
        for label in labels:
            y_label_test = np.asarray(y_test == label, dtype=int)
            proba = clf.predict_proba(X_test)
            proba_label = proba[:, label]

            precision, recall, pr_thresholds = precision_recall_curve(
                y_label_test, proba_label)
            pr_scores[label].append(auc(recall, precision))
            precisions[label].append(precision)
            recalls[label].append(recall)
            thresholds[label].append(pr_thresholds)

            fpr, tpr, roc_thresholds = roc_curve(y_label_test, proba_label)
            roc_scores[label].append(auc(fpr, tpr))
            tprs[label].append(tpr)
            fprs[label].append(fpr)

    if plot:
        for label in labels:
            print("Plotting %s" % genre_list[label])
            scores_to_sort = roc_scores[label]
            # Python-2 integer division picks the median fold by ROC score.
            median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
            desc = "%s %s" % (name, genre_list[label])

            plot_pr(pr_scores[label][median], desc, precisions[label][median],
                    recalls[label][median], label='%s vs rest' % genre_list[label])
            plot_roc(roc_scores[label][median], desc, tprs[label][median],
                     fprs[label][median], label='%s vs rest' % genre_list[label])

    all_pr_scores = np.asarray(pr_scores.values()).flatten()
    summary = (np.mean(scores), np.std(scores),
               np.mean(all_pr_scores), np.std(all_pr_scores))
    print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)

    return np.mean(train_errors), np.mean(test_errors), np.asarray(cms)
def create_model():
    """Factory for the classifier under evaluation (logistic regression)."""
    # Local import keeps the heavy sklearn import off the module load path.
    from sklearn.linear_model.logistic import LogisticRegression
    clf = LogisticRegression()
    return clf
if __name__ == "__main__":
    # Train on FFT features, then plot the column-normalized confusion matrix.
    X, y = read_fft(genre_list)

    train_avg, test_avg, cms = train_model(
        create_model, X, y, "Log Reg FFT", plot=True)

    cm_avg = np.mean(cms, axis=0)
    cm_norm = cm_avg / np.sum(cm_avg, axis=0)

    plot_confusion_matrix(cm_norm, genre_list, "fft",
                          "Confusion matrix of an FFT based classifier")
| mit |
idlead/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
    """Load a DocumentClassification dataset; set_ selects a sub-folder."""
    if set_ is not None:
        dataset_path = os.path.join(dataset_path, set_)
    return load_files(dataset_path, metadata.get('description'), **kwargs)
# Dispatch table: MLComp "format" metadata value -> loader callable.
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
    """Load a datasets as downloaded from http://mlcomp.org

    Parameters
    ----------
    name_or_id : the integer id or the string name metadata of the MLComp
        dataset to load

    set_ : select the portion to load: 'train', 'test' or 'raw'

    mlcomp_root : the filesystem path to the root folder where MLComp datasets
        are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
        environment variable is looked up instead.

    **kwargs : domain specific kwargs to be passed to the dataset loader.

    Read more in the :ref:`User Guide <datasets>`.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw to learn, 'target', the
        classification labels (integer index), 'target_names',
        the meaning of the labels, and 'DESCR', the full description of the
        dataset.

    Note on the lookup process: depending on the type of name_or_id,
    will choose between integer id lookup or metadata name lookup by
    looking at the unzipped archives and metadata file.

    TODO: implement zip dataset loading too
    """
    if mlcomp_root is None:
        try:
            mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
        except KeyError:
            raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")

    mlcomp_root = os.path.expanduser(mlcomp_root)
    mlcomp_root = os.path.abspath(mlcomp_root)
    mlcomp_root = os.path.normpath(mlcomp_root)

    if not os.path.exists(mlcomp_root):
        raise ValueError("Could not find folder: " + mlcomp_root)

    # dataset lookup
    if isinstance(name_or_id, numbers.Integral):
        # id lookup: datasets are stored in folders named after their id
        dataset_path = os.path.join(mlcomp_root, str(name_or_id))
    else:
        # assume name based lookup: scan each folder's metadata file
        dataset_path = None
        expected_name_line = "name: " + name_or_id
        for dataset in os.listdir(mlcomp_root):
            metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
            if not os.path.exists(metadata_file):
                continue
            with open(metadata_file) as f:
                for line in f:
                    if line.strip() == expected_name_line:
                        dataset_path = os.path.join(mlcomp_root, dataset)
                        break
        if dataset_path is None:
            raise ValueError("Could not find dataset with metadata line: " +
                             expected_name_line)

    # loading the dataset metadata ("key: value" lines)
    metadata = dict()
    metadata_file = os.path.join(dataset_path, 'metadata')
    if not os.path.exists(metadata_file):
        raise ValueError(dataset_path + ' is not a valid MLComp dataset')
    with open(metadata_file) as f:
        for line in f:
            if ":" in line:
                key, value = line.split(":", 1)
                metadata[key.strip()] = value.strip()

    # dispatch to the per-format loader registered in LOADERS
    format = metadata.get('format', 'unknow')
    loader = LOADERS.get(format)
    if loader is None:
        raise ValueError("No loader implemented for format: " + format)
    return loader(dataset_path, metadata, set_=set_, **kwargs)
| bsd-3-clause |
baptistelabat/kiteEnergySimulator | Carousel/Carousel.py | 1 | 3554 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# Licensed under the MIT License,
# https://github.com/baptistelabat/kiteEnergySimulator
# @author: Charles Spraul
# Created on Fri Mar 14 16:04:44 2014
from scipy.optimize import minimize_scalar
import matplotlib.pyplot as plt
import numpy as np
from numpy import pi
import sys
sys.path.append('../Optim')
import Wind_Power_Car as WPC
# Call counters used to report how much work the optimisation performed.
cmptKA = 0
cmptPower = 0
# NOTE(review): a module-level "global" statement is a no-op in Python;
# n is actually bound by the plain assignment further down — confirm
# before removing this line.
global n
def Kite_Attitude(U, WD):
    """Find the kite angles maximizing power for trolley speed U and wind direction WD.

    Performs a shrinking grid search over the (alpha, beta) angle pair and
    returns Polar = (alpha, beta, X, Y, Z, Fx, Fy, Fz, P) for the best
    candidate (loads, position and angles).
    """
    global cmptKA, cmptPower, n
    cmptKA += 1
    AnglesOpti = (0., 0.)
    Pmax = -1e15
    # Alpha angle search range
    amin = -pi/20.
    amax = pi/2.
    da = amax-amin
    # Beta angle search range
    bmin = -pi/2.
    bmax = pi/2.
    db = bmax-bmin
    while db > 0.001:  # Convergence criterion
        # Loop on alpha and beta angles and find maxima
        for a in np.linspace(amin + da/(n+1), amax - da/(n+1), n):
            for b in np.linspace(bmin + db/(n+1), bmax - db/(n+1), n):
                Angles = (a, b)
                [Fx, Fy, Fz, X, Y, Z] = WPC.ComputeEquilibrium(Angles, U, WD)
                P = WPC.Power(Fx, U)
                cmptPower += 1
                if (P > Pmax and Z > 25):  # Do not allow solution where kite is too close to the ground
                    AnglesOpti = Angles
                    Pmax = P
                    Polar = (Angles[0], Angles[1], X, Y, Z, Fx, Fy, Fz, P)
        # Compute the new (shrunk) range where to find the optimum
        amin = AnglesOpti[0] - da/(n+1)
        amax = AnglesOpti[0] + da/(n+1)
        da = amax - amin
        bmin = AnglesOpti[1] - db/(n+1)
        bmax = AnglesOpti[1] + db/(n+1)
        db = bmax - bmin
    # NOTE(review): if no candidate ever satisfies Z > 25, Polar is unbound
    # here and this raises NameError — confirm whether that can occur.
    return Polar  # Polar contains all the information about loads, position, and angles
def f(U):
    """Objective for the trolley-speed optimisation.

    Returns minus the power averaged over npt wind directions around the
    carousel circle (negated because the optimiser minimises).
    """
    global npt
    P = 0.
    Polar = np.zeros((9, npt))
    for wd in np.arange(npt):
        Polar[:, wd] = Kite_Attitude(U, 360/npt*wd*pi/180.)
        P = P + Polar[8, wd]/npt
    return(-P)
n = 15.
npt = 60 # number of discretization points for the carrousel circle
Uopt = minimize_scalar(f, bounds=[1., 500.], method='Bounded') #optimisation of
print'Uopt', Uopt.x
print'La fonction Kite_Allure a ete lancee', cmptKA,'fois (', cmptKA/npt, 'vitesses testees)'
print'La fonction Power a ete lancee', cmptPower,'fois, soit', cmptPower/cmptKA, 'fois en moyenne'
P = 0. #P is the Power
Polar = np.zeros((9, npt))
for wd in np.arange(npt):
Polar[:, wd] = Kite_Attitude(Uopt.x, 360/npt*wd*pi/180.)
P = P + Polar[8, wd]/npt
print'Average power (W) =', P
plt.figure(1)
plt.subplot(1,3,1)
plt.plot(360/npt*np.arange(npt), Polar[0,:]*180/pi, 'bx', label='alpha') # alpha
plt.plot(360/npt*np.arange(npt), Polar[1,:]*180/pi, 'rx', label='beta') # beta
plt.xlabel('Wind Direction (deg)')
plt.ylabel('Optimal Kite Angles (deg)')
plt.axis([0, 360, -90, 90])
plt.legend()
plt.subplot(1,3,2)
plt.plot(360/npt*np.arange(npt), Polar[4,:], 'y') # altitude du kite
plt.xlabel('Wind Direction (deg)')
plt.ylabel('Altitude of the Kite (m)')
plt.xlim(0, 360)
plt.subplot(1, 3, 3)
plt.plot(360/npt*np.arange(npt), Polar[8, :], 'go') # Power
plt.xlabel('Wind Direction (deg)')
plt.ylabel('Power (W)')
plt.xlim(0, 360)
plt.show()
| mit |
wavelets/lifelines | tests/test_plotting.py | 3 | 7586 | from __future__ import print_function
import os
import pytest
import numpy as np
from lifelines.estimation import NelsonAalenFitter, KaplanMeierFitter, AalenAdditiveFitter
from lifelines.generate_datasets import generate_random_lifetimes, generate_hazard_rates
from lifelines.plotting import plot_lifetimes
@pytest.mark.plottest
@pytest.mark.skipif("DISPLAY" not in os.environ, reason="requires display")
class TestPlotting():
    """Visual smoke tests for lifelines plotting (need matplotlib + a display)."""

    def setup_method(self, method):
        pytest.importorskip("matplotlib")
        from matplotlib import pyplot as plt
        self.plt = plt

    def test_negative_times_still_plots(self):
        n = 40
        T = np.linspace(-2, 3, n)
        C = np.random.randint(2, size=n)
        kmf = KaplanMeierFitter()
        kmf.fit(T, C)
        ax = kmf.plot()
        self.plt.title('test_negative_times_still_plots')
        self.plt.show()
        return

    def test_kmf_plotting(self):
        data1 = np.random.exponential(10, size=(100))
        data2 = np.random.exponential(2, size=(200, 1))
        data3 = np.random.exponential(4, size=(500, 1))
        kmf = KaplanMeierFitter()
        kmf.fit(data1, label='test label 1')
        ax = kmf.plot()
        kmf.fit(data2, label='test label 2')
        kmf.plot(ax=ax)
        kmf.fit(data3, label='test label 3')
        kmf.plot(ax=ax)
        self.plt.title("test_kmf_plotting")
        self.plt.show()
        return

    def test_kmf_with_risk_counts(self):
        data1 = np.random.exponential(10, size=(100))
        kmf = KaplanMeierFitter()
        kmf.fit(data1)
        kmf.plot(at_risk_counts=True)
        self.plt.title("test_kmf_with_risk_counts")
        self.plt.show()

    def test_naf_plotting_with_custom_colours(self):
        data1 = np.random.exponential(5, size=(200, 1))
        data2 = np.random.exponential(1, size=(500))
        naf = NelsonAalenFitter()
        naf.fit(data1)
        ax = naf.plot(color="r")
        naf.fit(data2)
        naf.plot(ax=ax, c="k")
        self.plt.title('test_naf_plotting_with_custom_coloirs')
        self.plt.show()
        return

    def test_aalen_additive_plot(self):
        # this is a visual test of the fitting the cumulative
        # hazards.
        n = 2500
        d = 3
        timeline = np.linspace(0, 70, 10000)
        hz, coef, X = generate_hazard_rates(n, d, timeline)
        T = generate_random_lifetimes(hz, timeline)
        C = np.random.binomial(1, 1., size=n)
        X['T'] = T
        X['E'] = C

        # fit the aaf, no intercept as it is already built into X, X[2] is ones
        aaf = AalenAdditiveFitter(coef_penalizer=0.1, fit_intercept=False)
        aaf.fit(X, 'T', 'E')
        ax = aaf.plot(iloc=slice(0, aaf.cumulative_hazards_.shape[0] - 100))
        ax.set_xlabel("time")
        ax.set_title('test_aalen_additive_plot')
        self.plt.show()
        return

    def test_aalen_additive_smoothed_plot(self):
        # this is a visual test of the fitting the cumulative
        # hazards.
        n = 2500
        d = 3
        timeline = np.linspace(0, 150, 5000)
        hz, coef, X = generate_hazard_rates(n, d, timeline)
        T = generate_random_lifetimes(hz, timeline) + 0.1 * np.random.uniform(size=(n, 1))
        C = np.random.binomial(1, 0.8, size=n)
        X['T'] = T
        X['E'] = C

        # fit the aaf, no intercept as it is already built into X, X[2] is ones
        aaf = AalenAdditiveFitter(coef_penalizer=0.1, fit_intercept=False)
        aaf.fit(X, 'T', 'E')
        ax = aaf.smoothed_hazards_(1).iloc[0:aaf.cumulative_hazards_.shape[0] - 500].plot()
        ax.set_xlabel("time")
        ax.set_title('test_aalen_additive_smoothed_plot')
        self.plt.show()
        return

    def test_naf_plotting_slice(self):
        data1 = np.random.exponential(5, size=(200, 1))
        data2 = np.random.exponential(1, size=(200, 1))
        naf = NelsonAalenFitter()
        naf.fit(data1)
        ax = naf.plot(ix=slice(0, None))
        naf.fit(data2)
        naf.plot(ax=ax, ci_force_lines=True, iloc=slice(100, 180))
        self.plt.title('test_naf_plotting_slice')
        self.plt.show()
        return

    def test_plot_lifetimes_calendar(self):
        self.plt.figure()
        t = np.linspace(0, 20, 1000)
        hz, coef, covrt = generate_hazard_rates(1, 5, t)
        N = 20
        current = 10
        birthtimes = current * np.random.uniform(size=(N,))
        T, C = generate_random_lifetimes(hz, t, size=N, censor=current - birthtimes)
        plot_lifetimes(T, event_observed=C, birthtimes=birthtimes)

    def test_plot_lifetimes_relative(self):
        self.plt.figure()
        t = np.linspace(0, 20, 1000)
        hz, coef, covrt = generate_hazard_rates(1, 5, t)
        N = 20
        T, C = generate_random_lifetimes(hz, t, size=N, censor=True)
        plot_lifetimes(T, event_observed=C)

    def test_naf_plot_cumulative_hazard(self):
        data1 = np.random.exponential(5, size=(200, 1))
        naf = NelsonAalenFitter()
        naf.fit(data1)
        ax = naf.plot()
        naf.plot_cumulative_hazard(ax=ax, ci_force_lines=True)
        self.plt.title("I should have plotted the same thing, but different styles + color!")
        self.plt.show()
        return

    def test_naf_plot_cumulative_hazard_bandwidth_2(self):
        data1 = np.random.exponential(5, size=(2000, 1))
        naf = NelsonAalenFitter()
        naf.fit(data1)
        naf.plot_hazard(bandwidth=1., ix=slice(0, 7.))
        self.plt.title('test_naf_plot_cumulative_hazard_bandwidth_2')
        self.plt.show()
        return

    def test_naf_plot_cumulative_hazard_bandwith_1(self):
        data1 = np.random.exponential(5, size=(2000, 1)) ** 2
        naf = NelsonAalenFitter()
        naf.fit(data1)
        naf.plot_hazard(bandwidth=5., iloc=slice(0, 1700))
        self.plt.title('test_naf_plot_cumulative_hazard_bandwith_1')
        self.plt.show()
        return

    def test_show_censor_with_discrete_date(self):
        T = np.random.binomial(20, 0.1, size=100)
        C = np.random.binomial(1, 0.8, size=100)
        kmf = KaplanMeierFitter()
        kmf.fit(T, C).plot(show_censors=True)
        self.plt.title('test_show_censor_with_discrete_date')
        self.plt.show()
        return

    def test_show_censor_with_index_0(self):
        T = np.random.binomial(20, 0.9, size=100)  # lifelines should auto put a 0 in.
        C = np.random.binomial(1, 0.8, size=100)
        kmf = KaplanMeierFitter()
        kmf.fit(T, C).plot(show_censors=True)
        self.plt.title('test_show_censor_with_index_0')
        self.plt.show()
        return

    def test_flat_style_and_marker(self):
        data1 = np.random.exponential(10, size=200)
        data2 = np.random.exponential(2, size=200)
        C1 = np.random.binomial(1, 0.9, size=200)
        C2 = np.random.binomial(1, 0.95, size=200)
        kmf = KaplanMeierFitter()
        kmf.fit(data1, C1, label='test label 1')
        ax = kmf.plot(flat=True, censor_styles={'marker': '+', 'mew': 2, 'ms': 7})
        kmf.fit(data2, C2, label='test label 2')
        kmf.plot(ax=ax, censor_styles={'marker': 'o', 'ms': 7}, flat=True)
        self.plt.title("testing kmf flat styling + marker")
        self.plt.show()
        return

    def test_flat_style_no_censor(self):
        data1 = np.random.exponential(10, size=200)
        kmf = KaplanMeierFitter()
        kmf.fit(data1, label='test label 1')
        ax = kmf.plot(flat=True, censor_styles={'marker': '+', 'mew': 2, 'ms': 7})
        self.plt.title('test_flat_style_no_censor')
        self.plt.show()
        return
| mit |
vivekmishra1991/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 53 | 21055 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
def test_gradient_descent_stops():
    # Test stopping conditions of gradient descent.
    class ObjectiveSmallGradient:
        def __init__(self):
            self.it = -1

        def __call__(self, _):
            self.it += 1
            return (10 - self.it) / 10.0, np.array([1e-5])

    def flat_function(_):
        return 0.0, np.ones(1)

    # Gradient norm
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        _, error, it = _gradient_descent(
            ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
            n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
            min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert_equal(error, 1.0)
    assert_equal(it, 0)
    assert("gradient norm" in out)

    # Error difference
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        _, error, it = _gradient_descent(
            ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
            n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
            min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert_equal(error, 0.9)
    assert_equal(it, 1)
    assert("error difference" in out)

    # Maximum number of iterations without improvement
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        _, error, it = _gradient_descent(
            flat_function, np.zeros(1), 0, n_iter=100,
            n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
            min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert_equal(error, 0.0)
    assert_equal(it, 11)
    assert("did not make any progress" in out)

    # Maximum number of iterations
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        _, error, it = _gradient_descent(
            ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
            n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
            min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert_equal(error, 0.0)
    assert_equal(it, 10)
    assert("Iteration 10" in out)
def test_binary_search():
    # Test if the binary search finds Gaussians with desired perplexity.
    random_state = check_random_state(0)
    distances = random_state.randn(50, 2).astype(np.float32)
    # Distances shouldn't be negative
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    desired_perplexity = 25.0
    P = _binary_search_perplexity(distances, None, desired_perplexity,
                                  verbose=0)
    P = np.maximum(P, np.finfo(np.double).eps)
    mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
                               for i in range(P.shape[0])])
    assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
    # Binary perplexity search approximation.
    # Should be approximately equal to the slow method when we use
    # all points as neighbors.
    n_samples = 500
    desired_perplexity = 25.0
    random_state = check_random_state(0)
    distances = random_state.randn(n_samples, 2).astype(np.float32)
    # Distances shouldn't be negative
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    P1 = _binary_search_perplexity(distances, None, desired_perplexity,
                                   verbose=0)

    # Test that when we use all the neighbors the results are identical
    k = n_samples
    neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
    P2 = _binary_search_perplexity(distances, neighbors_nn,
                                   desired_perplexity, verbose=0)
    assert_array_almost_equal(P1, P2, decimal=4)

    # Test that the highest P_ij are the same when few neighbors are used
    for k in np.linspace(80, n_samples, 10):
        k = int(k)
        topn = k * 10  # check the top 10 * k entries out of k * k entries
        neighbors_nn = np.argsort(distances, axis=1)[:, :k]
        P2k = _binary_search_perplexity(distances, neighbors_nn,
                                        desired_perplexity, verbose=0)
        idx = np.argsort(P1.ravel())[::-1]
        P1top = P1.ravel()[idx][:topn]
        P2top = P2k.ravel()[idx][:topn]
        assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
    # Binary perplexity search should be stable.
    # The binary_search_perplexity had a bug wherein the P array
    # was uninitialized, leading to sporadically failing tests.
    k = 10
    n_samples = 100
    random_state = check_random_state(0)
    distances = random_state.randn(n_samples, 2).astype(np.float32)
    # Distances shouldn't be negative
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    last_P = None
    neighbors_nn = np.argsort(distances, axis=1)[:, :k]
    for _ in range(100):
        P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
                                      3, verbose=0)
        P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
        if last_P is None:
            last_P = P
            last_P1 = P1
        else:
            assert_array_almost_equal(P, last_P, decimal=4)
            assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
    # Test gradient of Kullback-Leibler divergence.
    random_state = check_random_state(0)

    n_samples = 50
    n_features = 2
    n_components = 2
    alpha = 1.0

    distances = random_state.randn(n_samples, n_features).astype(np.float32)
    distances = distances.dot(distances.T)
    np.fill_diagonal(distances, 0.0)
    X_embedded = random_state.randn(n_samples, n_components)

    P = _joint_probabilities(distances, desired_perplexity=25.0,
                             verbose=0)
    fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
                                        n_components)[0]
    grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
                                         n_components)[1]
    assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
                        decimal=5)
def test_trustworthiness():
    # Test trustworthiness score.
    random_state = check_random_state(0)

    # Affine transformation
    X = random_state.randn(100, 2)
    assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)

    # Randomly shuffled
    X = np.arange(100).reshape(-1, 1)
    X_embedded = X.copy()
    random_state.shuffle(X_embedded)
    assert_less(trustworthiness(X, X_embedded), 0.6)

    # Completely different
    X = np.arange(5).reshape(-1, 1)
    X_embedded = np.array([[0], [2], [4], [1], [3]])
    assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
    # Nearest neighbors should be preserved approximately.
    random_state = check_random_state(0)
    # The Barnes-Hut approximation uses a different method to estimate
    # P_ij using only a number of nearest neighbors instead of all
    # points (so that k = 3 * perplexity). As a result we set the
    # perplexity=5, so that the number of neighbors is 5%.
    n_components = 2
    methods = ['exact', 'barnes_hut']
    X = random_state.randn(100, n_components).astype(np.float32)
    for init in ('random', 'pca'):
        for method in methods:
            tsne = TSNE(n_components=n_components, perplexity=50,
                        learning_rate=100.0, init=init, random_state=0,
                        method=method)
            X_embedded = tsne.fit_transform(X)
            T = trustworthiness(X, X_embedded, n_neighbors=1)
            assert_almost_equal(T, 1.0, decimal=1)
def test_fit_csr_matrix():
    # X can be a sparse matrix.
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
    X_csr = sp.csr_matrix(X)
    tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
                random_state=0, method='exact')
    X_embedded = tsne.fit_transform(X_csr)
    assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
                        decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
    """t-SNE on a precomputed distance matrix keeps nearest neighbors."""
    rng = check_random_state(0)
    data = rng.randn(100, 2)
    dist = squareform(pdist(data), "sqeuclidean")
    model = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
                 metric="precomputed", random_state=0, verbose=0)
    embedded = model.fit_transform(dist)
    score = trustworthiness(dist, embedded, n_neighbors=1, precomputed=True)
    assert_almost_equal(score, 1.0, decimal=1)
def test_early_exaggeration_too_small():
    """An early exaggeration factor below 1 must raise a ValueError."""
    model = TSNE(early_exaggeration=0.99)
    assert_raises_regexp(ValueError, "early_exaggeration .*",
                         model.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
    """Fewer than 200 gradient descent iterations must raise a ValueError."""
    model = TSNE(n_iter=199)
    assert_raises_regexp(ValueError, "n_iter .*",
                         model.fit_transform, np.array([[0.0]]))
def test_non_square_precomputed_distances():
    """Precomputed distance matrices must be square."""
    model = TSNE(metric="precomputed")
    assert_raises_regexp(ValueError, ".* square distance matrix",
                         model.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
    """Unknown 'init' values are rejected at construction time."""
    msg = "'init' must be 'pca', 'random' or a NumPy array"
    assert_raises_regexp(ValueError, msg, TSNE, init="not available")
def test_distance_not_available():
    """An unknown 'metric' must raise a ValueError on fit."""
    model = TSNE(metric="not available")
    assert_raises_regexp(ValueError, "Unknown metric not available.*",
                         model.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
    """init='pca' may not be combined with metric='precomputed'."""
    model = TSNE(metric="precomputed", init="pca")
    assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
                         "used with metric=\"precomputed\".",
                         model.fit_transform, np.array([[0.0], [1.0]]))
def test_answer_gradient_two_points():
    """Check the Barnes-Hut gradient for a tree with a single set of children.

    The inputs and expected gradients are literal reference values that
    have been checked against the reference implementation by LvdM.
    """
    pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
    pos_output = np.array([[-4.961291e-05, -1.072243e-04],
                           [9.259460e-05, 2.702024e-04]])
    # Each point's only neighbor is the other point.
    neighbors = np.array([[1],
                          [0]])
    grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
                            [2.37012478e-05, 6.29044398e-05]])
    _run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
    """Check the Barnes-Hut gradient on a tree with multiple child levels.

    Four points force the quad tree to create nested nodes.  The inputs
    and expected gradients are literal reference values checked against
    the reference implementation by LvdM.
    """
    pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
                          [5.0, 2.0], [7.3, 2.2]])
    pos_output = np.array([[6.080564e-05, -7.120823e-05],
                           [-1.718945e-04, -4.000536e-05],
                           [-2.271720e-04, 8.663310e-05],
                           [-1.032577e-04, -3.582033e-05]])
    # All other points are neighbors of each point.
    neighbors = np.array([[1, 2, 3],
                          [0, 2, 3],
                          [1, 0, 3],
                          [1, 2, 0]])
    grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
                            [-5.81526851e-05, 7.80976444e-06],
                            [4.24275173e-08, -3.69569698e-08],
                            [-2.58720939e-09, 7.52706374e-09]])
    _run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
    """Test the kwargs option skip_num_points.

    skip_num_points should make the Barnes-Hut gradient not be calculated
    for indices below skip_num_points.  Aside from skip_num_points=2 and
    the first two gradient rows being set to zero, these data points are
    the same as in test_answer_gradient_four_points().
    """
    pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
                          [5.0, 2.0], [7.3, 2.2]])
    pos_output = np.array([[6.080564e-05, -7.120823e-05],
                           [-1.718945e-04, -4.000536e-05],
                           [-2.271720e-04, 8.663310e-05],
                           [-1.032577e-04, -3.582033e-05]])
    neighbors = np.array([[1, 2, 3],
                          [0, 2, 3],
                          [1, 0, 3],
                          [1, 2, 0]])
    # First two rows are zero because their gradient is skipped.
    grad_output = np.array([[0.0, 0.0],
                            [0.0, 0.0],
                            [4.24275173e-08, -3.69569698e-08],
                            [-2.58720939e-09, 7.52706374e-09]])
    # verbose=False, perplexity=0.1, skip_num_points=2
    _run_answer_test(pos_input, pos_output, neighbors, grad_output,
                     False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
                     verbose=False, perplexity=0.1, skip_num_points=0):
    """Compare the Barnes-Hut gradient against a reference gradient.

    pos_input -- original data points; pos_output -- embedded positions;
    neighbors -- per-point neighbor indices; grad_output -- expected
    gradient; skip_num_points -- gradient is not computed for indices
    below this value.
    """
    distances = pairwise_distances(pos_input).astype(np.float32)
    args = distances, perplexity, verbose
    pos_output = pos_output.astype(np.float32)
    neighbors = neighbors.astype(np.int64)
    pij_input = _joint_probabilities(*args)
    pij_input = squareform(pij_input).astype(np.float32)
    grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
    # Forward the caller's skip_num_points: the original hard-coded
    # skip_num_points=0 here, silently ignoring the parameter.
    _barnes_hut_tsne.gradient(pij_input, pos_output, neighbors,
                              grad_bh, 0.5, 2, 1,
                              skip_num_points=skip_num_points)
    assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
    """Verbose mode must report its progress messages on stdout."""
    random_state = check_random_state(0)
    tsne = TSNE(verbose=2)
    X = random_state.randn(5, 2)
    # Capture stdout during the fit, restoring it even on failure.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        tsne.fit_transform(X)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert("[t-SNE]" in out)
    assert("Computing pairwise distances" in out)
    assert("Computed conditional probabilities" in out)
    assert("Mean sigma" in out)
    assert("early exaggeration" in out)
    # The original asserted "Finished" twice; asserting it once is enough.
    assert("Finished" in out)
def test_chebyshev_metric():
    """Metrics that cannot be squared must work (regression for #3526)."""
    rng = check_random_state(0)
    model = TSNE(metric="chebyshev")
    model.fit_transform(rng.randn(5, 2))
def test_reduction_to_one_component():
    """t-SNE can embed into a single dimension (regression for #4154)."""
    rng = check_random_state(0)
    embedding = TSNE(n_components=1).fit(rng.randn(5, 2)).embedding_
    assert(np.all(np.isfinite(embedding)))
def test_no_sparse_on_barnes_hut():
    """Sparse matrices must be rejected by the Barnes-Hut method."""
    random_state = check_random_state(0)
    X = random_state.randn(100, 2)
    # Zero out some entries with the *seeded* RNG: the original used the
    # global np.random here, which made the input non-deterministic.
    X[(random_state.randint(0, 100, 50), random_state.randint(0, 2, 50))] = 0.0
    X_csr = sp.csr_matrix(X)
    tsne = TSNE(n_iter=199, method='barnes_hut')
    assert_raises_regexp(TypeError, "A sparse matrix was.*",
                         tsne.fit_transform, X_csr)
def test_64bit():
    """Both float32 and float64 inputs are handled by both methods."""
    rng = check_random_state(0)
    for method in ('barnes_hut', 'exact'):
        for dtype in (np.float32, np.float64):
            data = rng.randn(100, 2).astype(dtype)
            model = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
                         random_state=0, method=method)
            model.fit_transform(data)
def test_barnes_hut_angle():
    """When Barnes-Hut's angle=0 this corresponds to the exact method."""
    angle = 0.0
    perplexity = 10
    n_samples = 100
    for n_components in [2, 3]:
        n_features = 5
        degrees_of_freedom = float(n_components - 1.0)
        random_state = check_random_state(0)
        distances = random_state.randn(n_samples, n_features)
        distances = distances.astype(np.float32)
        # Make a symmetric matrix with a zero diagonal to use as distances.
        distances = distances.dot(distances.T)
        np.fill_diagonal(distances, 0.0)
        params = random_state.randn(n_samples, n_components)
        # Exact gradient over all pairs.
        P = _joint_probabilities(distances, perplexity, False)
        kl, gradex = _kl_divergence(params, P, degrees_of_freedom, n_samples,
                                    n_components)
        # Barnes-Hut gradient over every other point as neighbor, so the
        # neighbor set is exhaustive and the two gradients should agree.
        k = n_samples - 1
        bt = BallTree(distances)
        distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
        # Drop the first column: each point is its own nearest neighbor.
        neighbors_nn = neighbors_nn[:, 1:]
        Pbh = _joint_probabilities_nn(distances, neighbors_nn,
                                      perplexity, False)
        kl, gradbh = _kl_divergence_bh(params, Pbh, neighbors_nn,
                                       degrees_of_freedom, n_samples,
                                       n_components, angle=angle,
                                       skip_num_points=0, verbose=False)
        assert_array_almost_equal(Pbh, P, decimal=5)
        assert_array_almost_equal(gradex, gradbh, decimal=5)
def test_quadtree_similar_point():
    """Introduce a point into a quad tree where a similar point already exists.

    Test will hang if it doesn't complete.
    """
    Xs = []
    # check the case where points are actually different
    Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
    # check the case where points are the same on X axis
    Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
    # check the case where points are arbitrarily close on X axis
    Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
    # check the case where points are the same on Y axis
    Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
    # check the case where points are arbitrarily close on Y axis
    Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
    # check the case where points are arbitrarily close on both axes
    Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
              dtype=np.float32))
    # check the case where points are arbitrarily close on both axes
    # close to machine epsilon - x axis
    Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
              dtype=np.float32))
    # check the case where points are arbitrarily close on both axes
    # close to machine epsilon - y axis
    Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
              dtype=np.float32))
    for X in Xs:
        # counts[0]: inserted points; counts[1]: at root; counts[2]: in tree.
        counts = np.zeros(3, dtype='int64')
        _barnes_hut_tsne.check_quadtree(X, counts)
        m = "Tree consistency failed: unexpected number of points at root node"
        assert_equal(counts[0], counts[1], m)
        m = "Tree consistency failed: unexpected number of points on the tree"
        assert_equal(counts[0], counts[2], m)
def test_index_offset():
    """Translating between 1D and N-D indices must be preserved."""
    assert_equal(1, _barnes_hut_tsne.test_index2offset())
    assert_equal(1, _barnes_hut_tsne.test_index_offset())
| bsd-3-clause |
toastedcornflakes/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
# Module-level fixture shared by most tests below: generated clustered data
# with a known number of clusters.
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
    """DBSCAN on a normalised precomputed distance matrix finds the clusters."""
    # Parameters chosen specifically for this task.
    eps = 0.15
    min_samples = 10
    # Pairwise distances scaled into [0, 1].
    D = distance.squareform(distance.pdist(X))
    D /= np.max(D)
    # Function interface.
    core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
                                  min_samples=min_samples)
    # Number of clusters, ignoring noise if present.
    found = len(set(labels)) - (1 if -1 in labels else 0)
    assert_equal(found, n_clusters)
    # Estimator interface.
    db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
    labels = db.fit(D).labels_
    found = len(set(labels)) - int(-1 in labels)
    assert_equal(found, n_clusters)
def test_dbscan_feature():
    """DBSCAN on a raw feature array finds the expected clusters."""
    # Different eps from the similarity test, because the distances here
    # are not normalised.
    eps = 0.8
    min_samples = 10
    metric = 'euclidean'
    # Function interface, parameters chosen for this task.
    core_samples, labels = dbscan(X, metric=metric, eps=eps,
                                  min_samples=min_samples)
    # Number of clusters, ignoring noise if present.
    found = len(set(labels)) - int(-1 in labels)
    assert_equal(found, n_clusters)
    # Estimator interface.
    db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
    labels = db.fit(X).labels_
    found = len(set(labels)) - int(-1 in labels)
    assert_equal(found, n_clusters)
def test_dbscan_sparse():
    """Sparse and dense inputs must yield identical clusterings."""
    dense_core, dense_labels = dbscan(X, eps=.8, min_samples=10)
    sparse_core, sparse_labels = dbscan(sparse.lil_matrix(X), eps=.8,
                                        min_samples=10)
    assert_array_equal(dense_core, sparse_core)
    assert_array_equal(dense_labels, sparse_labels)
def test_dbscan_sparse_precomputed():
    """A sparse precomputed distance matrix matches the dense result."""
    D = pairwise_distances(X)
    nn = NearestNeighbors(radius=.9).fit(X)
    D_sparse = nn.radius_neighbors_graph(mode='distance')
    # The matrix must be genuinely sparse, not merely zero on the diagonal.
    assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
    core_sparse, labels_sparse = dbscan(D_sparse, eps=.8, min_samples=10,
                                        metric='precomputed')
    core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
                                      metric='precomputed')
    assert_array_equal(core_dense, core_sparse)
    assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
    """When min_samples is too large, every point is labelled as noise."""
    rng = np.random.RandomState(0)
    data = rng.rand(40, 10)
    data[data < .8] = 0
    for matrix in [data, sparse.csr_matrix(data)]:
        db = DBSCAN(min_samples=6).fit(matrix)
        assert_array_equal(db.components_, np.empty((0, matrix.shape[1])))
        assert_array_equal(db.labels_, -1)
        assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
    """DBSCAN accepts a callable metric together with a tree algorithm."""
    # Different eps from the similarity test: distances are not normalised.
    eps = 0.8
    min_samples = 10
    # Pass the function reference, not the string key.
    metric = distance.euclidean
    # Function interface, parameters chosen for this task.
    core_samples, labels = dbscan(X, metric=metric, eps=eps,
                                  min_samples=min_samples,
                                  algorithm='ball_tree')
    found = len(set(labels)) - int(-1 in labels)
    assert_equal(found, n_clusters)
    # Estimator interface.
    db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
                algorithm='ball_tree')
    labels = db.fit(X).labels_
    found = len(set(labels)) - int(-1 in labels)
    assert_equal(found, n_clusters)
def test_dbscan_balltree():
    """DBSCAN with tree-based neighbor searches finds the clusters."""
    eps = 0.8
    min_samples = 10
    # Function interface with a precomputed distance matrix.
    D = pairwise_distances(X)
    core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
                                  min_samples=min_samples)
    # Number of clusters, ignoring noise if present.
    assert_equal(len(set(labels)) - int(-1 in labels), n_clusters)
    # Estimator interface with the same tree algorithms / parameters the
    # original exercised one by one.
    for extra in (dict(p=2.0, algorithm='ball_tree'),
                  dict(p=2.0, algorithm='kd_tree'),
                  dict(p=1.0, algorithm='ball_tree'),
                  dict(leaf_size=20, algorithm='ball_tree')):
        db = DBSCAN(eps=eps, min_samples=min_samples, **extra)
        labels = db.fit(X).labels_
        assert_equal(len(set(labels)) - int(-1 in labels), n_clusters)
def test_input_validation():
    """DBSCAN.fit should accept a plain list of lists without raising."""
    points = [[1., 2.], [3., 4.]]
    DBSCAN().fit(points)  # must not raise exception
def test_dbscan_badargs():
    """Each invalid parameter value must raise a ValueError."""
    bad_kwargs = (dict(eps=-1.0),
                  dict(algorithm='blah'),
                  dict(metric='blah'),
                  dict(leaf_size=-1),
                  dict(p=-1))
    for kwargs in bad_kwargs:
        assert_raises(ValueError, dbscan, X, **kwargs)
def test_pickle():
    """A DBSCAN estimator survives a pickle round-trip."""
    original = DBSCAN()
    restored = pickle.loads(pickle.dumps(original))
    assert_equal(type(restored), original.__class__)
def test_boundaries():
    """min_samples counts the core point itself; eps includes the boundary."""
    # min_samples is inclusive of the core point.
    core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
    assert_in(0, core)
    # eps is inclusive of the circumference...
    core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
    assert_in(0, core)
    # ...but nothing beyond it.
    core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
    assert_not_in(0, core)
def test_weighted_dbscan():
    """sample_weight must be validated, have an effect, and be equivalent
    to repeating samples."""
    # ensure sample_weight is validated (wrong length must raise)
    assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
    assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
    # ensure sample_weight has an effect: a point becomes a core sample
    # exactly when its (weighted) neighborhood mass reaches min_samples
    assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
                                  min_samples=6)[0])
    assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
                                  min_samples=6)[0])
    assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
                                   min_samples=6)[0])
    assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
                                      min_samples=6)[0])
    # points within eps of each other: their weights combine
    assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
                                      sample_weight=[5, 1], min_samples=6)[0])
    # and effect of non-positive and non-integer sample_weight:
    assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
                                  eps=1.5, min_samples=6)[0])
    assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
                                      eps=1.5, min_samples=6)[0])
    assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
                                      eps=1.5, min_samples=6)[0])
    assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
                                  eps=1.5, min_samples=6)[0])
    # for non-negative sample_weight, cores should be identical to repetition
    rng = np.random.RandomState(42)
    sample_weight = rng.randint(0, 5, X.shape[0])
    core1, label1 = dbscan(X, sample_weight=sample_weight)
    assert_equal(len(label1), len(X))
    X_repeated = np.repeat(X, sample_weight, axis=0)
    core_repeated, label_repeated = dbscan(X_repeated)
    # compare as boolean masks because the index arrays differ in length
    core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
    core_repeated_mask[core_repeated] = True
    core_mask = np.zeros(X.shape[0], dtype=bool)
    core_mask[core1] = True
    assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
    # sample_weight should work with precomputed distance matrix
    D = pairwise_distances(X)
    core3, label3 = dbscan(D, sample_weight=sample_weight,
                           metric='precomputed')
    assert_array_equal(core1, core3)
    assert_array_equal(label1, label3)
    # sample_weight should work with estimator
    est = DBSCAN().fit(X, sample_weight=sample_weight)
    core4 = est.core_sample_indices_
    label4 = est.labels_
    assert_array_equal(core1, core4)
    assert_array_equal(label1, label4)
    # ...and with fit_predict, which must agree with labels_
    est = DBSCAN()
    label5 = est.fit_predict(X, sample_weight=sample_weight)
    core5 = est.core_sample_indices_
    assert_array_equal(core1, core5)
    assert_array_equal(label1, label5)
    assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
    """Toy 1-D data: check core samples and labels for rising min_samples."""
    X = [[0], [2], [3], [4], [6], [8], [10]]
    n_samples = len(X)
    for algorithm in ['brute', 'kd_tree', 'ball_tree']:
        # Degenerate case: every sample is a core sample, either with its own
        # cluster or including other close core samples.
        core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
                                      min_samples=1)
        assert_array_equal(core_samples, np.arange(n_samples))
        assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
        # With eps=1 and min_samples=2 only the 3 samples from the denser area
        # are core samples. All other points are isolated and considered noise.
        core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
                                      min_samples=2)
        assert_array_equal(core_samples, [1, 2, 3])
        assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
        # Only the sample in the middle of the dense area is core. Its two
        # neighbors are edge samples. Remaining samples are noise.
        core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
                                      min_samples=3)
        assert_array_equal(core_samples, [2])
        assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
        # It's no longer possible to extract core samples with eps=1:
        # everything is noise.
        core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
                                      min_samples=4)
        assert_array_equal(core_samples, [])
        assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
    """Degenerate precomputed matrices yield a single cluster.

    See https://github.com/scikit-learn/scikit-learn/issues/4641 for
    more details.
    """
    for matrix in (np.eye(10), np.zeros((10, 10))):
        labels = DBSCAN(eps=0.5, metric='precomputed').fit(matrix).labels_
        assert_equal(len(set(labels)), 1)
| bsd-3-clause |
Gillu13/scipy | doc/source/tutorial/stats/plots/kde_plot4.py | 142 | 1457 | from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def my_kde_bandwidth(obj, fac=1./5):
    """Scott's Rule bandwidth (n ** (-1 / (d + 4))) scaled by ``fac``."""
    scott = np.power(obj.n, -1. / (obj.d + 4))
    return scott * fac
# Draw a bimodal sample: a broad mode at -2 and a narrow one at +2.
loc1, scale1, size1 = (-2, 1, 175)
loc2, scale2, size2 = (2, 0.2, 50)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
                     np.random.normal(loc=loc2, scale=scale2, size=size2)])

# Evaluation grid spanning slightly beyond the sample range.
x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)

# Four KDEs: the two built-in bandwidth rules and two scaled-down Scott rules.
kde = stats.gaussian_kde(x2)
kde2 = stats.gaussian_kde(x2, bw_method='silverman')
kde3 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.2))
kde4 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.5))

# The true density is a size-weighted mixture of the two normals.
pdf = stats.norm.pdf
bimodal_pdf = pdf(x_eval, loc=loc1, scale=scale1) * float(size1) / x2.size + \
              pdf(x_eval, loc=loc2, scale=scale2) * float(size2) / x2.size

fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)

# Rug plot of the sample, then each estimate versus the actual PDF.
ax.plot(x2, np.zeros(x2.shape), 'b+', ms=12)
ax.plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
ax.plot(x_eval, kde2(x_eval), 'b-', label="Silverman's Rule")
ax.plot(x_eval, kde3(x_eval), 'g-', label="Scott * 0.2")
ax.plot(x_eval, kde4(x_eval), 'c-', label="Scott * 0.5")
ax.plot(x_eval, bimodal_pdf, 'r--', label="Actual PDF")

ax.set_xlim([x_eval.min(), x_eval.max()])
ax.legend(loc=2)
ax.set_xlabel('x')
ax.set_ylabel('Density')
plt.show()
| bsd-3-clause |
antiface/mne-python | examples/preprocessing/plot_resample.py | 12 | 3364 | """
===============
Resampling data
===============
When performing experiments where timing is critical, a signal with a high
sampling rate is desired. However, having a signal with a much higher sampling
rate than is necessary needlessly consumes memory and slows down computations
operating on the data.
This example downsamples from 600 Hz to 100 Hz. This achieves a 6-fold
reduction in data size, at the cost of an equal loss of temporal resolution.
"""
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
#
# License: BSD (3-clause)
#
from __future__ import print_function
from matplotlib import pyplot as plt
import mne
from mne.io import Raw
from mne.datasets import sample
###############################################################################
# Setting up data paths and loading raw data
# Setting up data paths and loading raw data (downloads the sample dataset
# on first use).
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
raw = Raw(raw_fname, preload=True)

###############################################################################
# Since downsampling reduces the timing precision of events, we recommend
# first extracting epochs and downsampling the Epochs object:
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=2, tmin=-0.1, tmax=0.8, preload=True)

# Downsample to 100 Hz
print('Original sampling rate:', epochs.info['sfreq'], 'Hz')
epochs_resampled = epochs.resample(100, copy=True)
print('New sampling rate:', epochs_resampled.info['sfreq'], 'Hz')

# Plot a piece of data to see the effects of downsampling
plt.figure(figsize=(7, 3))

n_samples_to_plot = int(0.5 * epochs.info['sfreq'])  # plot 0.5 seconds of data
plt.plot(epochs.times[:n_samples_to_plot],
         epochs.get_data()[0, 0, :n_samples_to_plot], color='black')

n_samples_to_plot = int(0.5 * epochs_resampled.info['sfreq'])
plt.plot(epochs_resampled.times[:n_samples_to_plot],
         epochs_resampled.get_data()[0, 0, :n_samples_to_plot],
         '-o', color='red')

plt.xlabel('time (s)')
plt.legend(['original', 'downsampled'], loc='best')
plt.title('Effect of downsampling')
mne.viz.tight_layout()

###############################################################################
# When resampling epochs is unwanted or impossible, for example when the data
# doesn't fit into memory or your analysis pipeline doesn't involve epochs at
# all, the alternative approach is to resample the continous data. This
# can also be done on non-preloaded data.

# Resample to 300 Hz
raw_resampled = raw.resample(300, copy=True)

###############################################################################
# Because resampling also affects the stim channels, some trigger onsets might
# be lost in this case. While MNE attempts to downsample the stim channels in
# an intelligent manner to avoid this, the recommended approach is to find
# events on the original data before downsampling.
print('Number of events before resampling:', len(mne.find_events(raw)))

# Resample to 100 Hz (generates warning)
raw_resampled = raw.resample(100, copy=True)
print('Number of events after resampling:',
      len(mne.find_events(raw_resampled)))

# To avoid losing events, jointly resample the data and event matrix
events = mne.find_events(raw)
raw_resampled, events_resampled = raw.resample(100, events=events, copy=True)
print('Number of events after resampling:', len(events_resampled))
glemaitre/UnbalancedDataset | examples/combine/plot_comparison_combine.py | 2 | 4390 | """
====================================================================
Comparison of the combination of over- and under-sampling algorithms
====================================================================
This example shows the effect of applying an under-sampling algorithms after
SMOTE over-sampling. In the literature, Tomek's link and edited nearest
neighbours are the two methods which have been used and are available in
imbalanced-learn.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC
from imblearn.pipeline import make_pipeline
from imblearn.over_sampling import SMOTE
from imblearn.combine import SMOTEENN, SMOTETomek
print(__doc__)
###############################################################################
# The following function will be used to create toy dataset. It using the
# ``make_classification`` from scikit-learn but fixing some parameters.
def create_dataset(n_samples=1000, weights=(0.01, 0.01, 0.98), n_classes=3,
                   class_sep=0.8, n_clusters=1):
    """Build a toy imbalanced classification problem.

    Wraps ``make_classification`` with two informative features and a
    fixed random_state so every figure uses the same data.
    """
    return make_classification(
        n_samples=n_samples, n_features=2, n_informative=2, n_redundant=0,
        n_repeated=0, n_classes=n_classes, n_clusters_per_class=n_clusters,
        weights=list(weights), class_sep=class_sep, random_state=0)
###############################################################################
# The following function will be used to plot the sample space after resampling
# to illustrate the characteristic of an algorithm.
def plot_resampling(X, y, sampling, ax):
    """Resample (X, y) with ``sampling``, scatter the result on ``ax``.

    Returns a Counter of the resampled class labels.
    """
    X_res, y_res = sampling.fit_sample(X, y)
    ax.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.8, edgecolor='k')
    # Make nice plotting: hide the top/right spines, keep ticks on the
    # bottom/left, and offset the remaining spines outward.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    for side in ('left', 'bottom'):
        ax.spines[side].set_position(('outward', 10))
    return Counter(y_res)
###############################################################################
# The following function will be used to plot the decision function of a
# classifier given some data.
def plot_decision_function(X, y, clf, ax):
    """Draw the decision regions of ``clf`` and the samples on ``ax``."""
    step = 0.02
    # Mesh covering the data with a one-unit margin on every side.
    x_lo, x_hi = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_lo, y_hi = X[:, 1].min() - 1, X[:, 1].max() + 1
    grid_x, grid_y = np.meshgrid(np.arange(x_lo, x_hi, step),
                                 np.arange(y_lo, y_hi, step))
    # Predict on every grid node and shade the resulting regions.
    preds = clf.predict(np.c_[grid_x.ravel(), grid_y.ravel()])
    preds = preds.reshape(grid_x.shape)
    ax.contourf(grid_x, grid_y, preds, alpha=0.4)
    ax.scatter(X[:, 0], X[:, 1], alpha=0.8, c=y, edgecolor='k')
###############################################################################
# ``SMOTE`` allows to generate samples. However, this method of over-sampling
# does not have any knowledge regarding the underlying distribution. Therefore,
# some noisy samples can be generated, e.g. when the different classes cannot
# be well separated. Hence, it can be beneficial to apply an under-sampling
# algorithm to clean the noisy samples. Two methods are usually used in the
# literature: (i) Tomek's link and (ii) edited nearest neighbours cleaning
# methods. Imbalanced-learn provides two ready-to-use samplers ``SMOTETomek``
# and ``SMOTEENN``. In general, ``SMOTEENN`` cleans more noisy data than
# ``SMOTETomek``.
# One row per sampler: left column shows the fitted decision function,
# right column shows the resampled data.
fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2,
                                                         figsize=(15, 25))
X, y = create_dataset(n_samples=1000, weights=(0.1, 0.2, 0.7))

ax_arr = ((ax1, ax2), (ax3, ax4), (ax5, ax6))
for ax, sampler in zip(ax_arr, (
        SMOTE(random_state=0),
        SMOTEENN(random_state=0),
        SMOTETomek(random_state=0))):
    # Fit a linear SVM on the resampled data via an imblearn pipeline.
    clf = make_pipeline(sampler, LinearSVC())
    clf.fit(X, y)
    plot_decision_function(X, y, clf, ax[0])
    ax[0].set_title('Decision function for {}'.format(
        sampler.__class__.__name__))
    plot_resampling(X, y, sampler, ax[1])
    ax[1].set_title('Resampling using {}'.format(
        sampler.__class__.__name__))
fig.tight_layout()
plt.show()
| mit |
jerryjiahaha/rts2 | python/rts2/gpoint.py | 3 | 52726 | # g(pl)-Point - GPLed Telescope pointing model fit, as described in paper by Marc Buie:
#
# ftp://ftp.lowell.edu/pub/buie/idl/pointing/pointing.pdf
#
# (C) 2015-2016 Petr Kubanek <petr@kubanek.net>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
import sys
import numpy as np
import libnova
import string
from math import radians, degrees, cos, sin, tan, sqrt, atan2, acos
from lmfit import minimize, Parameters, minimizer
import re
# name of GEM (German equatorial mount) model parameters
_gem_params = ['id', 'me', 'ma', 'tf', 'ih', 'ch', 'np', 'daf', 'fo']
# name of AltAz parameters
_altaz_params = ['ia', 'tn', 'te', 'npae', 'npoa', 'ie', 'tf']
def flip_ra(a_ra, dec):
    """Mirror RA by 180 degrees when the declination lies beyond a pole."""
    if abs(dec) <= 90:
        return a_ra
    return (a_ra + 180) % 360
def flip_dec(a_dec, dec):
    """Reflect a_dec about the nearer pole when dec is beyond +/-90 degrees."""
    if dec > 90:
        return 180 - a_dec
    if dec < -90:
        return -180 - a_dec
    return a_dec
def rad_flip_dec(a_dec, dec):
    """Radian version of flip_dec: reflect about the nearer pole."""
    half_pi = np.pi / 2.0
    if dec > half_pi:
        return np.pi - a_dec
    if dec < -half_pi:
        return -np.pi - a_dec
    return a_dec
def print_model_input(filename, first):
    """Print one model-input line built from a FITS image header.

    When ``first`` is True, a '#observatory' header line is printed first.
    """
    # NOTE(review): `fits` and `wcs` do not appear among this module's
    # visible imports (they look like astropy.io.fits / astropy.wcs) --
    # confirm they are imported elsewhere, otherwise calling this function
    # raises NameError.
    hdulist = fits.open(filename)
    h = hdulist[0].header
    w = wcs.WCS(h)
    # Sky coordinates of detector pixel (2000, 2000).
    ra, dec = w.all_pix2world(2000, 2000, 0)
    tar_telra = float(h['TAR_TELRA'])
    tar_teldec = float(h['TAR_TELDEC'])
    if first:
        print("#observatory", h['SITELONG'], h['SITELAT'], h['ELEVATION'])
    print((h['IMGID'], h['JD'], h['LST'], tar_telra, tar_teldec, h['AXRA'], h['AXDEC'], ra, dec))
def normalize_az_err(errs):
    """Wrap azimuth errors (degrees) into the range [-180, 180)."""
    wrapped = errs % 360
    return np.where(wrapped < 180, wrapped, wrapped - 360)
def normalize_ha_err(errs):
    """Wrap hour-angle errors (degrees) into the range [-180, 180)."""
    wrapped = errs % 360
    return np.array([e - 360 if e >= 180 else e for e in wrapped])
def pole_distance(dec):
    """Returns pole distance, e.g. DEC distance from north (or south) pole in degrees"""
    folded = np.array([flip_dec(d, d) for d in dec])
    return 90 - np.abs(folded)
def rad_pole_distance(dec):
    """Returns pole distance, e.g. DEC distance from the nearer pole, in radians"""
    folded = np.array([rad_flip_dec(d, d) for d in dec])
    return np.pi / 2.0 - np.abs(folded)
def _str_to_rad(s):
if s[-1] == 'd':
return np.radians(float(s[:-1]))
elif s[-1] == "'" or s[-1] == 'm':
return np.radians(float(s[:-1]) / 60.0)
elif s[-1] == '"' or s[-1] == 's':
return np.radians(float(s[:-1]) / 3600.0)
return float(s)
class ExtraParam:
    """Extra parameter - term for model evaluation."""
    def __init__(self, axis, multi, function, params, consts):
        # Axis this term applies to.
        self.axis = axis
        if multi is None:
            self.multi = None
        else:
            # Multiplier string may carry an angular suffix (d/m/s, ' or ").
            self.multi = _str_to_rad(multi)
        # save initial multiplier
        self.__initial_multi = self.multi
        self.function = function
        # Parameter names and constants are ';'-separated in the input.
        self.param = params.split(';')
        self.consts = list(map(float, consts.split(';')))
    def parnum(self):
        """Number of parameters this extra function uses."""
        return 1
    def parname(self):
        # Unique fit-parameter name; dots in constants become underscores so
        # the name stays a valid identifier.
        return '_'.join([self.axis, self.function, '_'.join(self.param), '_'.join(map(str, self.consts)).replace('.', '_')])
    def __eq__(self, e):
        # Terms are equal when everything except the multiplier matches.
        return self.axis == e.axis and self.function == e.function and self.param == e.param and self.consts == e.consts
    def __str__(self):
        return '{0}\t{1}\t{2}'.format(self.function, ';'.join(map(str, self.param)), ';'.join(map(str, self.consts)))
class DuplicatedExtra(Exception):
    """Raised when adding term already present in model terms"""

    def __init__(self, argument):
        message = 'duplicated argument:{0}'.format(argument)
        super(DuplicatedExtra, self).__init__(message)
        # keep the offending term name for programmatic inspection
        self.argument = argument
class NonExistentExtra(Exception):
    """Raised when removing term not present in model terms"""

    def __init__(self, argument):
        message = 'nonexistent term:{0}'.format(argument)
        super(NonExistentExtra, self).__init__(message)
        # keep the missing term name for programmatic inspection
        self.argument = argument
# Computes, outputs, concatenates and plots pointing models.
class GPoint:
"""Main GPoint class. verbose verbosity of the output"""
def __init__(self, verbose=0, latitude=None, longitude=None, altitude=None):
self.aa_ha = None
self.verbose = verbose
self.lines = []
# telescope latitude - north positive
self.latitude = self.def_latitude = latitude
self.longitude = self.def_longitude = longitude
self.altitude = self.def_altitude = altitude
self.altaz = False # by default, model GEM
if latitude is not None:
self.latitude_r = np.radians(latitude)
self.best = None
self.name_map = None
# addtional terms for model - ExtraParam
self.extra = []
self.fixed = []
self.variable = None
self.modelfile = None
def equ_to_hrz(self, ha, dec):
""" Transform HA-DEC (in radians) vector to ALT-AZ (in degrees) vector"""
A = np.sin(self.latitude_r) * np.sin(dec) + np.cos(self.latitude_r) * np.cos(dec) * np.cos(ha)
alt = np.arcsin(A)
Z = np.arccos(A)
Zs = np.sin(Z)
As = (np.cos(dec) * np.sin(ha)) / Zs
Ac = (np.sin(self.latitude_r) * np.cos(dec) * np.cos(ha) - np.cos(self.latitude_r) * np.sin(dec)) / Zs
Aa = np.arctan2(As, Ac)
return np.degrees(alt), (np.degrees(Aa) + 360) % 360
def hrz_to_equ(self, az, alt):
""" Transform AZ-ALT (in radians) vector to HA-DEC (in degrees) vector"""
ha = np.arctan2(np.sin(az), (np.cos(az) + np.sin(self.latitude_r) + np.tan(alt) * np.cos(self.latitude_r)))
dec = np.sin(self.latitude_r) * np.sin(alt) - np.cos(self.latitude_r) * np.cos(alt) * np.cos(az)
dec = np.arcsin(dec)
return np.degrees(ha), np.degrees(dec)
def get_extra_val(self, e, ha, dec, az, el, num):
if e.param[num] == 'ha':
return ha
elif e.param[num] == 'dec':
return dec
elif e.param[num] == 'az':
return az
elif e.param[num] == 'el':
return el
elif e.param[num] == 'zd':
return (np.pi / 2) - el
elif e.param[num] == 'pd':
return rad_pole_distance(dec)
else:
sys.exit('unknow parameter {0}'.format(e.param[num]))
def cal_extra(self, e, axis, ha, dec, az, el):
if e.function == 'offset':
oax = self.get_extra_val(e, ha, dec, az, el, 0)
if ((oax == az).all() and axis == 'az') or ((oax == el).all() and axis == 'el'):
return np.array([1] * len(oax))
else:
return np.array([0] * len(oax))
elif e.function == 'sin':
return np.sin(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0))
elif e.function == 'cos':
return np.cos(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0))
elif e.function == 'abssin':
return np.abs(np.sin(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0)))
elif e.function == 'abscos':
return np.abs(np.cos(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0)))
elif e.function == 'tan':
return np.tan(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0))
elif e.function == 'csc':
return 1.0 / np.sin(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0))
elif e.function == 'sec':
return 1.0 / np.cos(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0))
elif e.function == 'cot':
return 1.0 / np.tan(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0))
elif e.function == 'sinh':
return np.sinh(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0))
elif e.function == 'cosh':
return np.cosh(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0))
elif e.function == 'tanh':
return np.tanh(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0))
elif e.function == 'sech':
return 1.0 / np.cosh(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0))
elif e.function == 'csch':
return 1.0 / np.sinh(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0))
elif e.function == 'coth':
return 1.0 / np.tanh(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0))
elif e.function == 'sincos':
return np.sin(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0)) * np.cos(e.consts[1] * self.get_extra_val(e, ha, dec, az, el, 1))
elif e.function == 'sinsin':
return np.sin(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0)) * np.sin(e.consts[1] * self.get_extra_val(e, ha, dec, az, el, 1))
elif e.function == 'coscos':
return np.cos(e.consts[0] * self.get_extra_val(e, ha, dec, az, el, 0)) * np.cos(e.consts[1] * self.get_extra_val(e, ha, dec, az, el, 1))
else:
sys.exit('unknow function {0}'.format(e.function))
    def add_extra(self, axis, function, params, consts):
        """Add an extra model term with no initial multiplier (fitted from zero)."""
        return self.add_extra_multi(axis, None, function, params, consts)
def add_extra_multi(self, axis, multi, function, params, consts):
axis = axis.lower()
if axis == 'alt':
axis = 'el'
if axis == 'ha' or axis == 'dec' or axis == 'az' or axis == 'el':
ep = ExtraParam(axis, multi, function, params, consts)
try:
self.extra.index(ep)
raise DuplicatedExtra(ep.parname)
except ValueError:
pass
self.extra.append(ep)
return ep
else:
raise Exception('invalid axis name: {0}'.format(axis))
def remove_extra(self, pn):
"""Remove extra parameter by parameter name"""
for p in self.extra:
if p.parname() == pn:
self.extra.remove(p)
return
raise NonExistentExtra(pn)
def model_hadec(self, params, a_ha, a_dec):
ret_ha = - params['ih'] \
- params['ch'] / np.cos(a_dec) \
- params['np'] * np.tan(a_dec) \
- (params['me'] * np.sin(a_ha) - params['ma'] * np.cos(a_ha)) * np.tan(a_dec) \
- params['tf'] * np.cos(self.latitude_r) * np.sin(a_ha) / np.cos(a_dec) \
- params['daf'] * (np.sin(self.latitude_r) * np.tan(a_dec) + np.cos(self.latitude_r) * np.cos(a_ha))
ret_dec = - params['id'] \
- params['me'] * np.cos(a_ha) \
- params['ma'] * np.sin(a_ha) \
- params['tf'] * (np.cos(self.latitude_r) * np.sin(a_dec) * np.cos(a_ha) - np.sin(self.latitude_r) * np.cos(a_dec)) \
- params['fo'] * np.cos(a_ha)
for e in self.extra:
if e.axis == 'ha':
ret_ha += params[e.parname()] * self.cal_extra(e, 'ha', a_ha, a_dec, self.rad_aa_az, self.rad_aa_alt)
if e.axis == 'dec':
ret_dec += params[e.parname()] * self.cal_extra(e, 'dec', a_ha, a_dec, self.rad_aa_az, self.rad_aa_alt)
return ret_ha, ret_dec
    def model_azel(self, params, a_az, a_el):
        # convenience wrapper: evaluate the alt-az model using the stored
        # target HA/DEC arrays (needed only by extra terms)
        return self.model_azel_hadec(params, a_az, a_el, self.rad_aa_ha, self.rad_aa_dec)
def model_azel_hadec(self, params, a_az, a_el, a_ha, a_dec):
tan_el = np.tan(a_el)
ret_az = - params['ia'] \
+ params['tn'] * np.sin(a_az) * tan_el \
- params['te'] * np.cos(a_az) * tan_el \
- params['npae'] * tan_el \
+ params['npoa'] / np.cos(a_el)
ret_el = - params['ie'] \
+ params['tn'] * np.cos(a_az) \
+ params['te'] * np.sin(a_az) \
+ params['tf'] * np.cos(a_el)
for e in self.extra:
if e.axis == 'az':
ret_az += params[e.parname()] * self.cal_extra(e, 'az', a_ha, a_dec, a_az, a_el)
if e.axis == 'el':
ret_el += params[e.parname()] * self.cal_extra(e, 'el', a_ha, a_dec, a_az, a_el)
return ret_az, ret_el
# Fit functions.
# a_ha - target HA (hour angle)
# r_ha - calculated (real) HA
    # a_dec - target DEC
# r_dec - calculated (real) DEC
# DEC > 90 or < -90 means telescope flipped (DEC axis continues for modelling purposes)
def fit_model_hadec(self, params, a_ha, r_ha, a_dec, r_dec):
m_ha, m_dec = self.model_hadec(params, a_ha, a_dec)
return a_ha - r_ha + m_ha, a_dec - r_dec + m_dec
def fit_model_gem(self, params, a_ra, r_ra, a_dec, r_dec):
if self.verbose > 1:
print('computing', self.latitude, self.latitude_r, params, a_ra, r_ra, a_dec, r_dec)
m_ha, m_dec = self.model_hadec(params, a_ra, a_dec)
return libnova.angular_separation(np.degrees(a_ra + m_ha), np.degrees(a_dec + m_dec), np.degrees(r_ra), np.degrees(r_dec))
def fit_model_azel(self, params, a_az, r_az, a_el, r_el):
m_az, m_el = self.model_azel(params, a_az, a_el)
return a_az - r_az + m_az, a_el - r_el + m_el
def fit_model_altaz(self, params, a_az, r_az, a_el, r_el):
if self.verbose > 1:
print('computing', self.latitude, self.latitude_r, params, a_az, r_az, a_el, r_el)
m_az, m_el = self.model_azel(params, a_az, a_el)
return libnova.angular_separation(np.degrees(a_az + m_az), np.degrees(a_el + m_el), np.degrees(r_az), np.degrees(r_el))
# open file, produce model
# expected format:
# # Observation MJD LST-MNT RA-MNT DEC-MNT AXRA AXDEC RA-TRUE DEC-TRUE
# # observatory <longitude> <latitude> <altitude>
# 02a57222e0002o 57222.260012 233.8937 275.7921 77.0452 -55497734 -46831997 276.0206 77.0643
# or for alt-az
# Observation MJD LST-MNT AZ-MNT ALT-MNT AXAZ AXALT AZ-TRUE ALT-TRUE
# altaz <longitude> <latitude> <altitude>
# 02a57222e0002o 57222.260012 233.8937 275.7921 77.0452 -55497734 -46831997 276.0206 77.0643
# skip first line, use what comes next. Make correction on DEC based on axis - if above zeropoint + 90 deg, flip DEC (DEC = 180 - DEC)
    def process_files(self, filenames, flips='both'):
        """Load measurement files and prepare all per-observation arrays.

        Fills target/real positions in both coordinate systems (aa_*/ar_* in
        degrees, rad_aa_*/rad_ar_* in radians), their differences (diff_*),
        MJD arrays and tick tables; sets observatory coordinates from the
        file header when not given explicitly.

        :param filenames: iterable of input file paths (format in class-level comment above)
        :param flips: 'both', 'east' or 'west' - which GEM flip states to keep
        """
        obsmatch = re.compile('#\s*(\S*)\s+(\S*)\s+(\S*)\s+(\S*)\s*')
        frmt = "astrometry"
        rdata = []
        for filename in filenames:
            f = open(filename)
            # skip first line
            f.readline()
            line = f.readline()
            curr_lines = []
            while not(line == ''):
                if line[0] == '#':
                    m = obsmatch.match(line)
                    if m:
                        if m.group(1) in ['observatory', 'gem']:
                            self.altaz = False
                        elif m.group(1) in ['altaz']:
                            self.altaz = True
                        elif m.group(1) in ['altaz-manual']:
                            self.altaz = True
                            frmt = "manual"
                        else:
                            # ordinary comment: keep it attached to the next observation
                            curr_lines.append(line.rstrip())
                            line = f.readline()
                            continue
                        # NOTE(review): latitude is kept as a string here (converted
                        # below); the mismatch check compares m.group(2) but the
                        # value/message use m.group(3) - confirm which is intended
                        if self.latitude is None:
                            self.latitude = m.group(3)
                        elif self.def_latitude is None and self.latitude != m.group(2):
                            sys.exit('Cannot (yet) perform calculation on two different latitudes: {0} {1}'.format(self.latitude, m.group(3)))
                        # others are not yet used..will be interesting for refraction, if included in model
                        if self.longitude is None:
                            self.longitude = float(m.group(2))
                        if self.altitude is None:
                            self.altitude = float(m.group(4))
                    else:
                        curr_lines.append(line.rstrip())
                else:
                    # data line: store it with its preceding comments, parse first 9 columns
                    curr_lines.append(line.rstrip())
                    self.lines.append(curr_lines)
                    curr_lines = []
                    s = line.split()
                    rdata.append(s[:9])
                line = f.readline()
            f.close()
        if self.verbose:
            print("Input data", rdata)
        if self.latitude is None:
            sys.exit("You must specify latitude! Either through --latitude option, or in input file (on #observatory line).")
        self.latitude_r = np.radians(float(self.latitude))
        data = []
        # build tuples (target axis1, target axis2, real axis1, real axis2, name, mjd)
        if self.altaz:
            if frmt == "manual":
                data = [(float(a_az), float(a_alt), float(a_az) + float(e_az), float(a_alt) + float(e_alt), sn, float(mjd)) for sn, mjd, ra, dec, e_alt, e_az, a_alt, a_az in rdata]
            else:
                data = [(float(a_az), float(a_alt), float(r_az), float(r_alt), sn, float(mjd)) for sn, mjd, lst, a_az, a_alt, ax_az, ax_alt, r_az, r_alt in rdata]
        else:
            # data = [(float(lst) - flip_ra(float(a_ra),float(a_dec)), float(a_dec), float(lst) - float(r_ra), flip_dec(float(r_dec),float(a_dec)), sn, float(mjd)) for sn,mjd,lst,a_ra,a_dec,ax_ra,ax_dec,r_ra,r_dec in rdata]
            data = [(float(lst) - float(a_ra), float(a_dec), float(lst) - flip_ra(float(r_ra), float(a_dec)), flip_dec(float(r_dec), float(a_dec)), sn, float(mjd)) for sn, mjd, lst, a_ra, a_dec, ax_ra, ax_dec, r_ra, r_dec in rdata]
        # flip selection: |DEC| > 90 encodes a flipped (east) GEM state
        if flips == 'east':
            data = [d for d in data if abs(d[1]) > 90]
        elif flips == 'west':
            data = [d for d in data if abs(d[1]) < 90]
        a_data = np.array(data)
        if self.verbose:
            print("Parsed data", a_data)
        # NOTE(review): np.float is removed in NumPy >= 1.24; these casts will
        # need plain float (or np.float64) on modern NumPy - confirm target version
        if self.altaz:
            self.aa_az = np.array(a_data[:, 0], np.float)
            self.aa_alt = np.array(a_data[:, 1], np.float)
            self.ar_az = np.array(a_data[:, 2], np.float)
            self.ar_alt = np.array(a_data[:, 3], np.float)
        else:
            self.aa_ha = np.array(a_data[:, 0], np.float)
            self.aa_dec = np.array(a_data[:, 1], np.float)
            self.ar_ha = np.array(a_data[:, 2], np.float)
            self.ar_dec = np.array(a_data[:, 3], np.float)
        self.mjd = np.array(a_data[:, 5], np.float)
        # prepare for X ticks positions
        last_mjd = 0
        last_mjd_hour = 0
        self.mjd_ticks = {}
        self.mjd_hours = {}
        for m in range(0, len(self.mjd)):
            jd = self.mjd[m]
            if last_mjd != round(jd):
                last_mjd = round(jd)
                self.mjd_ticks[m] = last_mjd
            if last_mjd_hour != round(jd * 24):
                last_mjd_hour = round(jd * 24)
                self.mjd_hours[m] = jd
        # derive the complementary coordinate system so both sets are always available
        if self.altaz:
            self.rad_aa_az = np.radians(self.aa_az)
            self.rad_aa_alt = np.radians(self.aa_alt)
            self.rad_ar_az = np.radians(self.ar_az)
            self.rad_ar_alt = np.radians(self.ar_alt)
            # transform to ha/dec
            self.aa_ha, self.aa_dec = self.hrz_to_equ(self.rad_aa_az, self.rad_aa_alt)
            self.ar_ha, self.ar_dec = self.hrz_to_equ(self.rad_ar_az, self.rad_ar_alt)
            self.rad_aa_ha = np.radians(self.aa_ha)
            self.rad_aa_dec = np.radians(self.aa_dec)
            self.rad_ar_ha = np.radians(self.ar_ha)
            self.rad_ar_dec = np.radians(self.ar_dec)
        else:
            self.rad_aa_ha = np.radians(self.aa_ha)
            self.rad_aa_dec = np.radians(self.aa_dec)
            self.rad_ar_ha = np.radians(self.ar_ha)
            self.rad_ar_dec = np.radians(self.ar_dec)
            # transform to alt/az
            self.aa_alt, self.aa_az = self.equ_to_hrz(self.rad_aa_ha, self.rad_aa_dec)
            self.ar_alt, self.ar_az = self.equ_to_hrz(self.rad_ar_ha, self.rad_ar_dec)
            self.rad_aa_az = np.radians(self.aa_az)
            self.rad_aa_alt = np.radians(self.aa_alt)
            self.rad_ar_az = np.radians(self.ar_az)
            self.rad_ar_alt = np.radians(self.ar_alt)
        # observed (pre-model) differences, in degrees
        self.diff_ha = self.aa_ha - self.ar_ha
        self.diff_corr_ha = self.diff_ha * np.cos(self.rad_aa_dec)
        self.diff_dec = self.aa_dec - self.ar_dec
        self.diff_angular_hadec = libnova.angular_separation(self.aa_ha, self.aa_dec, self.ar_ha, self.ar_dec)
        self.diff_angular_altaz = libnova.angular_separation(self.aa_az, self.aa_alt, self.ar_az, self.ar_alt)
        self.diff_alt = self.aa_alt - self.ar_alt
        self.diff_az = normalize_az_err(self.aa_az - self.ar_az)
        self.diff_corr_az = self.diff_az * np.cos(self.rad_aa_alt)
def set_fixed(self, fixed):
"""Sets fixed parameters."""
self.fixed.extend(fixed)
    def set_vary(self, variable):
        # when set, ONLY the parameters named here vary during fit (see process_params)
        self.variable = variable
    def print_parameters(self, pars, stderr=False):
        """Pretty-print fit parameters with values converted to arcseconds.

        :param pars: lmfit Parameters-like mapping of name -> parameter
            (objects with .value, .vary and, after a fit, .stderr)
        :param stderr: when True, also print stderr, relative error and mark
            the parameter with the worst relative error with '*'
        """
        print('Name value(") fixed', end=' ')
        if stderr:
            print('stderr(") fr(%) m')
            # stderr may be 0; silence the resulting divide warnings
            np.seterr(divide='ignore')
            # parameter with the largest |stderr / value| gets the '*' marker
            mv = max(pars, key=lambda p: abs(np.divide(pars[p].stderr, pars[p].value)))
        else:
            print()
        for k in list(pars.keys()):
            print('{0:24}{1:10.2f} {2}'.format(k, np.degrees(pars[k].value) * 3600.0, ' ' if pars[k].vary else '* '), end=' ')
            if stderr:
                fr = abs(np.divide(pars[k].stderr, pars[k].value))
                print('{0:8.2f} {1:>5.1f}{2}'.format(np.degrees(pars[k].stderr) * 3600.0, 100 * fr, ' *' if mv == k else ' '))
            else:
                print()
def process_params(self):
for ep in self.extra:
self.params.add(ep.parname(), value=0)
if self.variable:
for p in list(self.params.keys()):
self.params[p].vary = p in self.variable
else:
for f in self.fixed:
self.params[f].vary = False
print()
print('====== INITIAL MODEL VALUES ================')
self.print_parameters(self.params)
def fit(self, ftol=1.49012e-08, xtol=1.49012e-08, gtol=0.0, maxfev=1000):
"""Runs least square fit on input data."""
self.params = Parameters()
if self.altaz:
self.params.add('ia', value=0)
self.params.add('ie', value=0)
self.params.add('tn', value=0)
self.params.add('te', value=0)
self.params.add('npae', value=0)
self.params.add('npoa', value=0)
self.params.add('tf', value=0)
self.process_params()
self.best = minimize(self.fit_model_altaz, self.params, args=(self.rad_aa_az, self.rad_ar_az, self.rad_aa_alt, self.rad_ar_alt), full_output=True, maxfev=maxfev, ftol=ftol, xtol=xtol, gtol=gtol)
else:
self.params.add('ih', value=0)
self.params.add('id', value=0)
self.params.add('ch', value=0)
self.params.add('tf', value=0)
self.params.add('ma', value=0)
self.params.add('me', value=0)
self.params.add('np', value=0)
self.params.add('tf', value=0)
self.params.add('fo', value=0)
self.params.add('daf', value=0)
self.process_params()
self.best = minimize(self.fit_model_gem, self.params, args=(self.rad_aa_ha, self.rad_ar_ha, self.rad_aa_dec, self.rad_ar_dec), full_output=True, maxfev=maxfev, ftol=ftol, xtol=xtol, gtol=gtol)
if self.verbose:
print('Fit result', self.best.params)
print()
print('====== MODEL FITTED VALUES =================')
self.print_parameters(self.best.params, True)
if self.altaz:
self.f_model_az, self.f_model_alt = self.fit_model_azel(self.best.params, self.rad_aa_az, self.rad_ar_az, self.rad_aa_alt, self.rad_ar_alt)
self.diff_model_az = np.degrees(self.f_model_az)
self.diff_model_alt = np.degrees(self.f_model_alt)
self.am_ha, self.am_dec = self.hrz_to_equ(self.rad_ar_az - self.f_model_az, self.rad_ar_alt - self.f_model_alt)
self.diff_model_ha = normalize_ha_err(self.am_ha - self.ar_ha)
self.diff_model_dec = self.am_dec - self.ar_dec
self.diff_model_angular = self.fit_model_altaz(self.best.params, self.rad_aa_az, self.rad_ar_az, self.rad_aa_alt, self.rad_ar_alt)
else:
# feed parameters to diff, obtain model differences. Closer to zero = better
self.f_model_ha, self.f_model_dec = self.fit_model_hadec(self.best.params, self.rad_aa_ha, self.rad_ar_ha, self.rad_aa_dec, self.rad_ar_dec)
self.diff_model_ha = np.degrees(self.f_model_ha)
self.diff_model_dec = np.degrees(self.f_model_dec)
self.am_alt, self.am_az = self.equ_to_hrz(self.rad_ar_ha - self.f_model_ha, self.rad_ar_dec - self.f_model_dec)
self.diff_model_alt = self.am_alt - self.ar_alt
self.diff_model_az = normalize_az_err(self.am_az - self.ar_az)
self.diff_model_angular = self.fit_model_gem(self.best.params, self.rad_aa_ha, self.rad_ar_ha, self.rad_aa_dec, self.rad_ar_dec)
self.diff_model_corr_az = self.diff_model_az * np.cos(self.rad_aa_alt)
self.diff_model_corr_ha = self.diff_model_ha * np.cos(self.rad_aa_ha)
return self.best.params
def fit_to_extra(self):
"""Propagates fit to extra parameters (multi). Must be called before fit is used for model operations"""
for ep in self.extra:
ep.multi = self.best.params[ep.parname()].value
def remove_line(self, ind):
self.rad_aa_az = np.delete(self.rad_aa_az, ind)
self.rad_ar_az = np.delete(self.rad_ar_az, ind)
self.rad_aa_alt = np.delete(self.rad_aa_alt, ind)
self.rad_ar_alt = np.delete(self.rad_ar_alt, ind)
self.rad_aa_ha = np.delete(self.rad_aa_ha, ind)
self.rad_ar_ha = np.delete(self.rad_ar_ha, ind)
self.rad_aa_dec = np.delete(self.rad_aa_dec, ind)
self.rad_ar_dec = np.delete(self.rad_ar_dec, ind)
self.aa_ha = np.delete(self.aa_ha, ind)
self.ar_ha = np.delete(self.ar_ha, ind)
self.aa_dec = np.delete(self.aa_dec, ind)
self.ar_dec = np.delete(self.ar_dec, ind)
self.aa_az = np.delete(self.aa_az, ind)
self.ar_az = np.delete(self.ar_az, ind)
self.aa_alt = np.delete(self.aa_alt, ind)
self.ar_alt = np.delete(self.ar_alt, ind)
self.diff_ha = np.delete(self.diff_ha, ind)
self.diff_corr_ha = np.delete(self.diff_corr_ha, ind)
self.diff_dec = np.delete(self.diff_dec, ind)
self.diff_angular_hadec = np.delete(self.diff_angular_hadec, ind)
self.diff_angular_altaz = np.delete(self.diff_angular_altaz, ind)
self.diff_alt = np.delete(self.diff_alt, ind)
self.diff_az = np.delete(self.diff_az, ind)
self.diff_corr_az = np.delete(self.diff_corr_az, ind)
self.mjd = np.delete(self.mjd, ind)
ret = self.lines[ind]
del self.lines[ind]
return ret
    def filter(self, axis, error, num):
        """Iteratively remove the worst outliers above a threshold.

        Removes up to num observations whose |error| on the selected axis
        exceeds error, refitting after every removal.

        :param axis: 'm-azel'/'m-altaz' (model az+alt), 'm-hadec' (model
            ha+dec) or any data-series name accepted by __get_data
        :param error: threshold; stop when the worst remaining value is below it
        :param num: maximal number of observations to remove
        :return: list of removed input lines, or None when nothing was removed
        """
        # find max error
        ax_d = []
        if axis == 'm-azel' or axis == 'm-altaz':
            ax_d.append(self.diff_model_corr_az)
            ax_d.append(self.diff_model_alt)
        elif axis == 'm-hadec':
            ax_d.append(self.diff_model_corr_ha)
            ax_d.append(self.diff_model_dec)
        else:
            ax_d.append(self.__get_data(axis)[0])
        removed = []
        # NOTE(review): ax_d holds references captured before the refits below;
        # fit() reassigns the diff_model_* arrays, so later iterations may look
        # at stale data - confirm whether the lists should be refreshed per loop
        while num > 0:
            # index of the single worst |value| across all tracked axes
            mi = np.argmax(np.abs(ax_d[0]))
            max_v = abs(ax_d[0][mi])
            for a in ax_d[1:]:
                ai = np.argmax(np.abs(a))
                max_a = abs(a[ai])
                if max_a > max_v:
                    mi = ai
                    max_v = max_a
            if self.verbose:
                print('axis {0} found maximal value {1} at index {2}'.format(axis, max_v, mi))
            if max_v < error:
                # everything remaining is below the threshold
                if len(removed) == 0:
                    return None
                return removed
            removed.append(self.remove_line(mi))
            self.fit()
            self.print_params()
            self.print_stat()
            num -= 1
        return removed
    def autofix_terms(self, max_pcnt=100):
        """Repeatedly fix (stop varying) the term with the worst relative stderr
        until every remaining free term has relative stderr <= max_pcnt percent,
        refitting after each fixation."""
        mt = self.find_max_error()
        while mt is not None and 100 * abs(np.divide(self.best.params[mt].stderr, self.best.params[mt].value)) > max_pcnt:
            # NOTE(review): the printed value is a percentage, but the message
            # labels it with '"' (arcseconds) - confirm intended format
            print('Fixing {0} (stderr {1:>3.1f}")'.format(mt, 100 * abs(self.best.params[mt].stderr / self.best.params[mt].value)))
            self.set_fixed([mt])
            self.fit()
            self.print_params()
            mt = self.find_max_error()
def find_max_error(self):
return max(self.best.params.iterkeys(), key=lambda k: abs(np.divide(self.best.params[k].stderr, self.best.params[k].value)))
    def print_params(self):
        """Print fit diagnostics: covariance matrix, solver status, message,
        number of function evaluations and ier code from the last minimize()."""
        # NOTE(review): self.verbose is normally an int; 'is False' matches only
        # the literal False, so verbose=0 skips this blank line - confirm intended
        if self.verbose is False:
            print()
        print('Covariance: {0}'.format(self.best.covar))
        print('Status: {0}'.format(self.best.status))
        print('Message: {0}'.format(self.best.lmdif_message))
        print('Number of evalutaions: {0}'.format(self.best.nfev))
        print('Ier: {0}'.format(self.best.ier))
        print()
        # TODO move model term names to manual page
        # if self.altaz:
        #     print('Zero point in AZ ................................. {0:>9.2f}"'.format(degrees(self.best.params['ia']) * 3600.0))
        #     print('Zero point in ALT ................................ {0:>9.2f}"'.format(degrees(self.best.params['ie']) * 3600.0))
        #     print('Tilt of az-axis against N ........................ {0:>9.2f}"'.format(degrees(self.best.params['tn']) * 3600.0))
        #     print('Tilt of az-axis against E ........................ {0:>9.2f}"'.format(degrees(self.best.params['te']) * 3600.0))
        #     print('Non-perpendicularity of alt to az axis ........... {0:>9.2f}"'.format(degrees(self.best.params['npae']) * 3600.0))
        #     print('Non-perpendicularity of optical axis to alt axis . {0:>9.2f}"'.format(degrees(self.best.params['npoa']) * 3600.0))
        #     print('Tube flexure ..................................... {0:>9.2f}"'.format(degrees(self.best.params['tf']) * 3600.0))
        # else:
        #     print('Zero point in DEC ................................ {0:>9.2f}"'.format(degrees(self.best.params['id']) * 3600.0))
        #     print('Zero point in RA ................................. {0:>9.2f}"'.format(degrees(self.best.params['ih']) * 3600.0))
        #     i = sqrt(self.best.params['me']**2 + self.best.params['ma']**2)
        #     print('Angle between true and instrumental poles ........ {0:>9.2f}"'.format(degrees(i) * 3600.0))
        #     print('Angle between line of pole and true meridian ..... {0:>9.2f}"'.format(degrees(atan2(self.best.params['ma'], self.best.params['me'])) * 3600.0))
        #     print('Telescope tube drop in HA and DEC ................ {0:>9.2f}"'.format(degrees(self.best.params['tf']) * 3600.0))
        #     print('Angle between optical and telescope tube axes .... {0:>9.2f}"'.format(degrees(self.best.params['np']) * 3600.0))
        #     print('Mechanical orthogonality of RA and DEC axes ...... {0:>9.2f}"'.format(degrees(self.best.params['ma']) * 3600.0))
        #     print('DEC axis flexure ................................. {0:>9.2f}"'.format(degrees(self.best.params['daf']) * 3600.0))
        #     print('Fork flexure ..................................... {0:>9.2f}"'.format(degrees(self.best.params['fo']) * 3600.0))
        #
        # for e in self.extra:
        #     print('{0}\t{1:.2f}"\t{2}'.format(e.axis.upper(), np.degrees(self.best.params[e.parname()].value) * 3600.0, e))
        #
def get_model_type(self):
if self.altaz:
return 'RTS2_ALTAZ'
else:
return 'RTS2_GEM'
    def print_stat(self):
        """Print min/max/mean/RMS/stdev statistics (in arcseconds) of the
        observed differences and, when a fit was done, of the model residuals."""
        # calculates root mean square of vector/array
        def RMS(vector):
            return np.sqrt(np.mean(np.square(vector)))
        def print_header():
            return " {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s}".format("MIN", "MAX", "MEAN", "RMS", "STDEV")
        def print_vect_stat(v):
            return '{0:>9.3f}" {1:>9.3f}" {2:>9.3f}" {3:>9.3f}" {4:>9.3f}"'.format(np.min(v), np.max(v), np.mean(v), RMS(v), np.std(v))
        print()
        print('=========== OBSERVATION DATA ============ OBSERVATION DATA ===================== OBSERVATION DATA ======')
        print('OBSERVATIONS ............ {0}'.format(len(self.diff_angular_hadec)))
        print(print_header())
        # diff_* arrays are in degrees; * 3600 converts to arcseconds
        print('RA DIFF .................', print_vect_stat(self.diff_ha * 3600))
        print('RA CORRECTED DIFF .......', print_vect_stat(self.diff_corr_ha * 3600))
        print('DEC DIFF RMS ............', print_vect_stat(self.diff_dec * 3600))
        print('AZ DIFF RMS .............', print_vect_stat(self.diff_az * 3600))
        print('AZ CORRECTED DIFF RMS ...', print_vect_stat(self.diff_corr_az * 3600))
        print('ALT DIFF RMS ............', print_vect_stat(self.diff_alt * 3600))
        print('ANGULAR RADEC SEP DIFF ..', print_vect_stat(self.diff_angular_hadec * 3600))
        print('ANGULAR ALTAZ SEP DIFF ..', print_vect_stat(self.diff_angular_altaz * 3600))
        print('ANGULAR SEP DIFF ........', print_vect_stat((self.diff_angular_altaz if self.altaz else self.diff_angular_hadec) * 3600))
        if self.best is not None:
            # model residual statistics, available only after fit()
            print()
            print('=========== MODEL ======================= MODEL ==================================== MODEL =============')
            print(print_header())
            print('MODEL RA DIFF ...........', print_vect_stat(self.diff_model_ha * 3600))
            print('MODEL RA CORRECTED DIFF .', print_vect_stat(self.diff_model_corr_ha * 3600))
            print('MODEL DEC DIFF ..........', print_vect_stat(self.diff_model_dec * 3600))
            print('MODEL AZ DIFF ...........', print_vect_stat(self.diff_model_az * 3600))
            print('MODEL AZ CORRECTED DIFF .', print_vect_stat(self.diff_model_corr_az * 3600))
            print('MODEL ALT DIFF ..........', print_vect_stat(self.diff_model_alt * 3600))
            print('MODEL ANGULAR SEP DIFF ..', print_vect_stat(self.diff_model_angular * 3600))
# set X axis to MJD data
def set_x_axis(self, plot):
import pylab
def mjd_formatter(x, pos):
try:
return self.mjd_ticks[int(x)]
except KeyError as ke:
try:
return self.mjd[int(x)]
except IndexError as ie:
return x
plot.xaxis.set_major_formatter(pylab.FuncFormatter(mjd_formatter))
plot.set_xticks(list(self.mjd_ticks.keys()))
plot.set_xticks(list(self.mjd_hours.keys()), minor=True)
plot.grid(which='major')
return plot
# set Y axis to arcsec distance
def set_y_axis(self, plot):
import pylab
def arcmin_formatter(x, pos):
return "{0}'".format(int(x / 60))
ymin, ymax = plot.get_ylim()
numticks = len(plot.get_yticks())
mtscale = max(60, 60 * int(abs(ymax - ymin) / numticks / 60))
plot.set_yticks(np.arange(ymin - ymin % 60, ymax - ymax % 60, mtscale))
plot.set_yticks(np.arange(ymin - ymin % 60, ymax - ymax % 60, mtscale / 6.0), minor=True)
plot.yaxis.set_major_formatter(pylab.FuncFormatter(arcmin_formatter))
return plot
    def plot_alt_az(self, fig, ax, contour='', pfact=4):
        """Polar plot of target (red) and real (green) positions on the sky.

        :param fig: matplotlib figure (used for the colorbar)
        :param ax: polar axes to draw on
        :param contour: '' for scatter only, 'model' or 'real' to add a filled
            contour of the angular differences (in arcseconds)
        :param pfact: grid oversampling factor for the contour interpolation
        """
        import pylab
        ax.set_rmax(90)
        # rotate so north is up; radial coordinate is zenith distance
        ax.set_xticklabels(['E', 'NE', 'N', 'NW', 'W', 'SW', 'S', 'SE'])
        ax.plot(np.radians(270 - self.aa_az), 90 - self.aa_alt, 'r.')
        ax.plot(np.radians(270 - self.ar_az), 90 - self.ar_alt, 'g.')
        if contour:
            X = np.radians(270 - self.ar_az)
            Y = 90 - self.ar_alt
            if contour == 'model':
                Z = self.diff_model_angular * 3600
                ax.set_title('Model differences')
            elif contour == 'real':
                Z = self.diff_angular_altaz * 3600 if self.altaz else self.diff_angular_hadec * 3600
                ax.set_title('Real differences')
            xi = np.linspace(np.radians(-90), np.radians(271), num=360 * pfact)
            yi = np.linspace(min(Y), max(Y), num=90 * pfact)
            # NOTE(review): pylab.griddata was removed in matplotlib 3.1; this
            # needs scipy.interpolate.griddata on modern installs - confirm
            zi = pylab.griddata(X, Y, Z, xi, yi, interp='linear')
            ctf = ax.contourf(xi, yi, zi, cmap='hot')
            cbar = fig.colorbar(ctf, ax=ax, orientation='vertical', shrink=0.5)
            cbar.set_ticks(list(range(0, int(max(Z)), int(max(Z) / 10.0))))
            cbar.ax.set_xticklabels(list(map("{0}'".format, list(range(0, int(max(Z)))))))
        else:
            ax.set_title('Alt-Az distribution')
        return ax
def plot_hist(self, ax, dn, bins):
import pylab
if bins is None:
n, bin, patches = ax.hist(self.__get_data(dn)[0])
else:
n, bin, patches = ax.hist(self.__get_data(dn)[0], bins - 1)
ax.set_title('{0} histogram binned {1}'.format(self.__get_data(dn)[2], len(bin)))
ax.set_ylabel('Occurence')
ax.set_xlabel('{0} arcsec'.format(self.__get_data(dn)[2]))
return ax
def plot_vect(self, ax, x1, y1, x2, y2):
import pylab
u = self.__get_data(x2)[0] - self.__get_data(x1)[0]
v = self.__get_data(y2)[0] - self.__get_data(y1)[0]
dist = np.sqrt(u ** 2 + v ** 2)
ax.quiver(self.__get_data(x1)[0], self.__get_data(y1)[0], u, v, dist)
ax.set_xlabel('{0} - {1}'.format(self.__get_data(x1)[2], self.__get_data(x2)[2]))
ax.set_ylabel('{0} - {1}'.format(self.__get_data(y1)[2], self.__get_data(y2)[2]))
return ax
    def __get_data(self, name):
        """Map a data-series name to [values, plot style string, label].

        NOTE(review): self.name_map is initialized to None in __init__ and is
        never assigned here, so this guard is always True and the table is
        rebuilt on every call - confirm whether caching was intended (rebuilding
        is actually required after remove_line()/fit() change the arrays).
        """
        if self.name_map is None:
            # maps name to data,plot style,label
            name_map = {
                'alt-err': [self.diff_alt * 3600, 'r.', 'Alt error'],
                'az-err': [self.diff_az * 3600, 'y.', 'Az error'],
                'az-corr-err': [self.diff_corr_az * 3600, 'y.', 'AZ alt c error'],
                'dec-err': [self.diff_dec * 3600, 'b.', 'Dec error'],
                'ha-err': [self.diff_ha * 3600, 'g.', 'HA error'],
                'ha-corr-err': [self.diff_corr_ha * 3600, 'g.', 'HA dec c error'],
                'mjd': [self.mjd, 'm', 'MJD'],
                'num': [list(range(len(self.mjd))), 'm', 'Number'],
                'paz': [self.aa_az, 'rx', 'Azimuth'],
                'az': [self.aa_az, 'rx', 'Azimuth'],
                'alt': [self.aa_alt, 'yx', 'Altitude'],
                'dec': [self.aa_dec, 'bx', 'Dec'],
                'pd': [pole_distance(self.aa_dec), 'px', 'Pole distance'],
                'ha': [self.aa_ha, 'gx', 'HA'],
                'real-err': [self.diff_angular_altaz * 3600 if self.altaz else self.diff_angular_hadec * 3600, 'c+', 'Real angular error']
            }
            # append model output only if model was fitted
            if self.best is not None:
                name_map.update({
                    'alt-merr': [self.diff_model_alt * 3600, 'r*', 'Alt model error'],
                    'az-merr': [self.diff_model_az * 3600, 'y*', 'Az model error'],
                    'az-corr-merr': [self.diff_model_corr_az * 3600, 'y*', 'Az c model error'],
                    'dec-merr': [self.diff_model_dec * 3600, 'b*', 'Dec model error'],
                    'ha-merr': [self.diff_model_ha * 3600, 'g*', 'HA model error'],
                    # NOTE(review): uses diff_model_ha, not diff_model_corr_ha,
                    # despite the corrected-error label - confirm intended
                    'ha-corr-merr': [self.diff_model_ha * 3600, 'g*', 'HA dec c model error'],
                    'model-err': [self.diff_model_angular * 3600, 'c+', 'Model angular error']
                })
        return name_map[name.lower()]
def plot_data(self, ax, nx, ny):
"""Generate plot from data."""
if self.verbose:
print('plotting {0} {1}'.format(nx, ny))
xdata = self.__get_data(nx)
ydata = self.__get_data(ny)
ax.plot(xdata[0], ydata[0], ydata[1])
ax.set_xlabel(xdata[2])
ax.set_ylabel(ydata[2])
# if band is not None:
# import matplotlib.patches as patches
# band = float(band)
# p.add_patch(patches.Rectangle((min(xdata[0]), -band), max(xdata[0]) - min(xdata[0]), 2*band, alpha=0.7, facecolor='red', edgecolor='none'))
return ax
    def __draw(self, ax, draw):
        """Draw overlay markers on axes ax.

        Each entry of draw is 'c<r>' or 'c<x>:<y>:<r>' for a circle and
        'x<r>' or 'x<x>:<y>:<r>' for a cross; an optional '!<color>' suffix
        sets the color. An 'x' with no radius spans the current axes limits.
        """
        if draw is None:
            return
        import matplotlib.pyplot as plt
        for d in draw:
            # split off the optional '!color' suffix
            ci = d.find('!')
            color = None
            if ci > 0:
                color = d[ci + 1:]
                d = d[:ci]
            if d[0] == 'c':
                try:
                    x, y, r = list(map(float, d[1:].split(':')))
                except ValueError as ve:
                    # single number: circle of radius r centered at the origin
                    x = y = 0
                    r = float(d[1:])
                ax.add_artist(plt.Circle((x, y), r, fill=False, color=color))
            elif d[0] == 'x':
                try:
                    x, y, r = list(map(float, d[1:].split(':')))
                except ValueError as ve:
                    x = y = 0
                    try:
                        r = float(d[1:])
                    except ValueError as ve2:
                        # no radius given - make the cross span the axes extent
                        r = np.max(np.abs([ax.get_ylim(), ax.get_xlim()]))
                ax.add_artist(plt.Line2D([x, x], [y + r, y - r], color=color))
                ax.add_artist(plt.Line2D([x - r, x + r], [y, y], color=color))
            else:
                raise Exception('unknow draw element {0}'.format(d))
    def __gen_plot(self, fig, gridspec, axnam):
        """Create one subplot from a ':'-split plot specifier axnam.

        'paz:...'              polar alt-az plot ('contour-<model|real>' adds contours)
        'hist:<data>[:<bins>]' histogram of a data series
        'vect:[x1:y1:x2:y2]'   quiver plot (default: corrected az/alt errors)
        '<x>:<y>[:<y2>...]'    plain data plot(s) of series against x
        """
        if axnam[0] == 'paz':
            ax = axnam[1].split('-')
            axis = fig.add_subplot(gridspec, projection='polar')
            if ax[0] == 'contour':
                ret = self.plot_alt_az(fig, axis, ax[1])
            else:
                ret = self.plot_alt_az(fig, axis)
        elif axnam[0] == 'hist':
            bins = None
            axis = fig.add_subplot(gridspec)
            if len(axnam) > 2:
                bins = int(axnam[2])
            ret = self.plot_hist(axis, axnam[1], bins)
        elif axnam[0] == 'vect':
            axis = fig.add_subplot(gridspec)
            if not len(axnam) == 5:
                # default: corrected az / alt errors versus their model values
                ret = self.plot_vect(axis, 'az-corr-err', 'alt-err', 'az-corr-merr', 'alt-merr')
            else:
                ret = self.plot_vect(axis, axnam[1], axnam[2], axnam[3], axnam[4])
        else:
            axis = fig.add_subplot(gridspec)
            # several y series can share the same x axis
            for j in axnam[1:]:
                ret = self.plot_data(axis, axnam[0], j)
        return ret
def __gen_plots(self, plots):
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
plot = []
grid = []
draw = []
i = 0
rows = 1
cols = 1
# process plotting string
for mg in plots.split(','):
g = [i, 0, 1, 1]
if len(mg) == 0:
raise Exception('empty plot specifier')
plot_s = re.split('([@%])', mg)
plot.append(plot_s[0])
j = 0
while j < len(plot_s):
if plot_s[j] == '@':
j += 1
if len(draw) < len(plot):
draw.append([])
draw[len(plot) - 1].append(plot_s[j])
elif plot_s[j] == '%':
if g is None:
raise Exception('grid can be specified only once')
j += 1
grids = map(int, plot_s[j].split(':'))
if self.verbose:
print('grids', grids)
grid.append(list(map(int, grids)) + g[len(grids):])
g = None
j += 1
if g is not None:
grid.append(g)
if len(draw) < len(plot):
draw.append(None)
lg = grid[-1]
rows = max(lg[0] + lg[2], rows)
cols = max(lg[1] + lg[3], cols)
i += 1
self.plotgrid = (rows, cols)
if self.verbose:
print('row {0} cols {1}'.format(rows, cols))
fig = plt.figure()
gs = gridspec.GridSpec(rows, cols)
axes = []
for i in range(0, len(plot)):
axnam = plot[i].split(':')
if len(axnam) < 2:
sys.exit('invalid plot name - {0} does not contain at least one :'.format(plot[i]))
g = grid[i]
if self.verbose:
print('grid', g)
ax = self.__gen_plot(fig, gs[g[0]:g[0] + g[2], g[1]:g[1] + g[3]], axnam)
self.__draw(ax, draw[i])
axes.append(ax)
return fig, axes
def plot(self, plots, ofile=None):
    """Render the plots described by *plots*.

    plots -- plot specification string (see __gen_plots)
    ofile -- when given, the figure is written to this file instead of
    being shown interactively.
    """
    import pylab
    self.__gen_plots(plots)
    pylab.tight_layout()
    if ofile is not None:
        pylab.savefig(ofile)
    else:
        pylab.show()
def plot_offsets(self, best, subplot, ha_start, ha_end, dec):
    """Draw modelled pointing offsets (arcseconds) over an hour-angle range.

    best -- fitted parameter set handed to the model functions
    subplot -- matplotlib axes the two offset curves are drawn into
    ha_start, ha_end -- hour-angle interval in degrees (sampled in 0.01 steps)
    dec -- declination in degrees
    """
    import pylab
    hour_angles = np.arange(ha_start, ha_end, 0.01)
    dec_rad = np.radians(dec)
    if self.altaz:
        # alt-az mount: offsets are expressed in azimuth / elevation
        azimuth_offs = []
        elevation_offs = []
        if self.verbose:
            print('ha\taz\tel\taz_off\tel_off')
        for hour_angle in hour_angles:
            ha_rad = np.radians(hour_angle)
            el, az = libnova.equ_to_hrz(-hour_angle, dec, 0, self.latitude)
            az_off, el_off = self.model_azel_hadec(best, np.radians(az), np.radians(el), ha_rad, dec_rad)
            if self.verbose:
                print('{0}\t{1}\t{2}\t{3}\t{4}'.format(hour_angle, az, el, az_off * 3600.0, el_off * 3600.0))
            azimuth_offs.append(np.degrees(az_off))
            elevation_offs.append(np.degrees(el_off))
        subplot.plot(hour_angles, np.array(azimuth_offs) * 3600.0, 'b-', hour_angles, np.array(elevation_offs) * 3600.0, 'g-')
    else:
        # GEM mount: offsets are expressed in hour angle / declination
        ha_offs = []
        dec_offs = []
        if self.verbose:
            print('ha\tha_off\tdec_off')
        for hour_angle in hour_angles:
            ha_rad = np.radians(hour_angle)
            ha_off, dec_off = self.model_hadec(best, ha_rad, dec_rad)
            if self.verbose:
                print('{0}\t{1}\t{2}'.format(hour_angle, ha_off * 3600.0, dec_off * 3600.0))
            ha_offs.append(np.degrees(ha_off))
            dec_offs.append(np.degrees(dec_off))
        subplot.plot(hour_angles, np.array(ha_offs) * 3600.0, 'b-', hour_angles, np.array(dec_offs) * 3600.0, 'g-')
def to_string(self, unit='arcseconds'):
    """Format the fitted model parameters as a human readable string.

    unit -- 'arcseconds' (default), 'arcminutes' or 'degrees'; any other
    value falls back to arcseconds.
    """
    param_names = _altaz_params if self.altaz else _gem_params
    values = [self.best.params[name].value for name in param_names]
    # pick unit sign and degrees->unit multiplier
    if unit == 'arcminutes':
        unit_sign, factor = "'", 60.0
    elif unit == 'degrees':
        unit_sign, factor = '\u00B0', 1.0
    else:
        unit_sign, factor = '"', 3600.0
    out = self.get_model_type() + ' ' + (unit_sign + ' ').join([str(np.degrees(v) * factor) for v in values]) + unit_sign
    # append one line per extra correction term
    for extra_term in self.extra:
        out += ('\n{0}\t{1}' + unit_sign + '\t{2}').format(extra_term.axis.upper(), np.degrees(self.best.params[extra_term.parname()].value) * factor, extra_term)
    return out
def __str__(self):
    # Default string form: the model formatted in arcseconds (see to_string).
    return self.to_string()
def save(self, fn):
    """Save model to file.

    fn -- path of the output file; it is created or truncated.
    """
    # Context manager guarantees the handle is closed even if write fails
    # (the original left the file open on error).
    with open(fn, 'w') as f:
        f.write(str(self))
def load(self, fn):
    """Load a pointing model from file *fn*.

    Expects the text format written by save(): one header line
    (RTS2_MODEL/RTS2_GEM for GEM mounts, RTS2_ALTAZ for alt-az mounts)
    with the parameter values, plus optional 5-field extra-term lines.
    Lines starting with # are ignored.  Raises if the model type is
    given twice, has the wrong field count, or is never given.
    """
    f = open(fn)
    # basic parameters
    bp = None
    self.altaz = None  # unknown until a model-type header line is seen
    self.best = minimizer.MinimizerResult()
    self.best.params = Parameters()
    while True:
        rl = f.readline()
        if rl == '':
            # EOF
            break
        if rl[0] == '#':
            if self.verbose:
                print('ignoring comment line {0}'.format(rl))
            continue
        line = rl.split()
        if line[0] == 'RTS2_MODEL' or line[0] == 'RTS2_GEM':
            # keyword + 9 parameter values
            if len(line) != 10:
                raise Exception('invalid number of GEM parameters')
            if self.altaz is not None:
                raise Exception('cannot specify model type twice')
            line = line[1:]
            for pn in _gem_params:
                # consume one token per named parameter
                self.best.params[pn] = minimizer.Parameter()
                self.best.params[pn].value = _str_to_rad(line[0])
                line = line[1:]
            self.altaz = False
        elif line[0] == 'RTS2_ALTAZ':
            # keyword + 7 parameter values
            if len(line) != 8:
                raise Exception('invalid number of Alt-Az parameters')
            if self.altaz is not None:
                raise Exception('cannot specify model type twice')
            line = line[1:]
            for pn in _altaz_params:
                self.best.params[pn] = minimizer.Parameter()
                self.best.params[pn].value = _str_to_rad(line[0])
                line = line[1:]
            self.altaz = True
        # extra params
        elif len(line) == 5:
            ep = self.add_extra_multi(*line)
            self.best.params[ep.parname()] = minimizer.Parameter()
            self.best.params[ep.parname()].value = ep.multi
        else:
            raise Exception('unknow line: {0}'.format(rl))
    f.close()
    if self.altaz is None:
        raise Exception('model type not specified')
    self.modelfile = fn
def add_model(self, m):
    """Adds to current model another (compatible) model.

    m -- model of the same mount type; its basic parameter values are
    added to this model's values, matching extra terms are merged and
    any remaining extra terms of *m* are appended.
    """
    if self.altaz != m.altaz:
        raise Exception('cannot add two different models')
    if self.altaz:
        bbpn = _altaz_params
    else:
        bbpn = _gem_params
    for p in bbpn:
        self.best.params[p].value += m.best.params[p].value
    for e in self.extra:
        # BUGFIX: iterate a copy -- removing from m.extra while iterating
        # it directly skips the element following each removal
        for e2 in list(m.extra):
            if e == e2:
                e.multi += e2.multi
                m.extra.remove(e2)
    # what is left is unique to m
    self.extra += m.extra
    for e in self.extra:
        try:
            self.best.params[e.parname()].value = e.multi
        except KeyError:
            self.best.params[e.parname()] = minimizer.Parameter()
            self.best.params[e.parname()].value = e.multi
def pdf_report(self, output_file, template_file=None):
    """Generates PDF report based on template file. If no template is provided, default report is generated.

    output_file -- path of the PDF to write
    template_file -- optional text template; lines starting with @ are
    plot specifications (see __gen_plots), ! flushes accumulated text to
    its own page, # is a comment, any other line is accumulated as text.
    """
    from matplotlib.backends.backend_pdf import PdfPages
    import matplotlib.pyplot as plt
    import datetime
    # add default template if none provided
    if template_file is None:
        if self.altaz:
            template = ["@vect:az-corr-err:alt-err:az-corr-merr:alt-merr@c10@c20@x", "@ha:dec", "@az:alt", "@az-corr-err:alt-err@c10"]
        else:
            template = ["@vect:ha-corr-err:ha-err:ha-corr-merr:ha-merr@c10@c20@x", "@ha:dec", "@ha-corr-err:alt-err@c10"]
    else:
        # BUGFIX: the original called tf.close without parentheses, so the
        # handle was never closed; a context manager closes it reliably
        with open(template_file, 'r') as tf:
            template = tf.readlines()
    pdf = PdfPages(output_file)
    text = ''
    for line in template:
        if self.verbose:
            print('Generating report line {0}'.format(line))
        # plot
        if line[0] == '@':
            fig, axes = self.__gen_plots(line[1:].rstrip('\n'))
            if len(text) == 0:
                text = 'Figure from {0}'.format(line[1:])
            axes[0].set_title(text, wrap=True)
            pdf.savefig(fig)
            plt.close()
            text = ''
        # close page
        elif line[0] == '!':
            fig = plt.figure()
            fig.text(.1, .95, text, va='top', wrap=True)
            pdf.savefig(fig)
            plt.close()
        # text
        elif line[0] != '#':
            text += line
    d = pdf.infodict()
    d['Title'] = 'GPoint report'
    d['ModDate'] = datetime.datetime.today()
    pdf.close()
| lgpl-3.0 |
oemof/oemof_examples | oemof_examples/oemof.solph/v0.3.x/simple_dispatch/simple_dispatch.py | 2 | 6730 | # -*- coding: utf-8 -*-
"""
General description
-------------------
This example shows how to create an energysystem with oemof objects and
solve it with the solph module. Results are plotted with outputlib.
Dispatch modelling is a typical thing to do with solph. However cost does not
have to be monetary but can be emissions etc. In this example a least cost
dispatch of different generators that meet an inelastic demand is undertaken.
Some of the generators are renewable energies with marginal costs of zero.
Additionally, it shows how combined heat and power units may be easily modelled
as well.
Data
----
input_data.csv
Installation requirements
-------------------------
This example requires the version v0.3.x of oemof and matplotlib. Install by:
pip install 'oemof>=0.3,<0.4'
Optional to see the plots:
pip install matplotlib
"""
__copyright__ = "oemof developer group"
__license__ = "GPLv3"
import os
import pandas as pd
from oemof.solph import (Sink, Source, Transformer, Bus, Flow, Model,
EnergySystem)
from oemof.outputlib import views
import matplotlib.pyplot as plt
# solver backend handed to pyomo; cbc must be installed and on the PATH
solver = 'cbc'
# Create an energy system and optimize the dispatch at least costs.
# ####################### initialize and provide data #####################
# 10 days in hourly resolution
datetimeindex = pd.date_range('1/1/2016', periods=24*10, freq='H')
energysystem = EnergySystem(timeindex=datetimeindex)
# feed-in / demand profiles are read from input_data.csv next to this script
filename = os.path.join(os.path.dirname(__file__), 'input_data.csv')
data = pd.read_csv(filename, sep=",")
# ######################### create energysystem components ################
# resource buses; balanced=False means commodity supply is unconstrained
bcoal = Bus(label='coal', balanced=False)
bgas = Bus(label='gas', balanced=False)
boil = Bus(label='oil', balanced=False)
blig = Bus(label='lignite', balanced=False)
# electricity and heat
bel = Bus(label='bel')
bth = Bus(label='bth')
energysystem.add(bcoal, bgas, boil, blig, bel, bth)
# an excess and a shortage variable can help to avoid infeasible problems
energysystem.add(Sink(label='excess_el', inputs={bel: Flow()}))
# shortage_el = Source(label='shortage_el',
#                      outputs={bel: Flow(variable_costs=200)})
# sources; fixed=True forces each flow to follow its normalized profile
# scaled by nominal_value (the installed capacity)
energysystem.add(Source(label='wind', outputs={bel: Flow(
    actual_value=data['wind'], nominal_value=66.3, fixed=True)}))
energysystem.add(Source(label='pv', outputs={bel: Flow(
    actual_value=data['pv'], nominal_value=65.3, fixed=True)}))
# demands (electricity/heat); inelastic, also profile-driven
energysystem.add(Sink(label='demand_el', inputs={bel: Flow(
    nominal_value=85, actual_value=data['demand_el'], fixed=True)}))
energysystem.add(Sink(label='demand_th',
                      inputs={bth: Flow(nominal_value=40,
                                        actual_value=data['demand_th'],
                                        fixed=True)}))
# power plants; variable_costs establish the least-cost merit order
energysystem.add(Transformer(
    label='pp_coal',
    inputs={bcoal: Flow()},
    outputs={bel: Flow(nominal_value=20.2, variable_costs=25)},
    conversion_factors={bel: 0.39}))
energysystem.add(Transformer(
    label='pp_lig',
    inputs={blig: Flow()},
    outputs={bel: Flow(nominal_value=11.8, variable_costs=19)},
    conversion_factors={bel: 0.41}))
energysystem.add(Transformer(
    label='pp_gas',
    inputs={bgas: Flow()},
    outputs={bel: Flow(nominal_value=41, variable_costs=40)},
    conversion_factors={bel: 0.50}))
energysystem.add(Transformer(
    label='pp_oil',
    inputs={boil: Flow()},
    outputs={bel: Flow(nominal_value=5, variable_costs=50)},
    conversion_factors={bel: 0.28}))
# combined heat and power plant (chp); fixed power-to-heat ratio 0.3/0.4
energysystem.add(Transformer(
    label='pp_chp',
    inputs={bgas: Flow()},
    outputs={bel: Flow(nominal_value=30, variable_costs=42),
             bth: Flow(nominal_value=40)},
    conversion_factors={bel: 0.3, bth: 0.4}))
# heat pump with a coefficient of performance (COP) of 3
b_heat_source = Bus(label='b_heat_source')
energysystem.add(b_heat_source)
energysystem.add(Source(label='heat_source', outputs={b_heat_source: Flow()}))
cop = 3
# electricity covers 1/COP of the heat output, the heat source the rest
# NOTE(review): the bel factor is hard-coded as 1/3 rather than 1/cop,
# so changing cop above only affects the heat-source share -- confirm intent
energysystem.add(Transformer(
    label='heat_pump',
    inputs={bel: Flow(),
            b_heat_source: Flow()},
    outputs={bth: Flow(nominal_value=10)},
    conversion_factors={bel: 1/3, b_heat_source: (cop-1)/cop}))
# ################################ optimization ###########################
# create optimization model based on energy_system
optimization_model = Model(energysystem=energysystem)
# solve problem; tee=True streams the solver log to stdout
optimization_model.solve(solver=solver,
                         solve_kwargs={'tee': True, 'keepfiles': False})
# write back results from optimization object to energysystem
optimization_model.results()
# ################################ results ################################
# subset of results that includes all flows into and from electrical bus
# sequences are stored within a pandas.DataFrames and scalars e.g.
# investment values within a pandas.Series object.
# in this case the entry data['scalars'] does not exist since no investment
# variables are used
data = views.node(optimization_model.results(), 'bel')
data['sequences'].info()
print('Optimization successful. Showing some results:')
# see: https://pandas.pydata.org/pandas-docs/stable/visualization.html
# stacked bar chart of all flows attached to the electricity bus
node_results_bel = views.node(optimization_model.results(), 'bel')
node_results_flows = node_results_bel['sequences']
fig, ax = plt.subplots(figsize=(10, 5))
node_results_flows.plot(ax=ax, kind='bar', stacked=True, linewidth=0, width=1)
ax.set_title('Sums for optimization period')
ax.legend(loc='upper right', bbox_to_anchor=(1, 1))
# NOTE(review): the x axis carries dates and the y axis energy, so these
# two labels look swapped -- confirm against the upstream example
ax.set_xlabel('Energy (MWh)')
ax.set_ylabel('Flow')
plt.legend(loc='center left', prop={'size': 8}, bbox_to_anchor=(1, 0.5))
fig.subplots_adjust(right=0.8)
# roughly one tick per day over the 10-day horizon
dates = node_results_flows.index
tick_distance = int(len(dates) / 7) - 1
ax.set_xticks(range(0, len(dates), tick_distance), minor=False)
ax.set_xticklabels(
    [item.strftime('%d-%m-%Y') for item in dates.tolist()[0::tick_distance]],
    rotation=90, minor=False)
plt.show()
# same plot for the heat bus
node_results_bth = views.node(optimization_model.results(), 'bth')
node_results_flows = node_results_bth['sequences']
fig, ax = plt.subplots(figsize=(10, 5))
node_results_flows.plot(ax=ax, kind='bar', stacked=True, linewidth=0, width=1)
ax.set_title('Sums for optimization period')
ax.legend(loc='upper right', bbox_to_anchor=(1, 1))
ax.set_xlabel('Energy (MWh)')
ax.set_ylabel('Flow')
plt.legend(loc='center left', prop={'size': 8}, bbox_to_anchor=(1, 0.5))
fig.subplots_adjust(right=0.8)
dates = node_results_flows.index
tick_distance = int(len(dates) / 7) - 1
ax.set_xticks(range(0, len(dates), tick_distance), minor=False)
ax.set_xticklabels(
    [item.strftime('%d-%m-%Y') for item in dates.tolist()[0::tick_distance]],
    rotation=90, minor=False)
plt.show()
| gpl-3.0 |
borisz264/mono_seq | uniform_colormaps.py | 28 | 50518 | # New matplotlib colormaps by Nathaniel J. Smith, Stefan van der Walt,
# and (in the case of viridis) Eric Firing.
#
# This file and the colormaps in it are released under the CC0 license /
# public domain dedication. We would appreciate credit if you use or
# redistribute these colormaps, but do not impose any legal restrictions.
#
# To the extent possible under law, the persons who associated CC0 with
# mpl-colormaps have waived all copyright and related or neighboring rights
# to mpl-colormaps.
#
# You should have received a copy of the CC0 legalcode along with this
# work. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
# Public API: the four colormap objects built from the _*_data tables below
# (presumably constructed later in the file -- the constructors are outside
# this chunk).
__all__ = ['magma', 'inferno', 'plasma', 'viridis']
_magma_data = [[0.001462, 0.000466, 0.013866],
[0.002258, 0.001295, 0.018331],
[0.003279, 0.002305, 0.023708],
[0.004512, 0.003490, 0.029965],
[0.005950, 0.004843, 0.037130],
[0.007588, 0.006356, 0.044973],
[0.009426, 0.008022, 0.052844],
[0.011465, 0.009828, 0.060750],
[0.013708, 0.011771, 0.068667],
[0.016156, 0.013840, 0.076603],
[0.018815, 0.016026, 0.084584],
[0.021692, 0.018320, 0.092610],
[0.024792, 0.020715, 0.100676],
[0.028123, 0.023201, 0.108787],
[0.031696, 0.025765, 0.116965],
[0.035520, 0.028397, 0.125209],
[0.039608, 0.031090, 0.133515],
[0.043830, 0.033830, 0.141886],
[0.048062, 0.036607, 0.150327],
[0.052320, 0.039407, 0.158841],
[0.056615, 0.042160, 0.167446],
[0.060949, 0.044794, 0.176129],
[0.065330, 0.047318, 0.184892],
[0.069764, 0.049726, 0.193735],
[0.074257, 0.052017, 0.202660],
[0.078815, 0.054184, 0.211667],
[0.083446, 0.056225, 0.220755],
[0.088155, 0.058133, 0.229922],
[0.092949, 0.059904, 0.239164],
[0.097833, 0.061531, 0.248477],
[0.102815, 0.063010, 0.257854],
[0.107899, 0.064335, 0.267289],
[0.113094, 0.065492, 0.276784],
[0.118405, 0.066479, 0.286321],
[0.123833, 0.067295, 0.295879],
[0.129380, 0.067935, 0.305443],
[0.135053, 0.068391, 0.315000],
[0.140858, 0.068654, 0.324538],
[0.146785, 0.068738, 0.334011],
[0.152839, 0.068637, 0.343404],
[0.159018, 0.068354, 0.352688],
[0.165308, 0.067911, 0.361816],
[0.171713, 0.067305, 0.370771],
[0.178212, 0.066576, 0.379497],
[0.184801, 0.065732, 0.387973],
[0.191460, 0.064818, 0.396152],
[0.198177, 0.063862, 0.404009],
[0.204935, 0.062907, 0.411514],
[0.211718, 0.061992, 0.418647],
[0.218512, 0.061158, 0.425392],
[0.225302, 0.060445, 0.431742],
[0.232077, 0.059889, 0.437695],
[0.238826, 0.059517, 0.443256],
[0.245543, 0.059352, 0.448436],
[0.252220, 0.059415, 0.453248],
[0.258857, 0.059706, 0.457710],
[0.265447, 0.060237, 0.461840],
[0.271994, 0.060994, 0.465660],
[0.278493, 0.061978, 0.469190],
[0.284951, 0.063168, 0.472451],
[0.291366, 0.064553, 0.475462],
[0.297740, 0.066117, 0.478243],
[0.304081, 0.067835, 0.480812],
[0.310382, 0.069702, 0.483186],
[0.316654, 0.071690, 0.485380],
[0.322899, 0.073782, 0.487408],
[0.329114, 0.075972, 0.489287],
[0.335308, 0.078236, 0.491024],
[0.341482, 0.080564, 0.492631],
[0.347636, 0.082946, 0.494121],
[0.353773, 0.085373, 0.495501],
[0.359898, 0.087831, 0.496778],
[0.366012, 0.090314, 0.497960],
[0.372116, 0.092816, 0.499053],
[0.378211, 0.095332, 0.500067],
[0.384299, 0.097855, 0.501002],
[0.390384, 0.100379, 0.501864],
[0.396467, 0.102902, 0.502658],
[0.402548, 0.105420, 0.503386],
[0.408629, 0.107930, 0.504052],
[0.414709, 0.110431, 0.504662],
[0.420791, 0.112920, 0.505215],
[0.426877, 0.115395, 0.505714],
[0.432967, 0.117855, 0.506160],
[0.439062, 0.120298, 0.506555],
[0.445163, 0.122724, 0.506901],
[0.451271, 0.125132, 0.507198],
[0.457386, 0.127522, 0.507448],
[0.463508, 0.129893, 0.507652],
[0.469640, 0.132245, 0.507809],
[0.475780, 0.134577, 0.507921],
[0.481929, 0.136891, 0.507989],
[0.488088, 0.139186, 0.508011],
[0.494258, 0.141462, 0.507988],
[0.500438, 0.143719, 0.507920],
[0.506629, 0.145958, 0.507806],
[0.512831, 0.148179, 0.507648],
[0.519045, 0.150383, 0.507443],
[0.525270, 0.152569, 0.507192],
[0.531507, 0.154739, 0.506895],
[0.537755, 0.156894, 0.506551],
[0.544015, 0.159033, 0.506159],
[0.550287, 0.161158, 0.505719],
[0.556571, 0.163269, 0.505230],
[0.562866, 0.165368, 0.504692],
[0.569172, 0.167454, 0.504105],
[0.575490, 0.169530, 0.503466],
[0.581819, 0.171596, 0.502777],
[0.588158, 0.173652, 0.502035],
[0.594508, 0.175701, 0.501241],
[0.600868, 0.177743, 0.500394],
[0.607238, 0.179779, 0.499492],
[0.613617, 0.181811, 0.498536],
[0.620005, 0.183840, 0.497524],
[0.626401, 0.185867, 0.496456],
[0.632805, 0.187893, 0.495332],
[0.639216, 0.189921, 0.494150],
[0.645633, 0.191952, 0.492910],
[0.652056, 0.193986, 0.491611],
[0.658483, 0.196027, 0.490253],
[0.664915, 0.198075, 0.488836],
[0.671349, 0.200133, 0.487358],
[0.677786, 0.202203, 0.485819],
[0.684224, 0.204286, 0.484219],
[0.690661, 0.206384, 0.482558],
[0.697098, 0.208501, 0.480835],
[0.703532, 0.210638, 0.479049],
[0.709962, 0.212797, 0.477201],
[0.716387, 0.214982, 0.475290],
[0.722805, 0.217194, 0.473316],
[0.729216, 0.219437, 0.471279],
[0.735616, 0.221713, 0.469180],
[0.742004, 0.224025, 0.467018],
[0.748378, 0.226377, 0.464794],
[0.754737, 0.228772, 0.462509],
[0.761077, 0.231214, 0.460162],
[0.767398, 0.233705, 0.457755],
[0.773695, 0.236249, 0.455289],
[0.779968, 0.238851, 0.452765],
[0.786212, 0.241514, 0.450184],
[0.792427, 0.244242, 0.447543],
[0.798608, 0.247040, 0.444848],
[0.804752, 0.249911, 0.442102],
[0.810855, 0.252861, 0.439305],
[0.816914, 0.255895, 0.436461],
[0.822926, 0.259016, 0.433573],
[0.828886, 0.262229, 0.430644],
[0.834791, 0.265540, 0.427671],
[0.840636, 0.268953, 0.424666],
[0.846416, 0.272473, 0.421631],
[0.852126, 0.276106, 0.418573],
[0.857763, 0.279857, 0.415496],
[0.863320, 0.283729, 0.412403],
[0.868793, 0.287728, 0.409303],
[0.874176, 0.291859, 0.406205],
[0.879464, 0.296125, 0.403118],
[0.884651, 0.300530, 0.400047],
[0.889731, 0.305079, 0.397002],
[0.894700, 0.309773, 0.393995],
[0.899552, 0.314616, 0.391037],
[0.904281, 0.319610, 0.388137],
[0.908884, 0.324755, 0.385308],
[0.913354, 0.330052, 0.382563],
[0.917689, 0.335500, 0.379915],
[0.921884, 0.341098, 0.377376],
[0.925937, 0.346844, 0.374959],
[0.929845, 0.352734, 0.372677],
[0.933606, 0.358764, 0.370541],
[0.937221, 0.364929, 0.368567],
[0.940687, 0.371224, 0.366762],
[0.944006, 0.377643, 0.365136],
[0.947180, 0.384178, 0.363701],
[0.950210, 0.390820, 0.362468],
[0.953099, 0.397563, 0.361438],
[0.955849, 0.404400, 0.360619],
[0.958464, 0.411324, 0.360014],
[0.960949, 0.418323, 0.359630],
[0.963310, 0.425390, 0.359469],
[0.965549, 0.432519, 0.359529],
[0.967671, 0.439703, 0.359810],
[0.969680, 0.446936, 0.360311],
[0.971582, 0.454210, 0.361030],
[0.973381, 0.461520, 0.361965],
[0.975082, 0.468861, 0.363111],
[0.976690, 0.476226, 0.364466],
[0.978210, 0.483612, 0.366025],
[0.979645, 0.491014, 0.367783],
[0.981000, 0.498428, 0.369734],
[0.982279, 0.505851, 0.371874],
[0.983485, 0.513280, 0.374198],
[0.984622, 0.520713, 0.376698],
[0.985693, 0.528148, 0.379371],
[0.986700, 0.535582, 0.382210],
[0.987646, 0.543015, 0.385210],
[0.988533, 0.550446, 0.388365],
[0.989363, 0.557873, 0.391671],
[0.990138, 0.565296, 0.395122],
[0.990871, 0.572706, 0.398714],
[0.991558, 0.580107, 0.402441],
[0.992196, 0.587502, 0.406299],
[0.992785, 0.594891, 0.410283],
[0.993326, 0.602275, 0.414390],
[0.993834, 0.609644, 0.418613],
[0.994309, 0.616999, 0.422950],
[0.994738, 0.624350, 0.427397],
[0.995122, 0.631696, 0.431951],
[0.995480, 0.639027, 0.436607],
[0.995810, 0.646344, 0.441361],
[0.996096, 0.653659, 0.446213],
[0.996341, 0.660969, 0.451160],
[0.996580, 0.668256, 0.456192],
[0.996775, 0.675541, 0.461314],
[0.996925, 0.682828, 0.466526],
[0.997077, 0.690088, 0.471811],
[0.997186, 0.697349, 0.477182],
[0.997254, 0.704611, 0.482635],
[0.997325, 0.711848, 0.488154],
[0.997351, 0.719089, 0.493755],
[0.997351, 0.726324, 0.499428],
[0.997341, 0.733545, 0.505167],
[0.997285, 0.740772, 0.510983],
[0.997228, 0.747981, 0.516859],
[0.997138, 0.755190, 0.522806],
[0.997019, 0.762398, 0.528821],
[0.996898, 0.769591, 0.534892],
[0.996727, 0.776795, 0.541039],
[0.996571, 0.783977, 0.547233],
[0.996369, 0.791167, 0.553499],
[0.996162, 0.798348, 0.559820],
[0.995932, 0.805527, 0.566202],
[0.995680, 0.812706, 0.572645],
[0.995424, 0.819875, 0.579140],
[0.995131, 0.827052, 0.585701],
[0.994851, 0.834213, 0.592307],
[0.994524, 0.841387, 0.598983],
[0.994222, 0.848540, 0.605696],
[0.993866, 0.855711, 0.612482],
[0.993545, 0.862859, 0.619299],
[0.993170, 0.870024, 0.626189],
[0.992831, 0.877168, 0.633109],
[0.992440, 0.884330, 0.640099],
[0.992089, 0.891470, 0.647116],
[0.991688, 0.898627, 0.654202],
[0.991332, 0.905763, 0.661309],
[0.990930, 0.912915, 0.668481],
[0.990570, 0.920049, 0.675675],
[0.990175, 0.927196, 0.682926],
[0.989815, 0.934329, 0.690198],
[0.989434, 0.941470, 0.697519],
[0.989077, 0.948604, 0.704863],
[0.988717, 0.955742, 0.712242],
[0.988367, 0.962878, 0.719649],
[0.988033, 0.970012, 0.727077],
[0.987691, 0.977154, 0.734536],
[0.987387, 0.984288, 0.742002],
[0.987053, 0.991438, 0.749504]]
_inferno_data = [[0.001462, 0.000466, 0.013866],
[0.002267, 0.001270, 0.018570],
[0.003299, 0.002249, 0.024239],
[0.004547, 0.003392, 0.030909],
[0.006006, 0.004692, 0.038558],
[0.007676, 0.006136, 0.046836],
[0.009561, 0.007713, 0.055143],
[0.011663, 0.009417, 0.063460],
[0.013995, 0.011225, 0.071862],
[0.016561, 0.013136, 0.080282],
[0.019373, 0.015133, 0.088767],
[0.022447, 0.017199, 0.097327],
[0.025793, 0.019331, 0.105930],
[0.029432, 0.021503, 0.114621],
[0.033385, 0.023702, 0.123397],
[0.037668, 0.025921, 0.132232],
[0.042253, 0.028139, 0.141141],
[0.046915, 0.030324, 0.150164],
[0.051644, 0.032474, 0.159254],
[0.056449, 0.034569, 0.168414],
[0.061340, 0.036590, 0.177642],
[0.066331, 0.038504, 0.186962],
[0.071429, 0.040294, 0.196354],
[0.076637, 0.041905, 0.205799],
[0.081962, 0.043328, 0.215289],
[0.087411, 0.044556, 0.224813],
[0.092990, 0.045583, 0.234358],
[0.098702, 0.046402, 0.243904],
[0.104551, 0.047008, 0.253430],
[0.110536, 0.047399, 0.262912],
[0.116656, 0.047574, 0.272321],
[0.122908, 0.047536, 0.281624],
[0.129285, 0.047293, 0.290788],
[0.135778, 0.046856, 0.299776],
[0.142378, 0.046242, 0.308553],
[0.149073, 0.045468, 0.317085],
[0.155850, 0.044559, 0.325338],
[0.162689, 0.043554, 0.333277],
[0.169575, 0.042489, 0.340874],
[0.176493, 0.041402, 0.348111],
[0.183429, 0.040329, 0.354971],
[0.190367, 0.039309, 0.361447],
[0.197297, 0.038400, 0.367535],
[0.204209, 0.037632, 0.373238],
[0.211095, 0.037030, 0.378563],
[0.217949, 0.036615, 0.383522],
[0.224763, 0.036405, 0.388129],
[0.231538, 0.036405, 0.392400],
[0.238273, 0.036621, 0.396353],
[0.244967, 0.037055, 0.400007],
[0.251620, 0.037705, 0.403378],
[0.258234, 0.038571, 0.406485],
[0.264810, 0.039647, 0.409345],
[0.271347, 0.040922, 0.411976],
[0.277850, 0.042353, 0.414392],
[0.284321, 0.043933, 0.416608],
[0.290763, 0.045644, 0.418637],
[0.297178, 0.047470, 0.420491],
[0.303568, 0.049396, 0.422182],
[0.309935, 0.051407, 0.423721],
[0.316282, 0.053490, 0.425116],
[0.322610, 0.055634, 0.426377],
[0.328921, 0.057827, 0.427511],
[0.335217, 0.060060, 0.428524],
[0.341500, 0.062325, 0.429425],
[0.347771, 0.064616, 0.430217],
[0.354032, 0.066925, 0.430906],
[0.360284, 0.069247, 0.431497],
[0.366529, 0.071579, 0.431994],
[0.372768, 0.073915, 0.432400],
[0.379001, 0.076253, 0.432719],
[0.385228, 0.078591, 0.432955],
[0.391453, 0.080927, 0.433109],
[0.397674, 0.083257, 0.433183],
[0.403894, 0.085580, 0.433179],
[0.410113, 0.087896, 0.433098],
[0.416331, 0.090203, 0.432943],
[0.422549, 0.092501, 0.432714],
[0.428768, 0.094790, 0.432412],
[0.434987, 0.097069, 0.432039],
[0.441207, 0.099338, 0.431594],
[0.447428, 0.101597, 0.431080],
[0.453651, 0.103848, 0.430498],
[0.459875, 0.106089, 0.429846],
[0.466100, 0.108322, 0.429125],
[0.472328, 0.110547, 0.428334],
[0.478558, 0.112764, 0.427475],
[0.484789, 0.114974, 0.426548],
[0.491022, 0.117179, 0.425552],
[0.497257, 0.119379, 0.424488],
[0.503493, 0.121575, 0.423356],
[0.509730, 0.123769, 0.422156],
[0.515967, 0.125960, 0.420887],
[0.522206, 0.128150, 0.419549],
[0.528444, 0.130341, 0.418142],
[0.534683, 0.132534, 0.416667],
[0.540920, 0.134729, 0.415123],
[0.547157, 0.136929, 0.413511],
[0.553392, 0.139134, 0.411829],
[0.559624, 0.141346, 0.410078],
[0.565854, 0.143567, 0.408258],
[0.572081, 0.145797, 0.406369],
[0.578304, 0.148039, 0.404411],
[0.584521, 0.150294, 0.402385],
[0.590734, 0.152563, 0.400290],
[0.596940, 0.154848, 0.398125],
[0.603139, 0.157151, 0.395891],
[0.609330, 0.159474, 0.393589],
[0.615513, 0.161817, 0.391219],
[0.621685, 0.164184, 0.388781],
[0.627847, 0.166575, 0.386276],
[0.633998, 0.168992, 0.383704],
[0.640135, 0.171438, 0.381065],
[0.646260, 0.173914, 0.378359],
[0.652369, 0.176421, 0.375586],
[0.658463, 0.178962, 0.372748],
[0.664540, 0.181539, 0.369846],
[0.670599, 0.184153, 0.366879],
[0.676638, 0.186807, 0.363849],
[0.682656, 0.189501, 0.360757],
[0.688653, 0.192239, 0.357603],
[0.694627, 0.195021, 0.354388],
[0.700576, 0.197851, 0.351113],
[0.706500, 0.200728, 0.347777],
[0.712396, 0.203656, 0.344383],
[0.718264, 0.206636, 0.340931],
[0.724103, 0.209670, 0.337424],
[0.729909, 0.212759, 0.333861],
[0.735683, 0.215906, 0.330245],
[0.741423, 0.219112, 0.326576],
[0.747127, 0.222378, 0.322856],
[0.752794, 0.225706, 0.319085],
[0.758422, 0.229097, 0.315266],
[0.764010, 0.232554, 0.311399],
[0.769556, 0.236077, 0.307485],
[0.775059, 0.239667, 0.303526],
[0.780517, 0.243327, 0.299523],
[0.785929, 0.247056, 0.295477],
[0.791293, 0.250856, 0.291390],
[0.796607, 0.254728, 0.287264],
[0.801871, 0.258674, 0.283099],
[0.807082, 0.262692, 0.278898],
[0.812239, 0.266786, 0.274661],
[0.817341, 0.270954, 0.270390],
[0.822386, 0.275197, 0.266085],
[0.827372, 0.279517, 0.261750],
[0.832299, 0.283913, 0.257383],
[0.837165, 0.288385, 0.252988],
[0.841969, 0.292933, 0.248564],
[0.846709, 0.297559, 0.244113],
[0.851384, 0.302260, 0.239636],
[0.855992, 0.307038, 0.235133],
[0.860533, 0.311892, 0.230606],
[0.865006, 0.316822, 0.226055],
[0.869409, 0.321827, 0.221482],
[0.873741, 0.326906, 0.216886],
[0.878001, 0.332060, 0.212268],
[0.882188, 0.337287, 0.207628],
[0.886302, 0.342586, 0.202968],
[0.890341, 0.347957, 0.198286],
[0.894305, 0.353399, 0.193584],
[0.898192, 0.358911, 0.188860],
[0.902003, 0.364492, 0.184116],
[0.905735, 0.370140, 0.179350],
[0.909390, 0.375856, 0.174563],
[0.912966, 0.381636, 0.169755],
[0.916462, 0.387481, 0.164924],
[0.919879, 0.393389, 0.160070],
[0.923215, 0.399359, 0.155193],
[0.926470, 0.405389, 0.150292],
[0.929644, 0.411479, 0.145367],
[0.932737, 0.417627, 0.140417],
[0.935747, 0.423831, 0.135440],
[0.938675, 0.430091, 0.130438],
[0.941521, 0.436405, 0.125409],
[0.944285, 0.442772, 0.120354],
[0.946965, 0.449191, 0.115272],
[0.949562, 0.455660, 0.110164],
[0.952075, 0.462178, 0.105031],
[0.954506, 0.468744, 0.099874],
[0.956852, 0.475356, 0.094695],
[0.959114, 0.482014, 0.089499],
[0.961293, 0.488716, 0.084289],
[0.963387, 0.495462, 0.079073],
[0.965397, 0.502249, 0.073859],
[0.967322, 0.509078, 0.068659],
[0.969163, 0.515946, 0.063488],
[0.970919, 0.522853, 0.058367],
[0.972590, 0.529798, 0.053324],
[0.974176, 0.536780, 0.048392],
[0.975677, 0.543798, 0.043618],
[0.977092, 0.550850, 0.039050],
[0.978422, 0.557937, 0.034931],
[0.979666, 0.565057, 0.031409],
[0.980824, 0.572209, 0.028508],
[0.981895, 0.579392, 0.026250],
[0.982881, 0.586606, 0.024661],
[0.983779, 0.593849, 0.023770],
[0.984591, 0.601122, 0.023606],
[0.985315, 0.608422, 0.024202],
[0.985952, 0.615750, 0.025592],
[0.986502, 0.623105, 0.027814],
[0.986964, 0.630485, 0.030908],
[0.987337, 0.637890, 0.034916],
[0.987622, 0.645320, 0.039886],
[0.987819, 0.652773, 0.045581],
[0.987926, 0.660250, 0.051750],
[0.987945, 0.667748, 0.058329],
[0.987874, 0.675267, 0.065257],
[0.987714, 0.682807, 0.072489],
[0.987464, 0.690366, 0.079990],
[0.987124, 0.697944, 0.087731],
[0.986694, 0.705540, 0.095694],
[0.986175, 0.713153, 0.103863],
[0.985566, 0.720782, 0.112229],
[0.984865, 0.728427, 0.120785],
[0.984075, 0.736087, 0.129527],
[0.983196, 0.743758, 0.138453],
[0.982228, 0.751442, 0.147565],
[0.981173, 0.759135, 0.156863],
[0.980032, 0.766837, 0.166353],
[0.978806, 0.774545, 0.176037],
[0.977497, 0.782258, 0.185923],
[0.976108, 0.789974, 0.196018],
[0.974638, 0.797692, 0.206332],
[0.973088, 0.805409, 0.216877],
[0.971468, 0.813122, 0.227658],
[0.969783, 0.820825, 0.238686],
[0.968041, 0.828515, 0.249972],
[0.966243, 0.836191, 0.261534],
[0.964394, 0.843848, 0.273391],
[0.962517, 0.851476, 0.285546],
[0.960626, 0.859069, 0.298010],
[0.958720, 0.866624, 0.310820],
[0.956834, 0.874129, 0.323974],
[0.954997, 0.881569, 0.337475],
[0.953215, 0.888942, 0.351369],
[0.951546, 0.896226, 0.365627],
[0.950018, 0.903409, 0.380271],
[0.948683, 0.910473, 0.395289],
[0.947594, 0.917399, 0.410665],
[0.946809, 0.924168, 0.426373],
[0.946392, 0.930761, 0.442367],
[0.946403, 0.937159, 0.458592],
[0.946903, 0.943348, 0.474970],
[0.947937, 0.949318, 0.491426],
[0.949545, 0.955063, 0.507860],
[0.951740, 0.960587, 0.524203],
[0.954529, 0.965896, 0.540361],
[0.957896, 0.971003, 0.556275],
[0.961812, 0.975924, 0.571925],
[0.966249, 0.980678, 0.587206],
[0.971162, 0.985282, 0.602154],
[0.976511, 0.989753, 0.616760],
[0.982257, 0.994109, 0.631017],
[0.988362, 0.998364, 0.644924]]
_plasma_data = [[0.050383, 0.029803, 0.527975],
[0.063536, 0.028426, 0.533124],
[0.075353, 0.027206, 0.538007],
[0.086222, 0.026125, 0.542658],
[0.096379, 0.025165, 0.547103],
[0.105980, 0.024309, 0.551368],
[0.115124, 0.023556, 0.555468],
[0.123903, 0.022878, 0.559423],
[0.132381, 0.022258, 0.563250],
[0.140603, 0.021687, 0.566959],
[0.148607, 0.021154, 0.570562],
[0.156421, 0.020651, 0.574065],
[0.164070, 0.020171, 0.577478],
[0.171574, 0.019706, 0.580806],
[0.178950, 0.019252, 0.584054],
[0.186213, 0.018803, 0.587228],
[0.193374, 0.018354, 0.590330],
[0.200445, 0.017902, 0.593364],
[0.207435, 0.017442, 0.596333],
[0.214350, 0.016973, 0.599239],
[0.221197, 0.016497, 0.602083],
[0.227983, 0.016007, 0.604867],
[0.234715, 0.015502, 0.607592],
[0.241396, 0.014979, 0.610259],
[0.248032, 0.014439, 0.612868],
[0.254627, 0.013882, 0.615419],
[0.261183, 0.013308, 0.617911],
[0.267703, 0.012716, 0.620346],
[0.274191, 0.012109, 0.622722],
[0.280648, 0.011488, 0.625038],
[0.287076, 0.010855, 0.627295],
[0.293478, 0.010213, 0.629490],
[0.299855, 0.009561, 0.631624],
[0.306210, 0.008902, 0.633694],
[0.312543, 0.008239, 0.635700],
[0.318856, 0.007576, 0.637640],
[0.325150, 0.006915, 0.639512],
[0.331426, 0.006261, 0.641316],
[0.337683, 0.005618, 0.643049],
[0.343925, 0.004991, 0.644710],
[0.350150, 0.004382, 0.646298],
[0.356359, 0.003798, 0.647810],
[0.362553, 0.003243, 0.649245],
[0.368733, 0.002724, 0.650601],
[0.374897, 0.002245, 0.651876],
[0.381047, 0.001814, 0.653068],
[0.387183, 0.001434, 0.654177],
[0.393304, 0.001114, 0.655199],
[0.399411, 0.000859, 0.656133],
[0.405503, 0.000678, 0.656977],
[0.411580, 0.000577, 0.657730],
[0.417642, 0.000564, 0.658390],
[0.423689, 0.000646, 0.658956],
[0.429719, 0.000831, 0.659425],
[0.435734, 0.001127, 0.659797],
[0.441732, 0.001540, 0.660069],
[0.447714, 0.002080, 0.660240],
[0.453677, 0.002755, 0.660310],
[0.459623, 0.003574, 0.660277],
[0.465550, 0.004545, 0.660139],
[0.471457, 0.005678, 0.659897],
[0.477344, 0.006980, 0.659549],
[0.483210, 0.008460, 0.659095],
[0.489055, 0.010127, 0.658534],
[0.494877, 0.011990, 0.657865],
[0.500678, 0.014055, 0.657088],
[0.506454, 0.016333, 0.656202],
[0.512206, 0.018833, 0.655209],
[0.517933, 0.021563, 0.654109],
[0.523633, 0.024532, 0.652901],
[0.529306, 0.027747, 0.651586],
[0.534952, 0.031217, 0.650165],
[0.540570, 0.034950, 0.648640],
[0.546157, 0.038954, 0.647010],
[0.551715, 0.043136, 0.645277],
[0.557243, 0.047331, 0.643443],
[0.562738, 0.051545, 0.641509],
[0.568201, 0.055778, 0.639477],
[0.573632, 0.060028, 0.637349],
[0.579029, 0.064296, 0.635126],
[0.584391, 0.068579, 0.632812],
[0.589719, 0.072878, 0.630408],
[0.595011, 0.077190, 0.627917],
[0.600266, 0.081516, 0.625342],
[0.605485, 0.085854, 0.622686],
[0.610667, 0.090204, 0.619951],
[0.615812, 0.094564, 0.617140],
[0.620919, 0.098934, 0.614257],
[0.625987, 0.103312, 0.611305],
[0.631017, 0.107699, 0.608287],
[0.636008, 0.112092, 0.605205],
[0.640959, 0.116492, 0.602065],
[0.645872, 0.120898, 0.598867],
[0.650746, 0.125309, 0.595617],
[0.655580, 0.129725, 0.592317],
[0.660374, 0.134144, 0.588971],
[0.665129, 0.138566, 0.585582],
[0.669845, 0.142992, 0.582154],
[0.674522, 0.147419, 0.578688],
[0.679160, 0.151848, 0.575189],
[0.683758, 0.156278, 0.571660],
[0.688318, 0.160709, 0.568103],
[0.692840, 0.165141, 0.564522],
[0.697324, 0.169573, 0.560919],
[0.701769, 0.174005, 0.557296],
[0.706178, 0.178437, 0.553657],
[0.710549, 0.182868, 0.550004],
[0.714883, 0.187299, 0.546338],
[0.719181, 0.191729, 0.542663],
[0.723444, 0.196158, 0.538981],
[0.727670, 0.200586, 0.535293],
[0.731862, 0.205013, 0.531601],
[0.736019, 0.209439, 0.527908],
[0.740143, 0.213864, 0.524216],
[0.744232, 0.218288, 0.520524],
[0.748289, 0.222711, 0.516834],
[0.752312, 0.227133, 0.513149],
[0.756304, 0.231555, 0.509468],
[0.760264, 0.235976, 0.505794],
[0.764193, 0.240396, 0.502126],
[0.768090, 0.244817, 0.498465],
[0.771958, 0.249237, 0.494813],
[0.775796, 0.253658, 0.491171],
[0.779604, 0.258078, 0.487539],
[0.783383, 0.262500, 0.483918],
[0.787133, 0.266922, 0.480307],
[0.790855, 0.271345, 0.476706],
[0.794549, 0.275770, 0.473117],
[0.798216, 0.280197, 0.469538],
[0.801855, 0.284626, 0.465971],
[0.805467, 0.289057, 0.462415],
[0.809052, 0.293491, 0.458870],
[0.812612, 0.297928, 0.455338],
[0.816144, 0.302368, 0.451816],
[0.819651, 0.306812, 0.448306],
[0.823132, 0.311261, 0.444806],
[0.826588, 0.315714, 0.441316],
[0.830018, 0.320172, 0.437836],
[0.833422, 0.324635, 0.434366],
[0.836801, 0.329105, 0.430905],
[0.840155, 0.333580, 0.427455],
[0.843484, 0.338062, 0.424013],
[0.846788, 0.342551, 0.420579],
[0.850066, 0.347048, 0.417153],
[0.853319, 0.351553, 0.413734],
[0.856547, 0.356066, 0.410322],
[0.859750, 0.360588, 0.406917],
[0.862927, 0.365119, 0.403519],
[0.866078, 0.369660, 0.400126],
[0.869203, 0.374212, 0.396738],
[0.872303, 0.378774, 0.393355],
[0.875376, 0.383347, 0.389976],
[0.878423, 0.387932, 0.386600],
[0.881443, 0.392529, 0.383229],
[0.884436, 0.397139, 0.379860],
[0.887402, 0.401762, 0.376494],
[0.890340, 0.406398, 0.373130],
[0.893250, 0.411048, 0.369768],
[0.896131, 0.415712, 0.366407],
[0.898984, 0.420392, 0.363047],
[0.901807, 0.425087, 0.359688],
[0.904601, 0.429797, 0.356329],
[0.907365, 0.434524, 0.352970],
[0.910098, 0.439268, 0.349610],
[0.912800, 0.444029, 0.346251],
[0.915471, 0.448807, 0.342890],
[0.918109, 0.453603, 0.339529],
[0.920714, 0.458417, 0.336166],
[0.923287, 0.463251, 0.332801],
[0.925825, 0.468103, 0.329435],
[0.928329, 0.472975, 0.326067],
[0.930798, 0.477867, 0.322697],
[0.933232, 0.482780, 0.319325],
[0.935630, 0.487712, 0.315952],
[0.937990, 0.492667, 0.312575],
[0.940313, 0.497642, 0.309197],
[0.942598, 0.502639, 0.305816],
[0.944844, 0.507658, 0.302433],
[0.947051, 0.512699, 0.299049],
[0.949217, 0.517763, 0.295662],
[0.951344, 0.522850, 0.292275],
[0.953428, 0.527960, 0.288883],
[0.955470, 0.533093, 0.285490],
[0.957469, 0.538250, 0.282096],
[0.959424, 0.543431, 0.278701],
[0.961336, 0.548636, 0.275305],
[0.963203, 0.553865, 0.271909],
[0.965024, 0.559118, 0.268513],
[0.966798, 0.564396, 0.265118],
[0.968526, 0.569700, 0.261721],
[0.970205, 0.575028, 0.258325],
[0.971835, 0.580382, 0.254931],
[0.973416, 0.585761, 0.251540],
[0.974947, 0.591165, 0.248151],
[0.976428, 0.596595, 0.244767],
[0.977856, 0.602051, 0.241387],
[0.979233, 0.607532, 0.238013],
[0.980556, 0.613039, 0.234646],
[0.981826, 0.618572, 0.231287],
[0.983041, 0.624131, 0.227937],
[0.984199, 0.629718, 0.224595],
[0.985301, 0.635330, 0.221265],
[0.986345, 0.640969, 0.217948],
[0.987332, 0.646633, 0.214648],
[0.988260, 0.652325, 0.211364],
[0.989128, 0.658043, 0.208100],
[0.989935, 0.663787, 0.204859],
[0.990681, 0.669558, 0.201642],
[0.991365, 0.675355, 0.198453],
[0.991985, 0.681179, 0.195295],
[0.992541, 0.687030, 0.192170],
[0.993032, 0.692907, 0.189084],
[0.993456, 0.698810, 0.186041],
[0.993814, 0.704741, 0.183043],
[0.994103, 0.710698, 0.180097],
[0.994324, 0.716681, 0.177208],
[0.994474, 0.722691, 0.174381],
[0.994553, 0.728728, 0.171622],
[0.994561, 0.734791, 0.168938],
[0.994495, 0.740880, 0.166335],
[0.994355, 0.746995, 0.163821],
[0.994141, 0.753137, 0.161404],
[0.993851, 0.759304, 0.159092],
[0.993482, 0.765499, 0.156891],
[0.993033, 0.771720, 0.154808],
[0.992505, 0.777967, 0.152855],
[0.991897, 0.784239, 0.151042],
[0.991209, 0.790537, 0.149377],
[0.990439, 0.796859, 0.147870],
[0.989587, 0.803205, 0.146529],
[0.988648, 0.809579, 0.145357],
[0.987621, 0.815978, 0.144363],
[0.986509, 0.822401, 0.143557],
[0.985314, 0.828846, 0.142945],
[0.984031, 0.835315, 0.142528],
[0.982653, 0.841812, 0.142303],
[0.981190, 0.848329, 0.142279],
[0.979644, 0.854866, 0.142453],
[0.977995, 0.861432, 0.142808],
[0.976265, 0.868016, 0.143351],
[0.974443, 0.874622, 0.144061],
[0.972530, 0.881250, 0.144923],
[0.970533, 0.887896, 0.145919],
[0.968443, 0.894564, 0.147014],
[0.966271, 0.901249, 0.148180],
[0.964021, 0.907950, 0.149370],
[0.961681, 0.914672, 0.150520],
[0.959276, 0.921407, 0.151566],
[0.956808, 0.928152, 0.152409],
[0.954287, 0.934908, 0.152921],
[0.951726, 0.941671, 0.152925],
[0.949151, 0.948435, 0.152178],
[0.946602, 0.955190, 0.150328],
[0.944152, 0.961916, 0.146861],
[0.941896, 0.968590, 0.140956],
[0.940015, 0.975158, 0.131326]]
# _viridis_data: 256-entry RGB lookup table for the perceptually uniform
# "viridis" colormap. Each row is [red, green, blue] with floats in [0, 1];
# rows run from the low end (dark purple) to the high end (yellow) of the map.
_viridis_data = [[0.267004, 0.004874, 0.329415],
                 [0.268510, 0.009605, 0.335427],
                 [0.269944, 0.014625, 0.341379],
                 [0.271305, 0.019942, 0.347269],
                 [0.272594, 0.025563, 0.353093],
                 [0.273809, 0.031497, 0.358853],
                 [0.274952, 0.037752, 0.364543],
                 [0.276022, 0.044167, 0.370164],
                 [0.277018, 0.050344, 0.375715],
                 [0.277941, 0.056324, 0.381191],
                 [0.278791, 0.062145, 0.386592],
                 [0.279566, 0.067836, 0.391917],
                 [0.280267, 0.073417, 0.397163],
                 [0.280894, 0.078907, 0.402329],
                 [0.281446, 0.084320, 0.407414],
                 [0.281924, 0.089666, 0.412415],
                 [0.282327, 0.094955, 0.417331],
                 [0.282656, 0.100196, 0.422160],
                 [0.282910, 0.105393, 0.426902],
                 [0.283091, 0.110553, 0.431554],
                 [0.283197, 0.115680, 0.436115],
                 [0.283229, 0.120777, 0.440584],
                 [0.283187, 0.125848, 0.444960],
                 [0.283072, 0.130895, 0.449241],
                 [0.282884, 0.135920, 0.453427],
                 [0.282623, 0.140926, 0.457517],
                 [0.282290, 0.145912, 0.461510],
                 [0.281887, 0.150881, 0.465405],
                 [0.281412, 0.155834, 0.469201],
                 [0.280868, 0.160771, 0.472899],
                 [0.280255, 0.165693, 0.476498],
                 [0.279574, 0.170599, 0.479997],
                 [0.278826, 0.175490, 0.483397],
                 [0.278012, 0.180367, 0.486697],
                 [0.277134, 0.185228, 0.489898],
                 [0.276194, 0.190074, 0.493001],
                 [0.275191, 0.194905, 0.496005],
                 [0.274128, 0.199721, 0.498911],
                 [0.273006, 0.204520, 0.501721],
                 [0.271828, 0.209303, 0.504434],
                 [0.270595, 0.214069, 0.507052],
                 [0.269308, 0.218818, 0.509577],
                 [0.267968, 0.223549, 0.512008],
                 [0.266580, 0.228262, 0.514349],
                 [0.265145, 0.232956, 0.516599],
                 [0.263663, 0.237631, 0.518762],
                 [0.262138, 0.242286, 0.520837],
                 [0.260571, 0.246922, 0.522828],
                 [0.258965, 0.251537, 0.524736],
                 [0.257322, 0.256130, 0.526563],
                 [0.255645, 0.260703, 0.528312],
                 [0.253935, 0.265254, 0.529983],
                 [0.252194, 0.269783, 0.531579],
                 [0.250425, 0.274290, 0.533103],
                 [0.248629, 0.278775, 0.534556],
                 [0.246811, 0.283237, 0.535941],
                 [0.244972, 0.287675, 0.537260],
                 [0.243113, 0.292092, 0.538516],
                 [0.241237, 0.296485, 0.539709],
                 [0.239346, 0.300855, 0.540844],
                 [0.237441, 0.305202, 0.541921],
                 [0.235526, 0.309527, 0.542944],
                 [0.233603, 0.313828, 0.543914],
                 [0.231674, 0.318106, 0.544834],
                 [0.229739, 0.322361, 0.545706],
                 [0.227802, 0.326594, 0.546532],
                 [0.225863, 0.330805, 0.547314],
                 [0.223925, 0.334994, 0.548053],
                 [0.221989, 0.339161, 0.548752],
                 [0.220057, 0.343307, 0.549413],
                 [0.218130, 0.347432, 0.550038],
                 [0.216210, 0.351535, 0.550627],
                 [0.214298, 0.355619, 0.551184],
                 [0.212395, 0.359683, 0.551710],
                 [0.210503, 0.363727, 0.552206],
                 [0.208623, 0.367752, 0.552675],
                 [0.206756, 0.371758, 0.553117],
                 [0.204903, 0.375746, 0.553533],
                 [0.203063, 0.379716, 0.553925],
                 [0.201239, 0.383670, 0.554294],
                 [0.199430, 0.387607, 0.554642],
                 [0.197636, 0.391528, 0.554969],
                 [0.195860, 0.395433, 0.555276],
                 [0.194100, 0.399323, 0.555565],
                 [0.192357, 0.403199, 0.555836],
                 [0.190631, 0.407061, 0.556089],
                 [0.188923, 0.410910, 0.556326],
                 [0.187231, 0.414746, 0.556547],
                 [0.185556, 0.418570, 0.556753],
                 [0.183898, 0.422383, 0.556944],
                 [0.182256, 0.426184, 0.557120],
                 [0.180629, 0.429975, 0.557282],
                 [0.179019, 0.433756, 0.557430],
                 [0.177423, 0.437527, 0.557565],
                 [0.175841, 0.441290, 0.557685],
                 [0.174274, 0.445044, 0.557792],
                 [0.172719, 0.448791, 0.557885],
                 [0.171176, 0.452530, 0.557965],
                 [0.169646, 0.456262, 0.558030],
                 [0.168126, 0.459988, 0.558082],
                 [0.166617, 0.463708, 0.558119],
                 [0.165117, 0.467423, 0.558141],
                 [0.163625, 0.471133, 0.558148],
                 [0.162142, 0.474838, 0.558140],
                 [0.160665, 0.478540, 0.558115],
                 [0.159194, 0.482237, 0.558073],
                 [0.157729, 0.485932, 0.558013],
                 [0.156270, 0.489624, 0.557936],
                 [0.154815, 0.493313, 0.557840],
                 [0.153364, 0.497000, 0.557724],
                 [0.151918, 0.500685, 0.557587],
                 [0.150476, 0.504369, 0.557430],
                 [0.149039, 0.508051, 0.557250],
                 [0.147607, 0.511733, 0.557049],
                 [0.146180, 0.515413, 0.556823],
                 [0.144759, 0.519093, 0.556572],
                 [0.143343, 0.522773, 0.556295],
                 [0.141935, 0.526453, 0.555991],
                 [0.140536, 0.530132, 0.555659],
                 [0.139147, 0.533812, 0.555298],
                 [0.137770, 0.537492, 0.554906],
                 [0.136408, 0.541173, 0.554483],
                 [0.135066, 0.544853, 0.554029],
                 [0.133743, 0.548535, 0.553541],
                 [0.132444, 0.552216, 0.553018],
                 [0.131172, 0.555899, 0.552459],
                 [0.129933, 0.559582, 0.551864],
                 [0.128729, 0.563265, 0.551229],
                 [0.127568, 0.566949, 0.550556],
                 [0.126453, 0.570633, 0.549841],
                 [0.125394, 0.574318, 0.549086],
                 [0.124395, 0.578002, 0.548287],
                 [0.123463, 0.581687, 0.547445],
                 [0.122606, 0.585371, 0.546557],
                 [0.121831, 0.589055, 0.545623],
                 [0.121148, 0.592739, 0.544641],
                 [0.120565, 0.596422, 0.543611],
                 [0.120092, 0.600104, 0.542530],
                 [0.119738, 0.603785, 0.541400],
                 [0.119512, 0.607464, 0.540218],
                 [0.119423, 0.611141, 0.538982],
                 [0.119483, 0.614817, 0.537692],
                 [0.119699, 0.618490, 0.536347],
                 [0.120081, 0.622161, 0.534946],
                 [0.120638, 0.625828, 0.533488],
                 [0.121380, 0.629492, 0.531973],
                 [0.122312, 0.633153, 0.530398],
                 [0.123444, 0.636809, 0.528763],
                 [0.124780, 0.640461, 0.527068],
                 [0.126326, 0.644107, 0.525311],
                 [0.128087, 0.647749, 0.523491],
                 [0.130067, 0.651384, 0.521608],
                 [0.132268, 0.655014, 0.519661],
                 [0.134692, 0.658636, 0.517649],
                 [0.137339, 0.662252, 0.515571],
                 [0.140210, 0.665859, 0.513427],
                 [0.143303, 0.669459, 0.511215],
                 [0.146616, 0.673050, 0.508936],
                 [0.150148, 0.676631, 0.506589],
                 [0.153894, 0.680203, 0.504172],
                 [0.157851, 0.683765, 0.501686],
                 [0.162016, 0.687316, 0.499129],
                 [0.166383, 0.690856, 0.496502],
                 [0.170948, 0.694384, 0.493803],
                 [0.175707, 0.697900, 0.491033],
                 [0.180653, 0.701402, 0.488189],
                 [0.185783, 0.704891, 0.485273],
                 [0.191090, 0.708366, 0.482284],
                 [0.196571, 0.711827, 0.479221],
                 [0.202219, 0.715272, 0.476084],
                 [0.208030, 0.718701, 0.472873],
                 [0.214000, 0.722114, 0.469588],
                 [0.220124, 0.725509, 0.466226],
                 [0.226397, 0.728888, 0.462789],
                 [0.232815, 0.732247, 0.459277],
                 [0.239374, 0.735588, 0.455688],
                 [0.246070, 0.738910, 0.452024],
                 [0.252899, 0.742211, 0.448284],
                 [0.259857, 0.745492, 0.444467],
                 [0.266941, 0.748751, 0.440573],
                 [0.274149, 0.751988, 0.436601],
                 [0.281477, 0.755203, 0.432552],
                 [0.288921, 0.758394, 0.428426],
                 [0.296479, 0.761561, 0.424223],
                 [0.304148, 0.764704, 0.419943],
                 [0.311925, 0.767822, 0.415586],
                 [0.319809, 0.770914, 0.411152],
                 [0.327796, 0.773980, 0.406640],
                 [0.335885, 0.777018, 0.402049],
                 [0.344074, 0.780029, 0.397381],
                 [0.352360, 0.783011, 0.392636],
                 [0.360741, 0.785964, 0.387814],
                 [0.369214, 0.788888, 0.382914],
                 [0.377779, 0.791781, 0.377939],
                 [0.386433, 0.794644, 0.372886],
                 [0.395174, 0.797475, 0.367757],
                 [0.404001, 0.800275, 0.362552],
                 [0.412913, 0.803041, 0.357269],
                 [0.421908, 0.805774, 0.351910],
                 [0.430983, 0.808473, 0.346476],
                 [0.440137, 0.811138, 0.340967],
                 [0.449368, 0.813768, 0.335384],
                 [0.458674, 0.816363, 0.329727],
                 [0.468053, 0.818921, 0.323998],
                 [0.477504, 0.821444, 0.318195],
                 [0.487026, 0.823929, 0.312321],
                 [0.496615, 0.826376, 0.306377],
                 [0.506271, 0.828786, 0.300362],
                 [0.515992, 0.831158, 0.294279],
                 [0.525776, 0.833491, 0.288127],
                 [0.535621, 0.835785, 0.281908],
                 [0.545524, 0.838039, 0.275626],
                 [0.555484, 0.840254, 0.269281],
                 [0.565498, 0.842430, 0.262877],
                 [0.575563, 0.844566, 0.256415],
                 [0.585678, 0.846661, 0.249897],
                 [0.595839, 0.848717, 0.243329],
                 [0.606045, 0.850733, 0.236712],
                 [0.616293, 0.852709, 0.230052],
                 [0.626579, 0.854645, 0.223353],
                 [0.636902, 0.856542, 0.216620],
                 [0.647257, 0.858400, 0.209861],
                 [0.657642, 0.860219, 0.203082],
                 [0.668054, 0.861999, 0.196293],
                 [0.678489, 0.863742, 0.189503],
                 [0.688944, 0.865448, 0.182725],
                 [0.699415, 0.867117, 0.175971],
                 [0.709898, 0.868751, 0.169257],
                 [0.720391, 0.870350, 0.162603],
                 [0.730889, 0.871916, 0.156029],
                 [0.741388, 0.873449, 0.149561],
                 [0.751884, 0.874951, 0.143228],
                 [0.762373, 0.876424, 0.137064],
                 [0.772852, 0.877868, 0.131109],
                 [0.783315, 0.879285, 0.125405],
                 [0.793760, 0.880678, 0.120005],
                 [0.804182, 0.882046, 0.114965],
                 [0.814576, 0.883393, 0.110347],
                 [0.824940, 0.884720, 0.106217],
                 [0.835270, 0.886029, 0.102646],
                 [0.845561, 0.887322, 0.099702],
                 [0.855810, 0.888601, 0.097452],
                 [0.866013, 0.889868, 0.095953],
                 [0.876168, 0.891125, 0.095250],
                 [0.886271, 0.892374, 0.095374],
                 [0.896320, 0.893616, 0.096335],
                 [0.906311, 0.894855, 0.098125],
                 [0.916242, 0.896091, 0.100717],
                 [0.926106, 0.897330, 0.104071],
                 [0.935904, 0.898570, 0.108131],
                 [0.945636, 0.899815, 0.112838],
                 [0.955300, 0.901065, 0.118128],
                 [0.964894, 0.902323, 0.123941],
                 [0.974417, 0.903590, 0.130215],
                 [0.983868, 0.904867, 0.136897],
                 [0.993248, 0.906157, 0.143936]]
from matplotlib.colors import ListedColormap

# Build one ListedColormap per 256-entry RGB lookup table defined above,
# keyed by colormap name.
cmaps = {
    cmap_name: ListedColormap(rgb_rows, name=cmap_name)
    for cmap_name, rgb_rows in (('magma', _magma_data),
                                ('inferno', _inferno_data),
                                ('plasma', _plasma_data),
                                ('viridis', _viridis_data))
}

# Module-level aliases so callers can import the colormaps directly.
magma = cmaps['magma']
inferno = cmaps['inferno']
plasma = cmaps['plasma']
viridis = cmaps['viridis']
| mit |
rajat1994/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm

# we create 40 separable points: two Gaussian blobs of 20 points each,
# centred at (-2, -2) and (+2, +2)
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20

# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)

# get the separating hyperplane w . x + intercept = 0, rewritten as the
# 2-D line yy = a * xx + b so it can be plotted
w = clf.coef_[0]
a = -w[0] / w[1]  # slope of the decision boundary
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]

# plot the parallels to the separating hyperplane that pass through the
# support vectors (these are the margin boundaries; they share the slope
# `a` and are anchored at one support vector from each side)
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])

# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
# support vectors drawn first as larger hollow circles, then all points
# coloured by class on top
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
            s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)

plt.axis('tight')
plt.show()
| bsd-3-clause |
Adai0808/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
    .. [1] Predicting Good Probabilities with Supervised Learning,
           A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)

# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.

import numpy as np
np.random.seed(0)

import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve

# Synthetic binary problem: 20 features of which only 2 are informative
# (plus 2 redundant ones -- see the GaussianNB discussion in the docstring).
X, y = datasets.make_classification(n_samples=100000, n_features=20,
                                    n_informative=2, n_redundant=2)

train_samples = 100  # Samples used for training the models

X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]

# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)

###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)  # reliability curves
ax2 = plt.subplot2grid((3, 1), (2, 0))  # histograms of predicted probability
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
                  (gnb, 'Naive Bayes'),
                  (svc, 'Support Vector Classification'),
                  (rfc, 'Random Forest')]:
    clf.fit(X_train, y_train)
    if hasattr(clf, "predict_proba"):
        prob_pos = clf.predict_proba(X_test)[:, 1]
    else:  # use decision function
        # LinearSVC exposes no predict_proba; min-max scale its decision
        # function output to [0, 1] so it is comparable across classifiers.
        prob_pos = clf.decision_function(X_test)
        prob_pos = \
            (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
    fraction_of_positives, mean_predicted_value = \
        calibration_curve(y_test, prob_pos, n_bins=10)
    ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
             label="%s" % (name, ))
    ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
             histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
gclenaghan/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"  # one matplotlib colour code per iris class

# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]

# standardize (zero mean, unit variance per feature)
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std

h = .02  # step size in the mesh

clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)

# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')

# Plot also the training points
for i, color in zip(clf.classes_, colors):
    idx = np.where(y == i)
    plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
                cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')

# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_


def plot_hyperplane(c, color):
    """Draw the dashed OVA decision line for class ``c`` in ``color``."""
    def line(x0):
        # Solve coef[c] . (x0, x1) + intercept[c] = 0 for x1.
        return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
    plt.plot([xmin, xmax], [line(xmin), line(xmax)],
             ls="--", color=color)


for i, color in zip(clf.classes_, colors):
    plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.