repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
cdegroc/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 5 | 5860 | """
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to
the presence of outliers in the data set. In such a case, one would
do better to use a robust estimator of covariance to guarantee that
the estimation is resistant to "erroneous" observations in the data
set.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_samples-n_features-1}{2}` outliers) estimator of
covariance. The idea is to find :math:`\frac{n_samples+n_features+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance. After a correction
step aiming at compensating for the fact that the estimates were learnt
from only a portion of the initial data, we end up with robust
estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
In this example, we compare the estimation errors that are made when
using three types of location and covariance estimates on contaminated
gaussian distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided n_samples > 5 * n_features
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
"""
print __doc__
import numpy as np
import pylab as pl
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# Example settings: sample count, dimensionality, and Monte-Carlo trials.
n_samples = 80
n_features = 5
repeat = 10

# Contamination levels to evaluate: a finer grid up to n_samples / 8,
# then a coarser grid up to n_samples / 2 (the shared endpoint and the
# last point of the second grid are dropped).
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))

# Result arrays: one row per contamination level, one column per trial.
(err_loc_mcd, err_cov_mcd,
 err_loc_emp_full, err_cov_emp_full,
 err_loc_emp_pure, err_cov_emp_pure) = (
    np.zeros((range_n_outliers.size, repeat)) for _ in range(6))
# computation: for each contamination level, repeat the experiment and
# record the squared error of each location/covariance estimator against
# the true parameters (mean 0, identity covariance).
for i, n_outliers in enumerate(range_n_outliers):
    # range_n_outliers comes from np.linspace and therefore holds floats;
    # cast once so the value can be used as a slice bound and an array
    # size (NumPy rejects non-integer indices and shapes).
    n_outliers = int(n_outliers)
    for j in range(repeat):
        # generate data: pure standard-normal observations
        X = np.random.randn(n_samples, n_features)
        # add some outliers: shift a random subset of rows by +/-5 per axis
        outliers_index = np.random.permutation(n_samples)[:n_outliers]
        outliers_offset = 10. * \
            (np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
        X[outliers_index] += outliers_offset
        inliers_mask = np.ones(n_samples).astype(bool)
        inliers_mask[outliers_index] = False

        # fit a Minimum Covariance Determinant (MCD) robust estimator to data
        S = MinCovDet().fit(X)
        # compare raw robust estimates with the true location and covariance
        err_loc_mcd[i, j] = np.sum(S.location_ ** 2)
        err_cov_mcd[i, j] = S.error_norm(np.eye(n_features))

        # compare estimators learnt from the full data set with true parameters
        err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
        err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
            np.eye(n_features))

        # compare with an empirical covariance learnt from a pure data set
        # (i.e. "perfect" MCD)
        pure_X = X[inliers_mask]
        pure_location = pure_X.mean(0)
        pure_emp_cov = EmpiricalCovariance().fit(pure_X)
        err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
        err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results: top panel = location error, bottom panel = covariance error.
font_prop = matplotlib.font_manager.FontProperties(size=11)

pl.subplot(2, 1, 1)
pl.errorbar(range_n_outliers, err_loc_mcd.mean(1),
            yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
            label="Robust location", color='m')
pl.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
            yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
            label="Full data set mean", color='green')
pl.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
            yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
            label="Pure data set mean", color='black')
pl.title("Influence of outliers on the location estimation")
pl.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
pl.legend(loc="upper left", prop=font_prop)

pl.subplot(2, 1, 2)
x_size = range_n_outliers.size
pl.errorbar(range_n_outliers, err_cov_mcd.mean(1),
            yerr=err_cov_mcd.std(1),
            label="Robust covariance (MCD)", color='m')
# The full-data empirical covariance is only drawn with error bars over the
# low-contamination range, then continued as a dashed line.  Use floor
# division so the slice bounds stay integers: on Python 3, ``/`` yields a
# float, which NumPy rejects as an index (``//`` is equivalent on Python 2).
pl.errorbar(range_n_outliers[:(x_size // 5 + 1)],
            err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
            yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
            label="Full data set empirical covariance", color='green')
pl.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
        err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)],
        color='green', ls='--')
pl.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
            yerr=err_cov_emp_pure.std(1),
            label="Pure data set empirical covariance", color='black')
pl.title("Influence of outliers on the covariance estimation")
pl.xlabel("Amount of contamination (%)")
pl.ylabel("RMSE")
pl.legend(loc="upper center", prop=font_prop)

pl.show()
| bsd-3-clause |
rjw57/vagrant-ipython | ipython/profile_default/ipython_notebook_config.py | 1 | 19754 | # Configuration file for ipython-notebook.
c = get_config()
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# The Logging format template
# c.NotebookApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = b''
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = <class 'IPython.html.services.kernels.kernelmanager.MappingKernelManager'>
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
# c.NotebookApp.open_browser = True
c.NotebookApp.open_browser = False
# Python modules to load as notebook server extensions. This is an experimental
# API, and may change in future releases.
# c.NotebookApp.server_extensions = []
#
# c.NotebookApp.file_to_run = ''
# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = {}
# The config manager class to use
# c.NotebookApp.config_manager_class = <class 'IPython.html.services.config.manager.ConfigManager'>
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.NotebookApp.ipython_dir = ''
# The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = '/vagrant/notebooks'
# The IPython profile to use.
# c.NotebookApp.profile = 'default'
# The session manager class to use.
# c.NotebookApp.session_manager_class = <class 'IPython.html.services.sessions.sessionmanager.SessionManager'>
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.NotebookApp.copy_config_files = False
# The login handler class to use.
# c.NotebookApp.login_handler_class = <class 'IPython.html.auth.login.LoginHandler'>
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = <class 'IPython.html.services.contents.filemanager.FileContentsManager'>
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = 'localhost'
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.tornado_settings = {}
# The date format used by logging formatters for %(asctime)s
# c.NotebookApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = ''
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.NotebookApp.verbose_crash = False
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = ''
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.NotebookApp.extra_config_file = ''
# Set the log level by value or name.
# c.NotebookApp.log_level = 30
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from IPython.html.templates.
# c.NotebookApp.extra_template_paths = []
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# Whether to overwrite existing config files when copying
# c.NotebookApp.overwrite = False
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = <class 'IPython.html.auth.logout.LogoutHandler'>
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = ''
# The kernel spec manager class to use. Should be a subclass of
# `IPython.kernel.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of IPython and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = <class 'IPython.kernel.kernelspec.KernelSpecManager'>
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = ''
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# The cluster manager class to use.
# c.NotebookApp.cluster_manager_class = <class 'IPython.html.services.clusters.clustermanager.ClusterManager'>
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = ''
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = []
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
#
# c.KernelManager.transport = 'tcp'
# set the shell (ROUTER) port [default: random]
# c.KernelManager.shell_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = ''
# set the stdin (ROUTER) port [default: random]
# c.KernelManager.stdin_port = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.KernelManager.connection_file = ''
# set the heartbeat port [default: random]
# c.KernelManager.hb_port = 0
# set the iopub (PUB) port [default: random]
# c.KernelManager.iopub_port = 0
# set the control (ROUTER) port [default: random]
# c.KernelManager.control_port = 0
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The UUID identifying this session.
# c.Session.session = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# path to file containing execution key.
# c.Session.keyfile = ''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
# Debug output in the Session
# c.Session.debug = False
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'vagrant'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# execution key, for extra authentication.
# c.Session.key = b''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
# MappingKernelManager will inherit config from: MultiKernelManager
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MappingKernelManager.kernel_manager_class = 'IPython.kernel.ioloop.IOLoopKernelManager'
# The name of the default kernel to start
# c.MappingKernelManager.default_kernel_name = 'python3'
#
# c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
#
# c.ContentsManager.checkpoints_class = <class 'IPython.html.services.contents.checkpoints.Checkpoints'>
#
# c.ContentsManager.checkpoints_kwargs = {}
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword):
#
# hook(path=path, model=model, contents_manager=self)
#
# model: the model to be saved. Includes file contents.
# modifying this dict will affect the file that is stored.
# path: the API path of the save destination
# contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
#
# c.ContentsManager.checkpoints = None
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# FileContentsManager will inherit config from: ContentsManager
# The base name used when creating untitled files.
# c.FileContentsManager.untitled_file = 'untitled'
#
# c.FileContentsManager.checkpoints_class = <class 'IPython.html.services.contents.checkpoints.Checkpoints'>
#
# c.FileContentsManager.checkpoints_kwargs = {}
# Glob patterns to hide in file and directory listings.
# c.FileContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
#
# c.FileContentsManager.root_dir = ''
# The base name used when creating untitled notebooks.
# c.FileContentsManager.untitled_notebook = 'Untitled'
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword):
#
# hook(path=path, model=model, contents_manager=self)
#
# model: the model to be saved. Includes file contents.
# modifying this dict will affect the file that is stored.
# path: the API path of the save destination
# contents_manager: this ContentsManager instance
# c.FileContentsManager.pre_save_hook = None
#
# c.FileContentsManager.checkpoints = None
# The base name used when creating untitled directories.
# c.FileContentsManager.untitled_directory = 'Untitled Folder'
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword):
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# path: the filesystem path to the file just written model: the model
# representing the file contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
# DEPRECATED, use post_save_hook
# c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The sqlite file in which to store notebook signatures. By default, this will
# be in your IPython profile. You can set it to ':memory:' to disable sqlite
# writing to the filesystem.
# c.NotebookNotary.db_file = ''
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = b''
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = ''
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
| mit |
sdh11/gnuradio | gr-digital/examples/example_fll.py | 7 | 5704 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_arg import eng_float, intx
from argparse import ArgumentParser
import sys
import numpy
try:
from matplotlib import pyplot
except ImportError:
print("Error: could not from matplotlib import pyplot (http://matplotlib.sourceforge.net/)")
sys.exit(1)
class example_fll(gr.top_block):
    """Flowgraph exercising the band-edge frequency-locked loop (FLL).

    N random +/-1 symbols, rotated by a static phase offset ``poffset``,
    are RRC pulse-shaped at ``sps`` samples/symbol, passed through a
    channel model applying ``noise``, ``foffset`` and ``toffset``, and
    then tracked by ``digital.fll_band_edge_cc``.  Vector sinks capture
    the shaped source, the FLL output, and the FLL's frequency, phase and
    error streams (output ports 1, 2 and 3) for plotting after run().
    """

    def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
        gr.top_block.__init__(self)

        # Root-raised-cosine pulse-shaping taps (gain=sps, symbol rate 1.0).
        rrc_taps = filter.firdes.root_raised_cosine(
            sps, sps, 1.0, rolloff, ntaps)

        # N random +/-1 symbols, rotated by the requested phase offset.
        data = 2.0*numpy.random.randint(0, 2, N) - 1.0
        data = numpy.exp(1j*poffset) * data

        # Signal chain: source -> RRC interpolator -> channel -> FLL.
        self.src = blocks.vector_source_c(data.tolist(), False)
        self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
        self.chn = channels.channel_model(noise, foffset, toffset)
        self.fll = digital.fll_band_edge_cc(sps, rolloff, ntaps, bw)

        # Sinks: clean shaped signal, FLL output, and the FLL's auxiliary
        # frequency/phase/error streams.
        self.vsnk_src = blocks.vector_sink_c()
        self.vsnk_fll = blocks.vector_sink_c()
        self.vsnk_frq = blocks.vector_sink_f()
        self.vsnk_phs = blocks.vector_sink_f()
        self.vsnk_err = blocks.vector_sink_f()

        self.connect(self.src, self.rrc, self.chn, self.fll, self.vsnk_fll)
        self.connect(self.rrc, self.vsnk_src)
        self.connect((self.fll,1), self.vsnk_frq)
        self.connect((self.fll,2), self.vsnk_phs)
        self.connect((self.fll,3), self.vsnk_err)
def main():
    """Parse CLI options, run the FLL flowgraph, and plot the results.

    Produces a 2x2 figure: the FLL's LO frequency, its loop error, the
    IQ constellation (source vs. FLL output), and the real part of the
    symbols over time.
    """
    parser = ArgumentParser(conflict_handler="resolve")
    parser.add_argument("-N", "--nsamples", type=int, default=2000,
                        help="Set the number of samples to process [default=%(default)r]")
    parser.add_argument("-S", "--sps", type=int, default=4,
                        help="Set the samples per symbol [default=%(default)r]")
    parser.add_argument("-r", "--rolloff", type=eng_float, default=0.35,
                        help="Set the rolloff factor [default=%(default)r]")
    parser.add_argument("-W", "--bandwidth", type=eng_float, default=2*numpy.pi/100.0,
                        help="Set the loop bandwidth [default=%(default)r]")
    parser.add_argument("-n", "--ntaps", type=int, default=45,
                        help="Set the number of taps in the filters [default=%(default)r]")
    parser.add_argument("--noise", type=eng_float, default=0.0,
                        help="Set the simulation noise voltage [default=%(default)r]")
    parser.add_argument("-f", "--foffset", type=eng_float, default=0.2,
                        help="Set the simulation's normalized frequency offset (in Hz) [default=%(default)r]")
    parser.add_argument("-t", "--toffset", type=eng_float, default=1.0,
                        help="Set the simulation's timing offset [default=%(default)r]")
    parser.add_argument("-p", "--poffset", type=eng_float, default=0.0,
                        help="Set the simulation's phase offset [default=%(default)r]")
    args = parser.parse_args()

    # Adjust N for the interpolation by sps
    args.nsamples = args.nsamples // args.sps

    # Set up the program-under-test
    put = example_fll(args.nsamples, args.sps, args.rolloff,
                      args.ntaps, args.bandwidth, args.noise,
                      args.foffset, args.toffset, args.poffset)
    put.run()

    # Pull captured samples out of the vector sinks as NumPy arrays.
    data_src = numpy.array(put.vsnk_src.data())
    data_err = numpy.array(put.vsnk_err.data())

    # Convert the FLL's LO frequency from rads/sec to Hz
    data_frq = numpy.array(put.vsnk_frq.data()) / (2.0*numpy.pi)

    # adjust this to align with the data. There are 2 filters of
    # ntaps long and the channel introduces another 4 sample delay.
    data_fll = numpy.array(put.vsnk_fll.data()[2*args.ntaps-4:])

    # Plot the FLL's LO frequency
    f1 = pyplot.figure(1, figsize=(12,10))
    s1 = f1.add_subplot(2,2,1)
    s1.plot(data_frq)
    s1.set_title("FLL LO")
    s1.set_xlabel("Samples")
    s1.set_ylabel("Frequency (normalized Hz)")

    # Plot the FLL's error
    s2 = f1.add_subplot(2,2,2)
    s2.plot(data_err)
    s2.set_title("FLL Error")
    s2.set_xlabel("Samples")
    s2.set_ylabel("FLL Loop error")

    # Plot the IQ symbols
    s3 = f1.add_subplot(2,2,3)
    s3.plot(data_src.real, data_src.imag, "o")
    s3.plot(data_fll.real, data_fll.imag, "rx")
    s3.set_title("IQ")
    s3.set_xlabel("Real part")
    s3.set_ylabel("Imag part")

    # Plot the symbols in time
    s4 = f1.add_subplot(2,2,4)
    s4.plot(data_src.real, "o-")
    s4.plot(data_fll.real, "rx-")
    s4.set_title("Symbols")
    s4.set_xlabel("Samples")
    s4.set_ylabel("Real Part of Signals")

    pyplot.show()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C instead of printing a traceback.
        pass
| gpl-3.0 |
TuKo/brainiak | examples/searchlight/example_searchlight.py | 5 | 2942 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from mpi4py import MPI
import sys
from brainiak.searchlight.searchlight import Searchlight
from brainiak.searchlight.searchlight import Diamond
"""Distributed Searchlight Example
example usage: mpirun -n 4 python3 example_searchlight.py
"""
# MPI process identity: under `mpirun -n P` each process has a distinct rank;
# only rank 0 generates data, the rest receive their share via distribute().
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size

# Dataset size parameters
dim = 40        # edge length of the cubic volume
ntr = 400       # number of trials/time points per voxel
maskrad = 15    # radius of the spherical analysis mask

# Predictive point parameters
pt = (23, 23, 23)   # corner of the cube into which signal is injected
kernel_dim = 5      # edge length of the injected Gaussian kernel
weight = 1          # injected signal strength

# Generate data (root process only; other ranks pass None to distribute()).
data = np.random.random((dim, dim, dim, ntr)) if rank == 0 else None

# Spherical mask of radius `maskrad` centred in the volume.
# NOTE: `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
# documented replacement (it is what the alias always pointed to).
mask = np.zeros((dim, dim, dim), dtype=bool)
for i in range(dim):
    for j in range(dim):
        for k in range(dim):
            dist = np.sqrt(((dim/2)-i)**2 + ((dim/2)-j)**2 + ((dim/2)-k)**2)
            if dist < maskrad:
                mask[i, j, k] = 1

# Generate labels (root process only).
labels = np.random.choice([True, False], (ntr,)) if rank == 0 else None

# Inject a predictive region into the random data: add a normalized Gaussian
# kernel for True trials and subtract it for False trials.
if rank == 0:
    kernel = np.zeros((kernel_dim, kernel_dim, kernel_dim))
    for i in range(kernel_dim):
        for j in range(kernel_dim):
            for k in range(kernel_dim):
                arr = np.array([i-(kernel_dim/2), j-(kernel_dim/2), k-(kernel_dim/2)])
                kernel[i, j, k] = np.exp(-np.dot(arr.T, arr))
    kernel = kernel / np.sum(kernel)
    for (idx, l) in enumerate(labels):
        if l:
            data[pt[0]:pt[0]+kernel_dim, pt[1]:pt[1]+kernel_dim, pt[2]:pt[2]+kernel_dim, idx] += kernel * weight
        else:
            data[pt[0]:pt[0]+kernel_dim, pt[1]:pt[1]+kernel_dim, pt[2]:pt[2]+kernel_dim, idx] -= kernel * weight

# Create searchlight object
sl = Searchlight(sl_rad=1, max_blk_edge=5, shape=Diamond,
                 min_active_voxels_proportion=0)

# Distribute data to processes
sl.distribute([data], mask)
sl.broadcast(labels)
def sfn(l, msk, myrad, bcast_var):
import sklearn.svm
import sklearn.model_selection
classifier = sklearn.svm.SVC()
data = l[0][msk,:].T
return np.mean(sklearn.model_selection.cross_val_score(classifier, data, bcast_var,n_jobs=1))
# Run searchlight: each rank evaluates sfn on its assigned searchlights and
# rank 0 gathers the full accuracy volume.
global_outputs = sl.run_searchlight(sfn)

# Visualize result (root rank only)
if rank == 0:
    print(global_outputs)
    # BUGFIX: ``np.float`` was removed in NumPy 1.24; the builtin ``float``
    # is the documented replacement and yields the same float64 array.
    global_outputs = np.array(global_outputs, dtype=float)
    import matplotlib.pyplot as plt
    # Save one heat-map image per slice of the accuracy volume
    for (cnt, img) in enumerate(global_outputs):
        plt.imshow(img, cmap='hot', vmin=0, vmax=1)
        plt.savefig('img' + str(cnt) + '.png')
        plt.clf()
| apache-2.0 |
sjsrey/pysal | docsrc/conf.py | 4 | 8315 | # -*- coding: utf-8 -*-
#
# pysal documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 6 15:54:22 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sys, os
import sphinx_bootstrap_theme
sys.path.insert(0, os.path.abspath("../"))
# import your package to obtain the version info to display on the docs website
import pysal
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [  # 'sphinx_gallery.gen_gallery',
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.viewcode",
    "sphinxcontrib.bibtex",
    "sphinx.ext.mathjax",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "numpydoc",
    # 'sphinx.ext.napoleon',
    "matplotlib.sphinxext.plot_directive",
]
# Cross-reference objects documented in libpysal; the (None, 'libpysal-inv.txt')
# pair lets Sphinx fall back to a local inventory file if the URL is offline.
intersphinx_mapping = {
    'libpysal': ('https://pysal.org/libpysal/', (None, 'libpysal-inv.txt'))
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pysal"  # string of your project name, for example, 'giddy'
copyright = "2018-, pysal developers"
author = "pysal developers"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version.
version = pysal.__version__  # should replace it with your pysal
release = pysal.__version__  # should replace it with your pysal
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# FIX: ``language = None`` is deprecated since Sphinx 5.0 (it is silently
# coerced to "en" with a warning); set the intended language explicitly.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "tests/*"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "bootstrap"
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Browser/tab title, e.g. "pysal v2.0 Manual"
html_title = "%s v%s Manual" % (project, version)
# (Optional) Logo of your package. Should be small enough to fit the navbar (ideally 24x24).
# Path should be relative to the ``_static`` files directory.
# html_logo = "_static/images/package_logo.jpg"
# (Optional) PySAL favicon
html_favicon = "_static/images/pysal_favicon.ico"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    # Navigation bar title. (Default: ``project`` value)
    "navbar_title": "pysal",  # string of your project name, for example, 'giddy'
    # Render the next and previous page links in navbar. (Default: true)
    "navbar_sidebarrel": False,
    # Render the current pages TOC in the navbar. (Default: true)
    #'navbar_pagenav': True,
    #'navbar_pagenav': False,
    # No sidebar
    "nosidebar": True,
    # Tab name for the current pages TOC. (Default: "Page")
    #'navbar_pagenav_name': "Page",
    # Global TOC depth for "site" navbar tab. (Default: 1)
    # Switching to -1 shows all levels.
    "globaltoc_depth": 2,
    # Include hidden TOCs in Site navbar?
    #
    # Note: If this is "false", you cannot have mixed ``:hidden:`` and
    # non-hidden ``toctree`` directives in the same page, or else the build
    # will break.
    #
    # Values: "true" (default) or "false"
    "globaltoc_includehidden": "true",
    # HTML navbar class (Default: "navbar") to attach to <div> element.
    # For black navbar, do "navbar navbar-inverse"
    #'navbar_class': "navbar navbar-inverse",
    # Fix navigation bar to top of page?
    # Values: "true" (default) or "false"
    "navbar_fixed_top": "true",
    # Location of link to source.
    # Options are "nav" (default), "footer" or anything else to exclude.
    "source_link_position": "footer",
    # Bootswatch (http://bootswatch.com/) theme.
    #
    # Options are nothing (default) or the name of a valid theme
    # such as "amelia" or "cosmo", "yeti", "flatly".
    "bootswatch_theme": "yeti",
    # Choose Bootstrap version.
    # Values: "3" (default) or "2" (in quotes)
    "bootstrap_version": "3",
    # Navigation bar menu: (label, target document) pairs
    "navbar_links": [
        ("Installation", "installation"),
        ("API", "api"),
        ("References", "references"),
    ],
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# html_sidebars = {'sidebar': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "pysal" + "doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "pysal.tex", u"pysal Documentation", u"pysal developers", "manual")
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pysal", u"pysal Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "pysal",
        u"pysal Documentation",
        author,
        "pysal",
        "One line description of project.",
        "Miscellaneous",
    )
]
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
# Generate the API documentation when building
autosummary_generate = True
# numpydoc knobs: list class members, give them a toctree entry, include
# inherited members, and allow plots inside docstrings.
numpydoc_show_class_members = True
class_members_toctree = True
numpydoc_show_inherited_class_members = True
numpydoc_use_plots = True
# display the source code for Plot directive
plot_include_source = True
def setup(app):
app.add_stylesheet("pysal-styles.css")
| bsd-3-clause |
plissonf/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
                     p=2, metric_params=None, include_self=None):
    """Computes the (weighted) graph of k-Neighbors for points in X

    Read more in the :ref:`User Guide <unsupervised_neighbors>`.

    Parameters
    ----------
    X : array-like or BallTree, shape = [n_samples, n_features]
        Sample data, in the form of a numpy array or a precomputed
        :class:`BallTree`.

    n_neighbors : int
        Number of neighbors for each sample.

    mode : {'connectivity', 'distance'}, optional
        Type of returned matrix: 'connectivity' will return the
        connectivity matrix with ones and zeros, in 'distance' the
        edges are Euclidean distance between points.

    metric : string, default 'minkowski'
        The distance metric used to calculate the k-Neighbors for each sample
        point. The DistanceMetric class gives a list of available metrics.
        The default distance is 'euclidean' ('minkowski' metric with the p
        param equal to 2.)

    include_self: bool, default backward-compatible.
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibilty. From
        version 0.18, the default value will be False, irrespective of the
        value of `mode`.

    p : int, default 2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params: dict, optional
        additional keyword arguments for the metric function.

    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.

    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import kneighbors_graph
    >>> A = kneighbors_graph(X, 2)
    >>> A.toarray()
    array([[ 1.,  0.,  1.],
           [ 0.,  1.,  1.],
           [ 1.,  0.,  1.]])

    See also
    --------
    radius_neighbors_graph
    """
    if not isinstance(X, KNeighborsMixin):
        # Raw data was passed: fit a NearestNeighbors estimator on it with
        # the requested metric parameters.
        X = NearestNeighbors(n_neighbors, metric=metric, p=p,
                             metric_params=metric_params).fit(X)
    else:
        # A fitted estimator was passed: its parameters must match the
        # keyword arguments, otherwise raise ValueError.
        _check_params(X, metric, p, metric_params)
    # query is the training data itself when each sample should count as its
    # own neighbor, or None to exclude self-matches
    query = _query_include_self(X, include_self, mode)
    return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
                           p=2, metric_params=None, include_self=None):
    """Computes the (weighted) graph of Neighbors for points in X

    Neighborhoods are restricted the points at a distance lower than
    radius.

    Read more in the :ref:`User Guide <unsupervised_neighbors>`.

    Parameters
    ----------
    X : array-like or BallTree, shape = [n_samples, n_features]
        Sample data, in the form of a numpy array or a precomputed
        :class:`BallTree`.

    radius : float
        Radius of neighborhoods.

    mode : {'connectivity', 'distance'}, optional
        Type of returned matrix: 'connectivity' will return the
        connectivity matrix with ones and zeros, in 'distance' the
        edges are Euclidean distance between points.

    metric : string, default 'minkowski'
        The distance metric used to calculate the neighbors within a
        given radius for each sample point. The DistanceMetric class
        gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the param equal to 2.)

    include_self: bool, default None
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibilty. From
        version 0.18, the default value will be False, irrespective of the
        value of `mode`.

    p : int, default 2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params: dict, optional
        additional keyword arguments for the metric function.

    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.

    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import radius_neighbors_graph
    >>> A = radius_neighbors_graph(X, 1.5)
    >>> A.toarray()
    array([[ 1.,  0.,  1.],
           [ 0.,  1.,  0.],
           [ 1.,  0.,  1.]])

    See also
    --------
    kneighbors_graph
    """
    if not isinstance(X, RadiusNeighborsMixin):
        # Raw data was passed: fit a NearestNeighbors estimator on it with
        # the requested metric parameters.
        X = NearestNeighbors(radius=radius, metric=metric, p=p,
                             metric_params=metric_params).fit(X)
    else:
        # A fitted estimator was passed: its parameters must match the
        # keyword arguments, otherwise raise ValueError.
        _check_params(X, metric, p, metric_params)
    # query is the training data itself when each sample should count as its
    # own neighbor, or None to exclude self-matches
    query = _query_include_self(X, include_self, mode)
    return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
0x0all/scikit-learn | sklearn/datasets/tests/test_lfw.py | 50 | 6849 | """This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
# Scratch directories: one populated with a fake LFW layout by setup_module,
# one deliberately left empty to exercise the missing-data error path.
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
# Fabricated person names used to build the fake LFW directory structure
FAKE_NAMES = [
    'Abdelatif_Smith',
    'Abhati_Kepler',
    'Camara_Alvaro',
    'Chen_Dupont',
    'John_Lee',
    'Lin_Bauman',
    'Onur_Lopez',
]
def setup_module():
    """Test fixture run once and common to all tests of this module"""
    if imsave is None:
        raise SkipTest("PIL not installed.")
    if not os.path.exists(LFW_HOME):
        os.makedirs(LFW_HOME)
    # Seed both RNGs so the fake dataset (and hence the expected targets in
    # the tests below) is fully deterministic.
    random_state = random.Random(42)
    np_rng = np.random.RandomState(42)
    # generate some random jpeg files for each person
    counts = {}  # person name -> number of generated face images
    for name in FAKE_NAMES:
        folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
        if not os.path.exists(folder_name):
            os.makedirs(folder_name)
        n_faces = np_rng.randint(1, 5)
        counts[name] = n_faces
        for i in range(n_faces):
            file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
            # 250x250 RGB noise image, the native LFW image size
            uniface = np_rng.randint(0, 255, size=(250, 250, 3))
            try:
                imsave(file_path, uniface)
            except ImportError:
                raise SkipTest("PIL not installed")
    # add some random file pollution to test robustness
    with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
        f.write(six.b('Text file to be ignored by the dataset loader.'))
    # generate some pairing metadata files using the same format as LFW
    with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
        f.write(six.b("10\n"))
        # only persons with at least two images can form a "same person" pair
        more_than_two = [name for name, count in six.iteritems(counts)
                         if count >= 2]
        # five same-person pairs: name, index of first image, index of second
        for i in range(5):
            name = random_state.choice(more_than_two)
            first, second = random_state.sample(range(counts[name]), 2)
            f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
        # five different-person pairs: name1, index1, name2, index2
        for i in range(5):
            first_name, second_name = random_state.sample(FAKE_NAMES, 2)
            first_index = random_state.choice(np.arange(counts[first_name]))
            second_index = random_state.choice(np.arange(counts[second_name]))
            f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
                                                second_name, second_index)))
    # placeholder metadata files required by the loader but not exercised here
    with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
        f.write(six.b("Fake place holder that won't be tested"))
    with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
        f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
    """Test fixture (clean up) run once after all tests of this module"""
    # Remove both scratch directories if they still exist.
    for scratch_dir in (SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA):
        if os.path.isdir(scratch_dir):
            shutil.rmtree(scratch_dir)
@raises(IOError)
def test_load_empty_lfw_people():
    # Loading from a data_home that contains no LFW files must raise IOError
    load_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_people():
    """Check people loading against the deterministic fake dataset."""
    lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
                                 min_faces_per_person=3)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_people.images.shape, (10, 62, 47))
    assert_equal(lfw_people.data.shape, (10, 2914))
    # the target is array of person integer ids
    assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
    # names of the persons can be found using the target_names array
    expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
    assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and not limit on the number of picture per person
    lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
                                 resize=None, slice_=None, color=True)
    assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
    # the ids and class names are the same as previously
    assert_array_equal(lfw_people.target,
                       [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
    assert_array_equal(lfw_people.target_names,
                       ['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
                        'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
    # No fake person has 100 images, so filtering must raise ValueError
    load_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100)
@raises(IOError)
def test_load_empty_lfw_pairs():
    # Loading pairs from a data_home with no LFW files must raise IOError
    load_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_pairs():
    """Check pair loading against the deterministic fake dataset."""
    lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
    # the target is whether the person is the same or not
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    # names of the persons can be found using the target_names array
    expected_classes = ['Different persons', 'Same person']
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion
    lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
                                     resize=None, slice_=None, color=True)
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
    # the ids and class names are the same as previously
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
ahkab/ahkab | ahkab/testing.py | 1 | 33795 | # -*- coding: utf-8 -*-
# testing.py
# Testing framework
# Copyright 2014 Giuseppe Venturini
# This file is part of the ahkab simulator.
#
# Ahkab is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# Ahkab is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License v2
# along with ahkab. If not, see <http://www.gnu.org/licenses/>.
"""
A straight-forward framework to build tests to ensure no regressions
occur during development.
Two classes for describing tests are defined in this module:
- :class:`NetlistTest`, used to run a netlist-based test,
- :class:`APITest`, used to run an API-based test.
Every test, no matter which class is referenced internally, is
univocally identified by a alphanumeric id, which will
be referred to as ``<test_id>`` in the following.
Directory structure
\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"
The tests are placed in ``tests/``, under a directory with the same
id as the test, ie:
::
tests/<test_id>/
Running tests
\"\"\"\"\"\"\"\"\"\"\"\"\"
The test is performed with as working directory one among the following:
- The ahkab repository root,
- ``tests/``,
- ``tests/<test_id>``.
this is necessary for the framework to find its way to the reference files.
More specifically a test can either be run manually through the Python
interpreter:
::
python tests/<test_id>/test_<test_id>.py
or with the ``nose`` testing package:
::
nosetests tests/<test_id>/test_<test_id>.py
To run the whole test suite, issue:
::
nosetests tests/*/*.py
Please refer to the `nose documentation`_ for more info about the command
``nosetests``.
.. _nose documentation: https://nose.readthedocs.org/en/latest/
Running your tests for the first time
\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"
The first time you run a test you defined yourself, no reference data will be
available to check the test results and decide whether the test was passed or
if a test fail occurred.
In this case, if you call ``nose``, the test will (expectedly) fail.
Please run the test manually (see above) and the test framework will generate
the reference data for you.
Please *check the generated reference data carefully!*
Wrong reference defeats the whole concept of running tests!
Overview of a typical test based on :class:`NetlistTest`
\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"
Each test is composed of multiple files.
Required files
^^^^^^^^^^^^^^
The main directory must contain:
- ``<test_id>.ini``, an INI configuration file containing the details of the
test,
- ``test_<test_id>.py``, the script executing the test,
- ``<test_id>.ckt``, the main netlist file to be run.
- the reference data files for checking the pass/fail status of the test.
These can be automatically generated, as it will be shown below.
With the exception of the netlist file, which is free for the test writer
to define, and the data files, which clearly depend on the test at hand,
the other files have a predefined structure which will be examined
in more detail in the next sections.
Configuration file
''''''''''''''''''
Few rules are there regarding the entries in the configuration file.
They are as follows:
- The file name must be ``<test_id>.ini``,
- It must be located under ``tests/<test_id>/``,
- It must have a ``[test]`` section, containing the following entries:
- ``name``, set to the ``<test_id>``, for error-checking,
- ``netlist``, set to the netlist filename, ``<test_id>.ckt``, prepended
with the the netlist path relative to ``tests/<test_id>/`` (most of
the time that means just ``<test_id>.ckt``)
- ``type``, a comma-separated list of analyses that will be executed during
the test. Values may be ``op``, ``dc``, ``tran``, ``symbolic``... and so on.
- One entry ``<analysis>_ref`` for each of the analyses listed in the
``type`` entry above.
The value is recommended to be set to ``<test_id>-ref.<analysis>`` or
``<test_id>-ref.<analysis>.pickle``, if you prefer to save data in
Python's pickle format. Notice only trusted pickle files should
ever be loaded.
- ``skip-on-travis``, set to either ``0`` or ``1``, to flag whether this
test should be run on Travis-CI or not. Torture tests, tests needing
lots of CPU or memory, and long-lasting tests in general should be
disabled on Travis-CI to not exceed:
- a total build time of 50 minutes,
- A no stdout activity time of 10 minutes.
- ``skip-on-pypy``, set to either ``0`` or ``1``, to flag whether the test
  should be skipped when running under the PyPy Python implementation. In
  general, as PyPy supports neither ``scipy`` nor ``matplotlib``, only
  symbolic-oriented tests make sense with PyPy (where it really excels!).
The contents of an example test configuration file ``rtest1.ini``
follow, as an example.
::
[test]
name = rtest1
netlist = rtest1.ckt
type = dc, op
dc_ref = rtest1-ref.dc
op_ref = rtest1-ref.op
skip-on-travis = 0
skip-on-pypy = 1
Script file
'''''''''''
The test script file is where most of the action takes place and where
the highest amount of flexibility is available.
That said, the ahkab testing framework was designed to make for extremely
simple and straight-forward test scripts.
It is probably easier to introduce writing the scripts with an example.
Below is a typical script file.
::
from ahkab.testing import NetlistTest
from ahkab import options
# add this to prevent interactive plot directives
# in the netlist from halting the test waiting for
# user input
options.plotting_show_plots = False
def myoptions():
# optionally, set non-standard options
sim_opts = {}
sim_opts.update({'gmin':1e-9})
sim_opts.update({'iea':1e-3})
sim_opts.update({'transient_max_nr_iter':200})
return sim_opts
def test():
# this requires a netlist ``mytest.ckt``
# and a configuration file ``mytest.ini``
nt = NetlistTest('mytest', sim_opts=myoptions())
nt.setUp()
nt.test()
nt.tearDown()
# It is recommended to set the docstring to a meaningful value
test.__doc__ = "My test description, printed out by nose"
if __name__ == '__main__':
nt = NetlistTest('mytest', sim_opts=myoptions())
nt.setUp()
nt.test()
Notice how a function ``test()`` is defined, as that will be
run by ``nose``, and a ``'__main__'`` block is defined too,
to allow running the script from the command line.
It is slightly non-standard, as :func:`NetlistTest.setUp()` and
:func:`NetlistTest.tearDown()` are called inside ``test()``, but this
was found to be an acceptable compromise between complexity and following
standard practices.
The script is meant to be run from the command line in case a regression
is detected by ``nose``, possibly with the aid of a debugger.
As such, the :func:`NetlistTest.tearDown()` function is not executed
in the ``'__main__'`` block, so that the test outputs are preserved for
inspection.
That said, the example file should be easy to understand and in most cases
a simple:
::
:%s/mytest/<test_id>/g
in VIM - will suffice to generate your own script file. Just remember to save
to ``test_<test_id>.py``.
Overview of a typical test based on :class:`APITest`
\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"
Required files
^^^^^^^^^^^^^^
The main directory must contain:
- ``test_<test_id>.py``, the script executing the test,
- the reference data files for checking the pass/fail status of the test.
These can be automatically generated, as it will be shown below.
Script file
'''''''''''
Again, it is probably easier to introduce the API test scripts with an example.
Below is a typical test script file:
::
import ahkab
from ahkab import ahkab, circuit, printing, devices, testing
cli = False
def test():
\"\"\"Test docstring to be printed out by nose\"\"\"
mycircuit = circuit.Circuit(title="Butterworth Example circuit", filename=None)
## define nodes
gnd = mycircuit.get_ground_node()
n1 = mycircuit.create_node('n1')
n2 = mycircuit.create_node('n2')
# ...
## add elements
mycircuit.add_resistor(name="R1", n1="n1", n2="n2", value=600)
mycircuit.add_inductor(name="L1", n1="n2", n2=gnd, value=15.24e-3)
mycircuit.add_vsource("V1", n1="n1", n2=gnd, dc_value=5, ac_value=.5)
# ...
if cli:
print(mycircuit)
## define analyses
op_analysis = ahkab.new_op(outfile='<test_id>')
ac_analysis = ahkab.new_ac(start=1e3, stop=1e5, points=100, outfile='<test_id>')
# ...
## create a testbench
testbench = testing.APITest('<test_id>', mycircuit,
[op_analysis, ac_analysis],
skip_on_travis=True, skip_on_pypy=True)
## setup and test
testbench.setUp()
testbench.test()
## this section is recommended. If something goes wrong, you may call the
## test from the cli and the plots to video in the following will allow
## for quick inspection
if cli:
## re-run the test to grab the results
r = ahkab.run(mycircuit, an_list=[op_analysis, ac_analysis])
## plot and save interesting data
fig = plt.figure()
plt.title(mycircuit.title + " - TRAN Simulation")
plt.plot(r['tran']['T'], r['tran']['VN1'], label="Input voltage")
plt.hold(True)
plt.plot(r['tran']['T'], r['tran']['VN4'], label="output voltage")
plt.legend()
plt.hold(False)
plt.grid(True)
plt.ylabel('Step response')
plt.xlabel('Time [s]')
fig.savefig('tran_plot.png')
else:
## don't forget to tearDown the testbench when under nose!
testbench.tearDown()
if __name__ == '__main__':
import pylab as plt
cli = True
test()
plt.show()
Once again, a function ``test()`` is defined, as that will be the
entry point of ``nose``, and a ``'__main__'`` block is defined as well,
to allow running the script from the command line.
Inside ``test()``, the circuit to be tested is defined, accessing the
``ahkab`` module directly, to set up elements, sources and analyses.
Directly calling :func:`ahkab.run()` is not necessary,
:func:`APITest.test()` will take care of that for you.
Notice how :func:`APITest.setUp()` and :func:`APITest.tearDown()` are
called inside ``test()``, as in the previous case.
The script is meant to be run from the command line in case a regression
is detected by ``nose``, possibly with the aid of a debugger.
As such, the :func:`APITest.tearDown()` function is not executed
in the ``'__main__'`` block, so that the test outputs are preserved for
inspection.
Additionally, plotting is performed if the test is directly run from
the command line.
In case non-standard simulation options are necessary, they can be set
as in the previous example.
Module reference
\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"
"""
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import time
import os
import sys
import pickle
import unittest
from warnings import warn
try:
from configparser import ConfigParser, NoOptionError
except ImportError:
from ConfigParser import ConfigParser, NoOptionError
import numpy as np
import sympy
from scipy.interpolate import InterpolatedUnivariateSpline
from sympy.parsing.sympy_parser import parse_expr
from nose.tools import ok_, nottest
from nose.plugins.skip import SkipTest
from . import csvlib
from . import options
from . import py3compat
from . import pz
from . import results
from .ahkab import main, run
class _MyDict(dict):
    """A plain ``dict`` subclass that, unlike ``dict`` itself, permits
    attribute assignment (used below to attach the x-axis header name
    to a result set as ``res.x``)."""
    pass
@nottest
class NetlistTest(unittest.TestCase):
    """A class to run a netlist file and check the results against
    a pre-computed reference.

    **Parameters:**

    test_id : string
        The test id. For a netlist named ``"rc_network.ckt"``, this is
        to be set to ``"rc_network"``.
    er : float, optional
        Allowed relative error (applies to numeric results only).
    ea : float, optional
        Allowed absolute error (applies to numeric results only).
    sim_opts : dict, optional
        A dictionary containing the options to be used for the test.
    verbose : int
        The verbosity level to be used in the test. From 0 (silent) to
        6 (verbose). Notice higher verbosity values usually result in
        higher coverage. Defaults to 6.
    """
    def __init__(self, test_id, er=1e-6, ea=1e-9, sim_opts=None, verbose=6):
        unittest.TestCase.__init__(self, methodName='test')
        self.test_id = test_id
        self.er = er
        self.ea = ea
        # Shown by nose as the test description.
        self.test.__func__.__doc__ = "%s simulation" % (test_id, )
        self.ref_data = {} # the reference results will be loaded here
        self._sim_opts = sim_opts if sim_opts is not None else {}
        self._reset_opts = {} # saved values of any overridden global options
        self.verbose=verbose
    def _set_sim_opts(self, sim_opts):
        # Apply the requested simulation options module-wide, saving the
        # current values so _reset_sim_opts() can restore them afterwards.
        for opt in sim_opts.keys():
            if hasattr(options, opt):
                self._reset_opts.update({opt:getattr(options, opt)})
                setattr(options, opt, sim_opts[opt])
            else:
                raise ValueError("Option %s is not a valid option." % opt)
    def _reset_sim_opts(self):
        # Restore the global options saved by _set_sim_opts().
        for opt in self._reset_opts:
            setattr(options, opt, self._reset_opts[opt])
    def setUp(self):
        """Set up the testbench."""
        # find the needed files wrt the WD
        # we may be called from <checkout-dir>/tests/<mytest>
        # or from <checkout-dir>/tests/
        # or from <checkout-dir>/
        wd = os.getcwd()
        if os.path.split(wd)[1] == self.test_id:
            self.reference_path = "."
        elif os.path.split(wd)[1] == 'tests':
            self.reference_path = os.path.join(wd, self.test_id)
        else:
            self.reference_path = os.path.join(wd, 'tests', self.test_id)
        if not os.path.isfile(os.path.join(self.reference_path,
                                           '%s.ini' % self.test_id)):
            raise IOError("Config file %s not found." %
                          os.path.join(self.reference_path,
                                       '%s.ini' % self.test_id))
        # read the test config from <test_id>.ini
        cp = ConfigParser()
        cp.read(os.path.join(self.reference_path, '%s.ini' % self.test_id))
        # skipping on TRAVIS-CI option for time-consuming tests
        self.skip = bool(int(cp.get('test', 'skip-on-travis')))
        if 'TRAVIS' in os.environ and self.skip:
            # skip even loading the references
            # NOTE(review): returning here leaves self.refs, self.rmfiles
            # and self.ref_run unset; tearDown() then relies on the test
            # having raised SkipTest first — verify this cannot AttributeError.
            return
        # skipping on PYPY option for numeric tests
        # Do we have the optional skip-on-pypy entry?
        try:
            self.skip_on_pypy = bool(int(cp.get('test', 'skip-on-pypy')))
        except NoOptionError:
            # Default to skipping on PYPY
            self.skip_on_pypy = True
        if py3compat.PYPY and self.skip_on_pypy:
            # once again, skip even loading the references
            return
        assert self.test_id == cp.get('test', 'name')
        netlist = cp.get('test', 'netlist')
        self.netlist = os.path.join(self.reference_path, netlist)
        del netlist
        types = cp.get('test', 'type')
        self.types = [t.strip().replace(',', '').lower()
                      for t in types.split(',')]
        del types
        # reference files holding the reference results
        self.refs = {}
        for t in self.types:
            self.refs.update({t: os.path.join(self.reference_path,
                                              cp.get('test', t + '_ref'))})
        # files to be removed after the test is completed successfully
        self.rmfiles = []
        for i in self.types:
            if i == 'op':
                # OP analyses also emit an extra .opinfo file
                self.rmfiles.append(os.path.join(self.reference_path,
                                                 '%s.opinfo' %
                                                 self.test_id))
            self.rmfiles.append(os.path.join(self.reference_path,
                                             '%s.%s' %
                                             (self.test_id, i)))
        # Are we in a reference run?  (ie any reference file is missing)
        self.ref_run = False
        for i in list(self.refs.values()):
            self.ref_run = not os.path.isfile(i)
            if self.ref_run:
                print("RUNNING REFERENCE RUN - INVALID TEST!")
                break
        if not self.ref_run:
            self._load_references()
    def _load_references(self):
        # Load the reference data: pickled result objects are loaded
        # directly, CSV files are parsed into a _MyDict keyed by header.
        for t, file_ref in list(self.refs.items()):
            if 'pickle' in file_ref:
                with open(file_ref, 'rb') as fp:
                    self.ref_data.update({t: pickle.load(fp)})
            else:
                data, headers, _, _ = csvlib.load_csv(file_ref, [], None, 0, verbose=0)
                res = _MyDict()
                if os.path.splitext(file_ref)[1][1:].lower() == 'ac':
                    # AC data is stored as magnitude/phase column pairs:
                    # recombine |X| and arg(X) into one complex array.
                    res.update({headers[0]:data[0, :]})
                    for i, h in enumerate(headers):
                        if h[0] == h[-1] == '|':
                            pi = headers.index('arg('+h[1:-1]+')')
                            res.update({h[1:-1]:data[i, :]*np.exp(1j*data[pi, :])})
                        else:
                            continue
                else:
                    for i, h in enumerate(headers):
                        res.update({h: data[i, :]})
                res.x = headers[0] # name of the independent variable
                self.ref_data.update({t: res})
    def _run_test(self):
        # check whether we are on travis or not and skip if needed.
        # check whether we are running PYPY or not and skip if needed.
        if ('TRAVIS' in os.environ and self.skip) or (py3compat.PYPY and
                                                      self.skip_on_pypy):
            self._reset_sim_opts()
            raise SkipTest
        # no reference runs with nose
        if sys.argv[0].endswith('nosetests') and self.ref_run:
            self._reset_sim_opts()
            raise SkipTest
        self._set_sim_opts(self._sim_opts)
        print("Running test... ", end="")
        start = time.time()
        res = main(filename=self.netlist,
                   outfile=os.path.join(self.reference_path, self.test_id),
                   verbose=self.verbose)
        stop = time.time()
        times = stop - start
        print("done.\nThe test took %f s" % times)
        return res
    def _check(self, res, ref):
        # Compare one analysis' results against its reference.  The
        # strategy depends on the result type.
        if hasattr(res, 'get_x'):
            # x-based sweeps (eg tran/ac): fit splines to both result and
            # reference and compare them on the result's own x grid.
            x = res.get_x()
            for k in list(res.keys()):
                if np.all(res[k] == x):
                    continue
                elif np.any(np.iscomplex(res[k])) or np.any(np.iscomplex(ref[k])):
                    # Interpolate Re and Im of the results to compare.
                    x = x.reshape((-1, ))
                    refx = ref[ref.x].reshape((-1, ))
                    d1 = InterpolatedUnivariateSpline(x, np.real(res[k]).reshape((-1, )))
                    d2 = InterpolatedUnivariateSpline(refx, np.real(ref[k]).reshape((-1, )))
                    ok(d1(x), d2(x), rtol=self.er, atol=self.ea, msg=("Test %s FAILED (Re)" % self.test_id))
                    d1 = InterpolatedUnivariateSpline(x, np.imag(res[k]).reshape((-1, )))
                    d2 = InterpolatedUnivariateSpline(refx, np.imag(ref[k]).reshape((-1, )))
                    ok(d1(x), d2(x), rtol=self.er, atol=self.ea, msg=("Test %s FAILED (Im)" % self.test_id))
                else:
                    # Interpolate the results to compare.
                    x = x.reshape((-1, ))
                    refx = ref[ref.x].reshape((-1, ))
                    d1 = InterpolatedUnivariateSpline(x, np.real_if_close(res[k]).reshape((-1, )))
                    d2 = InterpolatedUnivariateSpline(refx, np.real_if_close(ref[k]).reshape((-1, )))
                    ok(d1(x), d2(x), rtol=self.er, atol=self.ea, msg=("Test %s FAILED" % self.test_id))
        elif isinstance(res, results.op_solution):
            for k in list(res.keys()):
                assert k in ref
                ok(res[k], ref[k], rtol=self.er, atol=self.ea, msg=("Test %s FAILED" % self.test_id))
        elif isinstance(res, results.pz_solution):
            # recover the reference singularities from Re/Im data: sorted
            # keys put all Im(...) entries first, then all Re(...) entries.
            ref_sing_keys = list(ref.keys())[:]
            ref_sing_keys.sort()
            assert len(ref_sing_keys) % 2 == 0
            ref_sing = [ref[ref_sing_keys[int(len(ref_sing_keys)/2) + k]] + ref[ref_sing_keys[k]]*1j \
                        for k in range(int(len(ref_sing_keys)/2))]
            ref_poles_num = len([k for k in ref.keys() if k[:4] == 'Re(p'])
            poles_ref, zeros_ref = ref_sing[:ref_poles_num], ref_sing[ref_poles_num:]
            assert len(poles_ref) == len(res.poles)
            pz._check_singularities(res.poles, poles_ref)
            assert len(zeros_ref) == len(res.zeros)
            pz._check_singularities(res.zeros, zeros_ref)
        else:
            if isinstance(res, list) or isinstance(res, tuple):
                for i, j in zip(res, ref):
                    self._check(i, j)
            elif res is not None:
                for k in list(res.keys()):
                    assert k in ref
                    if isinstance(res[k], dict): # hence ref[k] will be a dict too
                        self._check(res[k], ref[k])
                    elif isinstance(ref[k], sympy.Basic) and isinstance(res[k], sympy.Basic):
                        # get rid of assumptions. Evaluate only expression
                        rf = parse_expr(str(ref[k]))
                        rs = parse_expr(str(res[k]))
                        assert (rs == rf) or (sympy.simplify(rf/rs) == 1)
                    else:
                        assert res[k] == ref[k]
    def test(self):
        """Run the test."""
        res = self._run_test()
        if not self.ref_run:
            for t in list(res.keys()):
                ok_(t in self.ref_data, 'simulation %s not in the reference data' % t)
                print("Checking results for %s analysis..." % t)
                self._check(res[t], self.ref_data[t])
        else:
            # reference run: install the fresh results as new references
            for t, ref_file in list(self.refs.items()):
                if '.pickle' in ref_file:
                    with open(ref_file, 'wb') as fp:
                        pickle.dump(res[t], fp, protocol=2)
                else:
                    res_file = os.path.join(self.reference_path,
                                            '%s.%s' % (self.test_id, t))
                    os.rename(res_file, ref_file)
    def tearDown(self):
        """Remove temporary files - if needed."""
        if self.ref_run:
            pass
        else:
            for f in self.rmfiles:
                os.remove(f)
        self._reset_sim_opts()
@nottest
class APITest(unittest.TestCase):
    """A class to run a supplied circuit and check the results against
    a pre-computed reference.

    **Parameters:**

    test_id : string
        The test id.
    circ : circuit instance
        The circuit to be tested
    an_list : list of dicts
        A list of the analyses to be performed
    er : float, optional
        Allowed relative error (applies to numeric results only).
    ea : float, optional
        Allowed absolute error (applies to numeric results only).
    sim_opts : dict, optional
        A dictionary containing the options to be used for the test.
    skip_on_travis : bool, optional
        Should we skip the test on Travis? Set to ``True`` for long tests.
        Defaults to ``False``.
    skip_on_pypy : bool, optional
        Should we skip the test on PYPY? Set to ``True`` for tests requiring
        libraries not supported by PYPY (eg. ``scipy``, ``matplotlib``).
        Defaults to ``True``, as most numeric tests will fail.
    """
    def __init__(self, test_id, circ, an_list, er=1e-6, ea=1e-9, sim_opts=None,
                 skip_on_travis=False, skip_on_pypy=True):
        unittest.TestCase.__init__(self, methodName='test')
        self.test_id = test_id
        self.er = er
        self.ea = ea
        # Shown by nose as the test description.
        self.test.__func__.__doc__ = "%s simulation" % (test_id, )
        self.ref_data = {} # the reference results will be loaded here
        self.skip = skip_on_travis
        self.skip_on_pypy = skip_on_pypy
        self.circ = circ
        self.an_list = an_list
        self._sim_opts = sim_opts if sim_opts is not None else {}
        self._reset_opts = {}
        self._set_sim_opts(self._sim_opts)
        self.res = None
        for an in an_list:
            if 'outfile' in an and self.test_id not in an['outfile']:
                warn("W: Analysis %s has outfile set to %s" %
                     (an['type'], an['outfile']))
    def _set_sim_opts(self, sim_opts):
        # Apply the simulation options module-wide, saving the current
        # values so _reset_sim_opts() can restore them afterwards.
        for opt in sim_opts.keys():
            if hasattr(options, opt):
                self._reset_opts.update({opt:getattr(options, opt)})
                setattr(options, opt, sim_opts[opt])
            else:
                raise ValueError("Option %s is not a valid option." % opt)
    def _reset_sim_opts(self):
        # Restore the global options saved by _set_sim_opts().
        for opt in self._reset_opts:
            setattr(options, opt, self._reset_opts[opt])
    def setUp(self):
        """Set up the testbench"""
        # find the needed files wrt the WD
        # we may be called from <checkout-dir>/tests/<mytest>
        # or from <checkout-dir>/tests/
        # or from <checkout-dir>/
        wd = os.getcwd()
        if os.path.split(wd)[1] == self.test_id:
            self.reference_path = "."
        elif os.path.split(wd)[1] == 'tests':
            self.reference_path = os.path.join(wd, self.test_id)
        else:
            self.reference_path = os.path.join(wd, 'tests', self.test_id)
        if ('TRAVIS' in os.environ and self.skip) or (py3compat.PYPY and
                                                      self.skip_on_pypy):
            # skip even loading the references
            return
        self.types = [a['type'] for a in self.an_list]
        # reference files holding the reference results
        self.refs = {}
        for t in self.types:
            self.refs.update({t: os.path.join(self.reference_path,
                                              self.test_id + '-ref' + '.' + t)})
        # update the an_list with abs paths.
        # BUGFIX: the original compared outfile[:4] (4 chars) against the
        # 5-char literal '/tmp/', which can never match, so files under
        # /tmp/ were not being recognized; compare the first 5 chars.
        for i in range(len(self.an_list)):
            if 'outfile' in self.an_list[i] and \
               self.an_list[i]['outfile'] is not None and \
               not self.an_list[i]['outfile'] == 'stdout' and \
               not (len(self.an_list[i]['outfile']) > 5 and \
                    self.an_list[i]['outfile'][:5] == '/tmp/'):
                if not os.path.isabs(self.an_list[i]['outfile']):
                    self.an_list[i]['outfile'] = os.path.join(self.reference_path,
                                                              self.an_list[i]['outfile'])
        # files to be removed after the test is completed successfully;
        # stdout and scratch files under /tmp/ are left alone.
        self.rmfiles = []
        for an in self.an_list:
            if 'outfile' in an and \
               an['outfile'] is not None and \
               not an['outfile'] == 'stdout' and \
               not (len(an['outfile']) > 5 and an['outfile'][:5] == '/tmp/'):
                self.rmfiles.append(an['outfile'])
                if an['type'] == 'op':
                    # OP analyses also emit an extra <outfile>info file
                    self.rmfiles.append(an['outfile'] + 'info')
        # Are we in a reference run? (ie any reference file is missing)
        self.ref_run = False
        for i in list(self.refs.values()):
            self.ref_run = not os.path.isfile(i)
            if self.ref_run:
                print("RUNNING REFERENCE RUN - INVALID TEST!")
                break
        if not self.ref_run:
            self._load_references()
    def _load_references(self):
        # Load the reference data: pickled symbolic results are loaded
        # directly, CSV files are parsed into a _MyDict keyed by header.
        for t, file_ref in list(self.refs.items()):
            if '.symbolic' in file_ref:
                with open(file_ref, 'rb') as fp:
                    self.ref_data.update({t: pickle.load(fp)})
            else:
                data, headers, _, _ = csvlib.load_csv(file_ref, [], None, 0, verbose=0)
                res = _MyDict()
                if os.path.splitext(file_ref)[1][1:].lower() == 'ac':
                    # AC data is stored as magnitude/phase column pairs:
                    # recombine |X| and arg(X) into one complex array.
                    res.update({headers[0]:data[0, :]})
                    for i, h in enumerate(headers):
                        if h[0] == h[-1] == '|':
                            pi = headers.index('arg('+h[1:-1]+')')
                            res.update({h[1:-1]:data[i, :]*np.exp(1j*data[pi, :])})
                        else:
                            continue
                else:
                    for i, h in enumerate(headers):
                        res.update({h: data[i, :]})
                # OP results have no independent variable
                res.x = headers[0] if not t == 'op' else None
                self.ref_data.update({t: res})
    def _run_test(self):
        # Skip on TRAVIS/PYPY if requested, restoring global options first.
        if ('TRAVIS' in os.environ and self.skip) or (py3compat.PYPY and
                                                      self.skip_on_pypy):
            self._reset_sim_opts()
            raise SkipTest
        print("Running test... ", end=' ')
        start = time.time()
        res = run(self.circ, self.an_list)
        stop = time.time()
        times = stop - start
        print("done.\nThe test took %f s" % times)
        return res
    def _check(self, res, ref):
        # Compare one analysis' results against its reference.  The
        # strategy depends on the result type.
        if hasattr(res, 'get_x'):
            # x-based sweeps (eg tran/ac): fit splines to both result and
            # reference and compare them on the result's own x grid.
            x = res.get_x()
            for k in list(res.keys()):
                if np.all(res[k] == x):
                    continue
                elif np.any(np.iscomplex(res[k])) or np.any(np.iscomplex(ref[k])):
                    # Interpolate Re and Im of the results to compare.
                    x = x.reshape((-1, ))
                    refx = ref[ref.x].reshape((-1, ))
                    d1 = InterpolatedUnivariateSpline(x, np.real(res[k]).reshape((-1, )))
                    d2 = InterpolatedUnivariateSpline(refx, np.real(ref[k]).reshape((-1, )))
                    ok(d1(x), d2(x), rtol=self.er, atol=self.ea, msg=("Test %s FAILED (Re)" % self.test_id))
                    d1 = InterpolatedUnivariateSpline(x, np.imag(res[k]).reshape((-1, )))
                    d2 = InterpolatedUnivariateSpline(refx, np.imag(ref[k]).reshape((-1, )))
                    ok(d1(x), d2(x), rtol=self.er, atol=self.ea, msg=("Test %s FAILED (Im)" % self.test_id))
                else:
                    # Interpolate the results to compare.
                    x = x.reshape((-1, ))
                    refx = ref[ref.x].reshape((-1, ))
                    d1 = InterpolatedUnivariateSpline(x, np.real_if_close(res[k]).reshape((-1, )))
                    d2 = InterpolatedUnivariateSpline(refx, np.real_if_close(ref[k]).reshape((-1, )))
                    ok(d1(x), d2(x), rtol=self.er, atol=self.ea, msg=("Test %s FAILED" % self.test_id))
        elif isinstance(res, results.op_solution):
            for k in list(res.keys()):
                assert k in ref
                ok(res[k], ref[k], rtol=self.er, atol=self.ea, msg=("Test %s FAILED" % self.test_id))
        else:
            if isinstance(res, list) or isinstance(res, tuple):
                self._check(res[0], ref)
            elif res is not None:
                for k in list(res.keys()):
                    assert k in list(ref.keys())
                    if isinstance(res[k], dict): # hence ref[k] will be a dict too
                        self._check(res[k], ref[k])
                    elif isinstance(ref[k], sympy.Basic) and isinstance(res[k], sympy.Basic):
                        # get rid of assumptions. Evaluate only expression
                        rf = parse_expr(str(ref[k]))
                        rs = parse_expr(str(res[k]))
                        assert (rs == rf) or (sympy.simplify(rf/rs) == 1)
                    else:
                        assert res[k] == ref[k]
    def test(self):
        """Run the test."""
        res = self._run_test()
        if not self.ref_run:
            for t in list(res.keys()):
                # BUGFIX: the '%s' placeholder was never filled in
                # (missing '% t'), unlike the NetlistTest counterpart.
                ok_(t in self.ref_data, 'simulation %s not in the reference data' % t)
                print("Checking results for %s analysis..." % t)
                self._check(res[t], self.ref_data[t])
        else:
            # move ref files into place
            for an in self.an_list:
                ref_file = self.refs[an['type']]
                if not os.path.isabs(an['outfile']):
                    res_file = os.path.join(self.reference_path, an['outfile'])
                else:
                    res_file = an['outfile']
                os.rename(res_file, ref_file)
    def tearDown(self):
        """Remove temporary files - if needed."""
        # BUGFIX: when the test was skipped, setUp() returned before
        # defining ref_run/rmfiles; use getattr defaults so tearDown()
        # does not raise AttributeError in that case.
        if getattr(self, 'ref_run', False):
            pass
        else:
            for f in getattr(self, 'rmfiles', []):
                os.remove(f)
        self._reset_sim_opts()
def ok(x, ref, rtol, atol, msg):
    """Assert that ``x`` matches ``ref`` to within relative tolerance
    ``rtol`` and absolute tolerance ``atol``.

    On failure, print the maximum relative and absolute errors found,
    then raise an ``AssertionError`` carrying ``msg``.
    """
    if np.allclose(x, ref, rtol=rtol, atol=atol):
        return
    # Report how far off we were before failing.  np.asarray + np.max
    # also handle scalar inputs (the builtin max() would raise a
    # TypeError on 0-d values), and errstate silences the harmless
    # divide-by-zero warning when x + ref == 0.
    x = np.asarray(x)
    ref = np.asarray(ref)
    with np.errstate(divide='ignore', invalid='ignore'):
        rel_err = np.max(np.abs(2*(x - ref)/(x + ref)))
    print("REL: %g (max %g), ABS: %g (max %g)" %
          (rel_err, rtol, np.max(np.abs(x - ref)), atol))
    raise AssertionError(msg)
| gpl-2.0 |
rjleveque/riemann_book | exact_solvers/interactive_pplanes.py | 3 | 17630 | """
Interactive phase plane plot for Euler equations with ideal gas,
Euler equations with Tammann equations of state and acoustic equations.
"""
import sys, os
import numpy as np
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
from ipywidgets import widgets
from ipywidgets import interact
from IPython.display import display
def euler_phase_plane_plot():
    """Return a phase plane plot function ready to use with interact.

    The returned callable draws, in the (p, u) plane, the Hugoniot loci
    (shock branches) and integral curves (rarefaction branches) through
    the left and right states for the 1- and 3-wave families of the
    ideal-gas Euler equations, and marks the middle state where the
    physical branches intersect.
    """
    # Define hugoniot locus and integral curves independently (needed for
    # the interact version).
    def hugoniot_locus_1(p,ql,gamma):
        # 1-shock Hugoniot locus through the left state.
        rhol, ul, pl = ql
        cl = np.sqrt(gamma*pl/rhol)
        beta = (gamma+1.)/(gamma-1.)
        return ul + 2*cl/np.sqrt(2*gamma*(gamma-1.)) * ((1-p/pl)/np.sqrt(1+beta*p/pl))
    def hugoniot_locus_3(p,qr,gamma):
        # 3-shock Hugoniot locus through the right state.
        rhor, ur, pr = qr
        cr = np.sqrt(gamma*pr/rhor)
        beta = (gamma+1.)/(gamma-1.)
        return ur - 2*cr/np.sqrt(2*gamma*(gamma-1.)) * ((1-p/pr)/np.sqrt(1+beta*p/pr))
    def integral_curve_1(p,ql,gamma):
        # 1-rarefaction integral curve through the left state.
        rhol, ul, pl = ql
        cl = np.sqrt(gamma*pl/rhol)
        return ul + 2*cl/(gamma-1.)*(1.-(p/pl)**((gamma-1.)/(2.*gamma)))
    def integral_curve_3(p,qr,gamma):
        # 3-rarefaction integral curve through the right state.
        rhor, ur, pr = qr
        cr = np.sqrt(gamma*pr/rhor)
        return ur - 2*cr/(gamma-1.)*(1.-(p/pr)**((gamma-1.)/(2.*gamma)))
    def plot_function(rhol,ul,pl,rhor,ur,pr,gamma,
                      xmin,xmax,ymin,ymax,show_phys,show_unphys):
        "Subfunction required for interactive (function of only interactive parameters)."
        ql = [rhol, ul, pl]
        qr = [rhor, ur, pr]
        hugoloc1 = lambda p: hugoniot_locus_1(p,ql,gamma)
        hugoloc3 = lambda p: hugoniot_locus_3(p,qr,gamma)
        intcurv1 = lambda p: integral_curve_1(p,ql,gamma)
        intcurv3 = lambda p: integral_curve_3(p,qr,gamma)
        def phi_l(p):
            "Check whether the 1-wave is a shock or rarefaction."
            if p >= pl:
                return hugoloc1(p)
            else:
                return intcurv1(p)
        def phi_r(p):
            "Check whether the 3-wave is a shock or rarefaction."
            if p >= pr:
                return hugoloc3(p)
            else:
                return intcurv3(p)
        phi = lambda p: phi_l(p)-phi_r(p)
        # Use fsolve to find p_star such that Phi(p_star)=0
        p0 = (ql[2] + qr[2])/2.0 # initial guess is the average of initial pressures
        p_star, info, ier, msg = fsolve(phi, p0, full_output=True, xtol=1.e-14)
        # For strong rarefactions, sometimes fsolve needs help
        if ier != 1:
            p_star, info, ier, msg = fsolve(phi, p0, full_output=True, factor=0.1, xtol=1.e-10)
        # This should not happen:
        if ier != 1:
            print('Warning: fsolve did not converge.')
        # Middle-state velocity: average the (equal up to tolerance)
        # values from the left and right wave curves.
        u_star = 0.5*(phi_l(p_star) + phi_r(p_star))
        # Set plot bounds
        fig, ax = plt.subplots(figsize=(12,4))
        x = (ql[2], qr[2], p_star)
        y = (ql[1], qr[1], u_star)
        dx, dy = xmax - xmin, ymax - ymin
        ax.set_xlim(min(0.00000001,xmin),xmax)
        ax.set_ylim(ymin,ymax)
        ax.set_xlabel('Pressure (p)', fontsize=15)
        ax.set_ylabel('Velocity (u)', fontsize=15)
        # Split the pressure axis at pl/pr: the shock branch is physical
        # for p above the initial pressure, the rarefaction branch below.
        p = np.linspace(xmin,xmax,500)
        p1_shk = p[p>=pl]
        p1_rar = p[p<pl]
        p3_shk = p[p>=pr]
        p3_rar = p[p<pr]
        if show_unphys:
            # Plot unphysical solutions
            ax.plot(p1_rar,hugoloc1(p1_rar),'--r')
            ax.plot(p3_rar,hugoloc3(p3_rar),'--r')
            ax.plot(p1_shk,intcurv1(p1_shk),'--b')
            ax.plot(p3_shk,intcurv3(p3_shk),'--b')
        if show_phys:
            # Plot physical solutions
            ax.plot(p1_shk,hugoloc1(p1_shk),'-r')
            ax.plot(p3_shk,hugoloc3(p3_shk),'-r')
            ax.plot(p1_rar,intcurv1(p1_rar),'-b')
            ax.plot(p3_rar,intcurv3(p3_rar),'-b')
            # Mark the middle state if it lies inside the plot window.
            if (p_star <= xmax and u_star >ymin and u_star < ymax):
                ax.plot(p_star, u_star, '-ok', markersize=10)
                ax.text(x[2] + 0.025*dx,y[2] + 0.025*dy, '$q_m$', fontsize=15)
        # Plot initial states and markers
        ax.plot(ql[2], ql[1], '-ok', markersize=10)
        ax.plot(qr[2], qr[1], '-ok', markersize=10)
        for i,label in enumerate(('$q_l$', '$q_r$')):
            ax.text(x[i] + 0.025*dx,y[i] + 0.025*dy,label, fontsize=15)
        plt.show()
    return plot_function
def euler_interactive_phase_plane(ql=(1.0, -3.0, 100.0),
                                  qr=(1.0, 3.0, 100.0),
                                  gamma=1.4):
    """Create the GUI and output the interact app.

    Parameters
    ----------
    ql, qr : tuple (rho, u, p)
        Initial left and right primitive states.
    gamma : float
        Ideal-gas adiabatic index.
    """
    # Create plot function for interact
    pp_plot = euler_phase_plane_plot()
    # Declare all widget sliders.  Raw strings are used for LaTeX labels
    # containing backslashes, so that sequences such as "\g" are not
    # interpreted as (invalid) string escapes.
    ql1_widget = widgets.FloatSlider(value=ql[0],min=0.01,max=100.0, description=r'$\rho_l$')
    ql2_widget = widgets.FloatSlider(value=ql[1],min=-15,max=15.0, description='$u_l$')
    ql3_widget = widgets.FloatSlider(value=ql[2],min=1,max=200.0, description='$p_l$')
    qr1_widget = widgets.FloatSlider(value=qr[0],min=0.01,max=100.0, description=r'$\rho_r$')
    qr2_widget = widgets.FloatSlider(value=qr[1],min=-15,max=15.0, description='$u_r$')
    qr3_widget = widgets.FloatSlider(value=qr[2],min=1,max=200.0, description='$p_r$')
    gamm_widget = widgets.FloatSlider(value=gamma,min=0.01,max=10.0, description=r'$\gamma$')
    xmin_widget = widgets.BoundedFloatText(value=0.0000001, description='$p_{min}:$')
    xmax_widget = widgets.FloatText(value=200, description='$p_{max}:$')
    ymin_widget = widgets.FloatText(value=-15, description='$u_{min}:$')
    ymax_widget = widgets.FloatText(value=15, description='$u_{max}:$')
    show_physical = widgets.Checkbox(value=True, description='Physical solution')
    show_unphysical = widgets.Checkbox(value=True, description='Unphysical solution')
    # Additional control widgets not called by function
    rhomax_widget = widgets.FloatText(value=100, description=r'$\rho_{max}$')
    gammax_widget = widgets.FloatText(value=10, description=r'$\gamma_{max}$')
    # Allow for dependent widgets to update: keep the slider ranges in
    # sync with the user-set plot bounds.
    def update_xmin(*args):
        ql3_widget.min = xmin_widget.value
        qr3_widget.min = xmin_widget.value
    def update_xmax(*args):
        ql3_widget.max = xmax_widget.value
        qr3_widget.max = xmax_widget.value
    def update_ymin(*args):
        ql2_widget.min = ymin_widget.value
        qr2_widget.min = ymin_widget.value
    def update_ymax(*args):
        ql2_widget.max = ymax_widget.value
        qr2_widget.max = ymax_widget.value
    def update_rhomax(*args):
        ql1_widget.max = rhomax_widget.value
        qr1_widget.max = rhomax_widget.value
    def update_gammax(*args):
        gamm_widget.max = gammax_widget.value
    xmin_widget.observe(update_xmin, 'value')
    xmax_widget.observe(update_xmax, 'value')
    ymin_widget.observe(update_ymin, 'value')
    ymax_widget.observe(update_ymax, 'value')
    rhomax_widget.observe(update_rhomax, 'value')
    gammax_widget.observe(update_gammax, 'value')
    # Organize slider widgets into boxes
    qleftright = widgets.VBox([widgets.HBox([ql1_widget, ql2_widget, ql3_widget]),
                               widgets.HBox([qr1_widget, qr2_widget, qr3_widget]),
                               widgets.HBox([gamm_widget])])
    plot_opts = widgets.VBox([widgets.HBox([show_physical, show_unphysical]),
                              widgets.HBox([xmin_widget, xmax_widget, rhomax_widget]),
                              widgets.HBox([ymin_widget, ymax_widget, gammax_widget])])
    # Set up interactive GUI (tab style)
    interact_gui = widgets.Tab(children=[qleftright, plot_opts])
    interact_gui.set_title(0, 'Left and right states')
    interact_gui.set_title(1, 'Plot options')
    # Define interactive widget and run GUI
    ppwidget = interact(pp_plot, rhol=ql1_widget, ul=ql2_widget, pl=ql3_widget,
                        rhor=qr1_widget, ur=qr2_widget, pr=qr3_widget,
                        gamma=gamm_widget,
                        xmin=xmin_widget, xmax=xmax_widget,
                        ymin=ymin_widget, ymax=ymax_widget,
                        show_phys=show_physical, show_unphys=show_unphysical)
    # ppwidget.widget is not available in all ipywidgets versions;
    # a failure here should not take down the app.
    try:
        ppwidget.widget.close()
        display(interact_gui)
        display(ppwidget.widget.out)
    except:
        pass
def euler_tammann_phase_plane_plot():
    """Return a phase plane plot function ready to use with interact.

    Same structure as :func:`euler_phase_plane_plot`, but for the Euler
    equations with a Tammann (stiffened gas) equation of state: each
    side carries its own (gamma, p_inf) parameters.
    """
    # Define hugoniot locus and integral curves independently (needed for
    # the interact version).
    def hugoniot_locus_1(p,ql,params):
        # 1-shock Hugoniot locus through the left state.
        gammal, pinfl = params
        rhol, ul, pl = ql
        betal = (pl + pinfl)*(gammal - 1.0)/(gammal + 1.0)
        alphal = 2.0/((gammal + 1.0)*rhol)
        return ul - (p - pl)*np.sqrt(alphal/(p + pinfl + betal))
    def hugoniot_locus_3(p,qr,params):
        # 3-shock Hugoniot locus through the right state.
        gammar, pinfr = params
        rhor, ur, pr = qr
        betar = (pr + pinfr)*(gammar - 1.0)/(gammar + 1.0)
        alphar = 2.0/((gammar + 1.0)*rhor)
        return ur + (p - pr)*np.sqrt(alphar/(p + pinfr + betar))
    def integral_curve_1(p,ql,params):
        # 1-rarefaction integral curve through the left state.
        gammal, pinfl = params
        rhol, ul, pl = ql
        cl = np.sqrt(gammal*(pl + pinfl)/rhol)
        gl1 = gammal - 1.0
        return ul + 2*cl/gl1*(1 - ((p + pinfl)/(pl+pinfl))**(gl1/(2.0*gammal)))
    def integral_curve_3(p,qr,params):
        # 3-rarefaction integral curve through the right state.
        gammar, pinfr = params
        rhor, ur, pr = qr
        cr = np.sqrt(gammar*(pr + pinfr)/rhor)
        gr1 = gammar - 1.0
        return ur - 2*cr/gr1*(1 - ((p + pinfr)/(pr + pinfr))**(gr1/(2.0*gammar)))
    def plot_function(rhol,ul,pl,rhor,ur,pr,gammal,pinfl,gammar,pinfr,
                      xmin,xmax,ymin,ymax,show_phys,show_unphys):
        "Subfunction required for interactive (function of only interactive parameters)."
        ql = [rhol, ul, pl]
        qr = [rhor, ur, pr]
        paramsl = [gammal, pinfl]
        paramsr = [gammar, pinfr]
        hugoloc1 = lambda p: hugoniot_locus_1(p,ql,paramsl)
        hugoloc3 = lambda p: hugoniot_locus_3(p,qr,paramsr)
        intcurv1 = lambda p: integral_curve_1(p,ql,paramsl)
        intcurv3 = lambda p: integral_curve_3(p,qr,paramsr)
        def phi_l(p):
            "Check whether the 1-wave is a shock or rarefaction."
            if p >= pl:
                return hugoloc1(p)
            else:
                return intcurv1(p)
        def phi_r(p):
            "Check whether the 3-wave is a shock or rarefaction."
            if p >= pr:
                return hugoloc3(p)
            else:
                return intcurv3(p)
        phi = lambda p: phi_l(p)-phi_r(p)
        # Use fsolve to find p_star such that Phi(p_star)=0
        p0 = (ql[2] + qr[2])/2.0 # initial guess is the average of initial pressures
        p_star, info, ier, msg = fsolve(phi, p0, full_output=True, xtol=1.e-14)
        # For strong rarefactions, sometimes fsolve needs help
        if ier != 1:
            p_star, info, ier, msg = fsolve(phi, p0, full_output=True, factor=0.1, xtol=1.e-10)
        # This should not happen:
        if ier != 1:
            print('Warning: fsolve did not converge.')
        # Middle-state velocity: average the (equal up to tolerance)
        # values from the left and right wave curves.
        u_star = 0.5*(phi_l(p_star) + phi_r(p_star))
        # Set plot bounds
        fig, ax = plt.subplots(figsize=(12,4))
        x = (ql[2], qr[2], p_star)
        y = (ql[1], qr[1], u_star)
        dx, dy = xmax - xmin, ymax - ymin
        ax.set_xlim(min(0.00000001,xmin),xmax)
        ax.set_ylim(ymin,ymax)
        ax.set_xlabel('Pressure (p)', fontsize=15)
        ax.set_ylabel('Velocity (u)', fontsize=15)
        # Split the pressure axis at pl/pr: the shock branch is physical
        # for p above the initial pressure, the rarefaction branch below.
        p = np.linspace(xmin,xmax,500)
        p1_shk = p[p>=pl]
        p1_rar = p[p<pl]
        p3_shk = p[p>=pr]
        p3_rar = p[p<pr]
        # Plot unphysical solutions
        if show_unphys:
            ax.plot(p1_rar,hugoloc1(p1_rar),'--r')
            ax.plot(p3_rar,hugoloc3(p3_rar),'--r')
            ax.plot(p1_shk,intcurv1(p1_shk),'--b')
            ax.plot(p3_shk,intcurv3(p3_shk),'--b')
        # Plot physical solutions
        if show_phys:
            ax.plot(p1_shk,hugoloc1(p1_shk),'-r')
            ax.plot(p3_shk,hugoloc3(p3_shk),'-r')
            ax.plot(p1_rar,intcurv1(p1_rar),'-b')
            ax.plot(p3_rar,intcurv3(p3_rar),'-b')
            # Mark the middle state if it lies inside the plot window.
            if (p_star <= xmax and u_star > ymin and u_star < ymax):
                ax.plot(p_star, u_star, '-ok', markersize=10)
                ax.text(x[2] + 0.025*dx,y[2] + 0.025*dy, '$q_m$', fontsize=15)
        # Plot initial states and markers
        ax.plot(ql[2], ql[1], '-ok', markersize=10)
        ax.plot(qr[2], qr[1], '-ok', markersize=10)
        for i,label in enumerate(('$q_l$', '$q_r$')):
            ax.text(x[i] + 0.025*dx,y[i] + 0.025*dy,label, fontsize=15)
        plt.show()
    return plot_function
def euler_tammann_interactive_phase_plane(ql=(600.0, 10.0, 50000.0),
                                          qr=(50.0, -10.0, 25000.0),
                                          paramsl=(1.4, 0.0),
                                          paramsr=(7.0, 100.0)):
    """Create the GUI and output the interact app.

    Parameters
    ----------
    ql, qr : tuple (rho, u, p)
        Initial left and right primitive states.
    paramsl, paramsr : tuple (gamma, p_inf)
        Tammann EOS parameters on each side of the interface.
    """
    # Create plot function for interact
    pp_plot = euler_tammann_phase_plane_plot()
    # Declare all widget sliders.  Raw strings are used for LaTeX labels
    # containing backslashes, so that sequences such as "\g" and "\i"
    # are not interpreted as (invalid) string escapes.
    ql1_widget = widgets.FloatSlider(value=ql[0],min=0.01,max=1000.0, description=r'$\rho_l$')
    ql2_widget = widgets.FloatSlider(value=ql[1],min=-15,max=15.0, description='$u_l$')
    ql3_widget = widgets.FloatSlider(value=ql[2],min=1,max=200000.0, description='$p_l$')
    qr1_widget = widgets.FloatSlider(value=qr[0],min=0.01,max=1000.0, description=r'$\rho_r$')
    qr2_widget = widgets.FloatSlider(value=qr[1],min=-15,max=15.0, description='$u_r$')
    qr3_widget = widgets.FloatSlider(value=qr[2],min=1,max=200000.0, description='$p_r$')
    gamml_widget = widgets.FloatSlider(value=paramsl[0],min=0.01,max=10.0, description=r'$\gamma_l$')
    gammr_widget = widgets.FloatSlider(value=paramsr[0],min=0.01,max=10.0, description=r'$\gamma_r$')
    pinfl_widget = widgets.FloatSlider(value=paramsl[1],min=0.0,max=300000.0, description=r'$p_{\infty l}$')
    pinfr_widget = widgets.FloatSlider(value=paramsr[1],min=0.0,max=300000.0, description=r'$p_{\infty r}$')
    xmin_widget = widgets.BoundedFloatText(value=0.0000001, description='$p_{min}:$')
    xmax_widget = widgets.FloatText(value=200000, description='$p_{max}:$')
    ymin_widget = widgets.FloatText(value=-15, description='$u_{min}:$')
    ymax_widget = widgets.FloatText(value=15, description='$u_{max}:$')
    show_physical = widgets.Checkbox(value=True, description='Physical solution')
    show_unphysical = widgets.Checkbox(value=True, description='Unphysical solution')
    # Additional control widgets not called by function
    rhomax_widget = widgets.FloatText(value=1000, description=r'$\rho_{max}$')
    gammax_widget = widgets.FloatText(value=10, description=r'$\gamma_{max}$')
    pinfmax_widget = widgets.FloatText(value=300000, description=r'$p_{\infty max}$')
    # Allow for dependent widgets to update: keep the slider ranges in
    # sync with the user-set plot bounds.
    def update_xmin(*args):
        ql3_widget.min = xmin_widget.value
        qr3_widget.min = xmin_widget.value
    def update_xmax(*args):
        ql3_widget.max = xmax_widget.value
        qr3_widget.max = xmax_widget.value
    def update_ymin(*args):
        ql2_widget.min = ymin_widget.value
        qr2_widget.min = ymin_widget.value
    def update_ymax(*args):
        ql2_widget.max = ymax_widget.value
        qr2_widget.max = ymax_widget.value
    def update_rhomax(*args):
        ql1_widget.max = rhomax_widget.value
        qr1_widget.max = rhomax_widget.value
    def update_gammax(*args):
        gamml_widget.max = gammax_widget.value
        gammr_widget.max = gammax_widget.value
    def update_pinfmax(*args):
        pinfl_widget.max = pinfmax_widget.value
        pinfr_widget.max = pinfmax_widget.value
    xmin_widget.observe(update_xmin, 'value')
    xmax_widget.observe(update_xmax, 'value')
    ymin_widget.observe(update_ymin, 'value')
    ymax_widget.observe(update_ymax, 'value')
    rhomax_widget.observe(update_rhomax, 'value')
    gammax_widget.observe(update_gammax, 'value')
    pinfmax_widget.observe(update_pinfmax, 'value')
    # Organize slider widgets into boxes
    qleftright = widgets.VBox([widgets.HBox([ql1_widget, ql2_widget, ql3_widget]),
                               widgets.HBox([qr1_widget, qr2_widget, qr3_widget])])
    params = widgets.HBox([widgets.VBox([gamml_widget, gammr_widget]),
                           widgets.VBox([pinfl_widget, pinfr_widget])])
    plot_opts = widgets.HBox([widgets.VBox([show_physical, xmin_widget, ymin_widget]),
                              widgets.VBox([show_unphysical, xmax_widget, ymax_widget]),
                              widgets.VBox([rhomax_widget, gammax_widget, pinfmax_widget])])
    # Set up interactive GUI (tab style)
    interact_gui = widgets.Tab(children=[qleftright, params, plot_opts])
    interact_gui.set_title(0, 'Left and right states')
    interact_gui.set_title(1, 'Tammann EOS')
    interact_gui.set_title(2, 'Plot options')
    # Define interactive widget and run GUI
    ppwidget = interact(pp_plot, rhol=ql1_widget, ul=ql2_widget, pl=ql3_widget,
                        rhor=qr1_widget, ur=qr2_widget, pr=qr3_widget,
                        gammal=gamml_widget, pinfl=pinfl_widget,
                        gammar=gammr_widget, pinfr=pinfr_widget,
                        xmin=xmin_widget, xmax=xmax_widget,
                        ymin=ymin_widget, ymax=ymax_widget,
                        show_phys=show_physical, show_unphys=show_unphysical)
    # Wrapped in try/except for consistency with
    # euler_interactive_phase_plane: ppwidget.widget is not available in
    # all ipywidgets versions and a failure here should not kill the app.
    try:
        ppwidget.widget.close()
        display(interact_gui)
        display(ppwidget.widget.out)
    except:
        pass
| bsd-3-clause |
jayfans3/jieba | test/extract_topic.py | 65 | 1463 | import sys
sys.path.append("../")
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import decomposition
import jieba
import time
import glob
import sys
import os
import random

# Topic extraction demo: read every *.txt file in a directory, segment
# each line with jieba (one line = one document), build a TF-IDF matrix
# and factorize it with NMF, then print the top words of each topic.
if len(sys.argv)<2:
    print("usage: extract_topic.py directory [n_topic] [n_top_words]")
    sys.exit(0)
n_topic = 10       # default number of topics to extract
n_top_words = 25   # default number of words printed per topic
if len(sys.argv)>2:
    n_topic = int(sys.argv[2])
if len(sys.argv)>3:
    n_top_words = int(sys.argv[3])
count_vect = CountVectorizer()
docs = []
pattern = os.path.join(sys.argv[1],"*.txt")
print("read "+pattern)
for f_name in glob.glob(pattern):
    with open(f_name) as f:
        print("read file:", f_name)
        for line in f: #one line as a document
            words = " ".join(jieba.cut(line))
            docs.append(words)
random.shuffle(docs)
print("read done.")
print("transform")
counts = count_vect.fit_transform(docs)
tfidf = TfidfTransformer().fit_transform(counts)
print(tfidf.shape)
t0 = time.time()
print("training...")
nmf = decomposition.NMF(n_components=n_topic).fit(tfidf)
print("done in %0.3fs." % (time.time() - t0))
# Inverse the vectorizer vocabulary to be able to map feature indices
# back to the actual words when printing the topics below.
feature_names = count_vect.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
    print("Topic #%d:" % topic_idx)
    print(" ".join([feature_names[i]
                    for i in topic.argsort()[:-n_top_words - 1:-1]]))
    print("")
| mit |
nhmc/LAE | cloudy/comp7/final/model.py | 6 | 21973 | """
Make a likelihood function for use with emcee
Given input Z, nH, k_C, k_N, k_Al, aUV, NHI return ln
of the likelihood.
This module must define the following objects:
- a dictionary P with keys. The value of every key is a tuple with the
same length (the number of model parameters)
name : parameter names
min : minimum allowed parameter values
max : maximum allowed parameter values
- a model(par) function that generates the model of the data given
an array of parameter values
- a ln_likelihood(par) function
- a get_initial_positions(nwalkers) function that generates an array
of shape (nwalkers, npar) with parameter values for the initial
walker positions.
- a plot_model(par) function that plots a model fit to the data given a
set of parameter values, in the same order as they are listed in P.
- optionall a print_par(par) function.
AXIS ORDER for column densities:
NHI, nH, Z
Reverse this when using XY indexing (e.g. CloughTocher_Interpolator)
"""
from __future__ import division
from math import log, sqrt, pi
from barak.interp import CloughTocher2d_interpolator
from barak.utilities import adict
from barak.absorb import split_trans_name, get_ionization_energy
from barak.io import parse_config, loadobj
from cloudy.utils import read_observed as read_Nvals
import numpy as np
import os
from glob import glob
from barak.plot import arrplot, get_nrows_ncols, puttext
import astropy.units as u
from scipy.ndimage import map_coordinates
# If True, plot transitions against ionization potential instead of index.
use_ipot = False
# log10 of the number of cm in one Mpc; used in ln_likelihood to cap the
# inferred cloud thickness at 1 Mpc.
log10_cm_per_Mpc = np.log10((1*u.Mpc).to(u.cm).value)
class MapCoord_Interpolator:
    """Repeated-use wrapper around ``scipy.ndimage.map_coordinates``.

    Stores an n-d grid of values together with its axis values so the
    object can be called many times to interpolate at arbitrary
    positions.  The axes are assumed to be uniformly spaced.
    """
    def __init__(self, data, vals):
        """
        data : array, shape (m, n, o)
        vals : sequence [z, y, x] of 1-d axis-value arrays, where
            z has length m, y has length n and x has length o.
        """
        assert data.shape == tuple(len(axis) for axis in vals)
        # Keep only the origin and the first spacing of each axis:
        # uniform spacing is assumed when mapping values to pixels.
        self.val0 = [axis[0] for axis in vals]
        self.dval = [axis[1] - axis[0] for axis in vals]
        self.data = data

    def map_xyz_to_coord(self, vals):
        """Convert per-axis values into fractional pixel coordinates
        suitable for map_coordinates."""
        # one coordinate array per data dimension
        assert len(vals) == len(self.data.shape)
        return [(np.atleast_1d(v) - origin) / spacing
                for v, origin, spacing in zip(vals, self.val0, self.dval)]

    def __call__(self, vals, order=3, **kwargs):
        """Interpolate at the axis values `vals` (ordered z, y, x).

        order : int (default 3)
            Spline order.  Extra keywords (e.g. mode='nearest' for
            extrapolation outside the grid) are passed through to
            map_coordinates.
        """
        pixel_coords = self.map_xyz_to_coord(vals)
        return map_coordinates(self.data, pixel_coords, order=order, **kwargs)
def make_interpolators_uvbtilt(trans, simnames):
    """ Make interpolators including different UV slopes, given by the
    simulation names.

    simname naming scheme should be (uvb_k00, uvb_k01, uvb_k02, ...),
    uvb k values must be sorted in ascending order!

    Returns
    -------
    Ncloudy : dict of MapCoord_Interpolator, one per transition
        (plus 'NH'), interpolating over (NHI, nH, Z, aUV).
    Ncloudy_raw : dict of ndarray
        The raw grids the interpolators were built from.
    Models : list
        One loaded Cloudy grid per simulation.
    aUV : ndarray
        The UV tilt value of each simulation.
    """
    Models = []
    aUV = []
    for simname in simnames:
        # need to define prefix, SIMNAME
        gridname = os.path.join(simname, 'grid.cfg')
        print 'Reading', gridname
        cfg = parse_config(gridname)
        aUV.append(cfg.uvb_tilt)
        name = os.path.join(simname, cfg.prefix + '_grid.sav.gz')
        print 'Reading', name
        M = loadobj(name)
        M = adict(M)
        # sanity check: U + nH (log quantities) is constant across the grid
        Uconst = (M.U + M.nH)[0]
        print 'Uconst', Uconst, cfg.uvb_tilt
        assert np.allclose(Uconst, M.U + M.nH)
        Models.append(M)

    ##########################################################################
    # Interpolate cloudy grids onto a finer scale for plotting and
    # likelihood calculation
    ##########################################################################

    # map an ionization-stage roman numeral to its index in M.N[atom]
    roman_map = {'I':0, 'II':1, 'III':2, 'IV':3, 'V':4, 'VI':5,
                 'VII':6, 'VIII':7, 'IX':8, 'X':9, '2':2}

    Ncloudy = {}
    Ncloudy_raw = {}
    print 'Interpolating...'
    for tr in trans + ['NH']:
        # axis order: NHI, nH, Z, aUV
        shape = len(M.NHI), len(M.nH), len(M.Z), len(aUV)
        Nvals = np.zeros(shape)
        if tr in ['CII*']:
            # excited-state columns are stored separately in Nex
            for i,M in enumerate(Models):
                Nvals[:,:,:,i] = M.Nex[tr][:,:,:]
        elif tr == 'NH':
            # total hydrogen column: HI + HII, combined in linear space
            for i,M in enumerate(Models):
                logNHI = M.N['H'][:,:,:,0]
                logNHII = M.N['H'][:,:,:,1]
                logNHtot = np.log10(10**logNHI + 10**logNHII)
                Nvals[:,:,:,i] = logNHtot
        else:
            atom, stage = split_trans_name(tr)
            ind = roman_map[stage]
            for i,M in enumerate(Models):
                Nvals[:,:,:,i] = M.N[atom][:,:,:,ind]

        # use ndimage.map_coordinates (which is spline interpolation)
        coord = M.NHI, M.nH, M.Z, aUV
        try:
            Ncloudy[tr] = MapCoord_Interpolator(Nvals, coord)
        except:
            import pdb; pdb.set_trace()

        Ncloudy_raw[tr] = Nvals

    print 'done'

    return Ncloudy, Ncloudy_raw, Models, np.array(aUV, np.float)
def model(par, for_plot=False):
    """Interpolate the cloudy models at one parameter position and
    apply the relative abundance multipliers.

    par = all the ks then NHI, nH, Z
    or
    par = all the ks then NHI, nH, Z, aUV

    Returns
    -------
    Nmodel : ndarray
        The model log10 column density for each transition (tr_plot
        when for_plot is True, otherwise the fitted transitions).
    """
    transitions = tr_plot if for_plot else trans

    try:
        position = (par[IND_PAR['NHI']], par[IND_PAR['nH']],
                    par[IND_PAR['Z']], par[IND_PAR['aUV']])
    except:
        import pdb; pdb.set_trace()

    columns = []
    for transition in transitions:
        element, _ = split_trans_name(transition)
        logN = Ncloudy[transition](position)
        # shift by the relative-abundance multiplier for this element,
        # if one is being fitted
        if element in IND_KPAR:
            logN += par[IND_KPAR[element]]
        columns.append(logN)

    return np.array(columns)
def ln_pdf_siglohi(x, x0, siglo, sighi):
    """ln of the (unnormalised) pdf of an observation centred on x0
    with different high and low sigmas.

    Gaussian on either side of x0, with different widths:

        pdf = k * exp(-1/2 * ((x - x0)/sighi)**2)   for x > x0
              k * exp(-1/2 * ((x - x0)/siglo)**2)   for x <= x0

    where k = 1 / (sqrt(pi/2) * (sighi + siglo)).  The constant ln(k)
    term is dropped.  Scalar input gives a scalar back; array input
    gives an array.
    """
    #ln_k = -np.log(np.sqrt(0.5 * pi) * (sighi + siglo))
    values = np.atleast_1d(x)
    offset = values - x0
    # pick the per-element sigma by which side of x0 we are on
    sigma = np.where(offset > 0, sighi, siglo)
    result = -0.5 * (offset / sigma) ** 2  #+ ln_k
    return result[0] if result.shape == (1,) else result
def ln_pdf_uplim(x, x0, sighi):
    """ln(probability) for model values given an upper limit x0.

    Flat (ln p = 0) at or below the limit; falls off as a gaussian of
    sigma sighi above it.  With no lower limit the pdf cannot be
    normalised, so the constant term is dropped.
    """
    # if there is a lower limit too, this is the normalisation:
    #ln_k = -log(sqrt(0.5*pi) * sighi + (x0 - lolim))
    values = np.atleast_1d(x)
    excess = values - x0
    result = np.where(excess > 0,
                      -0.5 * (excess / sighi) ** 2,
                      0.0)  #+ ln_k
    return result[0] if result.shape == (1,) else result
def ln_pdf_lolim(x, x0, siglo):
    """ln(probability) for model values given a lower limit x0.

    Flat (ln p = 0) above the limit; falls off as a gaussian of sigma
    siglo at or below it.  With no upper limit the pdf cannot be
    normalised, so the constant term is dropped.
    """
    # if there is an upper limit too, this is the normalisation:
    #ln_k = -log(sqrt(0.5*pi) * siglo + (uplim - x0))
    values = np.atleast_1d(x)
    deficit = values - x0
    result = np.where(deficit > 0,
                      0.0,
                      -0.5 * (deficit / siglo) ** 2)  #+ ln_k
    return result[0] if result.shape == (1,) else result
def ln_likelihood(par, per_obs=False):
    """ Return the ln likelihood of the parameter vector `par`.

    Uses the module-level obs, trans, priors, P, IND_PAR and Ncloudy.

    if per_obs (default False), also return a separate likelihood for
    each observation.
    """
    # if we are outside the allowable parameter ranges, return 0
    # likelihood.
    #import pdb; pdb.set_trace()
    for i,p in enumerate(par):
        if not (P['min'][i] < p < P['max'][i]):
            return -np.inf

    # force the cloud thickness to be < 1 Mpc
    # (log10 thickness in cm = log10 NH - log10 nH)
    coord = par[IND_PAR['NHI']], par[IND_PAR['nH']], par[IND_PAR['Z']], \
            par[IND_PAR['aUV']]
    logNH = Ncloudy['NH'](coord)
    if (logNH - par[IND_PAR['nH']]) > log10_cm_per_Mpc:
        return -np.inf

    Nmodel = model(par)
    lnprobtot = np.zeros(np.asarray(par[0]).shape)

    # apply the priors; 'min X'/'max X' entries are hard limits handled
    # via P above, not gaussian priors
    for pname in priors:
        if pname.startswith('min ') or pname.startswith('max '):
            continue
        # only deals with two-sided gaussian priors at the moment
        pval, siglo, sighi = priors[pname]
        p = ln_pdf_siglohi(par[IND_PAR[pname]], pval, siglo, sighi)
        lnprobtot += p

    allprob = []
    for i,tr in enumerate(trans):
        Nobs, siglo, sighi = obs[tr]
        # a zero error flags a limit rather than a measurement
        if siglo == 0:
            #print(tr, 'lower limit')
            p = ln_pdf_lolim(Nmodel[i], Nobs, SIG_LIMIT)
            lnprobtot += p
            if per_obs:
                allprob.append(p)
        elif sighi == 0:
            #print(tr, 'upper limit')
            p = ln_pdf_uplim(Nmodel[i], Nobs, SIG_LIMIT)
            lnprobtot += p
            if per_obs:
                allprob.append(p)
        else:
            #print(tr)
            # impose a floor on the quoted observational errors
            siglo = max(siglo, MIN_SIG)
            sighi = max(sighi, MIN_SIG)
            p = ln_pdf_siglohi(Nmodel[i], Nobs, siglo, sighi)
            lnprobtot += p
            if per_obs:
                allprob.append(p)

    if per_obs:
        return lnprobtot, allprob
    else:
        return lnprobtot
def get_initial_positions(nwalkers):
    """ Generate an array of shape (nwalkers, npar) of starting
    positions for the emcee walkers.

    Parameters with a gaussian prior are drawn from that prior; all
    others are drawn uniformly between their allowed limits.
    """
    # Get initial parameter positions (guesses!) for each walker
    Npar = len(P['names'])
    # one possibility:
    # generate random values from a normal distribution with a 1
    # sigma width 5 times smaller than the limits for each parameter.
    #p0 = np.random.randn(nwalkers, Npar)
    #for i in range(Npar):
    #    p0[:, i] = P.true[i] + p0[:, i] * (P.max[i] - P.min[i]) / nsigma
    #    # clip so we are inside the parameter limits
    #    p0[:, i] = p0[:, i].clip(P.min[i], P.max[i])

    # another possibility:
    #
    # uniform distribution between parameter limits
    p0 = np.random.uniform(size=(nwalkers, Npar))
    p1 = np.random.normal(size=(nwalkers, Npar))
    for i in range(Npar):
        if P['names'][i] in priors:
            pval, siglo, sighi = priors[P['names'][i]]
            # gaussian centred on the prior value, using the mean of the
            # two sigmas as its width.
            # NOTE(review): these draws are not clipped to [min, max].
            p0[:, i] = pval + p1[:, i] * 0.5 * (siglo + sighi)
        else:
            p0[:, i] = P['min'][i] + p0[:, i] * (P['max'][i] - P['min'][i])

    return p0
def plot_model(pars):
    """ Plot the observed values and errors, along with the predicted
    model values.

    Parameters
    ----------
    pars : sequence of parameter vectors
        One model curve is drawn per vector; a single vector is drawn
        with a heavier line and point markers.

    Returns the matplotlib figure.
    """
    import matplotlib.pyplot as pl
    from barak.plot import draw_arrows, puttext
    fig = pl.figure(figsize=(6.4, 3.4))
    ax = fig.add_subplot(111)
    ms = 6
    # x position of each transition: ionization potential or plain index
    ipot = [get_ionization_energy(t) for t in tr_plot]
    for i,tr in enumerate(tr_plot):
        if use_ipot:
            ind = ipot[i]
        else:
            ind = i
        if tr in obs:
            # filled symbol for transitions used in the fit, open for
            # observed-but-excluded ones
            colour = 'k' if tr in trans else 'w'
            fmt = 'o' + colour
            val, siglo, sighi = obs[tr]
            if siglo == 0:
                # lower limit
                draw_arrows(ind, val, direction='up', ax=ax,lw=1)
                ax.plot(ind, val, fmt,ms=ms)
            elif sighi == 0:
                # upper limit
                draw_arrows(ind, val, direction='down', ax=ax,lw=1)
                ax.plot(ind, val, fmt,ms=ms)
            else:
                # measurement with (possibly asymmetric) error bar
                ax.plot([ind, ind], [val-siglo, val+sighi], 'k',lw=1)
                ax.plot(ind, val, fmt,ms=ms)
            ax.text(ind, val + 0.8, tr,
                    fontsize=10, ha='center')
        else:
            puttext(ind, 0.02, tr, ax=ax, xcoord='data',
                    fontsize=10, ha='center')

    puttext(0.9,0.1, 'Model', ax, color='r', ha='right')
    xvals = list(range(len(tr_plot)))
    #print np.asarray(pars).shape
    for par in pars:
        Nmodel = model(par, for_plot=True)
        if use_ipot:
            if len(pars) == 1:
                ax.plot(ipot, Nmodel, 'r.-', lw=1, zorder=0)
            else:
                ax.plot(ipot, Nmodel, 'r-', lw=0.2, zorder=0)
        else:
            if len(pars) == 1:
                ax.plot(xvals, Nmodel, 'r.-', lw=1, zorder=0)
            else:
                ax.plot(xvals, Nmodel, 'r-', lw=0.2, zorder=0)

    if use_ipot:
        ax.set_xlabel('Ionization potential (eV)')
        ax.set_xlim(ipot[0]-1, ipot[-1] + 1)
    else:
        ax.set_xlim(-0.5, xvals[-1] + 0.5)
        ax.set_xticks([])

    ax.set_ylabel(r'$\log_{10}\ N$')
    fig.tight_layout()
    #return fig, ax
    return fig
def print_par(par):
    """ Print the maximum likelihood parameters and their
    uncertainties.

    Writes one record per parameter to fig/pars.txt.
    NOTE(review): the `par` argument is never used; all values come
    from the module-level P dict.
    """
    rec = []
    for i in range(len(P['names'])):
        p = P['ml'][i]
        pmed = P['median'][i]
        m1 = P['p1sig'][i]
        # central value and symmetric 1-sigma error derived from the
        # marginalised 1-sigma interval
        p0 = 0.5 * (m1[0] + m1[1])
        sig1 = 0.5 * (m1[1] - m1[0])
        m2 = P['p2sig'][i]
        j1 = P['p1sig_joint'][i]
        j2 = P['p2sig_joint'][i]
        rec.append( (P['names'][i], p0, sig1, m1[0], m1[1],
                     m2[0], m2[1], j1[0], j1[1],
                     j2[0], j2[1], pmed, p) )

    names = 'name,cen,sig,m1l,m1u,m2l,m2u,j1l,j1u,j2l,j2u,med,ml'
    rec = np.rec.fromrecords(rec, names=names)

    # column descriptions written as a header into the output file
    hd = """\
# name : parameter name
# cen : central value (half way between the marginalised 1 sigma region)
# sig : 1 sigma error around central value
# m1l : 1 sigma lower level (marginalised over all other parameters)
# m1u : 1 sigma upper level (marginalised)
# m2l : 2 sigma lower level (marginalised)
# m2u : 2 sigma upper level (marginalised)
# j1l : 1 sigma lower level (joint with all other parameters)
# j1u : 1 sigma upper level (joint)
# j2l : 2 sigma lower level (joint)
# j2u : 2 sigma upper level (joint)
# ml : maximum likelihood value
# med : median value
"""
    from barak.io import writetxt
    writetxt('fig/pars.txt', rec, header=hd, fmt_float='.4g', overwrite=1)
if 1:
    ##################################################
    # Read configuration file, set global variables
    ##################################################
    cfgname = 'model.cfg'
    # we only need the cfg file for the prefix of the cloudy runs and
    # the name of the file with the observed column densities.
    opt = parse_config(cfgname)
    testing = 0

    # floor on the observational errors, and the effective sigma used
    # for upper/lower limits in the likelihood
    MIN_SIG = float(opt['min_sig'])
    SIG_LIMIT = 0.05

    # H is an honorary alpha element here; it just means no offset is
    # added.
    ALPHA_ELEMENTS = 'Si O Mg S Ca Ne Ti H'.split()
    FEPEAK_ELEMENTS = 'Fe Cr Mn Co Ni'.split()

    # one simulation directory per UV slope (glob pattern from the cfg)
    simnames = sorted(glob(opt['simname']))
    assert len(simnames) > 0
if 1:
    ##################################################
    # Read the observed column densities and errors
    ##################################################
    obs = read_Nvals('observed_logN')
    print 'Observed transitions'
    print obs
    trans_obs = sorted(obs)
    # don't do DI or HI
    trans_obs.remove('HI')
    if 'DI' in trans_obs:
        trans_obs.remove('DI')

    priors = read_Nvals('priors')
    # HI enters the fit through a prior on NHI rather than as a normal
    # transition
    if 'NHI' not in priors:
        priors['NHI'] = obs['HI']

    print "Priors found:"
    print priors
    trans = list(trans_obs)

    # transitions listed in the 'dont_use' file (one per line, '#' for
    # comments) are excluded from the fit
    fh = open('dont_use')
    dont_use =[]
    for row in fh:
        row = row.strip()
        if row == '' or row.startswith('#'):
            continue
        dont_use.append(row)
    fh.close()
    print "Don't use these transitions in fitting:"
    print dont_use
    for tr in dont_use:
        if tr in trans:
            trans.remove(tr)

    print 'Using these transitions'
    print trans
if 1:
    ################################################################
    # Read the cloudy grids and make the interpolators
    ################################################################
    tr_plot = ('MgI CaII OI OII OIII OIV MgII FeII FeIII SiII AlII CII AlIII '
               'NI NII NIII NIV SiIII SII SIV SV '
               'SiIV CIII CIV NV OVI').split()

    # order the plotted transitions by ionization potential
    ipot = [get_ionization_energy(t) for t in tr_plot]
    isort = np.argsort(ipot)
    tr_plot = [tr_plot[i] for i in isort]
    assert all(tr in tr_plot for tr in trans)

    Ncloudy, Ncloudy_raw, Models, aUV = make_interpolators_uvbtilt(
        tr_plot, simnames)

    M = Models[0]

    if 0 and testing:
        # check they look ok
        # NOTE(review): this debug branch is unreachable (0 and ...) and
        # refers to `pl`, which is not imported at module level.
        nrows, ncols = get_nrows_ncols(len(trans) * 2)
        fig = pl.figure(figsize=(8.4, 8.4))
        Z = np.linspace(M.Z[0], M.Z[-1], 100)
        nH = np.linspace(M.nH[0], M.nH[-1], 101)
        nH1, Z1 = np.meshgrid(nH, Z, indexing='ij')
        for i,tr in enumerate(trans):
            ax0 = fig.add_subplot(nrows, ncols, 2*i + 1)
            ax1 = fig.add_subplot(nrows, ncols, 2*i + 2)
            arrplot(Ncloudy_raw[tr].T, x=M.nH, y=M.Z, ax=ax0, colorbar=0)
            z = Ncloudy[tr]((nH1, Z1))
            arrplot(z.T, x=nH, y=Z, ax=ax1, colorbar=0)
            ax0.set_title(tr)
        pl.show()
if 1:
    ######################################################
    # Work out which parameters we're estimating
    ######################################################
    vals = []
    # Only estimate the multipliers we can fit for, based on the observed
    # transitions
    atoms = set(split_trans_name(tr)[0] for tr in trans)
    #if any(atom in ALPHA_ELEMENTS for atom in atoms):
    #    vals.append( ('k_alpha', -1, 1) )
    if 'C' in atoms and 'k_C' not in dont_use:
        vals.append( ('k_C', priors['min k_C'], priors['max k_C']) )
    if 'Al' in atoms and 'k_Al' not in dont_use:
        vals.append( ('k_Al', priors['min k_Al'], priors['max k_Al']) )
    if 'N' in atoms and 'k_N' not in dont_use:
        vals.append( ('k_N' , priors['min k_N'], priors['max k_N']) )
    if any(atom in FEPEAK_ELEMENTS for atom in atoms) \
           and 'k_Fe' not in dont_use:
        vals.append( ('k_Fe', priors['min k_Fe'], priors['max k_Fe']) )

    # These limits will be set later on, based on the grids used to generate
    # the cloudy models.  A 'min X'/'max X' entry in the priors file
    # overrides the grid edge.
    vmin = M['NHI'][0] if 'min NHI' not in priors else priors['min NHI']
    vmax = M['NHI'][-1] if 'max NHI' not in priors else priors['max NHI']
    vals.append( ('NHI', vmin, vmax) )
    vmin = M['nH'][0] if 'min nH' not in priors else priors['min nH']
    vmax = M['nH'][-1] if 'max nH' not in priors else priors['max nH']
    vals.append( ('nH', vmin, vmax) )
    vmin = M['Z'][0] if 'min Z' not in priors else priors['min Z']
    vmax = M['Z'][-1] if 'max Z' not in priors else priors['max Z']
    vals.append( ('Z', vmin, vmax) )
    vmin = aUV[0] if 'min aUV' not in priors else priors['min aUV']
    vmax = aUV[-1] if 'max aUV' not in priors else priors['max aUV']
    vals.append( ('aUV', vmin, vmax) )

    print 'min max priors:'
    print zip(*vals)
    P = {}
    P['names'], P['min'], P['max'] = zip(*vals)
    print P

    # dictionary that maps an input atom to the k parameter index,
    # used in model().
    IND_KPAR = {}
    # dictionary mapping parameter names to indices also used in
    # model()
    IND_PAR = {}

    # we take alpha elements to define the metallicity.
    #if 'k_alpha' in P['names']:
    #    i = P['names'].index('k_alpha')
    #    IND_KPAR.update(Si=i, O=i, Mg=i, S=i, Ca=i, Ne=i, Ar=i, Ti=i)

    # Fe peak elements
    if 'k_Fe' in P['names']:
        i = P['names'].index('k_Fe')
        IND_KPAR.update(Fe=i, Cr=i, Mn=i, Co=i, Ni=i)
        IND_PAR.update(k_Fe=i)
    if 'k_C' in P['names']:
        i = P['names'].index('k_C')
        IND_KPAR['C'] = i
        IND_PAR.update(k_C=i)
    if 'k_N' in P['names']:
        i = P['names'].index('k_N')
        IND_KPAR['N'] = i
        IND_PAR.update(k_N=i)
    if 'k_Al' in P['names']:
        i = P['names'].index('k_Al')
        IND_KPAR['Al'] = i
        IND_PAR.update(k_Al=i)

    IND_PAR.update({'Z' : P['names'].index('Z'),
                    'nH' : P['names'].index('nH'),
                    'NHI' : P['names'].index('NHI')
                    })
    if 'aUV' in P['names']:
        IND_PAR['aUV'] = P['names'].index('aUV')
if 1 and testing:
    # test likelihood (no k or uvtilt)
    # NOTE(review): skipped while testing = 0; also uses `pl`, which is
    # only imported locally inside plot_model -- would need a module
    # level import to run.
    Z = np.linspace(M.Z[0], M.Z[-1], 100)
    nH = np.linspace(M.nH[0], M.nH[-1], 101)
    NHI = np.linspace(M.NHI[0], M.NHI[-1], 101)
    Z1, nH1 = np.meshgrid(Z, nH)
    lnprob, alllnprob = ln_likelihood((Z1,nH1,0,0,0,0,0), per_obs=1)
    nrows, ncols = get_nrows_ncols(len(trans))
    fig = pl.figure(figsize=(8.4, 8.4))
    for i,tr in enumerate(trans):
        ax = fig.add_subplot(nrows, ncols, i + 1)
        c0 = alllnprob[i] < 0
        # choose a colour floor from the median of the negative values
        if c0.sum():
            vmin = np.percentile(alllnprob[i][c0], 50)
        else:
            vmin = -0.1
        arrplot(alllnprob[i].T, x=nH, y=Z, ax=ax, colorbar=0,vmin=vmin)
        ax.set_title(tr)

if 1 and testing:
    # test total
    fig = pl.figure(figsize=(4.4, 4.4))
    ax = fig.add_subplot(111)
    c0 = alllnprob[i] < 0
    vmin = np.percentile(alllnprob[i][c0], 50)
    arrplot(lnprob.T, x=nH, y=Z, ax=ax, colorbar=0, vmin=vmin)
    pl.show()

if 1 and testing:
    # check the interpolation is working by testing a couple of points
    # from the input grid.
    # NOTE(review): `plot` is undefined at module level (pyplot is not
    # imported here).
    M = Models[0]
    alpha = aUV[0]
    plot(M.NHI, M.N['C'][:,4,4,1], 'o-')
    # see which values we're using
    print M.nH[4], M.Z[4]
    coord = 0,0,0,14, M.nH[4], M.Z[4], alpha; model(coord)[2]
| mit |
mne-tools/mne-tools.github.io | 0.19/_downloads/8ed64d7c92012e6fcb6501cd8cdb8d25/plot_40_sensor_locations.py | 1 | 11176 | """
.. _tut-sensor-locations:
Working with sensor locations
=============================
This tutorial describes how to read and plot sensor locations, and how
the physical location of sensors is handled in MNE-Python.
.. contents:: Page contents
:local:
:depth: 2
As usual we'll start by importing the modules we need and loading some
:ref:`example data <sample-dataset>`:
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
import mne
# Load the MNE "sample" dataset (downloaded on first use) with the data
# read into memory.
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                    'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, preload=True, verbose=False)
###############################################################################
# About montages and layouts
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# MNE-Python comes pre-loaded with information about the sensor positions of
# many MEG and EEG systems. This information is stored in *layout files* and
# *montages*. :class:`Layouts <mne.channels.Layout>` give sensor positions in 2
# dimensions (defined by ``x``, ``y``, ``width``, and ``height`` values for
# each sensor), and are primarily used for illustrative purposes (i.e., making
# diagrams of approximate sensor positions in top-down diagrams of the head).
# In contrast, :class:`montages <mne.channels.DigMontage>` contain sensor
# positions in 3D (``x``, ``y``, ``z``, in meters). Many layout and montage
# files are included during MNE-Python installation, and are stored in your
# ``mne-python`` directory, in the :file:`mne/channels/data/layouts` and
# :file:`mne/channels/data/montages` folders, respectively:
# List the layout and montage files bundled with the installed MNE
# package (found relative to mne.__file__).
data_dir = os.path.join(os.path.dirname(mne.__file__), 'channels', 'data')
for subfolder in ['layouts', 'montages']:
    print('\nBUILT-IN {} FILES'.format(subfolder[:-1].upper()))
    print('======================')
    print(sorted(os.listdir(os.path.join(data_dir, subfolder))))
###############################################################################
# .. sidebar:: Computing sensor locations
#
# If you are interested in how standard ("idealized") EEG sensor positions
# are computed on a spherical head model, the `eeg_positions`_ repository
# provides code and documentation to this end.
#
# As you may be able to tell from the filenames shown above, the included
# montage files are all for EEG systems. These are *idealized* sensor positions
# based on a spherical head model. Montage files for MEG systems are not
# provided because the 3D coordinates of MEG sensors are included in the raw
# recordings from MEG systems, and are automatically stored in the ``info``
# attribute of the :class:`~mne.io.Raw` file upon loading. In contrast, layout
# files *are* included for MEG systems (to facilitate easy plotting of MEG
# sensor location diagrams).
#
# You may also have noticed that the file formats and filename extensions of
# layout and montage files vary considerably. This reflects different
# manufacturers' conventions; to simplify this, the montage and layout loading
# functions in MNE-Python take the filename *without its extension* so you
# don't have to keep track of which file format is used by which manufacturer.
# Examples of this can be seen in the following sections.
#
# If you have digitized the locations of EEG sensors on the scalp during your
# recording session (e.g., with a Polhemus Fastrak digitizer), these can be
# loaded in MNE-Python as :class:`~mne.channels.DigMontage` objects; see
# :ref:`reading-dig-montages` (below).
#
#
# Working with layout files
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# To load a layout file, use the :func:`mne.channels.read_layout`
# function, and provide the filename *without* its file extension. You can then
# visualize the layout using its :meth:`~mne.channels.Layout.plot` method, or
# (equivalently) by passing it to :func:`mne.viz.plot_layout`:
# Load a built-in layout by name (no file extension) and plot it.
biosemi_layout = mne.channels.read_layout('biosemi')
biosemi_layout.plot()  # same result as: mne.viz.plot_layout(biosemi_layout)
###############################################################################
# Similar to the ``picks`` argument for selecting channels from
# :class:`~mne.io.Raw` objects, the :meth:`~mne.channels.Layout.plot` method of
# :class:`~mne.channels.Layout` objects also has a ``picks`` argument. However,
# because layouts only contain information about sensor name and location (not
# sensor type), the :meth:`~mne.channels.Layout.plot` method only allows
# picking channels by index (not by name or by type). Here we find the indices
# we want using :func:`numpy.where`; selection by name or type is possible via
# :func:`mne.pick_channels` or :func:`mne.pick_types`.
# Layout.plot only accepts channel *indices*, so find the midline
# electrodes (names ending in 'z') by index.
midline = np.where([name.endswith('z') for name in biosemi_layout.names])[0]
biosemi_layout.plot(picks=midline)
###############################################################################
# If you're working with a :class:`~mne.io.Raw` object that already has sensor
# positions incorporated, you can create a :class:`~mne.channels.Layout` object
# with either the :func:`mne.channels.make_eeg_layout` function or
# (equivalently) the :func:`mne.channels.find_layout` function.
# Build a Layout from the sensor positions already stored in raw.info.
layout_from_raw = mne.channels.make_eeg_layout(raw.info)
# same result as: mne.channels.find_layout(raw.info, ch_type='eeg')
layout_from_raw.plot()
###############################################################################
# .. note::
#
# There is no corresponding ``make_meg_layout`` function because sensor
# locations are fixed in a MEG system (unlike in EEG, where the sensor caps
# deform to fit each subject's head). Thus MEG layouts are consistent for a
# given system and you can simply load them with
# :func:`mne.channels.read_layout`, or use :func:`mne.channels.find_layout`
# with the ``ch_type`` parameter, as shown above for EEG.
#
# All :class:`~mne.channels.Layout` objects have a
# :meth:`~mne.channels.Layout.save` method that allows writing layouts to disk,
# in either :file:`.lout` or :file:`.lay` format (which format gets written is
# inferred from the file extension you pass to the method's ``fname``
# parameter). The choice between :file:`.lout` and :file:`.lay` format only
# matters if you need to load the layout file in some other software
# (MNE-Python can read either format equally well).
#
#
# Working with montage files
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Built-in montages are loaded and plotted in a very similar way to layouts.
# However, the :meth:`~mne.channels.DigMontage.plot` method of
# :class:`~mne.channels.DigMontage` objects has some additional parameters,
# such as whether to display channel names or just points (the ``show_names``
# parameter) and whether to display sensor positions in 3D or as a 2D topomap
# (the ``kind`` parameter):
# Standard 10-20 montage, plotted as a 2D topomap and as 3D positions.
ten_twenty_montage = mne.channels.make_standard_montage('standard_1020')
ten_twenty_montage.plot(show_names=False)
fig = ten_twenty_montage.plot(kind='3d')
fig.gca().view_init(azim=70, elev=15)  # rotate the 3D axes for a clearer view
###############################################################################
# Similar functionality is also available with the
# :meth:`~mne.io.Raw.plot_sensors` method of :class:`~mne.io.Raw` objects,
# again with the option to plot in either 2D or 3D.
# :meth:`~mne.io.Raw.plot_sensors` also allows channel selection by type, can
# color-code channels in various ways (by default, channels listed in
# ``raw.info['bads']`` will be plotted in red), and allows drawing into an
# existing matplotlib ``axes`` object (so the channel positions can easily be
# made as a subplot in a multi-panel figure):
# Draw sensor positions into an existing figure: 2D topomap and 3D view
# side by side.
fig = plt.figure()
ax2d = fig.add_subplot(121)
ax3d = fig.add_subplot(122, projection='3d')
raw.plot_sensors(ch_type='eeg', axes=ax2d)
raw.plot_sensors(ch_type='eeg', axes=ax3d, kind='3d')
ax3d.view_init(azim=70, elev=15)
###############################################################################
# .. _reading-dig-montages:
#
# Reading sensor digitization files
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# It's probably evident from the 2D topomap above that there is some
# irregularity in the EEG sensor positions in the :ref:`sample dataset
# <sample-dataset>` — this is because the sensor positions in that dataset are
# digitizations of the sensor positions on an actual subject's head. Depending
# on what system was used to scan the positions one can use different
# reading functions (:func:`mne.channels.read_dig_captrack` for
# a CapTrak Brain Products system, :func:`mne.channels.read_dig_egi`
# for an EGI system, :func:`mne.channels.read_dig_polhemus_isotrak` for
# Polhemus ISOTRAK, :func:`mne.channels.read_dig_fif` to read from
# a `.fif` file or :func:`mne.channels.read_dig_hpts` to read MNE `.hpts`
# files. The read :class:`montage <mne.channels.DigMontage>` can then be added
# to :class:`~mne.io.Raw` objects with the :meth:`~mne.io.Raw.set_montage`
# method; in the sample data this was done prior to saving the
# :class:`~mne.io.Raw` object to disk, so the sensor positions are already
# incorporated into the ``info`` attribute of the :class:`~mne.io.Raw` object.
# See the documentation of the reading functions and
# :meth:`~mne.io.Raw.set_montage` for further details. Once loaded,
# locations can be plotted with :meth:`~mne.channels.DigMontage.plot` and
# saved with :meth:`~mne.channels.DigMontage.save`, like when working
# with a standard montage.
#
# The possibilities to read in digitized montage files are summarized
# in :ref:`dig-formats`.
#
# .. note::
#
# When setting a montage with :meth:`~mne.io.Raw.set_montage`
# the measurement info is updated at two places (the `chs`
# and `dig` entries are updated). See :ref:`tut-info-class`.
# `dig` will potentially contain more than channel locations,
# such as HPI coils, head shape points, or fiducial 3D coordinates.
#
# Rendering sensor position with mayavi
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# It is also possible to render an image of a MEG sensor helmet in 3D, using
# mayavi instead of matplotlib, by calling the :func:`mne.viz.plot_alignment`
# function:
# Render the MEG helmet and sensors in 3D with mayavi (no MRI surfaces,
# no digitization points, no EEG).
fig = mne.viz.plot_alignment(raw.info, trans=None, dig=False, eeg=False,
                             surfaces=[], meg=['helmet', 'sensors'],
                             coord_frame='meg')
mne.viz.set_3d_view(fig, azimuth=50, elevation=90, distance=0.5)
###############################################################################
# :func:`~mne.viz.plot_alignment` requires an :class:`~mne.Info` object, and
# can also render MRI surfaces of the scalp, skull, and brain (by passing
# keywords like ``'head'``, ``'outer_skull'``, or ``'brain'`` to the
# ``surfaces`` parameter) making it useful for :ref:`assessing coordinate frame
# transformations <plot_source_alignment>`. For examples of various uses of
# :func:`~mne.viz.plot_alignment`, see
# :doc:`../../auto_examples/visualization/plot_montage`,
# :doc:`../../auto_examples/visualization/plot_eeg_on_scalp`, and
# :doc:`../../auto_examples/visualization/plot_meg_sensors`.
#
# .. LINKS
#
# .. _`eeg_positions`: https://github.com/sappelhoff/eeg_positions
| bsd-3-clause |
rohit12/opencog | opencog/python/spatiotemporal/temporal_events/animation.py | 34 | 4896 | from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
from numpy.core.multiarray import zeros
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from spatiotemporal.time_intervals import TimeInterval
from matplotlib import pyplot as plt
from matplotlib import animation
__author__ = 'keyvan'
# x positions and initial (zero) heights shared by the 13 relation bars
# drawn in Animation.  NOTE: xrange is Python 2 only.
x_axis = xrange(13)
zeros_13 = zeros(13)
class Animation(object):
def __init__(self, event_a, event_b, event_c, plt=plt):
self.event_a = event_a
self.event_c = event_c
self.event_b_length_beginning = event_b.beginning - event_b.a
self.event_b_length_middle = self.event_b_length_beginning + event_b.ending - event_b.beginning
self.event_b_length_total = event_b.b - event_b.ending
self.plt = plt
self.fig = plt.figure(1)
self.ax_a_b = self.fig.add_subplot(4, 1, 1)
self.ax_b_c = self.fig.add_subplot(4, 1, 2)
self.ax_a_c = self.fig.add_subplot(4, 1, 3)
self.ax_relations = self.fig.add_subplot(4, 1, 4)
self.ax_a_b.set_xlim(0, 13)
self.ax_a_b.set_ylim(0, 1)
self.ax_b_c.set_xlim(0, 13)
self.ax_b_c.set_ylim(0, 1)
self.ax_a_c.set_xlim(0, 13)
self.ax_a_c.set_ylim(0, 1)
self.rects_a_b = self.ax_a_b.bar(x_axis, zeros_13)
self.rects_b_c = self.ax_b_c.bar(x_axis, zeros_13)
self.rects_a_c = self.ax_a_c.bar(x_axis, zeros_13)
self.line_a = Line2D([], [])
self.line_b = Line2D([], [])
self.line_c = Line2D([], [])
self.ax_relations.add_line(self.line_a)
self.ax_relations.add_line(self.line_b)
self.ax_relations.add_line(self.line_c)
a = min(event_a.a, event_c.a) - self.event_b_length_total
b = max(event_a.b, event_c.b)
self.ax_relations.set_xlim(a, b + self.event_b_length_total)
self.ax_relations.set_ylim(0, 1.1)
# self.interval = TimeInterval(a, b, 150)
self.interval = TimeInterval(a, b, 2)
self.ax_a_b.xaxis.set_minor_formatter(self.ax_a_b.xaxis.get_major_formatter())
self.ax_a_b.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_b.xaxis.set_ticklabels('poDedOP')
self.ax_a_b.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_b_c.xaxis.set_minor_formatter(self.ax_b_c.xaxis.get_major_formatter())
self.ax_b_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_b_c.xaxis.set_ticklabels('poDedOP')
self.ax_b_c.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_a_c.xaxis.set_minor_formatter(self.ax_a_c.xaxis.get_major_formatter())
self.ax_a_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_c.xaxis.set_ticklabels('poDedOP')
self.ax_a_c.xaxis.set_ticklabels('mFsSfM', minor=True)
def init(self):
artists = []
self.line_a.set_data(self.event_a, self.event_a.membership_function)
self.line_b.set_data([], [])
self.line_c.set_data(self.event_c, self.event_c.membership_function)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
for rect, h in zip(self.rects_a_b, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_a_c, (self.event_a * self.event_c).to_list()):
rect.set_height(h)
artists.append(rect)
return artists
def animate(self, t):
interval = self.interval
B = TemporalEventTrapezium(interval[t], interval[t] + self.event_b_length_total,
interval[t] + self.event_b_length_beginning,
interval[t] + self.event_b_length_middle)
plt.figure()
B.plot().show()
a_b = (self.event_a * B).to_list()
b_c = (B * self.event_c).to_list()
self.line_b.set_data(B, B.membership_function)
artists = []
for rect, h in zip(self.rects_a_b, a_b):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, b_c):
rect.set_height(h)
artists.append(rect)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
return artists
    def show(self):
        """Build the FuncAnimation and display the figure."""
        # One frame per interval step, minus one so interval[t] stays in range.
        fr = len(self.interval) - 1
        # NOTE(review): 'anim' must stay referenced while the window is open,
        # otherwise the animation object is garbage-collected mid-run -- the
        # seemingly-unused local is load-bearing.  'interval=fr' reuses the
        # frame count as the inter-frame delay in milliseconds; presumably a
        # shortcut -- confirm it is intentional.
        anim = animation.FuncAnimation(self.fig, self.animate, init_func=self.init,
                                       frames=fr, interval=fr, blit=True)
        # NOTE(review): 'self.plt' is not assigned anywhere visible in this
        # chunk; this looks like it should be the module-level 'plt' -- verify.
        self.plt.show()
if __name__ == '__main__':
    # Demo: three overlapping trapezium events; A and C stay fixed while B
    # sweeps across the interval.  Uncomment anim.show() to run interactively.
    anim = Animation(TemporalEventTrapezium(4, 8, 5, 7),
                     TemporalEventTrapezium(0, 10, 6, 9),
                     TemporalEventTrapezium(0.5, 11, 1, 3))
    # anim.show()
| agpl-3.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/cluster/spectral.py | 5 | 19195 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.

    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.

    copy : boolean, optional, default: True
        Whether to copy vectors, or perform in-place normalization.

    max_svd_restarts : int, optional, default: 30
        Maximum number of attempts to restart SVD if convergence fails

    n_iter_max : int, optional, default: 20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached

    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----

    The eigenvector embedding is used to iteratively search for the
    closest discrete partition.  First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated.  Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated.  These two
    calculations are performed until convergence.  The discrete partition
    matrix is returned as the clustering solution.  Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.

    """

    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError

    random_state = check_random_state(random_state)

    vectors = as_float_array(vectors, copy=copy)

    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape

    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with respect
    # to the first element.  This may have to do with constraining the
    # eigenvectors to lie in a specific quadrant to make the discretization
    # search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])

    # Normalize the rows of the eigenvectors.  Samples should lie on the unit
    # hypersphere centered at the origin.  This transforms the samples in the
    # embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]

    svd_restarts = 0
    has_converged = False

    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:

        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T

        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T

        last_objective_value = 0.0
        n_iter = 0

        while not has_converged:
            n_iter += 1

            t_discrete = np.dot(vectors, rotation)

            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))

            t_svd = vectors_discrete.T * vectors

            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # BUG FIX: the restart counter must only advance when the SVD
                # actually fails.  Previously it was incremented on every
                # *successful* iteration, so the restart budget was exhausted
                # by normal progress and a later genuine failure raised
                # instead of restarting with a fresh random rotation.
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break

            # Normalized-cut objective; convergence when it stops improving
            # beyond machine precision or the iteration budget is spent.
            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)

    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        eigen_tol=0.0, assign_labels='kmeans'):
    """Cluster samples via a spectral embedding of the affinity matrix.

    The affinity matrix is projected onto the first eigenvectors of its
    normalized Laplacian, and discrete cluster labels are then recovered
    from that embedding.  When ``affinity`` is the adjacency matrix of a
    graph this solves the normalized graph-cut problem, which makes the
    method effective for highly non-convex cluster shapes (e.g. nested
    circles in 2D) where centroid-based descriptions fail.

    Read more in the :ref:`User Guide <spectral_clustering>`.

    Parameters
    -----------
    affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
        The affinity matrix describing the relationship of the samples to
        embed.  **Must be symmetric**.  Typical choices: graph adjacency
        matrix, heat kernel of the pairwise distance matrix, or a symmetric
        k-nearest-neighbours connectivity matrix.

    n_clusters : integer, optional
        Number of clusters to extract.

    n_components : integer, optional, default is n_clusters
        Number of eigen vectors to use for the spectral embedding.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use.  AMG requires pyamg
        to be installed; it can be faster on very large, sparse problems
        but may also lead to instabilities.

    random_state : int, RandomState instance or None, optional, default: None
        Pseudo random number generator used for the lobpcg initialization
        (when ``eigen_solver == 'amg'``) and for the k-means initialization.

    n_init : int, optional, default: 10
        Number of times k-means is run with different centroid seeds; the
        best output in terms of inertia is kept.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for the eigendecomposition of the Laplacian when
        using the arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        Strategy used to assign labels in the embedding space.  k-means is
        the popular choice but is sensitive to initialization; the
        discretization approach is less so (see the 'Multiclass spectral
        clustering' reference below).

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    ------
    The graph should contain only one connected component; otherwise the
    results make little sense.  For k=2 this solves the normalized cut,
    i.e. it is a normalized spectral clustering.
    """
    if assign_labels not in ('kmeans', 'discretize'):
        raise ValueError("The 'assign_labels' parameter should be "
                         "'kmeans' or 'discretize', but '%s' was given"
                         % assign_labels)

    random_state = check_random_state(random_state)
    if n_components is None:
        n_components = n_clusters

    # Embed the samples using the first eigenvectors of the normalized
    # graph Laplacian.
    embedding = spectral_embedding(affinity, n_components=n_components,
                                   eigen_solver=eigen_solver,
                                   random_state=random_state,
                                   eigen_tol=eigen_tol, drop_first=False)

    # Recover discrete cluster labels from the continuous embedding.
    if assign_labels == 'discretize':
        labels = discretize(embedding, random_state=random_state)
    else:
        _, labels, _ = k_means(embedding, n_clusters,
                               random_state=random_state, n_init=n_init)
    return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
    """Apply clustering to a projection to the normalized laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    When calling ``fit``, an affinity matrix is constructed using either
    kernel function such the Gaussian (aka RBF) kernel of the euclidean
    distanced ``d(X, X)``::

            np.exp(-gamma * d(X,X) ** 2)

    or a k-nearest neighbors connectivity matrix.

    Alternatively, using ``precomputed``, a user-provided affinity
    matrix can be used.

    Read more in the :ref:`User Guide <spectral_clustering>`.

    Parameters
    -----------
    n_clusters : integer, optional
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state : int, RandomState instance or None, optional, default: None
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by
        the K-Means initialization. If int, random_state is the seed used by
        the random number generator; If RandomState instance, random_state is
        the random number generator; If None, the random number generator is
        the RandomState instance used by `np.random`.

    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    gamma : float, default=1.0
        Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.
        Ignored for ``affinity='nearest_neighbors'``.

    affinity : string, array-like or callable, default 'rbf'
        If a string, this may be one of 'nearest_neighbors', 'precomputed',
        'rbf' or one of the kernels supported by
        `sklearn.metrics.pairwise_kernels`.

        Only kernels that produce similarity scores (non-negative values that
        increase with similarity) should be used. This property is not checked
        by the clustering algorithm.

    n_neighbors : integer
        Number of neighbors to use when constructing the affinity matrix using
        the nearest neighbors method. Ignored for ``affinity='rbf'``.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the laplacian
        embedding. k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another approach
        which is less sensitive to random initialization.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    kernel_params : dictionary of string to any, optional
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Attributes
    ----------
    affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only if after calling
        ``fit``.

    labels_ :
        Labels of each point

    Notes
    -----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values means
    very dissimilar elements, it can be transformed in a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::

        np.exp(- dist_matrix ** 2 / (2. * delta ** 2))

    Where ``delta`` is a free parameter representing the width of the Gaussian
    kernel.

    Another alternative is to take a symmetric version of the k
    nearest neighbors connectivity matrix of the points.

    If the pyamg package is installed, it is used: this greatly
    speeds up computation.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    """

    def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
                 n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
                 eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
                 kernel_params=None, n_jobs=1):
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params
        self.n_jobs = n_jobs

    def fit(self, X, y=None):
        """Creates an affinity matrix for X using the selected affinity,
        then applies spectral clustering to this affinity matrix.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if affinity==`precomputed`, a precomputed affinity
            matrix of shape (n_samples, n_samples)

        y : Ignored

        Returns
        -------
        self : SpectralClustering
            The fitted estimator (``affinity_matrix_`` and ``labels_`` set).
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=np.float64)
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            # BUG FIX: the original concatenated string literals were missing
            # a space, producing the garbled message "``fit``now constructs".
            warnings.warn("The spectral clustering API has changed. ``fit`` "
                          "now constructs an affinity matrix from data. To "
                          "use a custom affinity matrix, "
                          "set ``affinity=precomputed``.")

        if self.affinity == 'nearest_neighbors':
            # Symmetrize the kNN connectivity graph to get a valid affinity.
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
                                            include_self=True,
                                            n_jobs=self.n_jobs)
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed':
            self.affinity_matrix_ = X
        else:
            params = self.kernel_params
            if params is None:
                params = {}
            if not callable(self.affinity):
                params['gamma'] = self.gamma
                params['degree'] = self.degree
                params['coef0'] = self.coef0
            self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
                                                     filter_params=True,
                                                     **params)

        random_state = check_random_state(self.random_state)
        self.labels_ = spectral_clustering(self.affinity_matrix_,
                                           n_clusters=self.n_clusters,
                                           eigen_solver=self.eigen_solver,
                                           random_state=random_state,
                                           n_init=self.n_init,
                                           eigen_tol=self.eigen_tol,
                                           assign_labels=self.assign_labels)
        return self

    @property
    def _pairwise(self):
        # Tells sklearn utilities that X is a square (sample x sample) matrix
        # when a precomputed affinity is used.
        return self.affinity == "precomputed"
| mit |
TomAugspurger/pandas | pandas/tests/tseries/holiday/test_holiday.py | 2 | 8620 | from datetime import datetime
import pytest
from pytz import utc
import pandas._testing as tm
from pandas.tseries.holiday import (
MO,
SA,
AbstractHolidayCalendar,
DateOffset,
EasterMonday,
GoodFriday,
Holiday,
HolidayCalendarFactory,
Timestamp,
USColumbusDay,
USLaborDay,
USMartinLutherKingJr,
USMemorialDay,
USPresidentsDay,
USThanksgivingDay,
get_calendar,
next_monday,
)
def _check_holiday_results(holiday, start, end, expected):
    """
    Check that the dates for a given holiday match in date and timezone.

    Parameters
    ----------
    holiday : Holiday
        The holiday to check.
    start : datetime-like
        The start date of range in which to collect dates for a given holiday.
    end : datetime-like
        The end date of range in which to collect dates for a given holiday.
    expected : list
        The list of dates we expect to get.
    """
    observed = list(holiday.dates(start, end))
    assert observed == expected

    # The same query with UTC-aware endpoints must preserve timezone info.
    aware_start = utc.localize(Timestamp(start))
    aware_end = utc.localize(Timestamp(end))
    aware_expected = [utc.localize(dt) for dt in expected]
    assert list(holiday.dates(aware_start, aware_end)) == aware_expected
# Each case: (holiday rule, query start, query end, expected occurrences).
@pytest.mark.parametrize(
    "holiday,start_date,end_date,expected",
    [
        (
            USMemorialDay,
            datetime(2011, 1, 1),
            datetime(2020, 12, 31),
            [
                datetime(2011, 5, 30),
                datetime(2012, 5, 28),
                datetime(2013, 5, 27),
                datetime(2014, 5, 26),
                datetime(2015, 5, 25),
                datetime(2016, 5, 30),
                datetime(2017, 5, 29),
                datetime(2018, 5, 28),
                datetime(2019, 5, 27),
                datetime(2020, 5, 25),
            ],
        ),
        (
            Holiday("July 4th Eve", month=7, day=3),
            "2001-01-01",
            "2003-03-03",
            [Timestamp("2001-07-03 00:00:00"), Timestamp("2002-07-03 00:00:00")],
        ),
        (
            Holiday("July 4th Eve", month=7, day=3, days_of_week=(0, 1, 2, 3)),
            "2001-01-01",
            "2008-03-03",
            [
                Timestamp("2001-07-03 00:00:00"),
                Timestamp("2002-07-03 00:00:00"),
                Timestamp("2003-07-03 00:00:00"),
                Timestamp("2006-07-03 00:00:00"),
                Timestamp("2007-07-03 00:00:00"),
            ],
        ),
        (
            EasterMonday,
            datetime(2011, 1, 1),
            datetime(2020, 12, 31),
            [
                Timestamp("2011-04-25 00:00:00"),
                Timestamp("2012-04-09 00:00:00"),
                Timestamp("2013-04-01 00:00:00"),
                Timestamp("2014-04-21 00:00:00"),
                Timestamp("2015-04-06 00:00:00"),
                Timestamp("2016-03-28 00:00:00"),
                Timestamp("2017-04-17 00:00:00"),
                Timestamp("2018-04-02 00:00:00"),
                Timestamp("2019-04-22 00:00:00"),
                Timestamp("2020-04-13 00:00:00"),
            ],
        ),
        (
            GoodFriday,
            datetime(2011, 1, 1),
            datetime(2020, 12, 31),
            [
                Timestamp("2011-04-22 00:00:00"),
                Timestamp("2012-04-06 00:00:00"),
                Timestamp("2013-03-29 00:00:00"),
                Timestamp("2014-04-18 00:00:00"),
                Timestamp("2015-04-03 00:00:00"),
                Timestamp("2016-03-25 00:00:00"),
                Timestamp("2017-04-14 00:00:00"),
                Timestamp("2018-03-30 00:00:00"),
                Timestamp("2019-04-19 00:00:00"),
                Timestamp("2020-04-10 00:00:00"),
            ],
        ),
        (
            USThanksgivingDay,
            datetime(2011, 1, 1),
            datetime(2020, 12, 31),
            [
                datetime(2011, 11, 24),
                datetime(2012, 11, 22),
                datetime(2013, 11, 28),
                datetime(2014, 11, 27),
                datetime(2015, 11, 26),
                datetime(2016, 11, 24),
                datetime(2017, 11, 23),
                datetime(2018, 11, 22),
                datetime(2019, 11, 28),
                datetime(2020, 11, 26),
            ],
        ),
    ],
)
def test_holiday_dates(holiday, start_date, end_date, expected):
    """Holiday.dates returns exactly the expected occurrences in the range."""
    _check_holiday_results(holiday, start_date, end_date, expected)
# Each case queries a single day (start == end); expected is either [] (the
# holiday is not observed on that day) or the observed date as a string.
@pytest.mark.parametrize(
    "holiday,start,expected",
    [
        (USMemorialDay, datetime(2015, 7, 1), []),
        (USMemorialDay, "2015-05-25", "2015-05-25"),
        (USLaborDay, datetime(2015, 7, 1), []),
        (USLaborDay, "2015-09-07", "2015-09-07"),
        (USColumbusDay, datetime(2015, 7, 1), []),
        (USColumbusDay, "2015-10-12", "2015-10-12"),
        (USThanksgivingDay, datetime(2015, 7, 1), []),
        (USThanksgivingDay, "2015-11-26", "2015-11-26"),
        (USMartinLutherKingJr, datetime(2015, 7, 1), []),
        (USMartinLutherKingJr, "2015-01-19", "2015-01-19"),
        (USPresidentsDay, datetime(2015, 7, 1), []),
        (USPresidentsDay, "2015-02-16", "2015-02-16"),
        (GoodFriday, datetime(2015, 7, 1), []),
        (GoodFriday, "2015-04-03", "2015-04-03"),
        (EasterMonday, "2015-04-06", "2015-04-06"),
        (EasterMonday, datetime(2015, 7, 1), []),
        (EasterMonday, "2015-04-05", []),
        ("New Years Day", "2015-01-01", "2015-01-01"),
        ("New Years Day", "2010-12-31", "2010-12-31"),
        ("New Years Day", datetime(2015, 7, 1), []),
        ("New Years Day", "2011-01-01", []),
        ("July 4th", "2015-07-03", "2015-07-03"),
        ("July 4th", datetime(2015, 7, 1), []),
        ("July 4th", "2015-07-04", []),
        ("Veterans Day", "2012-11-12", "2012-11-12"),
        ("Veterans Day", datetime(2015, 7, 1), []),
        ("Veterans Day", "2012-11-11", []),
        ("Christmas", "2011-12-26", "2011-12-26"),
        ("Christmas", datetime(2015, 7, 1), []),
        ("Christmas", "2011-12-25", []),
    ],
)
def test_holidays_within_dates(holiday, start, expected):
    """A single-day query returns the holiday only on its observed date."""
    # see gh-11477
    #
    # Fix holiday behavior where holiday.dates returned dates outside
    # start/end date, or observed rules could not be applied because the
    # holiday was not in the original date range (e.g., 7/4/2015 -> 7/3/2015).
    if isinstance(holiday, str):
        # String cases name rules on the US federal calendar.
        calendar = get_calendar("USFederalHolidayCalendar")
        holiday = calendar.rule_from_name(holiday)

    if isinstance(expected, str):
        expected = [Timestamp(expected)]

    _check_holiday_results(holiday, start, start, expected)
@pytest.mark.parametrize(
    "transform", [lambda x: x.strftime("%Y-%m-%d"), lambda x: Timestamp(x)]
)
def test_argument_types(transform):
    """dates() accepts datetime, string, and Timestamp endpoints alike."""
    start = datetime(2011, 1, 1)
    end = datetime(2020, 12, 31)

    from_datetimes = USThanksgivingDay.dates(start, end)
    from_transformed = USThanksgivingDay.dates(transform(start), transform(end))

    tm.assert_index_equal(from_datetimes, from_transformed)
@pytest.mark.parametrize(
    "name,kwargs",
    [
        ("One-Time", dict(year=2012, month=5, day=28)),
        (
            "Range",
            dict(
                month=5,
                day=28,
                start_date=datetime(2012, 1, 1),
                end_date=datetime(2012, 12, 31),
                offset=DateOffset(weekday=MO(1)),
            ),
        ),
    ],
)
def test_special_holidays(name, kwargs):
    """One-off and range-restricted rules resolve to the single expected date."""
    expected = [datetime(2012, 5, 28)]
    observed = Holiday(name, **kwargs).dates(
        datetime(2011, 1, 1), datetime(2020, 12, 31)
    )
    assert expected == observed
def test_get_calendar():
    """A calendar subclass is retrievable by name via get_calendar."""
    # Defining the subclass registers it with the holiday-calendar registry.
    class TestCalendar(AbstractHolidayCalendar):
        rules = []

    resolved = get_calendar("TestCalendar")
    assert type(resolved) == TestCalendar
def test_factory():
    """HolidayCalendarFactory composes rule sets; combining merges the rules."""
    memorial = HolidayCalendarFactory(
        "MemorialDay", AbstractHolidayCalendar, USMemorialDay
    )
    thanksgiving = HolidayCalendarFactory(
        "Thanksgiving", AbstractHolidayCalendar, USThanksgivingDay
    )
    combined = HolidayCalendarFactory("Combined", memorial, thanksgiving)

    assert len(memorial.rules) == 1
    assert len(thanksgiving.rules) == 1
    assert len(combined.rules) == 2
def test_both_offset_observance_raises():
    """Holiday must reject a rule that sets both offset and observance."""
    # see gh-10217
    msg = "Cannot use both offset and observance"
    with pytest.raises(NotImplementedError, match=msg):
        Holiday(
            "Cyber Monday",
            month=11,
            day=1,
            offset=[DateOffset(weekday=SA(4))],
            observance=next_monday,
        )
| bsd-3-clause |
ryanjmccall/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/table.py | 69 | 16757 | """
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : John Gill <jng@europe.renre.com>
Copyright : 2004 John Gill and John Hunter
License : matplotlib license
"""
from __future__ import division
import warnings
import artist
from artist import Artist
from patches import Rectangle
from cbook import is_string_like
from text import Text
from transforms import Bbox
class Cell(Rectangle):
    """
    A table cell: a Rectangle carrying an associated Text label.
    """
    PAD = 0.1  # fraction of cell width kept clear between text and border

    def __init__(self, xy, width, height,
                 edgecolor='k', facecolor='w',
                 fill=True,
                 text='',
                 loc=None,
                 fontproperties=None
                 ):
        # Initialise the rectangle part of the cell.
        Rectangle.__init__(self, xy, width=width, height=height,
                           edgecolor=edgecolor, facecolor=facecolor,
                           )
        self.set_clip_on(False)

        # Attach the label; alignment defaults to 'right'.
        self._loc = 'right' if loc is None else loc
        self._text = Text(x=xy[0], y=xy[1], text=text,
                          fontproperties=fontproperties)
        self._text.set_clip_on(False)

    def set_transform(self, trans):
        Rectangle.set_transform(self, trans)
        # The text deliberately keeps its own (identity) transform.

    def set_figure(self, fig):
        Rectangle.set_figure(self, fig)
        self._text.set_figure(fig)

    def get_text(self):
        'Return the cell Text instance'
        return self._text

    def set_fontsize(self, size):
        self._text.set_fontsize(size)

    def get_fontsize(self):
        'Return the cell fontsize'
        return self._text.get_fontsize()

    def auto_set_font_size(self, renderer):
        """ Shrink font size until text fits. """
        size = self.get_fontsize()
        needed = self.get_required_width(renderer)
        while size > 1 and needed > self.get_width():
            size -= 1
            self.set_fontsize(size)
            needed = self.get_required_width(renderer)
        return size

    def draw(self, renderer):
        if not self.get_visible():
            return
        # Rectangle first, then the text positioned inside it.
        Rectangle.draw(self, renderer)
        self._set_text_position(renderer)
        self._text.draw(renderer)

    def _set_text_position(self, renderer):
        """ Set text up so it draws in the right place.

        Currently support 'left', 'center' and 'right'
        """
        l, b, w, h = self.get_window_extent(renderer).bounds

        # Vertically centred in every alignment mode.
        self._text.set_verticalalignment('center')
        y = b + (h / 2.0)

        if self._loc == 'center':
            self._text.set_horizontalalignment('center')
            x = l + (w / 2.0)
        elif self._loc == 'left':
            self._text.set_horizontalalignment('left')
            x = l + (w * self.PAD)
        else:  # 'right' and any unrecognised value
            self._text.set_horizontalalignment('right')
            x = l + (w * (1.0 - self.PAD))

        self._text.set_position((x, y))

    def get_text_bounds(self, renderer):
        """ Get text bounds in axes co-ordinates. """
        extent = self._text.get_window_extent(renderer)
        return extent.inverse_transformed(self.get_data_transform()).bounds

    def get_required_width(self, renderer):
        """ Get width required for this cell. """
        l, b, w, h = self.get_text_bounds(renderer)
        return w * (1.0 + (2.0 * self.PAD))

    def set_text_props(self, **kwargs):
        'update the text properties with kwargs'
        self._text.update(kwargs)
class Table(Artist):
"""
Create a table of cells.
Table can have (optional) row and column headers.
Each entry in the table can be either text or patches.
Column widths and row heights for the table can be specifified.
Return value is a sequence of text, line and patch instances that make
up the table
"""
codes = {'best' : 0,
'upper right' : 1, # default
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'center left' : 5,
'center right' : 6,
'lower center' : 7,
'upper center' : 8,
'center' : 9,
'top right' : 10,
'top left' : 11,
'bottom left' : 12,
'bottom right' : 13,
'right' : 14,
'left' : 15,
'top' : 16,
'bottom' : 17,
}
FONTSIZE = 10
AXESPAD = 0.02 # the border between the axes and table edge
def __init__(self, ax, loc=None, bbox=None):
Artist.__init__(self)
if is_string_like(loc) and loc not in self.codes:
warnings.warn('Unrecognized location %s. Falling back on bottom; valid locations are\n%s\t' %(loc, '\n\t'.join(self.codes.keys())))
loc = 'bottom'
if is_string_like(loc): loc = self.codes.get(loc, 1)
self.set_figure(ax.figure)
self._axes = ax
self._loc = loc
self._bbox = bbox
# use axes coords
self.set_transform(ax.transAxes)
self._texts = []
self._cells = {}
self._autoRows = []
self._autoColumns = []
self._autoFontsize = True
self._cachedRenderer = None
def add_cell(self, row, col, *args, **kwargs):
""" Add a cell to the table. """
xy = (0,0)
cell = Cell(xy, *args, **kwargs)
cell.set_figure(self.figure)
cell.set_transform(self.get_transform())
cell.set_clip_on(False)
self._cells[(row, col)] = cell
def _approx_text_height(self):
return self.FONTSIZE/72.0*self.figure.dpi/self._axes.bbox.height * 1.2
def draw(self, renderer):
# Need a renderer to do hit tests on mouseevent; assume the last one will do
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
self._cachedRenderer = renderer
if not self.get_visible(): return
renderer.open_group('table')
self._update_positions(renderer)
keys = self._cells.keys()
keys.sort()
for key in keys:
self._cells[key].draw(renderer)
#for c in self._cells.itervalues():
# c.draw(renderer)
renderer.close_group('table')
def _get_grid_bbox(self, renderer):
"""Get a bbox, in axes co-ordinates for the cells.
Only include those in the range (0,0) to (maxRow, maxCol)"""
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in self._cells.keys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.inverse_transformed(self.get_transform())
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the table.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
# TODO: Return index of the cell containing the cursor so that the user
# doesn't have to bind to each one individually.
if self._cachedRenderer is not None:
boxes = [self._cells[pos].get_window_extent(self._cachedRenderer)
for pos in self._cells.keys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = bbox_all(boxes)
return bbox.contains(mouseevent.x,mouseevent.y),{}
else:
return False,{}
def get_children(self):
'Return the Artists contained by the table'
return self._cells.values()
get_child_artists = get_children # backward compatibility
def get_window_extent(self, renderer):
'Return the bounding box of the table in window coords'
boxes = [c.get_window_extent(renderer) for c in self._cells]
return bbox_all(boxes)
def _do_cell_alignment(self):
""" Calculate row heights and column widths.
Position cells accordingly.
"""
# Calculate row/column widths
widths = {}
heights = {}
for (row, col), cell in self._cells.iteritems():
height = heights.setdefault(row, 0.0)
heights[row] = max(height, cell.get_height())
width = widths.setdefault(col, 0.0)
widths[col] = max(width, cell.get_width())
# work out left position for each column
xpos = 0
lefts = {}
cols = widths.keys()
cols.sort()
for col in cols:
lefts[col] = xpos
xpos += widths[col]
ypos = 0
bottoms = {}
rows = heights.keys()
rows.sort()
rows.reverse()
for row in rows:
bottoms[row] = ypos
ypos += heights[row]
# set cell positions
for (row, col), cell in self._cells.iteritems():
cell.set_x(lefts[col])
cell.set_y(bottoms[row])
def auto_set_column_width(self, col):
self._autoColumns.append(col)
def _auto_set_column_width(self, col, renderer):
""" Automagically set width for column.
"""
cells = [key for key in self._cells if key[1] == col]
# find max width
width = 0
for cell in cells:
c = self._cells[cell]
width = max(c.get_required_width(renderer), width)
# Now set the widths
for cell in cells:
self._cells[cell].set_width(width)
def auto_set_font_size(self, value=True):
""" Automatically set font size. """
self._autoFontsize = value
def _auto_set_font_size(self, renderer):
if len(self._cells) == 0:
return
fontsize = self._cells.values()[0].get_fontsize()
cells = []
for key, cell in self._cells.iteritems():
# ignore auto-sized columns
if key[1] in self._autoColumns: continue
size = cell.auto_set_font_size(renderer)
fontsize = min(fontsize, size)
cells.append(cell)
# now set all fontsizes equal
for cell in self._cells.itervalues():
cell.set_fontsize(fontsize)
def scale(self, xscale, yscale):
""" Scale column widths by xscale and row heights by yscale. """
for c in self._cells.itervalues():
c.set_width(c.get_width() * xscale)
c.set_height(c.get_height() * yscale)
def set_fontsize(self, size):
"""
Set the fontsize of the cell text
ACCEPTS: a float in points
"""
for cell in self._cells.itervalues():
cell.set_fontsize(size)
def _offset(self, ox, oy):
'Move all the artists by ox,oy (axes coords)'
for c in self._cells.itervalues():
x, y = c.get_x(), c.get_y()
c.set_x(x+ox)
c.set_y(y+oy)
    def _update_positions(self, renderer):
        # called from renderer to allow more precise estimates of
        # widths and heights with get_window_extent
        # Do any auto width setting
        for col in self._autoColumns:
            self._auto_set_column_width(col, renderer)
        if self._autoFontsize:
            self._auto_set_font_size(renderer)
        # Align all the cells
        self._do_cell_alignment()
        bbox = self._get_grid_bbox(renderer)
        l,b,w,h = bbox.bounds
        if self._bbox is not None:
            # Position according to bbox
            rl, rb, rw, rh = self._bbox
            self.scale(rw/w, rh/h)
            ox = rl - l
            oy = rb - b
            # Re-align after scaling so cell edges stay flush.
            self._do_cell_alignment()
        else:
            # Position using loc: unpack the location codes in the order
            # they appear in self.codes so each name below is an index.
            (BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
             TR, TL, BL, BR, R, L, T, B) = range(len(self.codes))
            # defaults for center
            ox = (0.5-w/2)-l
            oy = (0.5-h/2)-b
            # Each matching code overrides the default offset on one axis;
            # the T*/B*/R/L/T/B codes place the table *outside* the axes box.
            if self._loc in (UL, LL, CL): # left
                ox = self.AXESPAD - l
            if self._loc in (BEST, UR, LR, R, CR): # right
                ox = 1 - (l + w + self.AXESPAD)
            if self._loc in (BEST, UR, UL, UC): # upper
                oy = 1 - (b + h + self.AXESPAD)
            if self._loc in (LL, LR, LC): # lower
                oy = self.AXESPAD - b
            if self._loc in (LC, UC, C): # center x
                ox = (0.5-w/2)-l
            if self._loc in (CL, CR, C): # center y
                oy = (0.5-h/2)-b
            if self._loc in (TL, BL, L): # out left
                ox = - (l + w)
            if self._loc in (TR, BR, R): # out right
                ox = 1.0 - l
            if self._loc in (TR, TL, T): # out top
                oy = 1.0 - b
            if self._loc in (BL, BR, B): # out bottom
                oy = - (b + h)
        self._offset(ox, oy)
    def get_celld(self):
        'return a dict of cells in the table'
        # Keys are (row, col) index tuples; values are the Cell artists.
        return self._cells
def table(ax,
          cellText=None, cellColours=None,
          cellLoc='right', colWidths=None,
          rowLabels=None, rowColours=None, rowLoc='left',
          colLabels=None, colColours=None, colLoc='center',
          loc='bottom', bbox=None):
    """
    TABLE(cellText=None, cellColours=None,
          cellLoc='right', colWidths=None,
          rowLabels=None, rowColours=None, rowLoc='left',
          colLabels=None, colColours=None, colLoc='center',
          loc='bottom', bbox=None)

    Factory function to generate a Table instance.

    Thanks to John Gill for providing the class and table.
    """
    # Check we have some cellText
    if cellText is None:
        # assume just colours are needed
        rows = len(cellColours)
        cols = len(cellColours[0])
        # BUG FIX: rows/cols were swapped here, which produced a transposed
        # (cols x rows) grid of empty strings.
        cellText = [[''] * cols] * rows

    rows = len(cellText)
    cols = len(cellText[0])
    for row in cellText:
        assert len(row) == cols

    if cellColours is not None:
        assert len(cellColours) == rows
        for row in cellColours:
            assert len(row) == cols
    else:
        # A string of `cols` 'w's per row: indexing it per column yields 'w'.
        cellColours = ['w' * cols] * rows

    # Set colwidths if not given
    if colWidths is None:
        colWidths = [1.0 / cols] * cols

    # Fill in missing information for row labels
    rowLabelWidth = 0
    if rowLabels is None:
        if rowColours is not None:
            # BUG FIX: there is one row label per *row*, not per column.
            rowLabels = [''] * rows
            rowLabelWidth = colWidths[0]
    elif rowColours is None:
        rowColours = 'w' * rows

    if rowLabels is not None:
        assert len(rowLabels) == rows

    # If we have column labels, the data cells must be shifted down one row.
    # BUG FIX: `offset` used to stay 0 when both colLabels and colColours
    # were supplied, making the label row collide with data row 0.
    offset = 1
    if colLabels is None:
        if colColours is not None:
            # BUG FIX: there is one column label per *column*, not per row.
            colLabels = [''] * cols
        else:
            offset = 0
    elif colColours is None:
        colColours = 'w' * cols

    # BUG FIX: this assert re-checked rowLabels; check the column labels.
    if colLabels is not None:
        assert len(colLabels) == cols

    # Now create the table
    table = Table(ax, loc, bbox)
    height = table._approx_text_height()

    # Add the cells
    for row in xrange(rows):
        for col in xrange(cols):
            table.add_cell(row + offset, col,
                           width=colWidths[col], height=height,
                           text=cellText[row][col],
                           facecolor=cellColours[row][col],
                           loc=cellLoc)
    # Do column labels
    if colLabels is not None:
        for col in xrange(cols):
            table.add_cell(0, col,
                           width=colWidths[col], height=height,
                           text=colLabels[col], facecolor=colColours[col],
                           loc=colLoc)
    # Do row labels
    if rowLabels is not None:
        for row in xrange(rows):
            table.add_cell(row + offset, -1,
                           width=rowLabelWidth or 1e-15, height=height,
                           text=rowLabels[row], facecolor=rowColours[row],
                           loc=rowLoc)
        if rowLabelWidth == 0:
            table.auto_set_column_width(-1)

    ax.add_table(table)
    return table
# Register the Table keyword documentation for docstring interpolation.
artist.kwdocd['Table'] = artist.kwdoc(Table)
| gpl-3.0 |
sumspr/scikit-learn | sklearn/decomposition/nmf.py | 100 | 19059 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted, check_non_negative
def safe_vstack(Xs):
    """Stack blocks vertically, staying sparse if any block is sparse."""
    has_sparse = any(sp.issparse(X) for X in Xs)
    if has_sparse:
        return sp.vstack(Xs)
    return np.vstack(Xs)
def norm(x):
    """Dot product-based Euclidean norm implementation

    See: http://fseoane.net/blog/2011/computing-the-vector-norm/
    """
    squared = squared_norm(x)
    return sqrt(squared)
def trace_dot(X, Y):
    """Trace of np.dot(X, Y.T)."""
    # Elementwise product summed over all entries == dot of the ravels.
    return np.multiply(X, Y).sum()
def _sparseness(x):
    """Hoyer's measure of sparsity for a vector"""
    root_n = np.sqrt(len(x))
    l1 = np.linalg.norm(x, 1)
    return (root_n - l1 / norm(x)) / (root_n - 1)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
                    random_state=None):
    """NNDSVD algorithm for NMF initialization.

    Computes a good initial guess for the non-negative
    rank k matrix approximation for X: X = WH

    Parameters
    ----------

    X : array, [n_samples, n_features]
        The data matrix to be decomposed.

    n_components : array, [n_components, n_features]
        The number of components desired in the approximation.

    variant : None | 'a' | 'ar'
        The variant of the NNDSVD algorithm.
        Accepts None, 'a', 'ar'
        None: leaves the zero entries as zero
        'a': Fills the zero entries with the average of X
        'ar': Fills the zero entries with standard normal random variates.
        Default: None

    eps: float
        Truncate all values less then this in output to zero.

    random_state : numpy.RandomState | int, optional
        The generator used to fill in the zeros, when using variant='ar'
        Default: numpy.random

    Returns
    -------

    (W, H) :
        Initial guesses for solving X ~= WH such that
        the number of columns in W is n_components.

    References
    ----------
    C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
    nonnegative matrix factorization - Pattern Recognition, 2008

    http://tinyurl.com/nndsvd
    """
    check_non_negative(X, "NMF initialization")
    if variant not in (None, 'a', 'ar'):
        raise ValueError("Invalid variant name")

    random_state = check_random_state(random_state)

    U, S, V = randomized_svd(X, n_components, random_state=random_state)
    W, H = np.zeros(U.shape), np.zeros(V.shape)

    # The leading singular triplet is non-negative
    # so it can be used as is for initialization.
    W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
    H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])

    for j in range(1, n_components):
        x, y = U[:, j], V[j, :]

        # extract positive and negative parts of column vectors
        x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
        x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))

        # and their norms
        x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
        x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)

        m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm

        # choose update: keep whichever signed half of the rank-1 term
        # carries more energy (see the reference above).
        if m_p > m_n:
            u = x_p / x_p_nrm
            v = y_p / y_p_nrm
            sigma = m_p
        else:
            u = x_n / x_n_nrm
            v = y_n / y_n_nrm
            sigma = m_n

        lbd = np.sqrt(S[j] * sigma)
        W[:, j] = lbd * u
        H[j, :] = lbd * v

    # Truncate tiny entries so the factors start sparse.
    W[W < eps] = 0
    H[H < eps] = 0

    if variant == "a":
        avg = X.mean()
        W[W == 0] = avg
        H[H == 0] = avg
    elif variant == "ar":
        avg = X.mean()
        W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
        H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)

    return W, H
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
    """Non-negative least square solver

    Solves a non-negative least squares subproblem using the
    projected gradient descent algorithm.
    min || WH - V ||_2

    Parameters
    ----------
    V, W : array-like
        Constant matrices.

    H : array-like
        Initial guess for the solution.

    tol : float
        Tolerance of the stopping condition.

    max_iter : int
        Maximum number of iterations before timing out.

    sigma : float
        Constant used in the sufficient decrease condition checked by the line
        search. Smaller values lead to a looser sufficient decrease condition,
        thus reducing the time taken by the line search, but potentially
        increasing the number of iterations of the projected gradient
        procedure. 0.01 is a commonly used value in the optimization
        literature.

    beta : float
        Factor by which the step size is decreased (resp. increased) until
        (resp. as long as) the sufficient decrease condition is satisfied.
        Larger values allow to find a better step size but lead to longer line
        search. 0.1 is a commonly used value in the optimization literature.

    Returns
    -------
    H : array-like
        Solution to the non-negative least squares problem.

    grad : array-like
        The gradient.

    n_iter : int
        The number of iterations done by the algorithm.

    References
    ----------
    C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
    Neural Computation, 19(2007), 2756-2779.
    http://www.csie.ntu.edu.tw/~cjlin/nmf/
    """
    WtV = safe_sparse_dot(W.T, V)
    WtW = np.dot(W.T, W)

    # values justified in the paper
    alpha = 1
    for n_iter in range(1, max_iter + 1):
        grad = np.dot(WtW, H) - WtV

        # The following multiplication with a boolean array is more than twice
        # as fast as indexing into grad.
        if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
            break

        Hp = H

        # Line search: shrink (or grow) the step size alpha until the
        # sufficient-decrease condition flips, trying at most 19 steps.
        for inner_iter in range(19):
            # Gradient step.
            Hn = H - alpha * grad
            # Projection step.
            Hn *= Hn > 0
            d = Hn - H
            gradd = np.dot(grad.ravel(), d.ravel())
            dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
            suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
            if inner_iter == 0:
                # The first trial decides whether alpha must shrink or may grow.
                decr_alpha = not suff_decr
            if decr_alpha:
                if suff_decr:
                    H = Hn
                    break
                else:
                    alpha *= beta
            elif not suff_decr or (Hp == Hn).all():
                H = Hp
                break
            else:
                alpha /= beta
                Hp = Hn

    if n_iter == max_iter:
        warnings.warn("Iteration limit reached in nls subproblem.")

    return H, grad, n_iter
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
    """Non-Negative matrix factorization by Projected Gradient (NMF)

    Read more in the :ref:`User Guide <NMF>`.

    Parameters
    ----------
    n_components : int or None
        Number of components, if n_components is not set all components
        are kept

    init :  'nndsvd' |  'nndsvda' | 'nndsvdar' | 'random'
        Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise random.
        Valid options::

            'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
                initialization (better for sparseness)

            'nndsvda': NNDSVD with zeros filled with the average of X
                (better when sparsity is not desired)

            'nndsvdar': NNDSVD with zeros filled with small random values
                (generally faster, less accurate alternative to NNDSVDa
                for when sparsity is not desired)

            'random': non-negative random matrices

    sparseness : 'data' | 'components' | None, default: None
        Where to enforce sparsity in the model.

    beta : double, default: 1
        Degree of sparseness, if sparseness is not None. Larger values mean
        more sparseness.

    eta : double, default: 0.1
        Degree of correctness to maintain, if sparsity is not None. Smaller
        values mean larger error.

    tol : double, default: 1e-4
        Tolerance value used in stopping conditions.

    max_iter : int, default: 200
        Number of iterations to compute.

    nls_max_iter : int, default: 2000
        Number of iterations in NLS subproblem.

    random_state : int or RandomState
        Random number generator seed control.

    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Non-negative components of the data.

    reconstruction_err_ : number
        Frobenius norm of the matrix difference between
        the training data and the reconstructed data from
        the fit produced by the model. ``|| X - WH ||_2``

    n_iter_ : int
        Number of iterations run.

    Examples
    --------

    >>> import numpy as np
    >>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
    >>> from sklearn.decomposition import ProjectedGradientNMF
    >>> model = ProjectedGradientNMF(n_components=2, init='random',
    ...                              random_state=0)
    >>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
      n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
      tol=0.0001)
    >>> model.components_
    array([[ 0.77032744,  0.11118662],
           [ 0.38526873,  0.38228063]])
    >>> model.reconstruction_err_ #doctest: +ELLIPSIS
    0.00746...
    >>> model = ProjectedGradientNMF(n_components=2,
    ...              sparseness='components', init='random', random_state=0)
    >>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
           n_components=2, nls_max_iter=2000, random_state=0,
           sparseness='components', tol=0.0001)
    >>> model.components_
    array([[ 1.67481991,  0.29614922],
           [ 0.        ,  0.4681982 ]])
    >>> model.reconstruction_err_ #doctest: +ELLIPSIS
    0.513...

    References
    ----------
    This implements

    C.-J. Lin. Projected gradient methods
    for non-negative matrix factorization. Neural
    Computation, 19(2007), 2756-2779.
    http://www.csie.ntu.edu.tw/~cjlin/nmf/

    P. Hoyer. Non-negative Matrix Factorization with
    Sparseness Constraints. Journal of Machine Learning
    Research 2004.

    NNDSVD is introduced in

    C. Boutsidis, E. Gallopoulos: SVD based
    initialization: A head start for nonnegative
    matrix factorization - Pattern Recognition, 2008

    http://tinyurl.com/nndsvd
    """

    def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
                 eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
                 random_state=None):
        self.n_components = n_components
        self.init = init
        self.tol = tol
        # Validate eagerly so a typo fails at construction, not at fit time.
        if sparseness not in (None, 'data', 'components'):
            raise ValueError(
                'Invalid sparseness parameter: got %r instead of one of %r' %
                (sparseness, (None, 'data', 'components')))
        self.sparseness = sparseness
        self.beta = beta
        self.eta = eta
        self.max_iter = max_iter
        self.nls_max_iter = nls_max_iter
        self.random_state = random_state

    def _init(self, X):
        """Compute initial guesses (W, H) according to ``self.init``."""
        n_samples, n_features = X.shape
        init = self.init
        if init is None:
            # Data-driven default: NNDSVD when reducing dimensionality,
            # random otherwise.
            if self.n_components_ < n_features:
                init = 'nndsvd'
            else:
                init = 'random'

        rng = check_random_state(self.random_state)

        if init == 'nndsvd':
            W, H = _initialize_nmf(X, self.n_components_, random_state=rng)
        elif init == 'nndsvda':
            W, H = _initialize_nmf(X, self.n_components_, variant='a',
                                   random_state=rng)
        elif init == 'nndsvdar':
            W, H = _initialize_nmf(X, self.n_components_, variant='ar',
                                   random_state=rng)
        elif init == "random":
            W = rng.randn(n_samples, self.n_components_)
            # we do not write np.abs(W, out=W) to stay compatible with
            # numpy 1.5 and earlier where the 'out' keyword is not
            # supported as a kwarg on ufuncs
            np.abs(W, W)
            H = rng.randn(self.n_components_, n_features)
            np.abs(H, H)
        else:
            raise ValueError(
                'Invalid init parameter: got %r instead of one of %r' %
                (init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
        return W, H

    def _update_W(self, X, H, W, tolW):
        """One NLS update of W (solved in transposed form).

        Returns (W, gradW, n_iter); the sparseness penalty is encoded by
        stacking extra rows onto the subproblem matrices.
        """
        n_samples, n_features = X.shape

        if self.sparseness is None:
            W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
                                              self.nls_max_iter)
        elif self.sparseness == 'data':
            # L1 penalty (beta) on W encourages sparse representations.
            W, gradW, iterW = _nls_subproblem(
                safe_vstack([X.T, np.zeros((1, n_samples))]),
                safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
                             self.n_components_))]),
                W.T, tolW, self.nls_max_iter)
        elif self.sparseness == 'components':
            # L2 penalty (eta) on W keeps it well conditioned while H is
            # the sparse factor.
            W, gradW, iterW = _nls_subproblem(
                safe_vstack([X.T,
                             np.zeros((self.n_components_, n_samples))]),
                safe_vstack([H.T,
                             np.sqrt(self.eta) * np.eye(self.n_components_)]),
                W.T, tolW, self.nls_max_iter)

        return W.T, gradW.T, iterW

    def _update_H(self, X, H, W, tolH):
        """One NLS update of H; returns (H, gradH, n_iter)."""
        n_samples, n_features = X.shape

        if self.sparseness is None:
            H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
                                              self.nls_max_iter)
        elif self.sparseness == 'data':
            # L2 penalty (eta) on H; W is the sparse factor in this mode.
            H, gradH, iterH = _nls_subproblem(
                safe_vstack([X, np.zeros((self.n_components_, n_features))]),
                safe_vstack([W,
                             np.sqrt(self.eta) * np.eye(self.n_components_)]),
                H, tolH, self.nls_max_iter)
        elif self.sparseness == 'components':
            # L1 penalty (beta) on H encourages sparse components.
            H, gradH, iterH = _nls_subproblem(
                safe_vstack([X, np.zeros((1, n_features))]),
                safe_vstack([W,
                             np.sqrt(self.beta)
                             * np.ones((1, self.n_components_))]),
                H, tolH, self.nls_max_iter)

        return H, gradH, iterH

    def fit_transform(self, X, y=None):
        """Learn a NMF model for the data X and returns the transformed data.

        This is more efficient than calling fit followed by transform.

        Parameters
        ----------

        X: {array-like, sparse matrix}, shape = [n_samples, n_features]
            Data matrix to be decomposed

        Returns
        -------
        data: array, [n_samples, n_components]
            Transformed data
        """
        X = check_array(X, accept_sparse='csr')
        check_non_negative(X, "NMF.fit")

        n_samples, n_features = X.shape

        if not self.n_components:
            self.n_components_ = n_features
        else:
            self.n_components_ = self.n_components

        W, H = self._init(X)

        gradW = (np.dot(W, np.dot(H, H.T))
                 - safe_sparse_dot(X, H.T, dense_output=True))
        gradH = (np.dot(np.dot(W.T, W), H)
                 - safe_sparse_dot(W.T, X, dense_output=True))
        init_grad = norm(np.r_[gradW, gradH.T])
        # Subproblem tolerances are relative to the initial gradient norm.
        tolW = max(0.001, self.tol) * init_grad  # why max?
        tolH = tolW

        tol = self.tol * init_grad

        for n_iter in range(1, self.max_iter + 1):
            # stopping condition
            # as discussed in paper
            proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
                                   gradH[np.logical_or(gradH < 0, H > 0)]])
            if proj_norm < tol:
                break

            # update W
            W, gradW, iterW = self._update_W(X, H, W, tolW)
            if iterW == 1:
                tolW = 0.1 * tolW

            # update H
            H, gradH, iterH = self._update_H(X, H, W, tolH)
            if iterH == 1:
                tolH = 0.1 * tolH

        if not sp.issparse(X):
            error = norm(X - np.dot(W, H))
        else:
            # Expand || X - WH ||_F^2 without densifying the sparse X.
            sqnorm_X = np.dot(X.data, X.data)
            norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
            cross_prod = trace_dot((X * H.T), W)
            error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)

        self.reconstruction_err_ = error

        self.comp_sparseness_ = _sparseness(H.ravel())
        self.data_sparseness_ = _sparseness(W.ravel())

        H[H == 0] = 0   # fix up negative zeros

        self.components_ = H

        if n_iter == self.max_iter:
            # NOTE(review): on this early-return path n_iter_ is never set,
            # unlike the normal path below -- confirm whether intentional.
            warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
            return self.transform(X)

        self.n_iter_ = n_iter
        return W

    def fit(self, X, y=None, **params):
        """Learn a NMF model for the data X.

        Parameters
        ----------

        X: {array-like, sparse matrix}, shape = [n_samples, n_features]
            Data matrix to be decomposed

        Returns
        -------
        self
        """
        self.fit_transform(X, **params)
        return self

    def transform(self, X):
        """Transform the data X according to the fitted NMF model

        Parameters
        ----------

        X: {array-like, sparse matrix}, shape = [n_samples, n_features]
            Data matrix to be transformed by the model

        Returns
        -------
        data: array, [n_samples, n_components]
            Transformed data
        """
        check_is_fitted(self, 'n_components_')

        X = check_array(X, accept_sparse='csc')
        Wt = np.zeros((self.n_components_, X.shape[0]))
        check_non_negative(X, "ProjectedGradientNMF.transform")

        if sp.issparse(X):
            # Sparse input: one projected-gradient NLS solve for all samples.
            Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
                                       tol=self.tol,
                                       max_iter=self.nls_max_iter)
        else:
            # Dense input: exact NNLS, one sample (row) at a time.
            for j in range(0, X.shape[0]):
                Wt[:, j], _ = nnls(self.components_.T, X[j, :])
        return Wt.T
class NMF(ProjectedGradientNMF):
    # Backward-compatible alias: NMF is exactly ProjectedGradientNMF,
    # including its docstring.
    __doc__ = ProjectedGradientNMF.__doc__
    pass
| bsd-3-clause |
Udzu/pudzu | dataviz/flagsrwbpercent.py | 1 | 2803 | from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import scipy.stats
# One row per country; the 'flag' column holds the flag image URL.
df = pd.read_csv("datasets/flagsrwbpercent.csv").set_index("country")

class HeraldicPalette(metaclass=NamedPaletteMeta):
    # Heraldic tinctures that flag pixels get quantized to.
    ARGENT = "#ffffff"
    AZURE = "#0f47af"
    GULES = "#da121a"
    # NOTE(review): SABLE (heraldic black) is deliberately pure green here --
    # it doubles as the chroma key that flag_image()/histogram() use to mark
    # and then discard transparent regions.  Confirm before "fixing" it.
    SABLE = "#00ff00"
def flag_image(c):
    # Fetch country c's flag, flatten any transparency onto the green chroma
    # key, then snap every pixel to the nearest HeraldicPalette colour.
    return Image.from_url_with_cache(df.flag[c]).convert("RGBA").remove_transparency("#00ff00").to_palette(HeraldicPalette).convert("RGBA")
def histogram(c):
    # Return {palette name: fraction of pixels} for country c's flag,
    # excluding the SABLE/green chroma-key pixels.
    img = flag_image(c)
    # NOTE(review): the comprehension variable c shadows the country argument.
    cols = { next(n for n,p in zip(HeraldicPalette.names, HeraldicPalette) if RGBA(c) == p) : v for v,c in img.getcolors() if RGBA(c) != HeraldicPalette.SABLE}
    # Drop colours covering too few pixels (anti-aliasing noise, emblems).
    cols = valfilter(lambda v: v > 10000, cols)
    return valmap(lambda v: v / sum(cols.values()), cols)
# Expand each flag's colour histogram into white/red/blue percentage columns.
df['histogram'] = [histogram(c) for c in df.index]
df["W"] = df["histogram"].apply(lambda x: x.get("ARGENT", 0)) * 100
df["R"] = df["histogram"].apply(lambda x: x.get("GULES", 0)) * 100
df["B"] = df["histogram"].apply(lambda x: x.get("AZURE", 0)) * 100
import ternary
from ternary.helpers import project_point
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
def getImage(c):
    # Build a 40px-tall bordered flag thumbnail for country c, cache it to
    # disk, and wrap it in an OffsetImage for matplotlib annotation.
    # Nepal's flag is non-rectangular, so it only gets a left border.
    flag = Image.from_url_with_cache(df.flag[c]).convert("RGBA").resize_fixed_aspect(height=40).pad(1 if c != "Nepal" else (1,0,0,0), "black")
    path = "cache/flags/{}.png".format(c)
    flag.save(path)
    return OffsetImage(plt.imread(path), dpi_cor=False)
# Draw the ternary (red/white/blue) chart, one flag thumbnail per country.
figure, tax = ternary.figure(scale=100)
#tax.set_title("Color composition of Red-White-Blue flags".upper(), fontsize=16, pad=20, weight="heavy")
#tax.right_corner_label("red", fontsize=10)
#tax.top_corner_label("white", fontsize=10)
#tax.left_corner_label("blue", fontsize=10)
tax.bottom_axis_label("% red", fontsize=10, offset=0.07)
tax.right_axis_label("% white", fontsize=10, offset=0.14)
tax.left_axis_label("% blue", fontsize=10, offset=0.14)
tax.boundary(linewidth=2.0)
tax.gridlines(multiple=10, color="grey")
#points = df[['R', 'W', 'B']].values
#tax.scatter(points, marker='s', color='red')
# Place each flag at its projected (red, white, blue) composition.
for c in df.index:
    x, y = project_point(df[['R', 'W', 'B']].loc[c])
    ab = AnnotationBbox(getImage(c), (x, y), frameon=False)
    tax.get_axes().add_artist(ab)
tax.ticks(axis='lbr', linewidth=1, multiple=20, offset=0.02, tick_formats="%d%%")
tax.get_axes().axis('off')
tax.clear_matplotlib_ticks()
tax.savefig("cache/flags/flagsrwbpercent.png")
# Trim the matplotlib margins, add the title and a signature, then save.
chart = Image.open("cache/flags/flagsrwbpercent.png").trim((0,40))
img = Image.from_column([
    Image.from_text("Color composition of Red-White-Blue flags".upper(), sans(48, bold=True), padding=(0,10)),
    chart], bg="white")
img.place(Image.from_text("/u/Udzu", sans(16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/flagsrwbpercent.png")
jpinedaf/pyspeckit | setup.py | 2 | 3802 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import sys

import ah_bootstrap
from setuptools import setup

# A dirty hack to get around some early import/configurations ambiguities:
# astropy_helpers checks this flag to know it is running inside setup.py.
if sys.version_info[0] >= 3:
    import builtins
else:
    import __builtin__ as builtins
builtins._ASTROPY_SETUP_ = True

from astropy_helpers.setup_helpers import (register_commands, adjust_compiler,
                                           get_debug_option, get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py

# Get some values from the setup.cfg (Py2/Py3 module name difference).
try:
    from ConfigParser import ConfigParser
except ImportError:
    from configparser import ConfigParser

conf = ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))

PACKAGENAME = metadata.get('package_name', 'packagename')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://astropy.org')

# Get the long description from the package's docstring
__import__(PACKAGENAME)
package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = package.__doc__

# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME

# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '0.1.25.dev'

# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION

if not RELEASE:
    VERSION += get_git_devstr(False)

# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)

# Adjust the compiler in case the default on this platform is to use a
# broken one.
adjust_compiler(PACKAGENAME)

# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
                    get_debug_option(PACKAGENAME))

# Treat everything in scripts except README.rst as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
           if os.path.basename(fname) != 'README.rst']

# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()

# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')

# Define entry points for command-line scripts
entry_points = {}

# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
    for filename in files:
        if filename.endswith('.c'):
            c_files.append(
                os.path.join(
                    os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)

setup(name=PACKAGENAME,
      version=VERSION,
      description=DESCRIPTION,
      scripts=scripts,
      requires=['astropy', 'matplotlib', 'numpy'],
      install_requires=['astropy', 'numpy', 'matplotlib>=1.4'],
      provides=[PACKAGENAME],
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      license=LICENSE,
      url=URL,
      long_description=LONG_DESCRIPTION,
      cmdclass=cmdclassd,
      zip_safe=False,
      use_2to3=False,
      entry_points=entry_points,
      **package_info
      )
| mit |
jseabold/scikit-learn | sklearn/ensemble/tests/test_forest.py | 26 | 41675 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]

# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]

# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)

# Registries mapping estimator names to classes; the check_* helpers below
# are parameterized over these so every forest variant runs the same tests.
FOREST_CLASSIFIERS = {
    "ExtraTreesClassifier": ExtraTreesClassifier,
    "RandomForestClassifier": RandomForestClassifier,
}

FOREST_REGRESSORS = {
    "ExtraTreesRegressor": ExtraTreesRegressor,
    "RandomForestRegressor": RandomForestRegressor,
}

FOREST_TRANSFORMERS = {
    "RandomTreesEmbedding": RandomTreesEmbedding,
}

FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
    """Check classification on a toy dataset."""
    ForestClassifier = FOREST_CLASSIFIERS[name]

    # Fit with default settings, then with max_features=1; both must
    # reproduce the toy labels exactly.
    for extra in ({}, {"max_features": 1}):
        clf = ForestClassifier(n_estimators=10, random_state=1, **extra)
        clf.fit(X, y)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(10, len(clf))

    # also test apply: one leaf index per (sample, estimator) pair
    leaf_indices = clf.apply(X)
    assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
    # One sub-test per registered forest classifier.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_classification_toy, clf_name
def check_iris_criterion(name, criterion):
    # Check consistency on dataset iris.
    ForestClassifier = FOREST_CLASSIFIERS[name]

    # Default max_features must score high; max_features=2 merely decently.
    for extra, min_score in (({}, 0.9), ({"max_features": 2}, 0.5)):
        clf = ForestClassifier(n_estimators=10, criterion=criterion,
                               random_state=1, **extra)
        clf.fit(iris.data, iris.target)
        score = clf.score(iris.data, iris.target)
        assert_greater(score, min_score,
                       "Failed with criterion %s and score = %f"
                       % (criterion, score))
def test_iris():
    # Every classifier x criterion combination.
    for clf_name, crit in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
        yield check_iris_criterion, clf_name, crit
def check_boston_criterion(name, criterion):
    # Check consistency on dataset boston house prices.
    ForestRegressor = FOREST_REGRESSORS[name]

    # Both the default max_features and max_features=6 must fit well.
    for extra, label in (({}, "None"), ({"max_features": 6}, "6")):
        clf = ForestRegressor(n_estimators=5, criterion=criterion,
                              random_state=1, **extra)
        clf.fit(boston.data, boston.target)
        score = clf.score(boston.data, boston.target)
        assert_greater(score, 0.95,
                       "Failed with max_features=%s, criterion %s "
                       "and score = %f" % (label, criterion, score))
def test_boston():
    # Only "mse" is exercised here.
    for reg_name, crit in product(FOREST_REGRESSORS, ("mse", )):
        yield check_boston_criterion, reg_name, crit
def check_regressor_attributes(name):
    # Regression models should not have a classes_ attribute,
    # neither before nor after fitting.
    r = FOREST_REGRESSORS[name](random_state=0)
    for attr in ("classes_", "n_classes_"):
        assert_false(hasattr(r, attr))

    r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
    for attr in ("classes_", "n_classes_"):
        assert_false(hasattr(r, attr))
def test_regressor_attributes():
    for reg_name in FOREST_REGRESSORS:
        yield check_regressor_attributes, reg_name
def check_probability(name):
    # Predict probabilities.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(n_estimators=10, random_state=1,
                               max_features=1, max_depth=1)
        clf.fit(iris.data, iris.target)
        proba = clf.predict_proba(iris.data)
        # Each row is a distribution: it must sum to one ...
        assert_array_almost_equal(proba.sum(axis=1),
                                  np.ones(iris.data.shape[0]))
        # ... and the log-probabilities must be consistent with it.
        assert_array_almost_equal(proba,
                                  np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
    for clf_name in FOREST_CLASSIFIERS:
        yield check_probability, clf_name
def check_importances(name, criterion, X, y):
    """Check feature_importances_ recovers the informative features.

    The data has 3 informative features out of 10; importances must
    concentrate on them, be invariant to parallelism, and be invariant
    to a global rescaling of the sample weights.
    """
    ForestEstimator = FOREST_ESTIMATORS[name]

    est = ForestEstimator(n_estimators=20, criterion=criterion,
                          random_state=0)
    est.fit(X, y)
    importances = est.feature_importances_
    n_important = np.sum(importances > 0.1)
    assert_equal(importances.shape[0], 10)
    assert_equal(n_important, 3)

    # XXX: Remove this test in 0.19 after transform support to estimators
    # is removed.
    X_new = assert_warns(
        DeprecationWarning, est.transform, X, threshold="mean")
    # BUG FIX: the original `assert_less(0 < X_new.shape[1], X.shape[1])`
    # compared the *boolean* (0 < X_new.shape[1]) against X.shape[1] and was
    # trivially true.  Assert the intended bounds instead: some features
    # kept, but strictly fewer than the input had.
    assert_greater(X_new.shape[1], 0)
    assert_less(X_new.shape[1], X.shape[1])

    # Check with parallel
    importances = est.feature_importances_
    est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)

    # Check with sample weights
    sample_weight = check_random_state(0).randint(1, 10, len(X))
    est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
    est.fit(X, y, sample_weight=sample_weight)
    importances = est.feature_importances_
    assert_true(np.all(importances >= 0.0))

    # Rescaling all weights by a constant must not change the importances.
    for scale in [0.5, 10, 100]:
        est = ForestEstimator(n_estimators=20, random_state=0,
                              criterion=criterion)
        est.fit(X, y, sample_weight=scale * sample_weight)
        importances_bis = est.feature_importances_
        assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
    """Run the importance check over all classifiers and regressors."""
    X, y = datasets.make_classification(n_samples=500, n_features=10,
                                        n_informative=3, n_redundant=0,
                                        n_repeated=0, shuffle=False,
                                        random_state=0)
    for clf_name, crit in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
        yield check_importances, clf_name, crit, X, y
    for reg_name, crit in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
        yield check_importances, reg_name, crit, X, y
def test_importances_asymptotic():
    # Check whether variable importances of totally randomized trees
    # converge towards their theoretical values (See Louppe et al,
    # Understanding variable importances in forests of randomized trees, 2013).
    def binomial(k, n):
        # Binomial coefficient C(n, k); 0 outside the valid range.
        return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
    def entropy(samples):
        # Shannon entropy (base 2) of a vector of discrete labels.
        n_samples = len(samples)
        entropy = 0.
        for count in bincount(samples):
            p = 1. * count / n_samples
            if p > 0:
                entropy -= p * np.log2(p)
        return entropy
    def mdi_importance(X_m, X, y):
        # Theoretical mean decrease of impurity for feature X_m, computed
        # by brute force over all conditioning subsets B of the remaining
        # features and all of their value assignments b.
        n_samples, n_features = X.shape
        features = list(range(n_features))
        features.pop(X_m)
        values = [np.unique(X[:, i]) for i in range(n_features)]
        imp = 0.
        for k in range(n_features):
            # Weight of each B of size k
            coef = 1. / (binomial(k, n_features) * (n_features - k))
            # For all B of size k
            for B in combinations(features, k):
                # For all values B=b
                for b in product(*[values[B[j]] for j in range(k)]):
                    # FIX: use builtin `bool` instead of the deprecated
                    # alias np.bool (a plain alias for bool, removed in
                    # NumPy 1.24).
                    mask_b = np.ones(n_samples, dtype=bool)
                    for j in range(k):
                        mask_b &= X[:, B[j]] == b[j]
                    X_, y_ = X[mask_b, :], y[mask_b]
                    n_samples_b = len(X_)
                    if n_samples_b > 0:
                        children = []
                        for xi in values[X_m]:
                            mask_xi = X_[:, X_m] == xi
                            children.append(y_[mask_xi])
                        imp += (coef
                                * (1. * n_samples_b / n_samples)  # P(B=b)
                                * (entropy(y_) -
                                   sum([entropy(c) * len(c) / n_samples_b
                                        for c in children])))
        return imp
    # 7 binary features plus a label column with 10 distinct values.
    data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
                     [1, 0, 1, 1, 1, 0, 1, 2],
                     [1, 0, 1, 1, 0, 1, 1, 3],
                     [0, 1, 1, 1, 0, 1, 0, 4],
                     [1, 1, 0, 1, 0, 1, 1, 5],
                     [1, 1, 0, 1, 1, 1, 1, 6],
                     [1, 0, 1, 0, 0, 1, 0, 7],
                     [1, 1, 1, 1, 1, 1, 1, 8],
                     [1, 1, 1, 1, 0, 1, 1, 9],
                     [1, 1, 1, 0, 1, 1, 1, 0]])
    # FIX: `bool` instead of deprecated np.bool (see above).
    X, y = np.array(data[:, :7], dtype=bool), data[:, 7]
    n_features = X.shape[1]
    # Compute true importances
    true_importances = np.zeros(n_features)
    for i in range(n_features):
        true_importances[i] = mdi_importance(i, X, y)
    # Estimate importances with totally randomized trees
    clf = ExtraTreesClassifier(n_estimators=500,
                               max_features=1,
                               criterion="entropy",
                               random_state=0).fit(X, y)
    importances = sum(tree.tree_.compute_feature_importances(normalize=False)
                      for tree in clf.estimators_) / clf.n_estimators
    # Check correctness: importances sum to the label entropy, and each
    # estimate is close to its theoretical value.
    assert_almost_equal(entropy(y), sum(importances))
    assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
    """Reading feature_importances_ before fit must raise ValueError."""
    unfitted = FOREST_ESTIMATORS[name](random_state=0)
    assert_raises(ValueError, getattr, unfitted, "feature_importances_")
def test_unfitted_feature_importances():
    """Yield the unfitted-importances check for every estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_unfitted_feature_importances, est_name
def check_oob_score(name, X, y, n_estimators=20):
    # Check that oob prediction is a good estimation of the generalization
    # error.
    # Proper behavior
    est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                  n_estimators=n_estimators, bootstrap=True)
    n_samples = X.shape[0]
    # Fit on the first half of the data, score on the second half.
    est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
    test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
    if name in FOREST_CLASSIFIERS:
        # Accuracy: OOB estimate should land within 0.1 of the test score.
        assert_less(abs(test_score - est.oob_score_), 0.1)
    else:
        # Regression (R^2): OOB is a pessimistic estimate but still high.
        assert_greater(test_score, est.oob_score_)
        assert_greater(est.oob_score_, .8)
    # Check warning if not enough estimators
    with np.errstate(divide="ignore", invalid="ignore"):
        est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                      n_estimators=1, bootstrap=True)
        # With a single tree, some samples have no OOB prediction at all.
        assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
    """Yield OOB checks: classifiers on iris, regressors on boston."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_oob_score, clf_name, iris.data, iris.target
        # csc matrix
        yield check_oob_score, clf_name, csc_matrix(iris.data), iris.target
        # non-contiguous targets in classification
        yield check_oob_score, clf_name, iris.data, iris.target * 2 + 1
    for reg_name in FOREST_REGRESSORS:
        yield check_oob_score, reg_name, boston.data, boston.target, 50
        # csc matrix
        yield (check_oob_score, reg_name, csc_matrix(boston.data),
               boston.target, 50)
def check_oob_score_raise_error(name):
    # oob_score must be rejected (or absent) whenever it cannot be computed.
    # NOTE: relies on module-level X and y defined elsewhere in this file.
    ForestEstimator = FOREST_ESTIMATORS[name]
    if name in FOREST_TRANSFORMERS:
        # Transformers do not accept an oob_score argument at all.
        for oob_score in [True, False]:
            assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
        assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
                      X, y)
    else:
        # Unfitted / no bootstrap / no oob_score
        for oob_score, bootstrap in [(True, False), (False, True),
                                     (False, False)]:
            est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
                                  random_state=0)
            # oob_score_ must only exist after a successful OOB fit.
            assert_false(hasattr(est, "oob_score_"))
        # No bootstrap: requesting oob_score must fail at fit time.
        assert_raises(ValueError, ForestEstimator(oob_score=True,
                                                  bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
    """Yield the OOB error-raising check for every estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_oob_score_raise_error, est_name
def check_gridsearch(name):
    """Smoke-test that a forest classifier works inside GridSearchCV."""
    param_grid = {'n_estimators': (1, 2), 'max_depth': (1, 2)}
    search = GridSearchCV(FOREST_CLASSIFIERS[name](), param_grid)
    search.fit(iris.data, iris.target)
def test_gridsearch():
    # Check that base trees can be grid-searched.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_gridsearch, clf_name
def check_parallel(name, X, y):
    """Check parallel computations in classification"""
    forest = FOREST_ESTIMATORS[name](n_estimators=10, n_jobs=3, random_state=0)
    forest.fit(X, y)
    assert_equal(len(forest), 10)
    # Predictions must not depend on the number of jobs used.
    forest.set_params(n_jobs=1)
    serial_pred = forest.predict(X)
    forest.set_params(n_jobs=2)
    parallel_pred = forest.predict(X)
    assert_array_almost_equal(serial_pred, parallel_pred, 3)
def test_parallel():
    """Yield the parallelism check for classifiers and regressors."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_parallel, clf_name, iris.data, iris.target
    for reg_name in FOREST_REGRESSORS:
        yield check_parallel, reg_name, boston.data, boston.target
def check_pickle(name, X, y):
    """A pickled forest must round-trip to the same class and score."""
    estimator = FOREST_ESTIMATORS[name](random_state=0)
    estimator.fit(X, y)
    original_score = estimator.score(X, y)
    restored = pickle.loads(pickle.dumps(estimator))
    assert_equal(type(restored), estimator.__class__)
    assert_equal(original_score, restored.score(X, y))
def test_pickle():
    """Yield the pickling check on subsampled iris/boston data."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_pickle, clf_name, iris.data[::2], iris.target[::2]
    for reg_name in FOREST_REGRESSORS:
        yield check_pickle, reg_name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
    # Check estimators on multi-output problems.
    # Four quadrants of the plane map to four distinct 2-d targets.
    X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
               [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
    y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
               [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_almost_equal(y_pred, y_test)
    if name in FOREST_CLASSIFIERS:
        with np.errstate(divide="ignore"):
            # One probability array per output: the first output has 2
            # classes, the second has 4, over the 4 test samples.
            proba = est.predict_proba(X_test)
            assert_equal(len(proba), 2)
            assert_equal(proba[0].shape, (4, 2))
            assert_equal(proba[1].shape, (4, 4))
            log_proba = est.predict_log_proba(X_test)
            assert_equal(len(log_proba), 2)
            assert_equal(log_proba[0].shape, (4, 2))
            assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
    """Yield the multi-output check for classifiers and regressors."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_multioutput, clf_name
    for reg_name in FOREST_REGRESSORS:
        yield check_multioutput, reg_name
def check_classes_shape(name):
    # Test that n_classes_ and classes_ have proper shape.
    # NOTE: relies on module-level X, y — the assertions below imply binary
    # labels in {-1, 1}.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Classification, single output
    clf = ForestClassifier(random_state=0).fit(X, y)
    assert_equal(clf.n_classes_, 2)
    assert_array_equal(clf.classes_, [-1, 1])
    # Classification, multi-output: second output is the labels doubled.
    _y = np.vstack((y, np.array(y) * 2)).T
    clf = ForestClassifier(random_state=0).fit(X, _y)
    # One class count / class array per output.
    assert_array_equal(clf.n_classes_, [2, 2])
    assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
    """Yield the classes-shape check for every forest classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_classes_shape, clf_name
def test_random_trees_dense_type():
    # RandomTreesEmbedding with sparse_output=False must return a dense
    # ndarray rather than a scipy sparse matrix.
    embedder = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
    X, y = datasets.make_circles(factor=0.5)
    transformed = embedder.fit_transform(X)
    assert_equal(type(transformed), np.ndarray)
def test_random_trees_dense_equal():
    # With the same random_state, sparse_output=True and False must encode
    # the exact same embedding.
    dense_hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
                                        random_state=0)
    sparse_hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
                                         random_state=0)
    X, y = datasets.make_circles(factor=0.5)
    dense_out = dense_hasher.fit_transform(X)
    sparse_out = sparse_hasher.fit_transform(X)
    assert_array_equal(sparse_out.toarray(), dense_out)
def test_random_hasher():
    # test random forest hashing on circles dataset
    # make sure that it is linearly separable.
    # even after projected to two SVD dimensions
    # Note: Not all random_states produce perfect results.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    X, y = datasets.make_circles(factor=0.5)
    X_transformed = hasher.fit_transform(X)
    # test fit and transform:
    # fit(X).transform(X) must equal fit_transform(X) for a fresh hasher.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    assert_array_equal(hasher.fit(X).transform(X).toarray(),
                       X_transformed.toarray())
    # one leaf active per data point per forest
    assert_equal(X_transformed.shape[0], X.shape[0])
    assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
    # The embedded circles become linearly separable even after reducing
    # the one-hot leaf encoding to 2 SVD dimensions.
    svd = TruncatedSVD(n_components=2)
    X_reduced = svd.fit_transform(X_transformed)
    linear_clf = LinearSVC()
    linear_clf.fit(X_reduced, y)
    assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
    # Dense and sparse inputs must produce the same embedding.
    X, y = datasets.make_multilabel_classification(random_state=0)
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    dense_out = hasher.fit_transform(X)
    sparse_out = hasher.fit_transform(csc_matrix(X))
    assert_array_equal(sparse_out.toarray(), dense_out.toarray())
def test_parallel_train():
    """Fits with different n_jobs must yield identical probabilities."""
    rng = check_random_state(12321)
    n_samples, n_features = 80, 30
    X_train = rng.randn(n_samples, n_features)
    y_train = rng.randint(0, 2, n_samples)
    forests = []
    for n_jobs in [1, 2, 3, 8, 16, 32]:
        model = RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
                                       random_state=12345)
        forests.append(model.fit(X_train, y_train))
    X_test = rng.randn(n_samples, n_features)
    probas = [model.predict_proba(X_test) for model in forests]
    # Compare each consecutive pair of probability matrices.
    for prev, curr in zip(probas, probas[1:]):
        assert_array_almost_equal(prev, curr)
def test_distribution():
    # Check the distribution of tree structures grown by ExtraTreesRegressor
    # on tiny categorical problems against the combinatorial expectation.
    rng = check_random_state(12321)
    # Single variable with 4 values
    X = rng.randint(0, 4, size=(1000, 1))
    y = rng.rand(1000)
    n_trees = 500
    clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
    uniques = defaultdict(int)
    for tree in clf.estimators_:
        # Serialize each tree as a "feature,threshold/" string ("-" for
        # leaves) so structurally identical trees can be counted.
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))
        uniques[tree] += 1
    uniques = sorted([(1. * count / n_trees, tree)
                      for tree, count in uniques.items()])
    # On a single variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
    # them has probability 1/3 while the 4 others have probability 1/6.
    assert_equal(len(uniques), 5)
    assert_greater(0.20, uniques[0][0])  # Rough approximation of 1/6.
    assert_greater(0.20, uniques[1][0])
    assert_greater(0.20, uniques[2][0])
    assert_greater(0.20, uniques[3][0])
    assert_greater(uniques[4][0], 0.3)
    assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
    # Two variables, one with 2 values, one with 3 values
    X = np.empty((1000, 2))
    # NOTE(review): these draw from the *global* np.random instead of the
    # local seeded `rng` created above, so this half of the test is not
    # reproducible in isolation — looks unintentional; confirm.
    X[:, 0] = np.random.randint(0, 2, 1000)
    X[:, 1] = np.random.randint(0, 3, 1000)
    y = rng.rand(1000)
    clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
                              random_state=1).fit(X, y)
    uniques = defaultdict(int)
    for tree in clf.estimators_:
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))
        uniques[tree] += 1
    uniques = [(count, tree) for tree, count in uniques.items()]
    assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
    # Test precedence of max_leaf_nodes over max_depth.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    # When max_leaf_nodes is set, the depth cap is ignored.
    capped = ForestEstimator(max_depth=1, max_leaf_nodes=4,
                             n_estimators=1, random_state=0).fit(X, y)
    assert_greater(capped.estimators_[0].tree_.max_depth, 1)
    # Without max_leaf_nodes, max_depth is enforced.
    shallow = ForestEstimator(max_depth=1, n_estimators=1,
                              random_state=0).fit(X, y)
    assert_equal(shallow.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
    """Yield the max_leaf_nodes/max_depth precedence check."""
    for est_name in FOREST_ESTIMATORS:
        yield check_max_leaf_nodes_max_depth, est_name
def check_min_samples_split(name):
    # min_samples_split: invalid values must raise; valid ones must bound
    # the number of samples in every internal (split) node.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    # test boundary value
    assert_raises(ValueError,
                  ForestEstimator(min_samples_split=-1).fit, X, y)
    assert_raises(ValueError,
                  ForestEstimator(min_samples_split=0).fit, X, y)
    assert_raises(ValueError,
                  ForestEstimator(min_samples_split=1.1).fit, X, y)
    est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
    est.fit(X, y)
    # Internal nodes are those that have a left child.
    node_idx = est.estimators_[0].tree_.children_left != -1
    node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
    # NOTE(review): this bound equals min_samples_split - 1 only if
    # len(X) == 20 — confirm the size of hastie_X (defined elsewhere).
    assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
                   "Failed with {0}".format(name))
    # Fractional value: split nodes must hold at least 50% of the samples.
    est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
    est.fit(X, y)
    node_idx = est.estimators_[0].tree_.children_left != -1
    node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
    assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
                   "Failed with {0}".format(name))
def test_min_samples_split():
    """Yield the min_samples_split check for every estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_min_samples_split, est_name
def check_min_samples_leaf(name):
    # min_samples_leaf: invalid values must raise; valid ones must bound
    # the number of training samples reaching every leaf.
    X, y = hastie_X, hastie_y
    # Test if leaves contain more than leaf_count training examples
    ForestEstimator = FOREST_ESTIMATORS[name]
    # test boundary value
    assert_raises(ValueError,
                  ForestEstimator(min_samples_leaf=-1).fit, X, y)
    assert_raises(ValueError,
                  ForestEstimator(min_samples_leaf=0).fit, X, y)
    est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
    est.fit(X, y)
    out = est.estimators_[0].tree_.apply(X)
    node_counts = bincount(out)
    # drop inner nodes
    leaf_count = node_counts[node_counts != 0]
    assert_greater(np.min(leaf_count), 4,
                   "Failed with {0}".format(name))
    # Fractional value: every leaf must hold at least 25% of the samples.
    est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
                          random_state=0)
    est.fit(X, y)
    out = est.estimators_[0].tree_.apply(X)
    # CONSISTENCY FIX: use the same `bincount` helper as the first half of
    # this check (and the rest of the file) instead of np.bincount.
    node_counts = bincount(out)
    # drop inner nodes
    leaf_count = node_counts[node_counts != 0]
    assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
                   "Failed with {0}".format(name))
def test_min_samples_leaf():
    """Yield the min_samples_leaf check for every estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_min_samples_leaf, est_name
def check_min_weight_fraction_leaf(name):
    X, y = hastie_X, hastie_y
    # Test if leaves contain at least min_weight_fraction_leaf of the
    # training set
    ForestEstimator = FOREST_ESTIMATORS[name]
    rng = np.random.RandomState(0)
    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)
    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for frac in np.linspace(0, 0.5, 6):
        est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
                              random_state=0)
        if "RandomForest" in name:
            # Disable bootstrap so every sample (and its weight) reaches
            # the single tree exactly once.
            est.bootstrap = False
        est.fit(X, y, sample_weight=weights)
        out = est.estimators_[0].tree_.apply(X)
        # Sum of sample weights arriving at each node.
        node_weights = bincount(out, weights=weights)
        # drop inner nodes
        leaf_weights = node_weights[node_weights != 0]
        assert_greater_equal(
            np.min(leaf_weights),
            total_weight * est.min_weight_fraction_leaf,
            "Failed with {0} "
            "min_weight_fraction_leaf={1}".format(
                name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
    """Yield the min_weight_fraction_leaf check for every estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_min_weight_fraction_leaf, est_name
def check_sparse_input(name, X, X_sparse, y):
    # Fitting on sparse input must give the same model as fitting on the
    # equivalent dense input.
    ForestEstimator = FOREST_ESTIMATORS[name]
    dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
    sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
    # Same leaf assignment implies identical tree structure.
    assert_array_almost_equal(sparse.apply(X), dense.apply(X))
    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_array_almost_equal(sparse.predict(X), dense.predict(X))
        assert_array_almost_equal(sparse.feature_importances_,
                                  dense.feature_importances_)
    if name in FOREST_CLASSIFIERS:
        assert_array_almost_equal(sparse.predict_proba(X),
                                  dense.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  dense.predict_log_proba(X))
    if name in FOREST_TRANSFORMERS:
        assert_array_almost_equal(sparse.transform(X).toarray(),
                                  dense.transform(X).toarray())
        assert_array_almost_equal(sparse.fit_transform(X).toarray(),
                                  dense.fit_transform(X).toarray())
def test_sparse_input():
    """Run the sparse-input check for each estimator and sparse format."""
    X, y = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=50)
    for est_name, to_sparse in product(FOREST_ESTIMATORS,
                                       (csr_matrix, csc_matrix, coo_matrix)):
        yield check_sparse_input, est_name, X, to_sparse(X), y
def check_memory_layout(name, dtype):
    # Check that it works no matter the memory layout
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    # Nothing
    X = np.asarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # C-order
    X = np.asarray(iris.data, order="C", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # F-order
    X = np.asarray(iris.data, order="F", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # Contiguous
    X = np.ascontiguousarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # Sparse formats, only when the underlying splitter supports them.
    if est.base_estimator.splitter in SPARSE_SPLITTERS:
        # csr matrix
        X = csr_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # csc_matrix
        X = csc_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # coo_matrix
        X = coo_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
    # Strided (non-contiguous view of every third sample)
    X = np.asarray(iris.data[::3], dtype=dtype)
    y = iris.target[::3]
    assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
    """Yield the memory-layout check for each estimator/dtype pair."""
    for clf_name, dtype in product(FOREST_CLASSIFIERS,
                                   [np.float64, np.float32]):
        yield check_memory_layout, clf_name, dtype
    for reg_name, dtype in product(FOREST_REGRESSORS,
                                   [np.float64, np.float32]):
        yield check_memory_layout, reg_name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
    """1-d X must be rejected by fit and, after a 2-d fit, by predict."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    assert_raises(ValueError,
                  ForestEstimator(n_estimators=1, random_state=0).fit, X, y)
    fitted = ForestEstimator(random_state=0)
    fitted.fit(X_2d, y)
    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_raises(ValueError, fitted.predict, X)
@ignore_warnings
def test_1d_input():
    """Yield the 1-d input check with a single iris feature."""
    X_1d = iris.data[:, 0]
    X_2d = iris.data[:, 0].reshape((-1, 1))
    for est_name in FOREST_ESTIMATORS:
        yield check_1d_input, est_name, X_1d, X_2d, iris.target
def check_class_weights(name):
    # Check class_weights resemble sample_weights behavior.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = ForestClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    # (each class gets total weight 5 when summed across the three dicts).
    clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                          {0: 2., 1: 1., 2: 2.},
                                          {0: 1., 1: 2., 2: 2.}],
                            random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "balanced" which should also have no effect
    clf4 = ForestClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Check that sample_weight and class_weight are multiplicative
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
    """Yield the class_weight check for every forest classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weights, clf_name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output (smoke tests only).
    # NOTE: relies on module-level X, y defined elsewhere in this file.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    _y = np.vstack((y, np.array(y) * 2)).T
    clf = ForestClassifier(class_weight='balanced', random_state=0)
    clf.fit(X, _y)
    # Per-output dicts keyed on the labels of each output.
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
                           random_state=0)
    clf.fit(X, _y)
    # smoke test for subsample and balanced subsample
    clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
    clf.fit(X, _y)
    # 'subsample' is deprecated, hence the ignore_warnings wrapper.
    clf = ForestClassifier(class_weight='subsample', random_state=0)
    ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
    """Yield the multi-output class_weight smoke test per classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weight_balanced_and_bootstrap_multi_output, clf_name
def check_class_weight_errors(name):
    # Test if class_weight raises errors and warnings when expected.
    # NOTE: relies on module-level X, y defined elsewhere in this file.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    _y = np.vstack((y, np.array(y) * 2)).T
    # Invalid preset string
    clf = ForestClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, clf.fit, X, y)
    assert_raises(ValueError, clf.fit, X, _y)
    # Warning warm_start with preset
    clf = ForestClassifier(class_weight='auto', warm_start=True,
                           random_state=0)
    assert_warns(UserWarning, clf.fit, X, y)
    assert_warns(UserWarning, clf.fit, X, _y)
    # Not a list or preset for multi-output
    clf = ForestClassifier(class_weight=1, random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
    # Incorrect length list for multi-output (one dict for two outputs)
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
    """Yield the class_weight error check for every classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weight_errors, clf_name
def check_warm_start(name, random_state=42):
    # Test if fitting incrementally with warm start gives a forest of the
    # right size and the same results as a normal fit.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    clf_ws = None
    # Grow 5 trees, then warm-start up to 10.
    for n_estimators in [5, 10]:
        if clf_ws is None:
            clf_ws = ForestEstimator(n_estimators=n_estimators,
                                     random_state=random_state,
                                     warm_start=True)
        else:
            clf_ws.set_params(n_estimators=n_estimators)
        clf_ws.fit(X, y)
        assert_equal(len(clf_ws), n_estimators)
    # A one-shot fit of 10 trees with the same seed is the reference.
    clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
                                warm_start=False)
    clf_no_ws.fit(X, y)
    # Same per-tree seeds and identical leaf assignments.
    assert_equal(set([tree.random_state for tree in clf_ws]),
                 set([tree.random_state for tree in clf_no_ws]))
    assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
                       err_msg="Failed with {0}".format(name))
def test_warm_start():
    """Yield the warm-start check for every estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_warm_start, est_name
def check_warm_start_clear(name):
    # Test if fit clears state and grows a new forest when warm_start==False.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    reference = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
                                random_state=1)
    reference.fit(X, y)
    candidate = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
                                random_state=2)
    candidate.fit(X, y)  # inits state
    candidate.set_params(warm_start=False, random_state=1)
    candidate.fit(X, y)  # clears old state and equals reference
    assert_array_almost_equal(candidate.apply(X), reference.apply(X))
def test_warm_start_clear():
    """Yield the warm-start clearing check for every estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_warm_start_clear, est_name
def check_warm_start_smaller_n_estimators(name):
    # Test if warm start second fit with smaller n_estimators raises error.
    X, y = hastie_X, hastie_y
    forest = FOREST_ESTIMATORS[name](n_estimators=5, max_depth=1,
                                     warm_start=True)
    forest.fit(X, y)
    # Shrinking the ensemble is not supported.
    forest.set_params(n_estimators=4)
    assert_raises(ValueError, forest.fit, X, y)
def test_warm_start_smaller_n_estimators():
    """Yield the shrinking-warm-start check for every estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_warm_start_smaller_n_estimators, est_name
def check_warm_start_equal_n_estimators(name):
    # Test if warm start with equal n_estimators does nothing and returns the
    # same forest and raises a warning.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                          random_state=1)
    clf.fit(X, y)
    clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                            random_state=1)
    clf_2.fit(X, y)
    # Now clf_2 equals clf.
    clf_2.set_params(random_state=2)
    # Refitting with the same n_estimators warns and trains nothing new.
    assert_warns(UserWarning, clf_2.fit, X, y)
    # If we had fit the trees again we would have got a different forest as we
    # changed the random state.
    assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
    """Yield the equal-n_estimators warm-start check per estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_warm_start_equal_n_estimators, est_name
def check_warm_start_oob(name):
    # Test that the warm start computes oob score when asked.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
    clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
                          random_state=1, bootstrap=True, oob_score=True)
    clf.fit(X, y)
    # Grow 5 trees without OOB, then warm-start to 15 with OOB enabled.
    clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
                            random_state=1, bootstrap=True, oob_score=False)
    clf_2.fit(X, y)
    clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
    clf_2.fit(X, y)
    assert_true(hasattr(clf_2, 'oob_score_'))
    # Must match the one-shot fit with the same seed.
    assert_equal(clf.oob_score_, clf_2.oob_score_)
    # Test that oob_score is computed even if we don't need to train
    # additional trees.
    clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
                            random_state=1, bootstrap=True, oob_score=False)
    clf_3.fit(X, y)
    assert_true(not(hasattr(clf_3, 'oob_score_')))
    clf_3.set_params(oob_score=True)
    # Refit warns (same n_estimators) but must still compute the OOB score.
    ignore_warnings(clf_3.fit)(X, y)
    assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
    """Yield the warm-start OOB check for classifiers and regressors."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_warm_start_oob, clf_name
    for reg_name in FOREST_REGRESSORS:
        yield check_warm_start_oob, reg_name
def test_dtype_convert(n_classes=15):
    """String class labels must round-trip through fit/predict unchanged."""
    classifier = RandomForestClassifier(random_state=0, bootstrap=False)
    X = np.eye(n_classes)
    # One distinct letter label per class (supports up to 21 classes).
    y = list('ABCDEFGHIJKLMNOPQRSTU'[:n_classes])
    result = classifier.fit(X, y).predict(X)
    assert_array_equal(classifier.classes_, y)
    assert_array_equal(result, y)
def check_decision_path(name):
    # decision_path must return one node-indicator column block per tree,
    # with every sample's leaf marked in the corresponding block.
    X, y = hastie_X, hastie_y
    n_samples = X.shape[0]
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
                          random_state=1)
    est.fit(X, y)
    indicator, n_nodes_ptr = est.decision_path(X)
    # Columns cover all nodes of all trees; n_nodes_ptr delimits each tree.
    assert_equal(indicator.shape[1], n_nodes_ptr[-1])
    assert_equal(indicator.shape[0], n_samples)
    assert_array_equal(np.diff(n_nodes_ptr),
                       [e.tree_.node_count for e in est.estimators_])
    # Assert that leaves index are correct
    leaves = est.apply(X)
    for est_id in range(leaves.shape[1]):
        # The indicator entry at each sample's leaf must be set (== 1).
        leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
                           for i, j in enumerate(leaves[:, est_id])]
        assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
    """Yield the decision_path check for classifiers and regressors."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_decision_path, clf_name
    for reg_name in FOREST_REGRESSORS:
        yield check_decision_path, reg_name
| bsd-3-clause |
anurag313/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
    """Build a low-rank image dataset Y = U V + Gaussian noise.

    Each atom (row of V) is a small white rectangle on a black image;
    U holds random mixing coefficients.
    """
    n_features = image_size[0] * image_size[1]
    rng = check_random_state(random_state)
    U = rng.randn(n_samples, n_components)
    V = rng.randn(n_components, n_features)
    centers = [(3, 3), (6, 7), (8, 1)]
    sz = [1, 2, 1]
    for k in range(n_components):
        img = np.zeros(image_size)
        cx, cy = centers[k]
        half = sz[k]
        img[cx - half:cx + half][:, cy - half:cy + half] = 1.0
        V[k, :] = img.ravel()
    # Y is defined by : Y = UV + noise
    Y = np.dot(U, V)
    Y += 0.1 * rng.randn(n_samples, n_features)  # Add noise
    return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
    """components_ and codes must have (n_components, n_features) shapes."""
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)
    # Undercomplete: 8 components from 10 features.
    model = SparsePCA(n_components=8, random_state=rng)
    codes = model.fit_transform(X)
    assert_equal(model.components_.shape, (8, 10))
    assert_equal(codes.shape, (12, 8))
    # test overcomplete decomposition
    model = SparsePCA(n_components=13, random_state=rng)
    codes = model.fit_transform(X)
    assert_equal(model.components_.shape, (13, 10))
    assert_equal(codes.shape, (12, 13))
def test_fit_transform():
    """LARS and coordinate-descent solvers must agree on the components."""
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    model_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
                           random_state=0)
    model_lars.fit(Y)
    # Test that CD gives similar results
    model_cd = SparsePCA(n_components=3, method='cd', random_state=0,
                         alpha=alpha)
    model_cd.fit(Y)
    assert_array_almost_equal(model_cd.components_, model_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
    # Fitting with n_jobs=2 must reproduce the single-process transform.
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
                          random_state=0)
    spca_lars.fit(Y)
    U1 = spca_lars.transform(Y)
    # Test multiple CPUs
    spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
                     random_state=0).fit(Y)
    U2 = spca.transform(Y)
    # Guard against a degenerate all-zero solution.
    assert_true(not np.all(spca_lars.components_ == 0))
    assert_array_almost_equal(U1, U2)
def test_transform_nan():
    # Test that SparsePCA won't return NaN when there is 0 feature in all
    # samples.
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    # Zero out the first feature in every sample.
    Y[:, 0] = 0
    # NOTE(review): no random_state is passed here, unlike the other tests
    # in this file — confirm the fit is deterministic before relying on it.
    estimator = SparsePCA(n_components=8)
    assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
    """LARS and CD must agree on a tall (n_samples > n_features) array."""
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng)  # tall array
    lars_model = SparsePCA(n_components=3, method='lars',
                           random_state=rng)
    U_lars = lars_model.fit_transform(Y)
    cd_model = SparsePCA(n_components=3, method='cd', random_state=rng)
    U_cd = cd_model.fit(Y).transform(Y)
    assert_array_almost_equal(U_lars, U_cd)
def test_initialization():
    """With max_iter=0 the fitted components are exactly V_init."""
    rng = np.random.RandomState(0)
    U_init = rng.randn(5, 3)
    V_init = rng.randn(3, 4)
    model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init,
                      max_iter=0, random_state=rng)
    model.fit(rng.randn(5, 4))
    assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
    """MiniBatchSparsePCA shapes for under- and over-complete settings."""
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)
    # Undercomplete: 8 components from 10 features.
    model = MiniBatchSparsePCA(n_components=8, random_state=rng)
    codes = model.fit_transform(X)
    assert_equal(model.components_.shape, (8, 10))
    assert_equal(codes.shape, (12, 8))
    # test overcomplete decomposition
    model = MiniBatchSparsePCA(n_components=13, random_state=rng)
    codes = model.fit_transform(X)
    assert_equal(model.components_.shape, (13, 10))
    assert_equal(codes.shape, (12, 13))
def test_mini_batch_fit_transform():
    raise SkipTest("skipping mini_batch_fit_transform.")
    # NOTE: everything below is unreachable while the SkipTest above stands.
    penalty = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    serial = MiniBatchSparsePCA(n_components=3, random_state=0,
                                alpha=penalty).fit(Y)
    U_serial = serial.transform(Y)
    # Fit with two jobs; win32 has no usable multiprocessing here, so the
    # joblib backend is temporarily disabled to fake parallelism.
    if sys.platform == 'win32':
        import sklearn.externals.joblib.parallel as joblib_par
        saved_mp = joblib_par.multiprocessing
        joblib_par.multiprocessing = None
        try:
            U_parallel = MiniBatchSparsePCA(n_components=3, n_jobs=2,
                                            alpha=penalty,
                                            random_state=0).fit(Y).transform(Y)
        finally:
            joblib_par.multiprocessing = saved_mp
    else:  # we can efficiently use parallelism
        U_parallel = MiniBatchSparsePCA(n_components=3, n_jobs=2,
                                        alpha=penalty,
                                        random_state=0).fit(Y).transform(Y)
    assert_true(not np.all(serial.components_ == 0))
    assert_array_almost_equal(U_serial, U_parallel)
    # Coordinate descent should give similar components.
    cd_model = MiniBatchSparsePCA(n_components=3, method='cd', alpha=penalty,
                                  random_state=0).fit(Y)
    assert_array_almost_equal(cd_model.components_, serial.components_)
| bsd-3-clause |
EPFL-LCN/neuronaldynamics-exercises | neurodynex3/test/test_hopfield.py | 1 | 1162 | # import matplotlib
# matplotlib.use("Agg") # needed for plotting on travis
def test_pattern_factory():
    """hopfield_network.pattern_tools: a checkerboard has the requested size."""
    import neurodynex3.hopfield_network.pattern_tools as tools
    size = 6
    checkerboard = tools.PatternFactory(size).create_checkerboard()
    assert len(checkerboard) == size
def test_overlap():
    """hopfield_network.pattern_tools: checkerboard/all-on overlap is zero."""
    import neurodynex3.hopfield_network.pattern_tools as tools
    size = 10
    factory = tools.PatternFactory(size)
    checkerboard = factory.create_checkerboard()
    all_on = factory.create_all_on()
    # Holds for checkerboards of even size: +1 and -1 pixels cancel exactly.
    assert tools.compute_overlap(checkerboard, all_on) == 0.0
def test_load_alphabet():
    """The bundled alphabet patterns load and expose 10x10 letters."""
    import neurodynex3.hopfield_network.pattern_tools as pattern_tools
    letters = pattern_tools.load_alphabet()
    assert 'A' in letters, \
        "Alphabet dict not correctly loaded. Key not accessible"
    assert letters['A'].shape == (10, 10), \
        "Letter is not of shape (10,10)"
| gpl-2.0 |
kkozarev/mwacme | src/plot_max_spectra_synchrotron_integrated_subset.py | 2 | 10688 | import glob, os, sys,fnmatch
import matplotlib.pyplot as plt
from astropy.io import ascii
import numpy as np
def match_list_values(ls1, ls2):
    """Return, for each input sequence, the indices of the values it shares
    with the other sequence.

    Only the index of the *first* occurrence of each shared value is kept,
    in order of appearance.

    Originally written by Kamen Kozarev, with help from stackoverflow:
    http://stackoverflow.com/questions/1388818/how-can-i-compare-two-lists-in-python-and-return-matches
    http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-whilst-preserving-order

    Parameters
    ----------
    ls1, ls2 : sequence of hashable values (e.g. time strings)

    Returns
    -------
    findlist1 : list of int
        Indices into ls1 of the first occurrence of each value shared with ls2.
    findlist2 : list of int
        Indices into ls2 of the first occurrence of each value shared with ls1.

    Notes
    -----
    BUG FIX: the previous implementation deduplicated the matched values and
    then truncated the full index list to that length, which returned wrong
    indices whenever a duplicate value occurred *before* the last match.
    This version records the first-occurrence index of each matched value
    directly.
    """
    list1 = list(ls1)
    list2 = list(ls2)
    matches = set(list1).intersection(list2)

    def _first_occurrence_indices(values):
        # One index per distinct matched value, in order of first appearance.
        seen = set()
        indices = []
        for i, item in enumerate(values):
            if item in matches and item not in seen:
                seen.add(item)
                indices.append(i)
        return indices

    findlist1 = _first_occurrence_indices(list1)
    findlist2 = _first_occurrence_indices(list2)
    return findlist1, findlist2
#The new data location
# Root of the MWA data tree, selected per platform.  'linux2' is the
# Python 2 value of sys.platform; on any other platform BASEDIR stays
# undefined and the script fails at the datadir assignment below.
if sys.platform == 'darwin': BASEDIR='/Volumes/Transcend/MWA_DATA/'
if sys.platform == 'linux2': BASEDIR='/mnt/MWA_DATA/'
# Coarse-channel pairs to process; the second assignment deliberately
# overrides the first, restricting the run to four channels.
CHANNELS= ['062-063','069-070','076-077','084-085','093-094','103-104']#'113-114','125-126','139-140','153-154','169-170','187-188'
CHANNELS= ['069-070','076-077','084-085','093-094']
reference_channel=CHANNELS[0]
polarization='XX'
INDIR=BASEDIR
date='2015/11/04 '
OBSIDS=['1130642640','1130642936','1130643240','1130643536','1130643840','1130644136','1130644440','1130644736','1130645040']
force=0 #Overwrite files if present
finchan=[8,14] #the fine channel indices
# Which flux-maximum tracks ('1', '2') to build spectra for.
maxindices=['1','2']#,
avgperiod=1. #seconds over which to average
# Plot switches: brightness temperature, integrated flux, peak flux.
plotbtemp=0
plotintflux=1
plotmaxflux=0
datadir=BASEDIR+'synchrotron/subset/'
# For each flux-maximum track: read the per-channel "Max info" files,
# assemble per-time spectra (peak and integrated flux), average over
# `avgperiod` time steps, append the spectra to text files and optionally
# plot them.
for maxindex in maxindices:
    maxinfo={}
    for CHANNEL in CHANNELS:
        #GET the maximum information for the image.
        maxfile='Max'+maxindex+'_info_'+CHANNEL+'_'+polarization+'_synchrotron_integrated.txt'
        timestrings=[]
        print maxfile
        if os.path.exists(datadir+maxfile):
            #maxdata=ascii.read(datadir+maxfile)
            #maxintens=maxdata['maxintens']
            #maxlocx_px=maxdata['maxlocx_px']
            #maxlocy_px=maxdata['maxlocy_px']
            #times=maxdata['times']
            maxintens=[]
            maxlocx_px=[]
            maxlocy_px=[]
            intintens=[]
            times=[]
            f=open(datadir+maxfile,'r')
            f.readline()
            # Each data row: max intensity, x, y, quoted "date time",
            # integrated flux.  The quoted timestamp spans fields v4 and v5.
            for line in f:
                v1,v2,v3,v4,v5,v6 = line.split()
                time=(v4+' '+v5).split('"')[1]
                maxintens.append(float(v1))
                maxlocx_px.append(int(v2))
                maxlocy_px.append(float(v3))
                times.append(time)
                intintens.append(float(v6))
            f.close()
        # NOTE(review): if the file above is missing, `times` and the flux
        # lists carry over stale values from the previous channel — verify
        # all Max info files exist before trusting the output.
        for time in times:
            tmp=''.join(time.split(' ')[1].split(':'))
            timestrings.append(tmp)
        #Calculate the frequencies spanned by the selected fine channels
        tmp=CHANNEL.split('-')
        basefreq=int(tmp[0])*1.28 #base frequency in MHz
        startfreq=basefreq+finchan[0]*0.04 #Starting frequency
        endfreq=basefreq+finchan[1]*0.04 #Ending frequency
        midfreq=np.mean([startfreq,endfreq])
        #Populate the info dictionary
        maxinfo[CHANNEL]={'times':times,'timestrings':timestrings,'startfreq':startfreq,'endfreq':endfreq,'midfreq':midfreq,
                          'maxintens':maxintens,'integrated_flux':intintens,'maxlocx_px':maxlocx_px,'maxlocy_px':maxlocy_px} #,'fnames':img_list
    reference_times=maxinfo[reference_channel]['times']
    # NOTE(review): `frequencies` is sorted below but `freqerror` (and the
    # per-channel flux lists built later) keep dict iteration order, which
    # in Python 2 is arbitrary — confirm the orderings actually agree.
    frequencies=[info['midfreq'] for info in maxinfo.values()]
    freqerror=[(info['endfreq']-info['startfreq']) for info in maxinfo.values()]
    frequencies.sort()
    if plotbtemp > 0:
        #Brightness temperature defined as Tb = (I/nu^2)*(c^2)/(2*k)
        #weighting=(c^2)/(2*k) = 3.26e39
        # NOTE(review): `midfreq` here is whatever value the last CHANNEL
        # iteration left behind, so a single weighting is applied to all
        # channels — confirm before enabling plotbtemp.
        kb=1.38064852e-23
        c2=9.e16
        JANSKY2SI=1.e-26
        weighting=3.26e39*JANSKY2SI
        weighting=np.divide(weighting,midfreq*midfreq*1.e12)
    else:
        #Plot the Flux density in SFU = 1.e-4 Jy
        #weighting=1.e-4
        weighting=1.
    #allmaxima=[info['maxintens'] for info in maxinfo.values()]
    #datadir=BASEDIR+'subset/'
    cc=0
    totmax=[]
    allfluxes=[]
    allintfluxes=[]
    #Start writing the spectra to a file (header row: one frequency column
    #plus a dummy '000' error column per channel)
    specfile='max'+maxindex+'_'+polarization+'_spectra.txt'
    outf=open(datadir+specfile,'w')
    tmp=''
    for CHANNEL in CHANNELS: tmp=tmp+' '+str(maxinfo[CHANNEL]['midfreq'])+' 000'
    outf.write("Date Time" + tmp+'\n')
    outf.close()
    #Do the same but for the integrated flux spectra
    intspecfile='max'+maxindex+'_'+polarization+'_spectra_integrated.txt'
    outf=open(datadir+intspecfile,'w')
    tmp=''
    for CHANNEL in CHANNELS: tmp=tmp+' '+str(maxinfo[CHANNEL]['midfreq'])+' 000'
    outf.write("Date Time" + tmp+'\n')
    outf.close()
    # Map each channel's time axis onto the reference channel's time axis.
    timeindices={}
    for CHANNEL in CHANNELS:
        if CHANNEL == reference_channel: continue
        refind,chanind=match_list_values(reference_times,maxinfo[CHANNEL]['times'])
        timeindices[CHANNEL]=chanind
    # Accumulate fluxes over `avgperiod` consecutive reference times, then
    # emit one averaged spectrum per period.
    for ind,time in enumerate(reference_times):
        maxima=[]
        intfluxes=[]
        rmses=[]
        timestring=''.join(time.split(' ')[1].split(':'))
        for CHANNEL in CHANNELS:
            if time in maxinfo[CHANNEL]['times']:
                chantimind=maxinfo[CHANNEL]['times'].index(time)
                intfluxes.append(maxinfo[CHANNEL]['integrated_flux'][chantimind])
                maxima.append(maxinfo[CHANNEL]['maxintens'][chantimind])
            else:
                # Channel has no data at this time: pad with zeros.
                intfluxes.append(0.)
                maxima.append(0.)
        if ind == 0:
            totintflux=intfluxes
            totmax=maxima
        else:
            totintflux=np.add(totintflux,intfluxes)
            totmax=np.add(totmax,maxima)
        cc=cc+1
        if cc == avgperiod:
            totintflux=np.divide(totintflux,1.*avgperiod)
            totmax=np.divide(totmax,1.*avgperiod)
            brighttemp=np.multiply(totmax,weighting)
            #brighttemp_error=np.multiply(totrms,weighting)
            fluxdens=np.multiply(totmax,weighting)
            #fluxdens_error=np.multiply(totrms,weighting)
            intfluxdens=np.multiply(totintflux,weighting)
            allfluxes.append(fluxdens)
            allintfluxes.append(intfluxdens)
            #Save the peak fluxes
            outf=open(datadir+specfile,'a')
            fluxdstring=''
            for ii,dd in enumerate(fluxdens):
                fluxdstring=fluxdstring + ' {:e}'.format(fluxdens[ii]) + ' 0.0'
            fluxdstring=fluxdstring+'\n'
            outf.write(time+fluxdstring)
            outf.close()
            #Save the integrated fluxes
            outf=open(datadir+intspecfile,'a')
            intfluxdstring=''
            for ii,dd in enumerate(intfluxdens):
                intfluxdstring=intfluxdstring + ' {:e}'.format(intfluxdens[ii]) + ' 0.0'
            intfluxdstring=intfluxdstring+'\n'
            outf.write(time+intfluxdstring)
            outf.close()
            #Plot spectra
            if plotbtemp > 0 or plotmaxflux > 0:
                fig, ax = plt.subplots()
                if plotbtemp > 0:
                    ax.step(frequencies,brighttemp,color='r',where='mid')
                    ax.set_yscale('log')
                    plt.plot(frequencies,brighttemp,'ro')
                    # NOTE(review): brighttemp_error is never defined (its
                    # computation is commented out above); enabling plotbtemp
                    # raises NameError on the next line.
                    plt.errorbar(frequencies,brighttemp,xerr=freqerror,yerr=brighttemp_error,fmt='-')
                    plt.ylabel("Brightness Temperature, K")
                    x1,x2,y1,y2 = plt.axis()
                    plt.axis((x1,x2,min(brighttemp)*0.9,max(brighttemp)*1.1))
                if plotmaxflux>0:
                    #ax.step(frequencies,fluxdens,color='r',where='mid')
                    ax.set_yscale('log')
                    #ax.set_xscale('log')
                    plt.plot(frequencies,fluxdens,'r-',drawstyle='steps-mid',linewidth=2)
                    plt.plot(frequencies,fluxdens,'o',linewidth=2)
                    plt.ylabel("Peak Flux")
                    #plt.errorbar(frequencies,fluxdens,xerr=freqerror,yerr=fluxdens_error,fmt='o')
                    x1,x2,y1,y2 = plt.axis()
                    plt.axis((x1,x2,1.e2,1.e4))
                    #plt.axis((x1,x2,min(fluxdens)*0.9,max(fluxdens)*1.1))
                plt.xlabel("Frequency (MHz)")
                plt.title("Max"+maxindex+" spectrum "+time)
                plt.savefig(datadir+'Max'+maxindex+'_spectra_t'+timestring+'_'+polarization+"_synchrotron.png")
                plt.close()
            #PLOT THE INTEGRATED FLUXES
            if plotintflux>0:
                fig, ax = plt.subplots()
                ax.set_yscale('log')
                ax.tick_params(axis='x',labelsize=22,length=15,width=2)
                ax.tick_params(axis='y',labelsize=22,length=16,width=2)
                ax.tick_params(axis='y',length=8,width=2,which='minor')
                cpos=ax.get_position()
                ax.set_position([cpos.x0+0.02,cpos.y0+0.03,cpos.x1-0.09,cpos.y1-0.09])
                plt.plot(frequencies,intfluxdens,'r-',drawstyle='steps-mid',linewidth=3)
                plt.plot(frequencies,intfluxdens,'o',linewidth=2)
                plt.ylabel("Integrated Flux",fontsize=22)
                x1,x2,y1,y2 = plt.axis()
                plt.axis((x1,x2,8.e4,3.e6))
                plt.xlabel("Frequency (MHz)",fontsize=22)
                # Suppress connecting lines in subsequent plots.
                lines = {'linestyle': 'None'}
                plt.rc('lines', **lines)
                #plt.title("Max"+maxindex+" spectrum "+time)
                plt.savefig(datadir+'Max'+maxindex+'_spectra_t'+timestring+'_'+polarization+"_synchrotron_integrated.png")
                plt.close()
            # Reset the averaging counter for the next period.
            cc=0
    #print np.array(allfluxes).shape
    #spectral_table={'times':reference_times,'intfluxes':allintfluxes}
    #ascii.write(spectral_table,datadir+'max'+maxindex+'_'+polarization+'_integral_spectra.txt',
    #            formats={'times':'%4u', 'intfluxes':'%.4f'})
henridwyer/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
    """Bayesian ridge regression
    Fit a Bayesian ridge model and optimize the regularization parameters
    lambda (precision of the weights) and alpha (precision of the noise).
    Read more in the :ref:`User Guide <bayesian_regression>`.
    Parameters
    ----------
    n_iter : int, optional
        Maximum number of iterations. Default is 300.
    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.
    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6
    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter.
        Default is 1.e-6.
    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.
    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter.
        Default is 1.e-6
    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    verbose : boolean, optional, default False
        Verbose mode when fitting the model.
    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)
    alpha_ : float
        estimated precision of the noise.
    lambda_ : array, shape = (n_features)
        estimated precisions of the weights.
    scores_ : float
        if computed, value of the objective function (to be maximized)
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.BayesianRidge()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
           copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
           n_iter=300, normalize=False, tol=0.001, verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])
    Notes
    -----
    See examples/linear_model/plot_bayesian_ridge.py for an example.
    """
    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 fit_intercept=True, normalize=False, copy_X=True,
                 verbose=False):
        # Store hyper-parameters only; all estimation happens in fit().
        self.n_iter = n_iter
        self.tol = tol
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.verbose = verbose
    def fit(self, X, y):
        """Fit the model
        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        n_samples, n_features = X.shape
        ### Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)
        lambda_ = 1.
        verbose = self.verbose
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        self.scores_ = list()
        coef_old_ = None
        XT_y = np.dot(X.T, y)
        # SVD of X is computed once; the posterior mean for any
        # (alpha_, lambda_) pair is then cheap to evaluate each iteration.
        U, S, Vh = linalg.svd(X, full_matrices=False)
        eigen_vals_ = S ** 2
        ### Convergence loop of the bayesian ridge regression
        for iter_ in range(self.n_iter):
            ### Compute mu and sigma
            # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
            # coef_ = sigma_^-1 * XT * y
            # Two algebraically equivalent routes, chosen by which of the
            # two Gram matrices (features vs samples) is smaller.
            if n_samples > n_features:
                coef_ = np.dot(Vh.T,
                               Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
                coef_ = np.dot(coef_, XT_y)
                if self.compute_score:
                    logdet_sigma_ = - np.sum(
                        np.log(lambda_ + alpha_ * eigen_vals_))
            else:
                coef_ = np.dot(X.T, np.dot(
                    U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
                coef_ = np.dot(coef_, y)
                if self.compute_score:
                    logdet_sigma_ = lambda_ * np.ones(n_features)
                    logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
                    logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
            ### Update alpha and lambda (MacKay-style evidence updates;
            ### gamma_ is the effective number of well-determined parameters)
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = (np.sum((alpha_ * eigen_vals_)
                      / (lambda_ + alpha_ * eigen_vals_)))
            lambda_ = ((gamma_ + 2 * lambda_1)
                       / (np.sum(coef_ ** 2) + 2 * lambda_2))
            alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
                      / (rmse_ + 2 * alpha_2))
            ### Compute the objective function (log marginal likelihood
            ### plus the Gamma hyper-priors), appended per iteration
            if self.compute_score:
                s = lambda_1 * log(lambda_) - lambda_2 * lambda_
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (n_features * log(lambda_)
                            + n_samples * log(alpha_)
                            - alpha_ * rmse_
                            - (lambda_ * np.sum(coef_ ** 2))
                            - logdet_sigma_
                            - n_samples * log(2 * np.pi))
                self.scores_.append(s)
            ### Check for convergence (L1 change of the coefficients)
            if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Convergence after ", str(iter_), " iterations")
                break
            coef_old_ = np.copy(coef_)
        self.alpha_ = alpha_
        self.lambda_ = lambda_
        self.coef_ = coef_
        self._set_intercept(X_mean, y_mean, X_std)
        return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
    """Bayesian ARD regression.
    Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to be in Gaussian distributions.
    Also estimate the parameters lambda (precisions of the distributions of the
    weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedures (Evidence Maximization)
    Read more in the :ref:`User Guide <bayesian_regression>`.
    Parameters
    ----------
    n_iter : int, optional
        Maximum number of iterations. Default is 300
    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.
    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6.
    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter. Default is 1.e-6.
    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.
    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter. Default is 1.e-6.
    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False.
    threshold_lambda : float, optional
        threshold for removing (pruning) weights with high precision from
        the computation. Default is 1.e+4.
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True.
        If True, X will be copied; else, it may be overwritten.
    verbose : boolean, optional, default False
        Verbose mode when fitting the model.
    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)
    alpha_ : float
        estimated precision of the noise.
    lambda_ : array, shape = (n_features)
        estimated precisions of the weights.
    sigma_ : array, shape = (n_features, n_features)
        estimated variance-covariance matrix of the weights
    scores_ : float
        if computed, value of the objective function (to be maximized)
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.ARDRegression()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
            copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
            n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
            verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])
    Notes
    --------
    See examples/linear_model/plot_ard.py for an example.
    """
    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
                 copy_X=True, verbose=False):
        # Store hyper-parameters only; all estimation happens in fit().
        self.n_iter = n_iter
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.threshold_lambda = threshold_lambda
        self.copy_X = copy_X
        self.verbose = verbose
    def fit(self, X, y):
        """Fit the ARDRegression model according to the given training data
        and parameters.
        Iterative procedure to maximize the evidence
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        n_samples, n_features = X.shape
        coef_ = np.zeros(n_features)
        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        ### Launch the convergence loop
        # keep_lambda marks the features still active (not yet pruned).
        keep_lambda = np.ones(n_features, dtype=bool)
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        verbose = self.verbose
        ### Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)
        # One precision per weight (this is what makes the prior "ARD").
        lambda_ = np.ones(n_features)
        self.scores_ = list()
        coef_old_ = None
        ### Iterative procedure of ARDRegression
        for iter_ in range(self.n_iter):
            ### Compute mu and sigma (using Woodbury matrix identity):
            ### invert an (n_samples x n_samples) matrix instead of the
            ### (n_features x n_features) posterior covariance directly.
            sigma_ = pinvh(np.eye(n_samples) / alpha_ +
                           np.dot(X[:, keep_lambda] *
                           np.reshape(1. / lambda_[keep_lambda], [1, -1]),
                           X[:, keep_lambda].T))
            sigma_ = np.dot(sigma_, X[:, keep_lambda]
                            * np.reshape(1. / lambda_[keep_lambda], [1, -1]))
            sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
                              * X[:, keep_lambda].T, sigma_)
            sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
            coef_[keep_lambda] = alpha_ * np.dot(
                sigma_, np.dot(X[:, keep_lambda].T, y))
            ### Update alpha and lambda (evidence-maximization updates;
            ### gamma_ measures how well each active weight is determined)
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
            lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
                                    / ((coef_[keep_lambda]) ** 2
                                       + 2. * lambda_2))
            alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
                      / (rmse_ + 2. * alpha_2))
            ### Prune the weights with a precision over a threshold
            keep_lambda = lambda_ < self.threshold_lambda
            coef_[~keep_lambda] = 0
            ### Compute the objective function
            if self.compute_score:
                s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
                            + np.sum(np.log(lambda_)))
                s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
                self.scores_.append(s)
            ### Check for convergence (L1 change of the coefficients)
            if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Converged after %s iterations" % iter_)
                break
            coef_old_ = np.copy(coef_)
        self.coef_ = coef_
        self.alpha_ = alpha_
        self.sigma_ = sigma_
        self.lambda_ = lambda_
        self._set_intercept(X_mean, y_mean, X_std)
        return self
| bsd-3-clause |
droundy/fac | bench/plot-benchmark.py | 1 | 3227 | #!/usr/bin/python3
from __future__ import print_function
import os, sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import cats
import hierarchy
import dependentchains
import sleepy
import independent
#matplotlib.rc('font', size='16.0')
# Plot benchmark timings (t/N vs N) for one scenario module and verb,
# taken from the most recent dated results directory.
# Usage: plot-benchmark.py <module-name> <verb>
datadir = os.getcwd()+'/data/'
# Benchmark scenario modules; each exposes .name and .verbs.
modules = [dependentchains, cats, hierarchy, sleepy, independent]
# Pools of colors / marker-linestyle patterns handed out on first use.
allcolors = ['r','b','g','k','c','y','m', 'r']
allpatterns = ['o-', 's:', '*-.', 'x--', '.-', '<-', '>-', 'v-']
tool_patterns = {}
# BUG FIX: fs_colors was referenced below but never created, so any run
# with more than one filesystem died with a NameError.
fs_colors = {}
fslabels, fshandles = [], []
toollabels, toolhandles = [], []
# Resolve the scenario module named on the command line.
mod = None
for m in modules:
    if m.name == sys.argv[1]:
        mod = m
if mod is None:
    print("Invalid mod: ", sys.argv[1])
    exit(1)
verb = sys.argv[2]
if verb not in mod.verbs:
    print("Invalid verb: ", sys.argv[2])
    exit(1)
# Only the most recent data set is plotted.
dates = os.listdir(datadir)
dates.sort()
date = dates[-1]
print('date', date, mod.name)
plt.figure(figsize=(6,4.3))
plt.title('%s %s on %s' % (verb, mod.name, date))
# Number of filesystems benchmarked, sampled from the 'fac -j4' tool dir.
num_fs = len(os.listdir(datadir+date+'/'+mod.name+'/fac -j4'))
tools = os.listdir(datadir+date+'/'+mod.name)
tools.sort()
for tool in tools:
    if tool not in tool_patterns:
        # First time we see this tool: assign it a marker/linestyle, plus a
        # color of its own when there is only one filesystem.
        tool_patterns[tool] = allpatterns[0]
        allpatterns = allpatterns[1:]
        if num_fs == 1:
            mycolor = allcolors[0]
            allcolors = allcolors[1:]
        toollabels.append(tool)
        toolhandles.append(plt.Line2D((0,1),(0,0), marker=tool_patterns[tool][0],
                                      linestyle=tool_patterns[tool][1:], color='k'))
    for fs in os.listdir(datadir+date+'/'+mod.name+'/'+tool):
        if num_fs > 1:
            if fs not in fs_colors:
                # BUG FIX: remember the color chosen for this filesystem so
                # every tool's curve on the same fs shares it (the dict was
                # never populated before, and the legend handle read from it).
                fs_colors[fs] = allcolors[0]
                allcolors = allcolors[1:]
                fslabels.append(fs)
                fshandles.append(plt.Line2D((0,1),(0,0), color=fs_colors[fs],
                                            linewidth=3))
            # BUG FIX: always color by filesystem here; previously mycolor
            # was only set on the first sighting and went stale afterwards.
            mycolor = fs_colors[fs]
        data = np.loadtxt(datadir+date+'/'+mod.name+'/'+tool+'/'+fs+'/'+verb+'.txt')
        # The benchmark may have been run several times, giving redundant
        # rows: replace each measurement at a given N with the mean over all
        # measurements at that N, then drop duplicate rows and sort.
        if len(data.shape) == 2:
            for n in data[:,0]:
                ind = data[:,0] == n
                data[ind,1] = np.mean(data[ind,1])
            data = np.sort(np.vstack({tuple(row) for row in data}), axis=0)
        if num_fs > 1:
            mylabel = '%s on %s' % (tool, fs)
        else:
            mylabel = tool
        plt.loglog(data[:,0], data[:,1]/data[:,0],
                   tool_patterns[tool],
                   color=mycolor,
                   label=mylabel)
plt.gca().grid(True)
plt.xlabel('$N$')
plt.ylabel('$t/N$ (s)')
if num_fs > 1:
    # Two-part legend: colored bars for filesystems, black markers for tools.
    plt.legend(fshandles+toolhandles, fslabels+toollabels, loc='best', frameon=False)
else:
    plt.legend(loc='best', frameon=False)
plt.tight_layout()
# plt.savefig('../web/%s-%s.pdf' % (mod.name, verb))
plt.savefig('../web/%s-%s.svg' % (mod.name, verb), dpi=60)
# plt.savefig('../web/%s-%s.png' % (mod.name, verb), dpi=100)
| gpl-2.0 |
Srisai85/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
adamgreenhall/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
    """The function to predict: f(x) = x * sin(x)."""
    return np.sin(x) * x
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations: true signal plus heteroscedastic Gaussian noise.
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function and the
# predictions.
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
                                n_estimators=250, max_depth=3,
                                learning_rate=.1, min_samples_leaf=9,
                                min_samples_split=9)
clf.fit(X, y)
# Upper bound: prediction of the 95th percentile.
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Lower bound: prediction of the 5th percentile.
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Point prediction: least-squares (conditional mean) fit.
y_pred = clf.predict(xx)
# Plot the function, the prediction, and the 90% prediction interval
# spanned by the 5% and 95% quantile models.
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
         np.concatenate([y_upper, y_lower[::-1]]),
         alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
buguen/pylayers | pylayers/gis/srtm.py | 1 | 14250 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Pylint: Disable name warningsos.path.join(self.directory,continent)
# pylint: disable-msg=C0103
"""Load and process SRTM data."""
#import xml.dom.minidom
from HTMLParser import HTMLParser
import ftplib
import urllib2
import re
import pickle
import os.path
import os
import zipfile
import array
import math
import pdb
import matplotlib.pyplot as plt
import numpy as np
class NoSuchTileError(Exception):
    """Raised when there is no SRTM tile covering the requested region."""
    def __init__(self, lat, lon):
        # BUG FIX: the original called Exception.__init__() without passing
        # `self`, which raises TypeError the moment the exception is built.
        Exception.__init__(self)
        self.lat = lat  # latitude of the requested region, degrees
        self.lon = lon  # longitude of the requested region, degrees
    def __str__(self):
        return "No SRTM tile for %d, %d available!" % (self.lat, self.lon)
class WrongTileError(Exception):
    """Raised when the value of a pixel outside the tile area is requested."""
    def __init__(self, tile_lat, tile_lon, req_lat, req_lon):
        # BUG FIX: the original called Exception.__init__() without passing
        # `self`, which raises TypeError the moment the exception is built.
        Exception.__init__(self)
        self.tile_lat = tile_lat  # latitude of the tile's origin
        self.tile_lon = tile_lon  # longitude of the tile's origin
        self.req_lat = req_lat    # latitude actually requested
        self.req_lon = req_lon    # longitude actually requested
    def __str__(self):
        return "SRTM tile for %d, %d does not contain data for %d, %d!" % (
            self.tile_lat, self.tile_lon, self.req_lat, self.req_lon)
class InvalidTileError(Exception):
    """Raised when the SRTM tile file contains invalid data."""
    def __init__(self, lat, lon):
        # BUG FIX: the original called Exception.__init__() without passing
        # `self`, which raises TypeError the moment the exception is built.
        Exception.__init__(self)
        self.lat = lat  # latitude of the invalid tile, degrees
        self.lon = lon  # longitude of the invalid tile, degrees
    def __str__(self):
        return "SRTM tile for %d, %d is invalid!" % (self.lat, self.lon)
class SRTMDownloader:
"""Automatically download SRTM tiles."""
def __init__(self, server="dds.cr.usgs.gov",
directory=os.path.join('srtm','version2_1','SRTM3'),
cachedir="cache",
protocol="http"):
self.protocol = protocol
self.server = server
self.directory = directory
self.cachedir = cachedir
print "SRTMDownloader - server= %s, directory=%s." % \
(self.server, self.directory)
if not os.path.exists(cachedir):
os.mkdir(cachedir)
self.filelist = {}
self.filename_regex = re.compile(r"([NS])(\d{2})([EW])(\d{3})\.hgt\.zip")
self.filelist_file = os.path.join(self.cachedir,"filelist_python")
self.ftpfile = None
self.ftp_bytes_transfered = 0
def loadFileList(self):
"""Load a previously created file list or create a new one if none is
available."""
try:
data = open(self.filelist_file, 'rb')
except IOError:
print "No cached file list. Creating new one!"
self.createFileList()
return
try:
self.filelist = pickle.load(data)
except:
print "Unknown error loading cached file list. Creating new one!"
self.createFileList()
def createFileList(self):
"""SRTM data is split into different directories, get a list of all of
them and create a dictionary for easy lookup."""
if self.protocol == "ftp":
ftp = ftplib.FTP(self.server)
try:
ftp.login()
ftp.cwd(self.directory)
continents = ftp.nlst()
for continent in continents:
print "Downloading file list for", continent
ftp.cwd(os.path.join(self.directory,continent))
files = ftp.nlst()
for filename in files:
s.path.join(self.directory,continent)
self.filelist[self.parseFilename(filename)] = (
continent, filename)
finally:
ftp.close()
# Add meta info
self.filelist["server"] = self.server
self.filelist["directory"] = self.directory
with open(self.filelist_file , 'wb') as output:
pickle.dump(self.filelist, output)
else:
self.createFileListHTTP()
def createFileListHTTP(self):
"""Create a list of the available SRTM files on the server using
HTTP file transfer protocol (rather than ftp).
30may2010 GJ ORIGINAL VERSION
"""
print "createFileListHTTP"
conn = urllib2.Request('http://'+self.server+'/'+self.directory)
r1 = urllib2.urlopen(conn)
#if r1.status==200:
# print "status200 received ok"
#else:
# print "oh no = status=%d %s" \
# % (r1.status,r1.reason)
data = r1.read()
parser = parseHTMLDirectoryListing()
parser.feed(data)
continents = parser.getDirListing()
print continents
for continent in continents:
print "Downloading file list for", continent
conn = urllib2.Request('http://'+self.server+'/'+self.directory+'/'+continent)
r1 = urllib2.urlopen(conn)
data = r1.read()
parser = parseHTMLDirectoryListing()
parser.feed(data)
files = parser.getDirListing()
for filename in files:
self.filelist[self.parseFilename(filename)] = (
continent, filename)
#print self.filelist
# Add meta info
self.filelist["server"] = self.server
self.filelist["directory"] = self.directory
with open(self.filelist_file , 'wb') as output:
pickle.dump(self.filelist, output)
def parseFilename(self, filename):
"""Get lat/lon values from filename."""
match = self.filename_regex.match(filename)
if match is None:
# TODO?: Raise exception?
print "Filename", filename, "unrecognized!"
return None
lat = int(match.group(2))
lon = int(match.group(4))
if match.group(1) == "S":
lat = -lat
if match.group(3) == "W":
lon = -lon
return lat, lon
def getTile(self, lat, lon):
"""Get a SRTM tile object. This function can return either an SRTM1 or
SRTM3 object depending on what is available, however currently it
only returns SRTM3 objects."""
try:
continent, filename = self.filelist[(int(lat), int(lon))]
print filename
except KeyError:
raise NoSuchTileError(lat, lon)
if not os.path.exists(os.path.join(self.cachedir,filename)):
self.downloadTile(continent, filename)
# TODO: Currently we create a new tile object each time.
# Caching is required for improved performance.
return SRTMTile(os.path.join(self.cachedir,filename), int(lat), int(lon))
def downloadTile(self, continent, filename):
"""Download a tile from NASA's server and store it in the cache."""
if self.protocol=="ftp":
ftp = ftplib.FTP(self.server)
try:
ftp.login()
ftp.cwd(os.path.join(self.directory,continent))
# WARNING: This is not thread safe
self.ftpfile = open(os.path.join(self.cachedir,filename), 'wb')
self.ftp_bytes_transfered = 0
print ""
try:
ftp.retrbinary("RETR "+filename, self.ftpCallback)
finally:
self.ftpfile.close()
self.ftpfile = None
finally:
ftp.close()
else:
#Use HTTP
conn = urllib2.Request('http://'+self.server+'/'+self.directory+'/'+continent+'/'+filename)
r1 = urllib2.urlopen(conn)
data = r1.read()
self.ftpfile = open(os.path.join(self.cachedir,filename), 'wb')
self.ftpfile.write(data)
self.ftpfile.close()
self.ftpfile = None
def ftpCallback(self, data):
"""Called by ftplib when some bytes have been received."""
self.ftpfile.write(data)
self.ftp_bytes_transfered += len(data)
print "\r%d bytes transfered" % self.ftp_bytes_transfered,
class SRTMTile:
    """Base class for all SRTM tiles.

    Each SRTM tile is size x size pixels big and contains
    data for the area from (lat, lon) to (lat+1, lon+1) inclusive.
    This means there is a 1 pixel overlap between tiles. This makes it
    easier for us to interpolate the value, because for every point we
    only have to look at a single tile.
    """

    def __init__(self, f, lat, lon):
        """Load the tile covering (lat, lon) from the zip archive *f*.

        Raises InvalidTileError if the archive does not contain exactly
        one member of a supported (SRTM1/SRTM3) size.
        """
        import sys  # local import: only needed for the endianness check
        zipf = zipfile.ZipFile(f, 'r')
        names = zipf.namelist()
        if len(names) != 1:
            raise InvalidTileError(lat, lon)
        data = zipf.read(names[0])
        self.size = int(math.sqrt(len(data) / 2))  # 2 bytes per sample
        # Currently only SRTM1 (3601x3601) / SRTM3 (1201x1201) is supported
        if self.size not in (1201, 3601):
            raise InvalidTileError(lat, lon)
        self.data = array.array('h', data)
        # SRTM .hgt samples are big-endian 16-bit integers.  Bug fix: only
        # byte-swap on little-endian hosts; the previous unconditional
        # byteswap() corrupted the data on big-endian machines.
        if sys.byteorder == 'little':
            self.data.byteswap()
        if len(self.data) != self.size * self.size:
            raise InvalidTileError(lat, lon)
        self.lat = lat
        self.lon = lon

    @staticmethod
    def _avg(value1, value2, weight):
        """Returns the weighted average of two values and handles the case where
        one value is None. If both values are None, None is returned.
        """
        if value1 is None:
            return value2
        if value2 is None:
            return value1
        return value2 * weight + value1 * (1 - weight)

    def calcOffset(self, x, y):
        """Calculate offset into data array. Only used to test correctness
        of the formula.

        Data layout: x is longitude, y is latitude; the northernmost row
        (y = size-1) comes first in the file, so for size 1201 the offsets
        run from (0/1200) -> 0 down to (1200/0) -> 1201*1201-1.
        """
        return x + self.size * (self.size - y - 1)

    def getPixelValue(self, x, y):
        """Get the value of a pixel from the data, handling voids in the
        SRTM data."""
        assert x < self.size, "x: %d<%d" % (x, self.size)
        assert y < self.size, "y: %d<%d" % (y, self.size)
        # Same as calcOffset, inlined for performance reasons
        offset = x + self.size * (self.size - y - 1)
        value = self.data[offset]
        if value == -32768:
            return None  # -32768 is a special value for areas with no data
        return value

    def getAltitudeFromLatLon(self, lat, lon):
        """Get the altitude of a lat lon pair, using the four neighbouring
        pixels for interpolation.
        """
        # Work in coordinates relative to the tile's south-west corner.
        lat -= self.lat
        lon -= self.lon
        if lat < 0.0 or lat >= 1.0 or lon < 0.0 or lon >= 1.0:
            raise WrongTileError(self.lat, self.lon,
                                 self.lat + lat, self.lon + lon)
        x = lon * (self.size - 1)
        y = lat * (self.size - 1)
        x_int = int(x)
        x_frac = x - int(x)
        y_int = int(y)
        y_frac = y - int(y)
        # Bilinear interpolation between the four surrounding samples;
        # _avg degrades gracefully when a neighbour is a data void.
        value00 = self.getPixelValue(x_int, y_int)
        value10 = self.getPixelValue(x_int + 1, y_int)
        value01 = self.getPixelValue(x_int, y_int + 1)
        value11 = self.getPixelValue(x_int + 1, y_int + 1)
        value1 = self._avg(value00, value10, x_frac)
        value2 = self._avg(value01, value11, x_frac)
        value = self._avg(value1, value2, y_frac)
        return value
class parseHTMLDirectoryListing(HTMLParser):
    """Collect the hyperlink targets of an HTML directory-listing page.

    Feed the page source through feed(); afterwards getDirListing() returns
    every href found, except the "Parent Directory" link.
    """
    def __init__(self):
        HTMLParser.__init__(self)
        self.title = "Undefined"
        # Set True once the <title> looks like an "Index of ..." page.
        self.isDirListing = False
        # Collected href targets.
        self.dirList = []
        # Parser state: inside <title> / inside <a> right now?
        self.inTitle = False
        self.inHyperLink = False
        # Attributes and href of the <a> tag currently open.
        self.currAttrs = ""
        self.currHref = ""

    def handle_starttag(self, tag, attrs):
        """Track entry into <title> and <a>; remember the href of <a>."""
        if tag == "title":
            self.inTitle = True
        if tag == "a":
            self.inHyperLink = True
            self.currAttrs = attrs
            for attr in attrs:
                if attr[0] == 'href':
                    self.currHref = attr[1]

    def handle_endtag(self, tag):
        """On </a>, record the collected href (if not suppressed)."""
        if tag == "title":
            self.inTitle = False
        if tag == "a":
            # An empty currHref means the link was the parent-directory
            # link (cleared in handle_data); skip it.
            if self.currHref != "":
                self.dirList.append(self.currHref)
            self.currAttrs = ""
            self.currHref = ""
            self.inHyperLink = False

    def handle_data(self, data):
        """Capture the page title and suppress the parent-directory link."""
        if self.inTitle:
            self.title = data
            print "title=%s" % data
            if "Index of" in self.title:
                self.isDirListing = True
        if self.inHyperLink:
            # We do not include parent directory in listing.
            if "Parent Directory" in data:
                self.currHref = ""

    def getDirListing(self):
        """Return the list of href targets collected so far."""
        return self.dirList
#DEBUG ONLY
if __name__ == '__main__':
    # Manual smoke test: download one tile and display it as an image.
    downloader = SRTMDownloader()
    downloader.loadFileList()
    # NOTE(review): raw_input returns strings; getTile applies int() to
    # both values, so plain integer input such as "48" works.
    latitude = raw_input("latitude : ")
    longitude = raw_input("longitude : ")
    tile = downloader.getTile(latitude,longitude)
    # Reshape the flat sample array into a grid.  Assumes an SRTM3 tile
    # (1201x1201); an SRTM1 tile (3601x3601) would fail here.
    I = np.array(tile.data).reshape(1201,1201)
    # Zero out negative samples (including the -32768 void marker) so the
    # colour scale of the plot is not dominated by them.
    n = np.where(I<0)
    I[n]=0
    plt.imshow(I)
    plt.colorbar()
    plt.show()
    #tile.getAltitudeFromLatLon(49.1234, 12.56789)
| lgpl-3.0 |
Garrett-R/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 16 | 5134 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from numpy.testing import assert_raises
from scipy.spatial import distance
from sklearn.utils.testing import assert_equal
from sklearn.cluster.dbscan_ import DBSCAN, dbscan
from .common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
# Module-level fixture shared by every test below.
n_clusters = 3
# NOTE(review): generate_clustered_data comes from .common; assumed to
# produce n_clusters separable groups -- verify there if tests change.
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
    """Tests the DBSCAN algorithm with a similarity array."""
    # Parameters chosen specifically for this fixture.
    eps = 0.15
    min_samples = 10
    # Normalised pairwise distance matrix.
    dist_matrix = distance.squareform(distance.pdist(X))
    dist_matrix /= np.max(dist_matrix)
    # Functional interface.
    core_samples, labels = dbscan(dist_matrix, metric="precomputed",
                                  eps=eps, min_samples=min_samples)
    # Number of clusters, ignoring noise if present.
    found = len(set(labels)) - (1 if -1 in labels else 0)
    assert_equal(found, n_clusters)
    # Estimator interface must agree.
    estimator = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
    labels = estimator.fit(dist_matrix).labels_
    found = len(set(labels)) - int(-1 in labels)
    assert_equal(found, n_clusters)
def test_dbscan_feature():
    """Tests the DBSCAN algorithm with a feature vector array."""
    # Different eps to the similarity test, because the distance here is
    # not normalised.
    eps = 0.8
    min_samples = 10
    metric = 'euclidean'
    # Functional interface.
    core_samples, labels = dbscan(X, metric=metric, eps=eps,
                                  min_samples=min_samples)
    # Number of clusters, ignoring noise if present.
    found = len(set(labels)) - int(-1 in labels)
    assert_equal(found, n_clusters)
    # Estimator interface must agree.
    estimator = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
    labels = estimator.fit(X).labels_
    found = len(set(labels)) - int(-1 in labels)
    assert_equal(found, n_clusters)
def test_dbscan_callable():
    """Tests the DBSCAN algorithm with a callable metric."""
    # Different eps to the similarity test, because the distance here is
    # not normalised.
    eps = 0.8
    min_samples = 10
    # Pass the function reference, not the string key.
    metric = distance.euclidean
    # Functional interface.
    core_samples, labels = dbscan(X, metric=metric, eps=eps,
                                  min_samples=min_samples,
                                  algorithm='ball_tree')
    # Number of clusters, ignoring noise if present.
    found = len(set(labels)) - int(-1 in labels)
    assert_equal(found, n_clusters)
    # Estimator interface must agree.
    estimator = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
                       algorithm='ball_tree')
    labels = estimator.fit(X).labels_
    found = len(set(labels)) - int(-1 in labels)
    assert_equal(found, n_clusters)
def test_dbscan_balltree():
    """Tests the DBSCAN algorithm with balltree for neighbor calculation."""
    eps = 0.8
    min_samples = 10
    # Precomputed distances through the functional interface first.
    D = pairwise_distances(X)
    core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
                                  min_samples=min_samples)
    assert_equal(len(set(labels)) - int(-1 in labels), n_clusters)
    # Then a set of estimator configurations, each of which must find the
    # same number of clusters on the raw feature matrix.
    configurations = [
        dict(p=2.0, algorithm='ball_tree'),
        dict(p=2.0, algorithm='kd_tree'),
        dict(p=1.0, algorithm='ball_tree'),
        dict(leaf_size=20, algorithm='ball_tree'),
    ]
    for kwargs in configurations:
        estimator = DBSCAN(eps=eps, min_samples=min_samples, **kwargs)
        labels = estimator.fit(X).labels_
        assert_equal(len(set(labels)) - int(-1 in labels), n_clusters)
def test_input_validation():
    """DBSCAN.fit should accept a list of lists."""
    # A plain nested list must be converted internally without raising.
    DBSCAN().fit([[1., 2.], [3., 4.]])
def test_dbscan_badargs():
    """Test bad argument values: these should all raise ValueErrors"""
    # Each invalid keyword set must make dbscan raise.
    bad_kwargs = [
        dict(eps=-1.0),
        dict(algorithm='blah'),
        dict(metric='blah'),
        dict(leaf_size=-1),
        dict(p=-1),
    ]
    for kwargs in bad_kwargs:
        assert_raises(ValueError, dbscan, X, **kwargs)
def test_pickle():
    """A DBSCAN estimator must survive a pickle round-trip."""
    estimator = DBSCAN()
    restored = pickle.loads(pickle.dumps(estimator))
    assert_equal(type(restored), estimator.__class__)
| bsd-3-clause |
ahye/FYS2140-Resources | examples/plotting/three_gauss.py | 1 | 1086 | #!/usr/bin/env python
"""
Created on Mon 2 Dec 2013
Scriptet viser hvordan man kan plotte 3D-figurer med matplotlib.
@author Benedicte Emilie Braekken
"""
from matplotlib.pyplot import *
from numpy import *
from mpl_toolkits.mplot3d import Axes3D
def gauss_2d(x, y, x0=0, y0=0, sigma_x=1, sigma_y=1):
    '''
    Two-dimensional Gaussian surface.

    Generalized: the centre and widths were previously read from module
    globals (defined further down in this script); they are now keyword
    parameters whose defaults equal those module values, so existing
    calls ``gauss_2d(X, Y)`` behave identically.

    Parameters
    ----------
    x, y : float or array
        Coordinates at which to evaluate the Gaussian.
    x0, y0 : float, optional
        Centre of the bell (default 0).
    sigma_x, sigma_y : float, optional
        Width along each axis (default 1).

    Returns
    -------
    float or array
        Unnormalised Gaussian with amplitude 1 evaluated at (x, y).
    '''
    # Amplitude (the surface is not normalised to unit volume).
    A = 1
    X = (x - x0)**2 / (2. * sigma_x**2)
    Y = (y - y0)**2 / (2. * sigma_y**2)
    return A * exp(-(X + Y))
# Number of points in each direction (the full grid has n**2 points).
# Bug fix: this was ``3e2`` (a float); newer NumPy versions require an
# integer sample count in linspace.
n = 300
# Centre of the Gaussian (read as globals by gauss_2d above).
x0 = 0
y0 = 0
# Width of the Gaussian along each axis.
sigma_x = 1
sigma_y = 1
# Coordinate vectors covering +/- 4 sigma around the centre.
x = linspace(x0 - 4. * sigma_x, x0 + 4. * sigma_x, n)
y = linspace(y0 - 4. * sigma_y, y0 + 4. * sigma_y, n)
# 2-D coordinate arrays spanning the grid defined by the two vectors.
X, Y = meshgrid(x, y)
# Create the figure.
fig = figure()
# Create 3-D axes.
ax = fig.add_subplot(111, projection='3d')
# Surface plot of the Gaussian.
ax.plot_surface(X, Y, gauss_2d(X, Y))
# Display.
show()
| mit |
terkkila/scikit-learn | sklearn/utils/multiclass.py | 92 | 13986 | # Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
import warnings
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_sequence_of_sequence(y):
if hasattr(y, '__array__'):
y = np.asarray(y)
return set(chain.from_iterable(y))
def _unique_indicator(y):
    # The labels of an indicator matrix are simply its column indices.
    n_columns = check_array(y, ['csr', 'csc', 'coo']).shape[1]
    return np.arange(n_columns)
# Dispatch table used by unique_labels: maps a target type (as returned by
# type_of_target) to the helper that extracts that format's label set.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-sequences': _unique_sequence_of_sequence,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels

    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels

    At the moment, we also don't allow "multiclass-multioutput" input type.

    Parameters
    ----------
    *ys : array-likes,

    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    # "binary" is a degenerate "multiclass", so mixing the two is allowed.
    if ys_types == set(["binary", "multiclass"]):
        ys_types = set(["multiclass"])
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format: every indicator matrix
    # must have the same number of columns (= labels).
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
                    for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")
    # Get the unique set of labels via the per-format helper.
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %r" % ys)
    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_label_indicator_matrix(y):
    """ Check if ``y`` is in the label indicator matrix format (multilabel).

    Parameters
    ----------
    y : numpy array of shape [n_samples] or sequence of sequences
        Target values. In the multilabel case the nested sequences can
        have variable lengths.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a label indicator matrix format,
        else ``False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_label_indicator_matrix
    >>> is_label_indicator_matrix([0, 1, 0, 1])
    False
    >>> is_label_indicator_matrix([[1], [0, 2], []])
    False
    >>> is_label_indicator_matrix(np.array([[1, 0], [0, 0]]))
    True
    >>> is_label_indicator_matrix(np.array([[1], [0], [0]]))
    False
    >>> is_label_indicator_matrix(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__'):
        y = np.asarray(y)
    # An indicator matrix must be 2-d with more than one column.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False
    if issparse(y):
        # DOK/LIL do not expose .data the way CSR does; convert first.
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        # Accept when there are no stored values, or all stored values are
        # identical (ptp == 0) AND of an integral kind.  Note the operator
        # precedence: the ptp/dtype conjunction binds tighter than the
        # leading ``or``.
        return (len(y.data) == 0 or np.ptp(y.data) == 0 and
                (y.dtype.kind in 'biu' or  # bool, int, uint
                 _is_integral_float(np.unique(y.data))))
    else:
        labels = np.unique(y)
        # At most two distinct values, all of an integral type.
        return len(labels) < 3 and (y.dtype.kind in 'biu' or  # bool, int, uint
                                    _is_integral_float(labels))
def is_sequence_of_sequences(y):
    """ Check if ``y`` is in the sequence of sequences format (multilabel).

    This format is DEPRECATED.

    Parameters
    ----------
    y : sequence or array.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is a sequence of sequences else ``False``.
    """
    # the explicit check for ndarray is for forward compatibility; future
    # versions of Numpy might want to register ndarray as a Sequence
    try:
        if hasattr(y, '__array__'):
            y = np.asarray(y)
        # The first element must itself be a non-string, non-array sequence.
        out = (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
               and not isinstance(y[0], string_types))
    except (IndexError, TypeError):
        # Empty or non-indexable input cannot be a sequence of sequences.
        return False
    if out:
        warnings.warn('Direct support for sequence of sequences multilabel '
                      'representation will be unavailable from version 0.17. '
                      'Use sklearn.preprocessing.MultiLabelBinarizer to '
                      'convert to a label indicator representation.',
                      DeprecationWarning)
    return out
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.

    Parameters
    ----------
    y : numpy array of shape [n_samples] or sequence of sequences
        Target values. In the multilabel case the nested sequences can
        have variable lengths.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ```False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    # Either multilabel representation qualifies; check the indicator
    # matrix first so the (warning-emitting) sequence check only runs
    # when needed -- same short-circuit order as ``or``.
    if is_label_indicator_matrix(y):
        return True
    return is_sequence_of_sequences(y)
def type_of_target(y):
    """Determine the type of data indicated by target `y`

    Parameters
    ----------
    y : array-like

    Returns
    -------
    target_type : string
        One of:
        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-sequences': `y` is a sequence of sequences, a 1d
          array-like of objects that are sequences of labels.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, or an array of non-sequence objects.

    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    # Accept sequences, sparse matrices and array-likes, but never strings.
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, string_types))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    # Multilabel formats are recognised before any ndarray coercion, since
    # coercing ragged sequences would change their structure.
    if is_sequence_of_sequences(y):
        return 'multilabel-sequences'
    elif is_label_indicator_matrix(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    # 3-d (or deeper) arrays and object arrays of non-strings are
    # unsupported target shapes.
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], string_types)):
        return 'unknown'
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'
    elif y.ndim == 2 and y.shape[1] > 1:
        suffix = '-multioutput'
    else:
        # column vector or 1d
        suffix = ''
    # check float and contains non-integer float values:
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        return 'continuous' + suffix
    if len(np.unique(y)) <= 2:
        # 2d arrays with <= 2 values were already caught as
        # multilabel-indicator above, hence the assertion.
        assert not suffix, "2d binary array-like should be multilabel"
        return 'binary'
    else:
        return 'multiclass' + suffix
def _check_partial_fit_first_call(clf, classes=None):
    """Private helper function for factorizing common classes param logic

    Estimators that implement the ``partial_fit`` API need to be provided with
    the list of possible classes at the first call to partial_fit.

    Subsequent calls to partial_fit should check that ``classes`` is still
    consistent with a previous value of ``clf.classes_`` when provided.

    This function returns True if it detects that this was the first call to
    ``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
    set on ``clf``.
    """
    known = getattr(clf, 'classes_', None)
    # Neither a previous fit nor a classes argument: cannot proceed.
    if known is None and classes is None:
        raise ValueError("classes must be passed on the first call "
                         "to partial_fit.")
    if classes is None:
        # classes is None and clf.classes_ has already previously been set:
        # nothing to do
        return False
    if known is None:
        # This is the first call to partial_fit
        clf.classes_ = unique_labels(classes)
        return True
    # Both present: the provided classes must match the remembered ones.
    if not np.all(clf.classes_ == unique_labels(classes)):
        raise ValueError(
            "`classes=%r` is not the same as on last call "
            "to partial_fit, was: %r" % (classes, clf.classes_))
    return False
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data

    Parameters
    ----------
    y : array like or sparse matrix of size (n_samples, n_outputs)
        The labels for each example.

    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights.

    Returns
    -------
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.

    n_classes : list of integers of size n_outputs
        Number of classes in each column

    class_prior : list of size n_outputs of arrays of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []
    n_samples, n_outputs = y.shape
    if issparse(y):
        # CSC gives cheap per-column access via indptr/indices.
        y = y.tocsc()
        y_nnz = np.diff(y.indptr)
        for k in range(n_outputs):
            # Row indices of the explicitly stored entries of column k.
            col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
            # separate sample weights for zero and non-zero elements
            if sample_weight is not None:
                nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
                zeros_samp_weight_sum = (np.sum(sample_weight) -
                                         np.sum(nz_samp_weight))
            else:
                nz_samp_weight = None
                # Unweighted: each implicit zero counts 1.
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
            classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
                                       return_inverse=True)
            class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
            if 0 in classes_k:
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
            if 0 not in classes_k and y_nnz[k] < y.shape[0]:
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0,
                                          zeros_samp_weight_sum)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            # Normalise weights into a probability distribution.
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())
    return (classes, n_classes, class_prior)
| bsd-3-clause |
abhishekkrthakur/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 20 | 11431 | # Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
    """Turn X into a sequence or ndarray, avoiding a copy if possible."""
    # A single mapping is one sample: wrap it in a one-element list.
    if isinstance(X, Mapping):
        return [X]
    return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether feature_names_ and vocabulary_ should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
    def __init__(self, dtype=np.float64, separator="=", sparse=True,
                 sort=True):
        # dtype: value type of the produced array/sparse matrix entries.
        self.dtype = dtype
        # separator: joins feature name and string value for one-hot names.
        self.separator = separator
        # sparse: transform returns scipy.sparse matrices when True.
        self.sparse = sparse
        # sort: sort feature_names_/vocabulary_ alphabetically at fit time.
        self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
    def _transform(self, X, fitting):
        """Shared implementation of transform and fit_transform.

        Builds a CSR matrix from the samples in a single pass.  When
        ``fitting`` is True the vocabulary is grown as new feature names
        are encountered; otherwise unknown features are silently skipped.
        """
        # Sanity check: Python's array has no way of explicitly requesting the
        # signed 32-bit integers that scipy.sparse needs, so we use the next
        # best thing: typecode "i" (int). However, if that gives larger or
        # smaller integers than 32-bit ones, np.frombuffer screws up.
        assert array("i").itemsize == 4, (
            "sizeof(int) != 4 on your platform; please report this at"
            " https://github.com/scikit-learn/scikit-learn/issues and"
            " include the output from platform.platform() in your bug report")
        dtype = self.dtype
        if fitting:
            feature_names = []
            vocab = {}
        else:
            feature_names = self.feature_names_
            vocab = self.vocabulary_
        # Process everything as sparse regardless of setting
        X = [X] if isinstance(X, Mapping) else X
        # CSR ingredients, accumulated as compact typed arrays.
        indices = array("i")
        indptr = array("i", [0])
        # XXX we could change values to an array.array as well, but it
        # would require (heuristic) conversion of dtype to typecode...
        values = []
        # collect all the possible feature names and build sparse matrix at
        # same time
        for x in X:
            for f, v in six.iteritems(x):
                # String values one-hot encode into "name<separator>value"
                # with an implicit value of 1.
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                    v = 1
                if f in vocab:
                    indices.append(vocab[f])
                    values.append(dtype(v))
                else:
                    if fitting:
                        feature_names.append(f)
                        vocab[f] = len(vocab)
                        indices.append(vocab[f])
                        values.append(dtype(v))
            # One indptr entry per sample closes the CSR row.
            indptr.append(len(indices))
        if len(indptr) == 1:
            raise ValueError("Sample sequence X is empty.")
        indices = frombuffer_empty(indices, dtype=np.intc)
        indptr = np.frombuffer(indptr, dtype=np.intc)
        shape = (len(indptr) - 1, len(vocab))
        result_matrix = sp.csr_matrix((values, indices, indptr),
                                      shape=shape, dtype=dtype)
        # Sort everything if asked
        if fitting and self.sort:
            feature_names.sort()
            # Permute the columns so they follow alphabetical feature order,
            # and update the vocabulary to the new positions.
            map_index = np.empty(len(feature_names), dtype=np.int32)
            for new_val, f in enumerate(feature_names):
                map_index[new_val] = vocab[f]
                vocab[f] = new_val
            result_matrix = result_matrix[:, map_index]
        if self.sparse:
            result_matrix.sort_indices()
        else:
            result_matrix = result_matrix.toarray()
        if fitting:
            self.feature_names_ = feature_names
            self.vocabulary_ = vocab
        return result_matrix
def fit_transform(self, X, y=None):
    """Learn the feature-name -> index mapping and transform X in one pass.

    Equivalent to fit(X) followed by transform(X), but the input only
    has to be iterated once, so X does not need to be materialized in
    memory twice.

    Parameters
    ----------
    X : Mapping or iterable over Mappings
        Dict(s) or Mapping(s) from feature names (arbitrary Python
        objects) to feature values (strings or convertible to dtype).
    y : (ignored)
        Present for scikit-learn estimator-API compatibility.

    Returns
    -------
    Xa : {array, sparse matrix}
        Feature vectors; always 2-d.
    """
    # Delegate to the shared worker with fitting enabled so the
    # vocabulary is (re)built while the matrix is assembled.
    return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
    """Transform array or sparse matrix X back to feature mappings.

    X must have been produced by this DictVectorizer's transform or
    fit_transform method; it may only have passed through transformers
    that preserve the number of features and their order.

    In the case of one-hot/one-of-K coding, the constructed feature
    names and values are returned rather than the original ones.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Sample matrix.
    dict_type : callable, optional
        Constructor for feature mappings. Must conform to the
        collections.Mapping API.

    Returns
    -------
    D : list of dict_type objects, length = n_samples
        Feature mappings for the samples in X.
    """
    # COO matrix is not subscriptable, so request CSR/CSC instead.
    X = check_array(X, accept_sparse=['csr', 'csc'])
    feature_names = self.feature_names_
    samples = [dict_type() for _ in xrange(X.shape[0])]
    if sp.issparse(X):
        # Walk only the explicitly stored (non-zero) entries.
        for row, col in zip(*X.nonzero()):
            samples[row][feature_names[col]] = X[row, col]
    else:
        # Dense path: scan every cell, keeping only non-zero values.
        for row, mapping in enumerate(samples):
            for col, value in enumerate(X[row, :]):
                if value != 0:
                    mapping[feature_names[col]] = X[row, col]
    return samples
def transform(self, X, y=None):
    """Transform feature->value dicts to array or sparse matrix.

    Named features not encountered during fit or fit_transform will be
    silently ignored.

    Parameters
    ----------
    X : Mapping or iterable over Mappings, length = n_samples
        Dict(s) or Mapping(s) from feature names (arbitrary Python
        objects) to feature values (strings or convertible to dtype).
    y : (ignored)

    Returns
    -------
    Xa : {array, sparse matrix}
        Feature vectors; always 2-d.
    """
    if self.sparse:
        # Sparse output shares the fitting code path, with fitting off.
        return self._transform(X, fitting=False)
    # Dense path: fill a pre-allocated matrix cell by cell.
    dtype = self.dtype
    vocab = self.vocabulary_
    X = _tosequence(X)
    result = np.zeros((len(X), len(vocab)), dtype=dtype)
    for row, sample in enumerate(X):
        for name, value in six.iteritems(sample):
            if isinstance(value, six.string_types):
                # One-of-K coding: "name<sep>value" is its own feature.
                name = "%s%s%s" % (name, self.separator, value)
                value = 1
            try:
                result[row, vocab[name]] = dtype(value)
            except KeyError:
                # Feature names unseen at fit time are silently dropped.
                pass
    return result
def get_feature_names(self):
    """Return the list of feature names, ordered by their column indices.

    When one-of-K coding has been applied to categorical features, the
    constructed "name=value" feature names are included rather than the
    original ones.
    """
    return self.feature_names_
def restrict(self, support, indices=False):
    """Restrict the features to those in support.

    Parameters
    ----------
    support : array-like
        Boolean mask or list of indices (as returned by the get_support
        member of feature selectors).
    indices : boolean, optional
        Whether support is a list of indices.

    Returns
    -------
    self
    """
    if not indices:
        # Convert a boolean mask into the equivalent index array.
        support = np.where(support)[0]
    old_names = self.feature_names_
    new_vocab = {}
    for idx in support:
        # Kept features are renumbered densely, in support order.
        new_vocab[old_names[idx]] = len(new_vocab)
    self.vocabulary_ = new_vocab
    # Rebuild the name list so that list position matches the new column
    # index; sorting the keys by their vocabulary value replaces the
    # previous six.iteritems/itemgetter round-trip with the same result.
    self.feature_names_ = sorted(new_vocab, key=new_vocab.get)
    return self
| bsd-3-clause |
crawfordsm/pysalt | saltspec/specsens.py | 2 | 6366 | #!/usr/bin/env python
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
"""
SPECSENS calculates the calibration curve given an observation, a standard star,
and the extinction curve for the site. The task assumes a 1-D spectrum that
has already been sensed from the original observations.
Author Version Date
-----------------------------------------------
S. M. Crawford (SAAO) 1.0 21 Mar 2011
TODO
----
LIMITATIONS
-----------
"""
# Ensure python 2.5 compatibility
from __future__ import with_statement
import os
import sys
import time
import numpy as np
import pyfits
from matplotlib.pyplot import *
from pyraf import iraf
import saltstat
import saltsafekey as saltkey
import saltsafeio as saltio
from saltsafelog import logging
import spectools as st
from spectools import SALTSpecError
from PySpectrograph.Spectra import Spectrum
from saltfit import interfit
from pylab import *
debug = True
# -----------------------------------------------------------
# core routine
def specsens(specfile, outfile, stdfile, extfile, airmass=None, exptime=None,
             stdzp=3.68e-20, function='polynomial', order=3, thresh=3, niter=5,
             fitter='gaussian', clobber=True, logfile='salt.log', verbose=True):
    """Compute a spectrophotometric calibration (sensitivity) curve.

    Reads a 1-D observed spectrum, a standard-star spectrum (given in
    magnitudes) and a site extinction curve, derives the raw calibration
    curve via ``sensfunc``, rejects outliers with a MAD cut, fits a
    smooth model through the remaining points and writes the fitted
    curve to ``outfile`` as ascii.

    Parameters
    ----------
    specfile : str
        Ascii file with the observed 1-D spectrum (read with errors).
    outfile : str
        Ascii file the fitted calibration curve is written to.
    stdfile : str
        Ascii file with the standard-star spectrum in magnitudes.
    extfile : str
        Ascii file with the extinction curve (left in magnitudes).
    airmass, exptime : float
        Airmass and exposure time of the observation; both are required
        and a SALTSpecError is raised when either is None.
    stdzp : float
        Zeropoint used to convert the standard magnitudes to flux.
    function, order, thresh, niter :
        Controls for the ``interfit`` fitter; ``thresh`` is also the
        MAD rejection threshold applied before any fitting.
    fitter : str
        'gaussian' selects a Gaussian-process fit, anything else the
        iterative ``interfit`` fit.
    clobber, logfile, verbose :
        Bookkeeping options.
        # NOTE(review): clobber and verbose are not read in this body —
        # presumably consumed by the surrounding task machinery; confirm.
    """
    with logging(logfile, debug) as log:

        # read in the specfile and create a spectrum object
        obs_spectra = st.readspectrum(specfile.strip(), error=True, ftype='ascii')

        # read in the std file and convert from magnitudes to fnu
        # then convert it to fwave (ergs/s/cm2/A)
        std_spectra = st.readspectrum(stdfile.strip(), error=False, ftype='ascii')
        std_spectra.flux = Spectrum.magtoflux(std_spectra.flux, stdzp)
        std_spectra.flux = Spectrum.fnutofwave(
            std_spectra.wavelength, std_spectra.flux)

        # Get the typical bandpass of the standard star,
        std_bandpass = np.diff(std_spectra.wavelength).mean()

        # Smooth the observed spectrum to that bandpass
        obs_spectra.flux = st.boxcar_smooth(obs_spectra, std_bandpass)

        # read in the extinction file (leave in magnitudes)
        ext_spectra = st.readspectrum(extfile.strip(), error=False, ftype='ascii')

        # the airmass is required for the extinction correction
        if saltio.checkfornone(airmass) is None:
            message = 'Airmass was not supplied'
            raise SALTSpecError(message)

        # the exposure time is required to normalize the counts
        if saltio.checkfornone(exptime) is None:
            message = 'Exposure Time was not supplied'
            raise SALTSpecError(message)

        # calculate the calibrated spectra
        log.message('Calculating the calibration curve for %s' % specfile)
        cal_spectra = sensfunc(
            obs_spectra, std_spectra, ext_spectra, airmass, exptime)

        # plot(cal_spectra.wavelength, cal_spectra.flux * std_spectra.flux)

        # fit the spectra--first take a first cut of the spectra
        # using the median absolute deviation to throw away bad points
        cmed = np.median(cal_spectra.flux)
        cmad = saltstat.mad(cal_spectra.flux)
        mask = (abs(cal_spectra.flux - cmed) < thresh * cmad)
        # also drop non-positive calibration values
        mask = np.logical_and(mask, (cal_spectra.flux > 0))

        # now fit the data
        if fitter == 'gaussian':
            # Fit using a gaussian process.
            from sklearn.gaussian_process import GaussianProcess
            # Instantiate a Gaussian Process model; the relative errors
            # of the calibration points feed the nugget (noise) term.
            dy = obs_spectra.var[mask] ** 0.5
            dy /= obs_spectra.flux[mask] / cal_spectra.flux[mask]
            y = cal_spectra.flux[mask]
            gp = GaussianProcess(corr='squared_exponential', theta0=1e-2,
                                 thetaL=1e-4, thetaU=0.1,
                                 nugget=(dy / y) ** 2.0)
            X = np.atleast_2d(cal_spectra.wavelength[mask]).T
            # Fit to data using Maximum Likelihood Estimation of the parameters
            gp.fit(X, y)
            x = np.atleast_2d(cal_spectra.wavelength).T
            # Predict back on the full (unmasked) wavelength grid
            y_pred = gp.predict(x)
            cal_spectra.flux = y_pred
        else:
            # Iterative-rejection fit with the requested analytic function.
            fit = interfit(cal_spectra.wavelength[mask],
                           cal_spectra.flux[mask], function=function,
                           order=order, thresh=thresh, niter=niter)
            fit.interfit()
            cal_spectra.flux = fit(cal_spectra.wavelength)

        # write the spectra out
        st.writespectrum(cal_spectra, outfile, ftype='ascii')
def sensfunc(obs_spectra, std_spectra, ext_spectra, airmass, exptime):
    """Given an observed spectrum, calculate the calibration curve for the
    spectra.  All data is interpolated to the binning of the obs_spectra.
    The calibrated spectra is then calculated from

        C = F_obs / F_std / 10**(-0.4*A*E) / T / dW

    where F_obs is the observed flux from the source, F_std is the
    standard spectra, A is the airmass, E is the extinction in mags,
    T is the exposure time and dW is the bandpass.

    Parameters
    ----------
    obs_spectra -- spectrum of the observed star (counts/A)
    std_spectra -- known spectrum of the standard star (ergs/s/cm2/A)
    ext_spectra -- spectrum of the extinction curve (in mags)
    airmass -- airmass of the observations
    exptime -- exposure time of the observations

    Notes
    -----
    ``std_spectra`` and ``ext_spectra`` are re-binned *in place* by the
    ``interp`` calls below.
    """
    # re-interp the std_spectra over the same wavelength (mutates argument)
    std_spectra.interp(obs_spectra.wavelength)
    # re-interp the ext_spectra over the same wavelength (mutates argument)
    ext_spectra.interp(obs_spectra.wavelength)
    # create the calibration spectra; copy the flux so the observed
    # spectrum itself is left untouched
    cal_spectra = Spectrum.Spectrum(
        obs_spectra.wavelength, obs_spectra.flux.copy(), stype='continuum')
    # set up the bandpass as the mean wavelength step
    bandpass = np.diff(obs_spectra.wavelength).mean()
    # correct for extinction
    cal_spectra.flux = cal_spectra.flux / \
        10 ** (-0.4 * airmass * ext_spectra.flux)
    # correct for the exposure time and calculate the sensitivity curve
    cal_spectra.flux = cal_spectra.flux / exptime / bandpass / std_spectra.flux
    return cal_spectra
# main code
# Load the IRAF parameter file and register ``specsens`` as a PyRAF task
# in the ``saltspec`` package so it can be invoked from the IRAF CL.
parfile = iraf.osfn("saltspec$specsens.par")
t = iraf.IrafTaskFactory(
    taskname="specsens", value=parfile, function=specsens, pkgname='saltspec')
| bsd-3-clause |
jereze/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are less examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools

import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl

from sklearn import mixture

# Number of samples per component
n_samples = 500

# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
# First component: anisotropic gaussian (sheared by C); second:
# isotropic, shrunk and shifted to (-6, 3).
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]

# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)

# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)

color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])

# One subplot per model: scatter the assigned points and draw a
# confidence ellipse for every component that actually owns points.
for i, (clf, title) in enumerate([(gmm, 'GMM'),
                                  (dpgmm, 'Dirichlet Process GMM')]):
    splot = plt.subplot(2, 1, 1 + i)
    Y_ = clf.predict(X)
    # NOTE(review): the inner loop reuses the name ``i``; harmless here
    # because the outer ``i`` is not read after this point, but fragile.
    for i, (mean, covar, color) in enumerate(zip(
            clf.means_, clf._get_covars(), color_iter)):
        # Eigen-decompose the covariance to get the ellipse axes.
        v, w = linalg.eigh(covar)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)

        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180 * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)

    plt.xlim(-10, 10)
    plt.ylim(-3, 6)
    plt.xticks(())
    plt.yticks(())
    plt.title(title)

plt.show()
| bsd-3-clause |
RachitKansal/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 250 | 2233 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, a technique
consists in repeating the classification procedure after permuting
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause

print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets


##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size

# Some noisy data not correlated with the labels
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))

# Add the noisy data to the informative features to make the task harder
X = np.c_[X, E]

svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)

# permutation_test_score refits the classifier on shuffled labels; the
# p-value is the fraction of permutations scoring at least as well as
# the unpermuted data.
score, permutation_scores, pvalue = permutation_test_score(
    svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)

print("Classification score %s (pvalue : %s)" % (score, pvalue))

###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
#          color='g', linewidth=3, label='Classification Score'
#          ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
#          color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
         label='Classification Score'
         ' (pvalue %s)' % pvalue)
# chance level for a balanced problem is 1 / n_classes
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
hsaputra/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 137 | 5063 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
  # pylint: disable=undefined-variable
  """tf.learn IO operation tests.

  HAS_PANDAS / HAS_DASK come from the wildcard ``learn_io`` import above;
  each test degrades to a console notice when the optional dependency is
  unavailable.
  """

  def test_pandas_dataframe(self):
    # End-to-end: iris held in pandas DataFrames -> LinearClassifier;
    # accuracy must comfortably beat chance (1/3 for three classes).
    if HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      random.seed(42)
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      labels = pd.DataFrame(iris.target)
      classifier = learn.LinearClassifier(
          feature_columns=learn.infer_real_valued_columns_from_input(data),
          n_classes=3)
      classifier.fit(data, labels, steps=100)
      score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
    else:
      print("No pandas installed. pandas-related tests are skipped.")

  def test_pandas_series(self):
    # Same as test_pandas_dataframe, but labels held in a pandas Series.
    if HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      random.seed(42)
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      labels = pd.Series(iris.target)
      classifier = learn.LinearClassifier(
          feature_columns=learn.infer_real_valued_columns_from_input(data),
          n_classes=3)
      classifier.fit(data, labels, steps=100)
      score = accuracy_score(labels, list(classifier.predict_classes(data)))
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))

  def test_string_data_formats(self):
    # Non-numeric frames must be rejected by the pandas extractors.
    if HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      with self.assertRaises(ValueError):
        learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
      with self.assertRaises(ValueError):
        learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))

  def test_dask_io(self):
    # Extraction must preserve dask partition divisions and columns.
    if HAS_DASK and HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
      # test dask.dataframe
      df = pd.DataFrame(
          dict(
              a=list("aabbcc"), b=list(range(6))),
          index=pd.date_range(
              start="20100101", periods=6))
      ddf = dd.from_pandas(df, npartitions=3)
      extracted_ddf = extract_dask_data(ddf)
      self.assertEqual(
          extracted_ddf.divisions, (0, 2, 4, 6),
          "Failed with divisions = {0}".format(extracted_ddf.divisions))
      self.assertEqual(
          extracted_ddf.columns.tolist(), ["a", "b"],
          "Failed with columns = {0}".format(extracted_ddf.columns))
      # test dask.series
      labels = ddf["a"]
      extracted_labels = extract_dask_labels(labels)
      self.assertEqual(
          extracted_labels.divisions, (0, 2, 4, 6),
          "Failed with divisions = {0}".format(extracted_labels.divisions))
      # labels should only have one column
      with self.assertRaises(ValueError):
        extract_dask_labels(ddf)
    else:
      print("No dask installed. dask-related tests are skipped.")

  def test_dask_iris_classification(self):
    # End-to-end: iris split across dask partitions -> LinearClassifier.
    if HAS_DASK and HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
      random.seed(42)
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      data = dd.from_pandas(data, npartitions=2)
      labels = pd.DataFrame(iris.target)
      labels = dd.from_pandas(labels, npartitions=2)
      classifier = learn.LinearClassifier(
          feature_columns=learn.infer_real_valued_columns_from_input(data),
          n_classes=3)
      classifier.fit(data, labels, steps=100)
      # Predict partition-wise, then gather before scoring.
      predictions = data.map_partitions(classifier.predict).compute()
      score = accuracy_score(labels.compute(), predictions)
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
pompiduskus/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 143 | 22295 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
    # Our f_oneway must agree with the scipy.stats reference
    # implementation on two randomly generated groups of samples.
    rng = np.random.RandomState(0)
    group_a = rng.randn(10, 3)
    group_b = 1 + rng.randn(10, 3)
    f_ref, pv_ref = stats.f_oneway(group_a, group_b)
    f_ours, pv_ours = f_oneway(group_a, group_b)
    assert_true(np.allclose(f_ref, f_ours))
    assert_true(np.allclose(pv_ref, pv_ours))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: it must not raise casting errors
    # with recent numpys.
    rng = np.random.RandomState(0)
    X = rng.randint(10, size=(10, 10))
    y = np.arange(10)
    fint, pint = f_oneway(X, y)

    # Integer input must give the same result as float input.  Use the
    # builtin ``float``: the ``np.float`` alias was deprecated in
    # NumPy 1.20 and removed in NumPy 1.24.
    f, p = f_oneway(X.astype(float), y)
    assert_array_almost_equal(f, fint, decimal=4)
    assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
    # Test whether the F test yields meaningful results
    # on a simple simulated classification problem:
    # 5 relevant features (3 informative + 2 redundant), 15 noise ones.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    F, pv = f_classif(X, y)
    # Dense and sparse inputs must give identical statistics.
    F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
    assert_true((F > 0).all())
    assert_true((pv > 0).all())
    assert_true((pv < 1).all())
    # shuffle=False keeps the 5 relevant features in the first columns.
    assert_true((pv[:5] < 0.05).all())
    assert_true((pv[5:] > 1.e-4).all())
    assert_array_almost_equal(F_sparse, F)
    assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
    # Test whether the F test yields meaningful results
    # on a simple simulated regression problem
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0)

    F, pv = f_regression(X, y)
    assert_true((F > 0).all())
    assert_true((pv > 0).all())
    assert_true((pv < 1).all())
    # shuffle=False keeps the 5 informative features in the first columns.
    assert_true((pv[:5] < 0.05).all())
    assert_true((pv[5:] > 1.e-4).all())

    # again without centering, compare with sparse
    F, pv = f_regression(X, y, center=False)
    F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
    assert_array_almost_equal(F_sparse, F)
    assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
    # f_regression must return the same statistics for any numeric
    # dtype of the target.
    rng = np.random.RandomState(0)
    X = rng.rand(10, 20)
    # Use the builtin ``int``/``float``: the ``np.int``/``np.float``
    # aliases were deprecated in NumPy 1.20 and removed in NumPy 1.24
    # (they were plain aliases of the builtins anyway).
    y = np.arange(10).astype(int)

    F1, pv1 = f_regression(X, y)
    F2, pv2 = f_regression(X, y.astype(float))
    assert_array_almost_equal(F1, F2, 5)
    assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
    # f_regression must account for degrees of freedom according to the
    # 'center' argument: with two centered variates there is a simple
    # relationship between the centered and uncentered F-scores.
    X = np.arange(-5, 6).reshape(-1, 1)  # X has zero mean
    n_samples = X.size
    # Alternate -1/+1 entries, then zero the first one so that Y also
    # has a null mean (same construction as ones with evens negated).
    Y = np.where(np.arange(n_samples) % 2 == 0, -1., 1.)
    Y[0] = 0.

    F_centered, _ = f_regression(X, Y, center=True)
    F_raw, _ = f_regression(X, Y, center=False)
    assert_array_almost_equal(
        F_centered * (n_samples - 1.) / (n_samples - 2.), F_raw)
    assert_almost_equal(F_raw[0], 0.232558139)  # value from statsmodels OLS
def test_f_classif_multi_class():
    # Test whether the F test yields meaningful results
    # on a simple simulated multi-class (8 classes) problem.
    # Mirrors test_f_classif, without the sparse-input comparison.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    F, pv = f_classif(X, y)
    assert_true((F > 0).all())
    assert_true((pv > 0).all())
    assert_true((pv < 1).all())
    # shuffle=False keeps the 5 relevant features in the first columns.
    assert_true((pv[:5] < 0.05).all())
    assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple classification problem
    # with the percentile heuristic
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    univariate_filter = SelectPercentile(f_classif, percentile=25)
    X_r = univariate_filter.fit(X, y).transform(X)
    # The generic front-end with mode='percentile' must match exactly.
    X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
                                   param=25).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    # shuffle=False keeps the 5 relevant features in the first columns,
    # and 25% of 20 features is exactly 5.
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple classification problem
    # with the percentile heuristic, for sparse input
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    X = sparse.csr_matrix(X)
    univariate_filter = SelectPercentile(f_classif, percentile=25)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
                                   param=25).fit(X, y).transform(X)
    assert_array_equal(X_r.toarray(), X_r2.toarray())
    support = univariate_filter.get_support()
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support, gtruth)

    # inverse_transform must keep sparsity, restore the original shape
    # and re-insert empty columns for the removed features.
    X_r2inv = univariate_filter.inverse_transform(X_r2)
    assert_true(sparse.issparse(X_r2inv))
    support_mask = safe_mask(X_r2inv, support)
    assert_equal(X_r2inv.shape, X.shape)
    assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
    # Check other columns are empty
    assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple classification problem
    # with the k best heuristic
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    univariate_filter = SelectKBest(f_classif, k=5)
    X_r = univariate_filter.fit(X, y).transform(X)
    # The generic front-end with mode='k_best' must match exactly.
    X_r2 = GenericUnivariateSelect(
        f_classif, mode='k_best', param=5).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    # shuffle=False keeps the 5 relevant features in the first columns.
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support, gtruth)
def test_select_kbest_all():
    # With k="all", SelectKBest must behave as the identity transform.
    X, y = make_classification(n_samples=20, n_features=10,
                               shuffle=False, random_state=0)
    selector = SelectKBest(f_classif, k='all')
    X_out = selector.fit(X, y).transform(X)
    assert_array_equal(X, X_out)
def test_select_kbest_zero():
    # Test whether k=0 correctly returns no features.
    X, y = make_classification(n_samples=20, n_features=10,
                               shuffle=False, random_state=0)
    univariate_filter = SelectKBest(f_classif, k=0)
    univariate_filter.fit(X, y)
    support = univariate_filter.get_support()
    gtruth = np.zeros(10, dtype=bool)
    assert_array_equal(support, gtruth)
    # Transforming with an empty support must warn and return an
    # (n_samples, 0) matrix rather than raise.
    X_selected = assert_warns_message(UserWarning, 'No features were selected',
                                      univariate_filter.transform, X)
    assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple classification problem
    # with the fdr, fwe and fpr heuristics
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    univariate_filter = SelectFwe(f_classif, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    # On this well-separated problem all three error-rate heuristics
    # are expected to select the same 5 features as SelectFwe.
    for mode in ['fdr', 'fpr', 'fwe']:
        X_r2 = GenericUnivariateSelect(
            f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
        assert_array_equal(X_r, X_r2)
        support = univariate_filter.get_support()
        assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
    # Check that the selector kept exactly the highest-scoring features:
    # the scores of the selected features must equal the top-k scores
    # overall, where k is the size of the selected support.
    scores = score_filter.scores_
    support = score_filter.get_support()
    n_kept = support.sum()
    assert_array_equal(np.sort(scores[support]),
                       np.sort(scores)[-n_kept:])
def test_select_percentile_regression():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple regression problem
    # with the percentile heuristic
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)

    univariate_filter = SelectPercentile(f_regression, percentile=25)
    X_r = univariate_filter.fit(X, y).transform(X)
    assert_best_scores_kept(univariate_filter)
    X_r2 = GenericUnivariateSelect(
        f_regression, mode='percentile', param=25).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support, gtruth)
    # inverse_transform must restore the full width, zero-filling the
    # removed columns.
    X_2 = X.copy()
    X_2[:, np.logical_not(support)] = 0
    assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
    # Check inverse_transform respects dtype
    assert_array_equal(X_2.astype(bool),
                       univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
    # Asking for the 100th percentile must select every feature.
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)

    selector = SelectPercentile(f_regression, percentile=100)
    X_filtered = selector.fit(X, y).transform(X)
    assert_best_scores_kept(selector)
    # The generic front-end must agree with the dedicated class.
    X_filtered2 = GenericUnivariateSelect(
        f_regression, mode='percentile', param=100).fit(X, y).transform(X)
    assert_array_equal(X_filtered, X_filtered2)
    # Every one of the 20 features is in the support.
    assert_array_equal(selector.get_support(), np.ones(20))
def test_invalid_percentile():
    # Percentiles outside [0, 100] must be rejected at fit time, both by
    # SelectPercentile and by the generic front-end.
    X, y = make_regression(n_samples=10, n_features=20,
                           n_informative=2, shuffle=False, random_state=0)

    for bad_value in (-1, 101):
        assert_raises(ValueError,
                      SelectPercentile(percentile=bad_value).fit, X, y)
        assert_raises(ValueError,
                      GenericUnivariateSelect(mode='percentile',
                                              param=bad_value).fit, X, y)
def test_select_kbest_regression():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple regression problem
    # with the k best heuristic
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0, noise=10)

    univariate_filter = SelectKBest(f_regression, k=5)
    X_r = univariate_filter.fit(X, y).transform(X)
    assert_best_scores_kept(univariate_filter)
    # The generic front-end with mode='k_best' must match exactly.
    X_r2 = GenericUnivariateSelect(
        f_regression, mode='k_best', param=5).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    # shuffle=False keeps the 5 informative features in the first columns.
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple regression problem
    # with the fpr, fdr or fwe heuristics
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0, noise=10)

    univariate_filter = SelectFpr(f_regression, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    for mode in ['fdr', 'fpr', 'fwe']:
        X_r2 = GenericUnivariateSelect(
            f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
        assert_array_equal(X_r, X_r2)
        support = univariate_filter.get_support()
        # All 5 informative features must be kept.  Use the builtin
        # ``bool``: the ``np.bool`` alias was deprecated in NumPy 1.20
        # and removed in NumPy 1.24.
        assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
        # Allow at most 2 noise features to slip through.
        assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
    # Test that fdr heuristic actually has low FDR.
    def single_fdr(alpha, n_informative, random_state):
        # One draw: fit SelectFdr on a fresh regression problem and
        # return the empirical false discovery rate of the selection.
        X, y = make_regression(n_samples=150, n_features=20,
                               n_informative=n_informative, shuffle=False,
                               random_state=random_state, noise=10)

        with warnings.catch_warnings(record=True):
            # Warnings can be raised when no features are selected
            # (low alpha or very noisy data)
            univariate_filter = SelectFdr(f_regression, alpha=alpha)
            X_r = univariate_filter.fit(X, y).transform(X)
            X_r2 = GenericUnivariateSelect(
                f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)

        assert_array_equal(X_r, X_r2)
        support = univariate_filter.get_support()
        # shuffle=False: informative features occupy the first columns.
        num_false_positives = np.sum(support[n_informative:] == 1)
        num_true_positives = np.sum(support[:n_informative] == 1)

        if num_false_positives == 0:
            return 0.
        false_discovery_rate = (num_false_positives /
                                (num_true_positives + num_false_positives))
        return false_discovery_rate

    for alpha in [0.001, 0.01, 0.1]:
        for n_informative in [1, 5, 10]:
            # As per Benjamini-Hochberg, the expected false discovery rate
            # should be lower than alpha:
            # FDR = E(FP / (TP + FP)) <= alpha
            false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
                                                       random_state) for
                                            random_state in range(30)])
            assert_greater_equal(alpha, false_discovery_rate)

            # Make sure that the empirical false discovery rate increases
            # with alpha:
            if false_discovery_rate != 0:
                assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
    """Check the fwe heuristic on a simple regression problem.

    SelectFwe must keep the five informative features, agree with
    GenericUnivariateSelect in 'fwe' mode, and admit at most one
    false positive among the noise features.
    """
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)
    univariate_filter = SelectFwe(f_regression, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(
        f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    # np.bool was removed in NumPy 1.24; use the builtin bool dtype.
    # (An unused `gtruth` array from the original was dropped.)
    assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
    assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
    # SelectKBest must break ties and return exactly k features.
    # Prior to 0.11 it could return more features than requested.
    y = [1]
    dummy_score = lambda X, y: (X[0], X[0])
    for X in ([0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]):
        for k in (1, 2):
            sel = SelectKBest(dummy_score, k=k)
            Xt = ignore_warnings(sel.fit_transform)([X], y)
            assert_equal(Xt.shape[1], k)
            assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
    # SelectPercentile must resolve tied scores to the right n_features:
    # 34% of 3 features -> 1 kept, 67% -> 2 kept.
    y = [1]
    dummy_score = lambda X, y: (X[0], X[0])
    for X in ([0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]):
        for percentile, n_expected in ((34, 1), (67, 2)):
            sel = SelectPercentile(dummy_score, percentile=percentile)
            Xt = ignore_warnings(sel.fit_transform)([X], y)
            assert_equal(Xt.shape[1], n_expected)
            assert_best_scores_kept(sel)
def test_tied_pvalues():
    # Test whether k-best and percentiles work with tied pvalues from chi2.
    # chi2 will return the same p-values for the following features, but it
    # will return different scores.
    X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
    y = [0, 1]
    # Try every column permutation so the result cannot depend on ordering.
    for perm in itertools.permutations((0, 1, 2)):
        X = X0[:, perm]
        Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
        assert_equal(Xt.shape, (2, 2))
        # The lowest-scoring feature (the 9998 column) must be the one dropped.
        assert_not_in(9998, Xt)
        Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
        assert_equal(Xt.shape, (2, 2))
        assert_not_in(9998, Xt)
def test_tied_scores():
    """Test for stable sorting in k-best with tied scores.

    With identical chi2 scores, the selector must keep the *last*
    n_features columns (stable sort), so transforming [0, 1, 2] yields
    the trailing n_features values.
    """
    X_train = np.array([[0, 0, 0], [1, 1, 1]])
    y_train = [0, 1]
    for n_features in [1, 2, 3]:
        sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
        # transform expects 2-D input; a bare 1-D list was deprecated and
        # now raises in scikit-learn, so pass a single-sample 2-D list.
        X_test = sel.transform([[0, 1, 2]])
        assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
    # Assert that SelectKBest and SelectPercentile can handle NaNs.
    # First feature has zero variance to confuse f_classif (ANOVA) and
    # make it return a NaN.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    for select in (SelectKBest(f_classif, 2),
                   SelectPercentile(f_classif, percentile=67)):
        ignore_warnings(select.fit)(X, y)
        # The NaN-scored feature 0 must rank last, keeping features 1 and 2.
        assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
    # A non-callable score_func must raise TypeError on fit for every
    # univariate selector class.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    selector_classes = (SelectKBest, SelectPercentile, SelectFwe,
                        SelectFdr, SelectFpr, GenericUnivariateSelect)
    for SelectFeatures in selector_classes:
        assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
    # k outside the valid range [0, n_features] must raise ValueError,
    # both directly on SelectKBest and through the generic front-end.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
    assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
    assert_raises(ValueError,
                  GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
    assert_raises(ValueError,
                  GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
    # Test that f_classif warns if a feature is constant throughout.
    X, y = make_classification(n_samples=10, n_features=5)
    # Force feature 0 to a constant value; the ANOVA F-value is undefined
    # for a zero-variance feature, which should trigger the warning.
    X[:, 0] = 2.0
    assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
    rng = np.random.RandomState(0)
    # Generate random uncorrelated data: a strict univariate test should
    # rejects all the features
    X = rng.rand(40, 10)
    y = rng.randint(0, 4, size=40)
    # Selectors tight enough (or with k=0 / percentile=0) to keep nothing.
    strict_selectors = [
        SelectFwe(alpha=0.01).fit(X, y),
        SelectFdr(alpha=0.01).fit(X, y),
        SelectFpr(alpha=0.01).fit(X, y),
        SelectPercentile(percentile=0).fit(X, y),
        SelectKBest(k=0).fit(X, y),
    ]
    for selector in strict_selectors:
        # Expect an all-False support, a warning on transform, and an
        # output with zero columns but the original number of rows.
        assert_array_equal(selector.get_support(), np.zeros(10))
        X_selected = assert_warns_message(
            UserWarning, 'No features were selected', selector.transform, X)
        assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Final/results/2-categories/test10_cross_validate_categories_mov_fixed_1200ms.py | 1 | 4331 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/384')
from data_384 import Fmat_original
def pca(X):
    """Eigendecomposition-based PCA via the covariance method (Python 2).

    Returns (eigenvectors, eigenvalues, mean, centered data, covariance).
    """
    #get dimensions
    num_data,dim = X.shape
    #center data
    # NOTE(review): the mean is taken along axis=1 (per row) but the inline
    # comment below says "along columns" -- confirm against the layout of
    # Fmat_original before relying on either reading.
    mean_X = X.mean(axis=1)
    M = (X-mean_X) # subtract the mean (along columns)
    Mcov = cov(M)
    print 'PCA - COV-Method used'
    # Eigendecompose the covariance matrix; val are variances along vec.
    val,vec = linalg.eig(Mcov)
    #return the projection matrix, the variance and the mean
    return vec,val,mean_X, M, Mcov
if __name__ == '__main__':

    # Full pipeline: PCA-reduce the force-feature matrix, then run a
    # leave-one-chunk-out kNN cross-validation with PyMVPA (Python 2).
    Fmat = Fmat_original

    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat)
    print 'Total_Matrix_Shape:',m_tot,n_tot
    eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
    #print eigvec_total
    #print eigval_total
    #print mean_data_total
    m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
    m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
    m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
    print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
    print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
    print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total

    #Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
    perc_total = cumsum(eigval_total)/sum(eigval_total)

    # Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
    # NOTE(review): only the first 3 eigenvectors are kept here despite the
    # comment mentioning 20 -- confirm the intended dimensionality.
    W = eigvec_total[:,0:3]
    m_W, n_W = np.shape(W)
    print 'Reduced Dimension Eigenvector Shape:',m_W, n_W

    # Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
    length = len(eigval_total)
    s = np.matrix(np.zeros(length)).T
    i = 0
    while i < length:
        s[i] = sqrt(C[i,i])
        i = i+1
    Z = np.divide(B,s)
    m_Z, n_Z = np.shape(Z)
    print 'Z-Score Shape:', m_Z, n_Z

    #Projected Data:
    Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
    m_Y, n_Y = np.shape(Y.T)
    print 'Transposed Projected Data Shape:', m_Y, n_Y

    #Using PYMVPA
    # Labels/chunks: 140 samples total, grouped in blocks of 5 per object.
    PCA_data = np.array(Y.T)
    PCA_label_1 = ['Fixed']*35 + ['Movable']*35 + ['Fixed']*35 + ['Movable']*35
    PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
    clf = kNN(k=4)
    terr = TransferError(clf)
    ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
    print ds1.samples.shape
    cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
    error = cvterr(ds1)
    print error
    print cvterr.confusion.asstring(description=False)
    figure(1)
    cvterr.confusion.plot(numbers='True')
    show()

    # Variances
    figure(2)
    title('Variances of PCs')
    stem(range(len(perc_total)),perc_total,'--b')
    axis([-0.3,30.3,0,1.2])
    grid('True')
    #show()
| mit |
theodoregoetz/clas12-dc-wiremap | scratch/ax.py | 1 | 1150 | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
im = np.arange(100)
im.shape = 10, 10
fig = plt.figure(1, (5., 5.))
grid = ImageGrid(fig, (0.1, 0.4, 0.2, 0.2), # similar to subplot(111)
nrows_ncols = (2, 2), # creates 2x2 grid of
share_all = True,
axes_pad=0.1, # pad between axes in inch.
)
grid2 = ImageGrid(fig, (0.1, 0.7, 0.2, 0.2), # similar to subplot(111)
nrows_ncols = (2, 2), # creates 2x2 grid of
share_all = True,
axes_pad=0.1, # pad between axes in inch.
)
grid3 = ImageGrid(fig, (0.1, 0.1, 0.2, 0.2), # similar to subplot(111)
nrows_ncols = (2, 2), # creates 2x2 grid of axes
share_all = True,
axes_pad=0.1, # pad between axes in inch.
)
#for i in range(4):
#grid[i].imshow(im) # The AxesGrid object work as a list of axes.
#for j in range(4):
#grid2[j].imshow(im) # The AxesGrid object work as a list of axes.
plt.show()
| gpl-3.0 |
gsnyder206/synthetic-image-morph | candelize.py | 1 | 11413 | import cProfile
import pstats
import math
import string
import sys
import struct
import matplotlib
import numpy as np
import scipy.ndimage
import scipy.stats as ss
import scipy.signal
import scipy as sp
import scipy.odr as odr
import glob
import os
import gzip
import tarfile
import shutil
import congrid
import astropy.io.ascii as ascii
import warnings
import subprocess
import photutils
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.convolution import Gaussian2DKernel
from astropy.visualization.mpl_normalize import ImageNormalize
from astropy.visualization import *
import astropy.io.fits as pyfits
import statmorph
import datetime
import setup_synthetic_images_mp as ssimp
def process_snapshot(subdirpath='.',mockimage_parameters=None,clobber=False, max=None, galaxy=None,seg_filter_label='NC-F200W',magsb_limits=[23.0,25.0,27.0,29.0],camindices='All',do_idl=False,analyze=True,use_nonscatter=True,Np=2):
    """Build mock HST/JWST images for every broadbandz FITS file in a directory.

    Configures filter, pixel-scale, PSF and photometry metadata for 28
    HST+JWST bands, packs it into an analysis_parameters object, and runs
    ssimp.process_single_broadband on each broadband file found.
    Returns the list of output directories produced.

    NOTE(review): `magsb_limits` is a mutable default argument and `max`
    shadows the builtin; both kept as-is to preserve the interface.
    """
    cwd = os.path.abspath(os.curdir)
    os.chdir(subdirpath)

    # Accept .fits or .fits.gz broadband outputs, sorted for determinism.
    bbfile_list = np.sort(np.asarray(glob.glob('broadbandz.fits*')))  #enable reading .fits.gz files
    print(bbfile_list)

    # Optionally restrict the run to a single named broadband file.
    if galaxy is not None:
        thisbb = np.where(bbfile_list==galaxy)[0]
        bbfile_list= bbfile_list[thisbb]

    # Inspect the first file for header metadata shared by the batch.
    test_file = bbfile_list[0]
    tf = pyfits.open(test_file)
    print(tf.info())
    print(tf['BROADBAND'].header.cards)
    print(tf['SFRHIST'].header.get('star_adaptive_smoothing'))
    print(tf['SFRHIST'].header.get('star_radius_factor'))

    # 'All' means analyze every Sunrise camera present in the file.
    if camindices=='All':
        N_cam=int(tf['MCRX'].header['N_CAMERA'])
        camindices=range(N_cam)

    #this is critical for later
    fils = tf['FILTERS'].data.field('filter')
    print(fils)

    filters_to_analyze = ['hst/acs_f435w','hst/acs_f606w','hst/acs_f775w','hst/acs_f850lp',
                          'hst/wfc3_f105w','hst/wfc3_f125w','hst/wfc3_f160w',
                          'jwst/nircam_f070w', 'jwst/nircam_f090w','jwst/nircam_f115w', 'jwst/nircam_f150w',
                          'jwst/nircam_f200w', 'jwst/nircam_f277w', 'jwst/nircam_f356w', 'jwst/nircam_f444w',
                          'hst/wfc3_f140w',
                          'hst/wfc3_f275w', 'hst/wfc3_f336w',
                          'hst/acs_f814w',
                          'jwst/miri_F560W','jwst/miri_F770W','jwst/miri_F1000W','jwst/miri_F1130W',
                          'jwst/miri_F1280W','jwst/miri_F1500W','jwst/miri_F1800W','jwst/miri_F2100W','jwst/miri_F2550W']

    # One skip flag per filter above (currently nothing is skipped).
    skip_filter_boolean = [False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,
                           False,]

    print(filters_to_analyze)

    # Output pixel scale (arcsec) per filter, matching the lists above.
    pixsize_arcsec = [0.03,0.03,0.03,0.03,0.06,0.06,0.06,0.032,0.032,0.032,0.032,0.032,0.065,0.065,0.065,0.06,0.03,0.03,0.03,
                      0.11,0.11,0.11,0.11,0.11,0.11,0.11,0.11,0.11]

    filter_labels = ['ACS-F435W','ACS-F606W','ACS-F775W','ACS-F850LP','WFC3-F105W','WFC3-F125W','WFC3-F160W',
                     'NC-F070W','NC-F090W','NC-F115W','NC-F150W','NC-F200W','NC-F277W','NC-F356W','NC-F444W',
                     'WFC3-F140W','WFC3-F275W','WFC3-F336W','ACS-F814W',
                     'MIRI-F560W','MIRI-F770W','MIRI-F1000W','MIRI-F1130W',
                     'MIRI-F1280W','MIRI-F1500W','MIRI-F1800W','MIRI-F2100W','MIRI-F2550W']

    # Map each requested filter name to its index in the FITS FILTERS table.
    filter_indices = []

    print(len(filters_to_analyze), len(skip_filter_boolean), len(filter_labels))

    for i,f in enumerate(filters_to_analyze):
        fi = np.where(fils==f)
        print(fi[0][0], f, fils[fi[0][0]], filter_labels[i])  #, filters_to_analyze[fi]
        filter_indices.append(fi[0][0])

    filter_indices = np.asarray(filter_indices)
    print(filter_indices)

    #order of filter_labels in wavelength space (i.e, F435W is in the "2" position)
    filter_lambda_order = [2,3,4,6,7,8,10,
                           11,12,13,14,15,16,17,18,
                           9,0,1,5,
                           19,20,21,22,
                           23,24,25,26,27]

    #photfnu units Jy; flux in 1 ct/s
    photfnu_Jy = [1.96e-7,9.17e-8,1.97e-7,4.14e-7,
                  1.13e-7,1.17e-7,1.52e-7,
                  5.09e-8,3.72e-8,3.17e-8,2.68e-8,2.64e-8,2.25e-8,2.57e-8,2.55e-8,
                  9.52e-8,8.08e-7,4.93e-7,1.52e-7,
                  5.75e-8,3.10e-8,4.21e-8,1.39e-7,
                  4.65e-8,4.48e-8,5.88e-8,4.98e-8,1.15e-7]

    # NOTE(review): hard-coded user path for IDL morphology code -- only
    # used when do_idl is enabled on that machine.
    morphcode_dir = "/Users/gsnyder/Documents/pro/morph_december2013/morph_pro/"
    morphcode_files = np.asarray(glob.glob(os.path.join(morphcode_dir,"*.*")))

    #se_dir = '/Users/gsnyder/Documents/Projects/Illustris_Morphology/Illustris-CANDELS/SE_scripts'
    #se_files = np.asarray(glob.glob(os.path.join(se_dir,"*.*")))

    psf_files = []
    psf_dir = os.path.expandvars('$GFS_PYTHON_CODE/vela-yt-sunrise/kernels')
    #psf_names = ['PSFSTD_ACSWFC_F435W.fits','PSFSTD_ACSWFC_F606W.fits','PSFSTD_ACSWFC_F775W_SM3.fits','PSFSTD_ACSWFC_F850L_SM3.fits',
    #             'PSFSTD_WFC3IR_F105W.fits','PSFSTD_WFC3IR_F125W.fits','PSFSTD_WFC3IR_F160W.fits',
    #             'PSF_NIRCam_F070W_revV-1.fits','PSF_NIRCam_F090W_revV-1.fits','PSF_NIRCam_F115W_revV-1.fits','PSF_NIRCam_F150W_revV-1.fits',
    #             'PSF_NIRCam_F200W_revV-1.fits','PSF_NIRCam_F277W_revV-1.fits','PSF_NIRCam_F356W_revV-1.fits','PSF_NIRCam_F444W_revV-1.fits',
    #             'PSFSTD_WFC3IR_F140W.fits','PSFSTD_WFC3UV_F275W.fits','PSFSTD_WFC3UV_F336W.fits','PSFSTD_ACSWFC_F814W.fits']
    psf_names = ['TinyTim_IllustrisPSFs/F435W_rebin.fits','TinyTim_IllustrisPSFs/F606W_rebin.fits','TinyTim_IllustrisPSFs/F775W_rebin.fits','TinyTim_IllustrisPSFs/F850LP_rebin.fits',
                 'TinyTim_IllustrisPSFs/F105W_rebin.fits','TinyTim_IllustrisPSFs/F125W_rebin.fits','TinyTim_IllustrisPSFs/F160W_rebin.fits',
                 'WebbPSF_F070W_trunc.fits','WebbPSF_F090W_trunc.fits','WebbPSF_F115W_trunc.fits','WebbPSF_F150W_trunc.fits',
                 'WebbPSF_F200W_trunc.fits','WebbPSF_F277W_trunc.fits','WebbPSF_F356W_trunc.fits','WebbPSF_F444W_trunc.fits',
                 'TinyTim_IllustrisPSFs/F140W_rebin.fits','TinyTim_IllustrisPSFs/F275W_rebin.fits','TinyTim_IllustrisPSFs/F336W_rebin.fits','TinyTim_IllustrisPSFs/F814W_rebin.fits',
                 'WebbPSF_F560W_trunc.fits','WebbPSF_F770W_trunc.fits','WebbPSF_F1000W_trunc.fits','WebbPSF_F1130W_trunc.fits',
                 'WebbPSF_F1280W_trunc.fits','WebbPSF_F1500W_trunc.fits','WebbPSF_F1800W_trunc.fits','WebbPSF_F2100W_trunc.fits','WebbPSF_F2550W_trunc.fits']

    #psf_pix_arcsec = [0.0125,0.0125,0.0125,0.0125,0.0325,0.0325,0.0325,0.007925,0.007925,0.007925,0.007925,0.007925,0.0162,0.0162,0.0162,0.0325,0.0100,0.0100,0.0125]
    #switch to JWST detector sampling for efficiency.  They're model psfs anyway, full accuracy not essential
    psf_pix_arcsec = [0.03,0.03,0.03,0.03,0.06,0.06,0.06,0.0317,0.0317,0.0317,0.0317,0.0317,0.0648,0.0648,0.0648,0.06,0.03,0.03,0.03,0.11,0.11,0.11,0.11,0.11,0.11,0.11,0.11,0.11]

    psf_truncate = [None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None]
    psf_hdu_num = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
    # Approximate PSF FWHM (arcsec) per filter; MIRI values scale with
    # wavelength (0.035 arcsec per micron times the central wavelength).
    psf_fwhm = [0.10,0.11,0.12,0.13,0.14,0.17,0.20,0.11,0.11,0.11,0.11,0.12,0.15,0.18,0.25,0.18,0.07,0.08,0.13,
                0.035*5.61,0.035*7.57,0.035*9.90,0.035*11.30,0.035*12.75,0.035*14.96,0.035*17.90,0.035*20.65,0.035*25.11]

    #these settings yield full subhalo (4 cams) convolution in 0.92s!  convolve_fft ftw!

    for pname in psf_names:
        psf_file = os.path.join(psf_dir,pname)
        psf_files.append(psf_file)
        print(psf_file, os.path.lexists(psf_file))

    ### PSFSTD; WFC3 = 0.06 arcsec, ACS = 0.03 arcsec... I think
    ### NIRCAM in header with keyword 'PIXELSCL'; short 0.07925 long 0.0162
    ## acs wfc 0.05 arcsec pixels... PSFSTD x4 oversample?
    ## wfc3 ir 0.13 arcsec
    ## wfc3 uv 0.04 arcsec

    # Pack everything into the analysis-parameters container consumed by
    # ssimp.process_single_broadband.
    mockimage_parameters = ssimp.analysis_parameters('mockimage_default')
    mockimage_parameters.filter_indices = filter_indices
    mockimage_parameters.filter_labels = filter_labels
    mockimage_parameters.pixsize_arcsec = pixsize_arcsec
    mockimage_parameters.morphcode_base = morphcode_dir
    mockimage_parameters.morphcode_files = morphcode_files
    #mockimage_parameters.se_base = se_dir
    #mockimage_parameters.se_files = se_files
    mockimage_parameters.camera_indices = camindices  #None #by default, do all
    mockimage_parameters.psf_files = psf_files
    mockimage_parameters.psf_pix_arcsec = psf_pix_arcsec
    mockimage_parameters.psf_truncate = psf_truncate
    mockimage_parameters.psf_hdu_num = psf_hdu_num
    mockimage_parameters.magsb_limits = magsb_limits
    mockimage_parameters.psf_fwhm_arcsec = psf_fwhm
    mockimage_parameters.photfnu_Jy = photfnu_Jy
    mockimage_parameters.filter_lambda_order = filter_lambda_order
    mockimage_parameters.skip_filters = skip_filter_boolean
    mockimage_parameters.use_nonscatter = use_nonscatter

    #use exactly one detection and segmentation per object, depending on redshift
    #enormous simplification
    #observationally, go w deepest filter.  here... ?
    mockimage_parameters.segment_filter_label = seg_filter_label
    mockimage_parameters.segment_filter_index = np.where(np.asarray(mockimage_parameters.filter_labels) == seg_filter_label)[0][0]

    print(mockimage_parameters.segment_filter_label)
    print(mockimage_parameters.segment_filter_index)

    # Sanity-check the parallel per-filter lists before the long run.
    assert(len(psf_pix_arcsec)==len(pixsize_arcsec))
    assert(len(filter_labels)==len(mockimage_parameters.psf_files))

    bbdirs = []

    for i,bbfile in enumerate(bbfile_list):
        try:
            bbdir = ssimp.process_single_broadband(bbfile,mockimage_parameters,clobber=clobber,do_idl=do_idl,analyze=analyze,bbase="broadbandz",Np=Np)
            bbdirs.append(bbdir)
        except (KeyboardInterrupt,NameError,AttributeError,KeyError,TypeError,IndexError) as e:
            # Programming errors and interrupts abort the whole batch.
            print(e)
            raise
        except:
            # Any other failure is logged and the batch continues.
            print("Exception while processing broadband: ", bbfile)
            print("Error:", sys.exc_info()[0])
        else:
            print("Successfully processed broadband: ", bbfile)

    os.chdir(cwd)

    return bbdirs
if __name__=="__main__":
    # Run twice over the current directory: first without, then with the
    # nonscatter (dust-free) images, at two surface-brightness limits.
    res = process_snapshot(subdirpath='.',clobber=False,seg_filter_label='NC-F200W',magsb_limits=[25.0,27.0],do_idl=False,analyze=True,use_nonscatter=False,Np=4)
    res = process_snapshot(subdirpath='.',clobber=False,seg_filter_label='NC-F200W',magsb_limits=[25.0,27.0],do_idl=False,analyze=True,use_nonscatter=True,Np=4)
| gpl-2.0 |
taynaud/sparkit-learn | splearn/linear_model/base.py | 2 | 5024 | # encoding: utf-8
import operator
import numpy as np
import scipy.sparse as sp
from sklearn.base import copy
from sklearn.linear_model.base import LinearRegression
from ..utils.validation import check_rdd
class SparkLinearModelMixin(object):
    """Arithmetic and Spark-RDD plumbing shared by distributed linear models.

    Supports averaging independently-fitted models via
    ``sum(models) / n``: ``__add__`` combines coefficients on a copy,
    ``__div__`` rescales them.  NOTE(review): ``__div__`` mutates ``self``
    in place while ``__add__`` returns a deep copy -- callers in
    ``_spark_fit`` only divide freshly-created sums, so this asymmetry is
    currently harmless, but it is an observable part of the contract.
    """

    def __add__(self, other):
        """Add method for Linear models with coef and intercept attributes.

        Parameters
        ----------
        other : fitted sklearn linear model
            Model to add.

        Returns
        -------
        model : Linear model
            Model with updated coefficients.
        """
        # Deep-copy so neither operand is mutated.
        model = copy.deepcopy(self)
        model.coef_ += other.coef_
        model.intercept_ += other.intercept_
        return model

    def __radd__(self, other):
        """Reverse add method for Linear models.

        Makes ``sum(models)`` work: the initial ``0 + model`` returns the
        model unchanged.

        Parameters
        ----------
        other : fitted sklearn linear model
            Model to add.

        Returns
        -------
        model : Linear model
            Model with updated coefficients.
        """
        return self if other == 0 else self.__add__(other)

    def __div__(self, other):
        """Division method for Linear models. Used for averaging.

        Parameters
        ----------
        other : integer
            Integer to divide with.

        Returns
        -------
        model : Linear model
            Model with updated coefficients (``self``, modified in place).
        """
        self.coef_ /= other
        self.intercept_ /= other
        return self

    # Python 3 uses __truediv__ for the / operator.
    __truediv__ = __div__

    def _spark_fit(self, cls, Z, *args, **kwargs):
        """Wraps a Scikit-learn Linear model's fit method to use with RDD
        input.

        Fits one model per partition, then averages the coefficients.

        Parameters
        ----------
        cls : class object
            The sklearn linear model's class to wrap.
        Z : TupleRDD or DictRDD
            The distributed train data in a DictRDD.

        Returns
        -------
        self: the wrapped class
        """
        mapper = lambda X_y: super(cls, self).fit(
            X_y[0], X_y[1], *args, **kwargs
        )
        models = Z.map(mapper)
        # Average the per-partition models via the operators defined above.
        avg = models.reduce(operator.add) / models.count()
        self.__dict__.update(avg.__dict__)
        return self

    def _spark_predict(self, cls, X, *args, **kwargs):
        """Wraps a Scikit-learn Linear model's predict method to use with RDD
        input.

        Parameters
        ----------
        cls : class object
            The sklearn linear model's class to wrap.
        Z : ArrayRDD
            The distributed data to predict in a DictRDD.

        Returns
        -------
        self: the wrapped class
        """
        return X.map(lambda X: super(cls, self).predict(X, *args, **kwargs))

    def _to_scikit(self, cls):
        # Re-wrap the fitted state as a plain (local) scikit-learn model.
        new = cls()
        new.__dict__ = self.__dict__
        return new
class SparkLinearRegression(LinearRegression, SparkLinearModelMixin):
    """Distributed implementation of sklearn's Linear Regression.

    Parameters
    ----------
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    n_jobs : The number of jobs to use for the computation.
        If -1 all CPUs are used. This will only provide speedup for
        n_targets > 1 and sufficient large problems.

    Attributes
    ----------
    coef_ : array, shape (n_features, ) or (n_targets, n_features)
        Estimated coefficients for the linear regression problem.
        If multiple targets are passed during the fit (y 2D), this
        is a 2D array of shape (n_targets, n_features), while if only
        one target is passed, this is a 1D array of length n_features.
    intercept_ : array
        Independent term in the linear model.
    """

    def fit(self, Z):
        """
        Fit linear model on distributed data by averaging per-block fits.

        Parameters
        ----------
        Z : DictRDD with (X, y) values
            X containing numpy array or sparse matrix - The training data
            y containing the target values

        Returns
        -------
        self : returns an instance of self.
        """
        check_rdd(Z, {'X': (sp.spmatrix, np.ndarray)})
        return self._spark_fit(SparkLinearRegression, Z)

    def predict(self, X):
        """Distributed method to predict class labels for samples in X.

        Parameters
        ----------
        X : ArrayRDD containing {array-like, sparse matrix}
            Samples.

        Returns
        -------
        C : ArrayRDD
            Predicted class label per sample.
        """
        check_rdd(X, (sp.spmatrix, np.ndarray))
        return self._spark_predict(SparkLinearRegression, X)

    def to_scikit(self):
        # Convert to a plain, local sklearn LinearRegression.
        return self._to_scikit(LinearRegression)
| apache-2.0 |
GaelVaroquaux/scikits.image | doc/ext/docscrape_sphinx.py | 62 | 7703 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
    """Render a NumPy-format docstring as Sphinx/reST markup (Python 2 era).

    Each ``_str_*`` method returns a list of output lines for one
    docstring section; ``__str__`` assembles them in display order.
    """

    def __init__(self, docstring, config={}):
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)

    # string conversion routines
    def _str_header(self, name, symbol='`'):
        return ['.. rubric:: ' + name, '']

    def _str_field_list(self, name):
        return [':' + name + ':']

    def _str_indent(self, doc, indent=4):
        # Prefix every line with `indent` spaces (reST block nesting).
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out

    def _str_signature(self):
        # Signature output is deliberately disabled (matches upstream
        # numpydoc); the code below the return is intentionally dead.
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        # Parameters/Returns/Raises rendered as a reST field list.
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param,param_type,desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc,8)
                out += ['']
        return out

    @property
    def _obj(self):
        # The documented object, whichever subclass attribute holds it.
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None

    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')

            if prefix:
                prefix = '~%s.' % prefix

            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                # Members that exist on the object go into autosummary;
                # the rest are rendered in a manual reST table below.
                if not self._obj or hasattr(self._obj, param):
                    autosum += ["   %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))

            if autosum:
                out += ['.. autosummary::', '   :toctree:', '']
                out += autosum

            if others:
                # Build a fixed-width reST simple table by hand.
                maxlen_0 = max([len(x[0]) for x in others])
                maxlen_1 = max([len(x[1]) for x in others])
                hdr = "="*maxlen_0 + "  " + "="*maxlen_1 + "  " + "="*10
                fmt = '%%%ds  %%%ds  ' % (maxlen_0, maxlen_1)
                n_indent = maxlen_0 + maxlen_1 + 4
                out += [hdr]
                for param, param_type, desc in others:
                    out += [fmt % (param.strip(), param_type)]
                    out += self._str_indent(desc, n_indent)
                out += [hdr]
            out += ['']
        return out

    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out

    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            out += self._str_indent(see_also[2:])
        return out

    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out

        out += ['.. index:: %s' % idx.get('default','')]
        # Python 2 dict iteration (iteritems); sections other than
        # 'default' become index entries.
        for section, references in idx.iteritems():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += ['   single: %s' % (', '.join(references))]
            else:
                out += ['   %s: %s' % (section, ','.join(references))]
        return out

    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex','']
            else:
                out += ['.. latexonly::','']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out

    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])

        # Promote matplotlib examples to a plot:: directive when plotting
        # is enabled and the author did not already use one.
        if (self.use_plots and 'import matplotlib' in examples_str
                and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')

    def __str__(self, indent=0, func_role="obj"):
        # Assemble all sections in canonical numpydoc display order.
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Raises'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Attributes', 'Methods'):
            out += self._str_member_list(param_list)
        out = self._str_indent(out,indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """Sphinx renderer for function/method docstrings."""

    def __init__(self, obj, doc=None, config={}):
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """Sphinx renderer for class docstrings."""

    def __init__(self, obj, doc=None, func_doc=None, config={}):
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): the func_doc argument is discarded and None is
        # forwarded instead -- this mirrors upstream numpydoc, so it
        # appears deliberate; confirm before "fixing".
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    """Sphinx renderer for arbitrary objects with an explicit docstring."""

    def __init__(self, obj, doc=None, config={}):
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
    """Return the appropriate Sphinx docstring wrapper for *obj*.

    *what* may be 'class', 'module', 'function', 'method' or 'object';
    when omitted it is inferred from the object itself.
    """
    if what is None:
        # Infer the object category when the caller did not specify one.
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'

    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    if what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    # Everything else (modules, plain objects) falls back to SphinxObjDoc.
    if doc is None:
        doc = pydoc.getdoc(obj)
    return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
doanduyhai/incubator-zeppelin | python/src/main/resources/grpc/python/zeppelin_python.py | 9 | 4436 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class PyZeppelinContext(object):
""" A context impl that uses Py4j to communicate to JVM
"""
    def __init__(self, z):
        # z is the JVM-side ZeppelinContext proxy obtained through Py4j.
        self.z = z
        # Java class handles used to build dynamic-form option objects.
        self.paramOption = gateway.jvm.org.apache.zeppelin.display.ui.OptionInput.ParamOption
        self.javaList = gateway.jvm.java.util.ArrayList
        # Row limit applied when rendering DataFrames as tables.
        self.max_result = z.getMaxResult()
    def input(self, name, defaultValue=""):
        # Delegate to the JVM-side dynamic-form text input element.
        return self.z.input(name, defaultValue)
    def textbox(self, name, defaultValue=""):
        # Paragraph-scoped textbox form element.
        return self.z.textbox(name, defaultValue)
    def noteTextbox(self, name, defaultValue=""):
        # Note-scoped textbox form element (shared across paragraphs).
        return self.z.noteTextbox(name, defaultValue)
def select(self, name, options, defaultValue=""):
return self.z.select(name, defaultValue, self.getParamOptions(options))
def noteSelect(self, name, options, defaultValue=""):
return self.z.noteSelect(name, defaultValue, self.getParamOptions(options))
def checkbox(self, name, options, defaultChecked=[]):
return self.z.checkbox(name, self.getDefaultChecked(defaultChecked), self.getParamOptions(options))
def noteCheckbox(self, name, options, defaultChecked=[]):
return self.z.noteCheckbox(name, self.getDefaultChecked(defaultChecked), self.getParamOptions(options))
def getParamOptions(self, options):
javaOptions = gateway.new_array(self.paramOption, len(options))
i = 0
for tuple in options:
javaOptions[i] = self.paramOption(tuple[0], tuple[1])
i += 1
return javaOptions
def getDefaultChecked(self, defaultChecked):
javaDefaultChecked = self.javaList()
for check in defaultChecked:
javaDefaultChecked.append(check)
return javaDefaultChecked
def show(self, p, **kwargs):
if type(p).__name__ == "DataFrame": # does not play well with sub-classes
# `isinstance(p, DataFrame)` would req `import pandas.core.frame.DataFrame`
# and so a dependency on pandas
self.show_dataframe(p, **kwargs)
elif hasattr(p, '__call__'):
p() #error reporting
def show_dataframe(self, df, show_index=False, **kwargs):
"""Pretty prints DF using Table Display System
"""
limit = len(df) > self.max_result
header_buf = StringIO("")
if show_index:
idx_name = str(df.index.name) if df.index.name is not None else ""
header_buf.write(idx_name + "\t")
header_buf.write(str(df.columns[0]))
for col in df.columns[1:]:
header_buf.write("\t")
header_buf.write(str(col))
header_buf.write("\n")
body_buf = StringIO("")
rows = df.head(self.max_result).values if limit else df.values
index = df.index.values
for idx, row in zip(index, rows):
if show_index:
body_buf.write("%html <strong>{}</strong>".format(idx))
body_buf.write("\t")
body_buf.write(str(row[0]))
for cell in row[1:]:
body_buf.write("\t")
body_buf.write(str(cell))
body_buf.write("\n")
body_buf.seek(0); header_buf.seek(0)
#TODO(bzz): fix it, so it shows red notice, as in Spark
print("%table " + header_buf.read() + body_buf.read()) # +
# ("\n<font color=red>Results are limited by {}.</font>" \
# .format(self.max_result) if limit else "")
#)
body_buf.close(); header_buf.close()
# start JVM gateway
# NOTE: ${JVM_GATEWAY_PORT} is a template placeholder substituted by the
# Zeppelin interpreter before this script runs; the file is not valid Python
# until that substitution happens.
client = GatewayClient(address='127.0.0.1', port=${JVM_GATEWAY_PORT})
gateway = JavaGateway(client)
java_import(gateway.jvm, "org.apache.zeppelin.display.Input")
# JVM entry point exposing the ZeppelinContext to this interpreter process.
intp = gateway.entry_point
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext())
| apache-2.0 |
geoscixyz/em_examples | em_examples/DCWidgetResLayer2D.py | 1 | 28082 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from SimPEG import Mesh, Maps, SolverLU, Utils
from SimPEG.Utils import ExtractCoreMesh
import numpy as np
from SimPEG.EM.Static import DC
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.ticker import LogFormatter
from matplotlib.path import Path
import matplotlib.patches as patches
from scipy.constants import epsilon_0
import copy
from ipywidgets import interact, IntSlider, FloatSlider, FloatText, ToggleButtons
from .Base import widgetify
# Mesh, sigmaMap can be globals global
# ---- Module-level mesh and mappings shared by every function below ----
npad = 15        # number of padding cells on each side of the core region
growrate = 2.    # geometric expansion factor of the padding cells
cs = 0.5         # core cell width (m)
hx = [(cs, npad, -growrate), (cs, 200), (cs, npad, growrate)]
hy = [(cs, npad, -growrate), (cs, 100)]
mesh = Mesh.TensorMesh([hx, hy], "CN")
# Identity mapping: model parameters are conductivities directly.
idmap = Maps.IdentityMap(mesh)
sigmaMap = idmap
# Electrode spacing and nominal positions (m).
dx = 5
xr = np.arange(-40, 41, dx)
dxr = np.diff(xr)
# Plotting-window limits (m).
xmin = -40.
xmax = 40.
ymin = -40.
ymax = 8.
xylim = np.c_[[xmin, ymin], [xmax, ymax]]
# Core-mesh extraction: cell centers and faces inside the plotting window.
indCC, meshcore = ExtractCoreMesh(xylim, mesh)
indx = (mesh.gridFx[:, 0] >= xmin) & (mesh.gridFx[:, 0] <= xmax) \
    & (mesh.gridFx[:, 1] >= ymin) & (mesh.gridFx[:, 1] <= ymax)
indy = (mesh.gridFy[:, 0] >= xmin) & (mesh.gridFy[:, 0] <= xmax) \
    & (mesh.gridFy[:, 1] >= ymin) & (mesh.gridFy[:, 1] <= ymax)
indF = np.concatenate((indx, indy))
def model_fields(A, B, zcLayer, dzLayer, xc, zc, r, sigLayer, sigTarget, sigHalf):
    """Run the DC forward simulations for the layered model with a cylinder.

    A, B: current-electrode x positions; B == [] selects a pole source
        (single electrode) instead of a dipole.
    zcLayer, dzLayer: layer center depth and thickness.
    xc, zc, r: cylinder center and radius.
    sigLayer, sigTarget, sigHalf: layer / cylinder / halfspace conductivities.

    Returns (mtrue, mhalf, src, primary_field, total_field): the true and
    halfspace conductivity models, the source object, and the fields
    simulated on each model.
    """
    # Create halfspace model
    mhalf = sigHalf * np.ones([mesh.nC, ])
    # Add layer to model
    mLayer = addLayer2Mod(zcLayer, dzLayer, mhalf, sigLayer)
    # Add plate or cylinder
    # fullMod = addPlate2Mod(xc,zc,dx,dz,rotAng,LayerMod,sigTarget)
    mtrue = addCylinder2Mod(xc, zc, r, mLayer, sigTarget)
    # Empty receiver arrays: only the field objects themselves are used.
    Mx = np.empty(shape=(0, 2))
    Nx = np.empty(shape=(0, 2))
    rx = DC.Rx.Dipole(Mx, Nx)
    if(B == []):
        src = DC.Src.Pole([rx], np.r_[A, 0.])
    else:
        src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
    survey = DC.Survey([src])
    survey_prim = DC.Survey([src])
    problem = DC.Problem3D_CC(mesh, sigmaMap=sigmaMap)
    problem_prim = DC.Problem3D_CC(mesh, sigmaMap=sigmaMap)
    problem.Solver = SolverLU
    problem_prim.Solver = SolverLU
    problem.pair(survey)
    problem_prim.pair(survey_prim)
    # Primary field = homogeneous halfspace; total = layer + cylinder model.
    primary_field = problem_prim.fields(mhalf)
    total_field = problem.fields(mtrue)
    return mtrue, mhalf, src, primary_field, total_field
def addLayer2Mod(zcLayer, dzLayer, modd, sigLayer):
    """Return a copy of *modd* with conductivity *sigLayer* assigned to every
    cell whose center lies inside the horizontal layer centered at *zcLayer*
    with thickness *dzLayer*.
    """
    mod = copy.copy(modd)
    zTop = zcLayer + dzLayer / 2.
    zBottom = zcLayer - dzLayer / 2.
    # Vectorized selection of cell centers inside [zBottom, zTop].
    cellZ = mesh.gridCC[:, 1]
    inLayer = (cellZ <= zTop) & (cellZ >= zBottom)
    mod[inLayer] = sigLayer
    return mod
def getCylinderPoints(xc, zc, r):
    """Return an (n, 2) array of x-z points tracing the outline of a circle
    of radius *r* centered at (*xc*, *zc*), closed back on its first point.

    The top arc is sampled left-to-right and the bottom arc right-to-left so
    the points run around the circle in order.
    """
    step = r / 10.
    xTop = np.arange(-r, r + step, step)
    xBottom = np.arange(r, -r - step, -step)
    # z = zc +/- sqrt(r^2 - x^2) gives the upper and lower half circles.
    zTop = np.sqrt(-xTop**2. + r**2.) + zc
    zBottom = -np.sqrt(-xBottom**2. + r**2.) + zc
    # Shift from x = 0 to xc and drop the duplicated endpoint of each arc.
    topArc = np.column_stack([xTop + xc, zTop])[:-1]
    bottomArc = np.column_stack([xBottom + xc, zBottom])[:-1]
    # Close the polygon by repeating the first top-arc point.
    return np.vstack([topArc, bottomArc, topArc[0]])
def addCylinder2Mod(xc, zc, r, modd, sigCylinder):
    """Return a copy of *modd* with conductivity *sigCylinder* assigned to
    every cell whose center falls inside the circle of radius *r* centered
    at (*xc*, *zc*).
    """
    # Outline of the cylinder as a closed polygon.
    outline = getCylinderPoints(xc, zc, r)
    # Build the matplotlib Path codes: MOVETO, LINETO..., CLOSEPOLY.
    codes = [Path.LINETO] * outline.shape[0]
    codes[0] = Path.MOVETO
    codes[-1] = Path.CLOSEPOLY
    cylinderPath = Path(outline, codes)
    mod = copy.copy(modd)
    # Cells whose centers lie inside the polygon get the cylinder value.
    insideInd = np.where(cylinderPath.contains_points(mesh.gridCC))
    mod[insideInd] = sigCylinder
    return mod
# def getPlateCorners(xc, zc, dx, dz, rotAng):
# # Form rotation matix
# rotMat = np.array([[np.cos(rotAng*(np.pi/180.)), -np.sin(rotAng*(np.pi/180.))],[np.sin(rotAng*(np.pi/180.)), np.cos(rotAng*(np.pi/180.))]])
# originCorners = np.array([[-0.5*dx, 0.5*dz], [0.5*dx, 0.5*dz], [-0.5*dx, -0.5*dz], [0.5*dx, -0.5*dz]])
# rotPlateCorners = np.dot(originCorners,rotMat)
# plateCorners = rotPlateCorners + np.hstack([np.repeat(xc,4).reshape([4,1]),np.repeat(zc,4).reshape([4,1])])
# return plateCorners
# def addPlate2Mod(xc, zc, dx, dz, rotAng, mod, sigPlate):
# # use matplotlib paths to find CC inside of polygon
# plateCorners = getPlateCorners(xc,zc,dx,dz,rotAng)
# verts = [
# (plateCorners[0,:]), # left, top
# (plateCorners[1,:]), # right, top
# (plateCorners[3,:]), # right, bottom
# (plateCorners[2,:]), # left, bottom
# (plateCorners[0,:]), # left, top (closes polygon)
# ]
# codes = [Path.MOVETO,
# Path.LINETO,
# Path.LINETO,
# Path.LINETO,
# Path.CLOSEPOLY,
# ]
# path = Path(verts, codes)
# CCLocs = mesh.gridCC
# insideInd = np.where(path.contains_points(CCLocs))
# #Check selected cell centers by plotting
# # print insideInd
# # fig = plt.figure()
# # ax = fig.add_subplot(111)
# # patch = patches.PathPatch(path, facecolor='none', lw=2)
# # ax.add_patch(patch)
# # plt.scatter(CCLocs[insideInd,0],CCLocs[insideInd,1])
# # ax.set_xlim(-10,10)
# # ax.set_ylim(-20,0)
# # plt.axes().set_aspect('equal')
# # plt.show()
# mod[insideInd] = sigPlate
# return mod
def get_Surface_Potentials(survey, src, field_obj):
    """Extract the electric potential along the top row of cell centers.

    For pole-source arrays ("Pole-Dipole", "Pole-Pole") the potential is
    re-referenced to a point 60 m beyond the right edge of the plotting
    window so profiles are comparable across survey types.

    Returns (xSurface, phiSurface, phiScale) where phiScale is the reference
    potential subtracted (0 for dipole sources).
    """
    phi = field_obj[src, 'phi']
    CCLoc = mesh.gridCC
    # The "surface" is the highest row of cell centers in the mesh.
    zsurfaceLoc = np.max(CCLoc[:, 1])
    surfaceInd = np.where(CCLoc[:, 1] == zsurfaceLoc)
    phiSurface = phi[surfaceInd]
    xSurface = CCLoc[surfaceInd, 0].T
    phiScale = 0.
    if(survey == "Pole-Dipole" or survey == "Pole-Pole"):
        # Distant reference electrode for pole sources.
        refInd = Utils.closestPoints(mesh, [xmax + 60., 0.], gridLoc='CC')
        # refPoint = CCLoc[refInd]
        # refSurfaceInd = np.where(xSurface == refPoint[0])
        # phiScale = np.median(phiSurface)
        phiScale = phi[refInd]
        phiSurface = phiSurface - phiScale
    return xSurface, phiSurface, phiScale
def sumCylinderCharges(xc, zc, r, qSecondary):
    """Sum secondary charges accumulated around the cylinder.

    Integrates *qSecondary* over cells inside a circle slightly larger
    (r + 0.5) than the cylinder, splits them by sign, and returns
    (qPosSum, qNegSum, qPosAvgLoc, qNegAvgLoc) where the locations are
    charge-weighted centroids, or fixed fallback positions when one sign
    is absent.
    """
    chargeRegionVerts = getCylinderPoints(xc, zc, r + 0.5)
    # Closed matplotlib Path around the charge-accumulation region.
    codes = chargeRegionVerts.shape[0] * [Path.LINETO]
    codes[0] = Path.MOVETO
    codes[-1] = Path.CLOSEPOLY
    chargeRegionPath = Path(chargeRegionVerts, codes)
    CCLocs = mesh.gridCC
    chargeRegionInsideInd = np.where(chargeRegionPath.contains_points(CCLocs))
    plateChargeLocs = CCLocs[chargeRegionInsideInd]
    plateCharge = qSecondary[chargeRegionInsideInd]
    posInd = np.where(plateCharge >= 0)
    negInd = np.where(plateCharge < 0)
    qPos = Utils.mkvc(plateCharge[posInd])
    qNeg = Utils.mkvc(plateCharge[negInd])
    qPosLoc = plateChargeLocs[posInd, :][0]
    qNegLoc = plateChargeLocs[negInd, :][0]
    # NOTE(review): qPosData / qNegData are assembled but never used below.
    qPosData = np.vstack([qPosLoc[:, 0], qPosLoc[:, 1], qPos]).T
    qNegData = np.vstack([qNegLoc[:, 0], qNegLoc[:, 1], qNeg]).T
    if qNeg.shape == (0,) or qPos.shape == (0,):
        # One polarity missing: fall back to fixed annotation positions.
        qNegAvgLoc = np.r_[-10, -10]
        qPosAvgLoc = np.r_[+10, -10]
    else:
        qNegAvgLoc = np.average(qNegLoc, axis=0, weights=qNeg)
        qPosAvgLoc = np.average(qPosLoc, axis=0, weights=qPos)
    qPosSum = np.sum(qPos)
    qNegSum = np.sum(qNeg)
    # # Check things by plotting
    # fig = plt.figure()
    # ax = fig.add_subplot(111)
    # platePatch = patches.PathPatch(platePath, facecolor='none', lw=2)
    # ax.add_patch(platePatch)
    # chargeRegionPatch = patches.PathPatch(chargeRegionPath, facecolor='none', lw=2)
    # ax.add_patch(chargeRegionPatch)
    # plt.scatter(qNegAvgLoc[0],qNegAvgLoc[1],color='b')
    # plt.scatter(qPosAvgLoc[0],qPosAvgLoc[1],color='r')
    # ax.set_xlim(-15,5)
    # ax.set_ylim(-25,-5)
    # plt.axes().set_aspect('equal')
    # plt.show()
    return qPosSum, qNegSum, qPosAvgLoc, qNegAvgLoc
def getSensitivity(survey, A, B, M, N, model):
    """Compute the sensitivity of one datum for the given electrode layout.

    Builds the source/receiver configuration implied by *survey* from the
    surface electrode x positions A, B (current) and M, N (potential), and
    returns J^T applied to a unit data vector, evaluated at *model*.
    """
    if(survey == "Dipole-Dipole"):
        rx = DC.Rx.Dipole(np.r_[M, 0.], np.r_[N, 0.])
        src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
    elif(survey == "Pole-Dipole"):
        rx = DC.Rx.Dipole(np.r_[M, 0.], np.r_[N, 0.])
        src = DC.Src.Pole([rx], np.r_[A, 0.])
    elif(survey == "Dipole-Pole"):
        rx = DC.Rx.Pole(np.r_[M, 0.])
        src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
    elif(survey == "Pole-Pole"):
        rx = DC.Rx.Pole(np.r_[M, 0.])
        src = DC.Src.Pole([rx], np.r_[A, 0.])
    survey = DC.Survey([src])
    problem = DC.Problem3D_CC(mesh, sigmaMap=sigmaMap)
    problem.Solver = SolverLU
    problem.pair(survey)
    fieldObj = problem.fields(model)
    # Transpose-Jacobian times a unit vector = sensitivity map for the datum.
    J = problem.Jtvec(model, np.array([1.]), f=fieldObj)
    return J
def calculateRhoA(survey, VM, VN, A, B, M, N):
    """Apparent resistivity for a four-electrode measurement.

    survey: one of "Dipole-Dipole", "Pole-Dipole", "Dipole-Pole",
        "Pole-Pole"; selects which electrodes enter the geometric factor.
    VM, VN: measured potentials at M and N.
    A, B, M, N: electrode x positions (unused ones are ignored).
    """
    eps = 1e-9  # to stabilize division

    def invdist(p, q):
        # 1 / |p - q|, regularized so coincident electrodes don't blow up.
        return 1. / (np.abs(p - q) + eps)

    if(survey == "Dipole-Dipole"):
        G = 1. / (invdist(A, M) - invdist(M, B) - invdist(N, A) + invdist(N, B))
        rho_a = (VM - VN) * 2. * np.pi * G
    elif(survey == "Pole-Dipole"):
        G = 1. / (invdist(A, M) - invdist(N, A))
        rho_a = (VM - VN) * 2. * np.pi * G
    elif(survey == "Dipole-Pole"):
        G = 1. / (invdist(A, M) - invdist(M, B))
        rho_a = (VM) * 2. * np.pi * G
    elif(survey == "Pole-Pole"):
        G = 1. / (invdist(A, M))
        rho_a = (VM) * 2. * np.pi * G
    return rho_a
# Inline functions for computing apparent resistivity
# eps = 1e-9 #to stabilize division
# G = lambda A, B, M, N: 1. / ( 1./(np.abs(A-M)+eps) - 1./(np.abs(M-B)+eps) - 1./(np.abs(N-A)+eps) + 1./(np.abs(N-B)+eps) )
# rho_a = lambda VM,VN, A,B,M,N: (VM-VN)*2.*np.pi*G(A,B,M,N)
def plot_Surface_Potentials(survey, A, B, M, N, zcLayer, dzLayer, xc, zc, r, rhoHalf, rhoLayer, rhoTarget, Field, Type, Scale):
    """Interactive plotting driver for the resistive-layer DC widget.

    Top panel: surface potential profile for the chosen electrode array,
    with the apparent resistivity annotated. Bottom panel: the requested
    *Field* ('Model', 'Potential', 'E', 'J', 'Charge', 'Sensitivity') for
    the requested *Type* ('Total', 'Primary', 'Secondary') on a 'Linear'
    or 'Log' color scale.

    A, B are current-electrode positions (B ignored for pole sources);
    M, N are potential-electrode positions (N ignored for pole receivers).
    zcLayer/dzLayer describe the layer, xc/zc/r the cylinder, and
    rhoHalf/rhoLayer/rhoTarget the halfspace/layer/cylinder resistivities.
    """
    labelsize = 16.
    ticksize = 16.
    # Convert resistivities to conductivities for the forward model.
    sigTarget = 1. / rhoTarget
    # rhoLayer = np.exp(logRhoLayer)
    sigLayer = 1. / rhoLayer
    sigHalf = 1. / rhoHalf
    # Pole sources are flagged by an empty B (see model_fields).
    if(survey == "Pole-Dipole" or survey == "Pole-Pole"):
        B = []
    mtrue, mhalf, src, primary_field, total_field = model_fields(
        A, B, zcLayer, dzLayer, xc, zc, r, sigLayer, sigTarget, sigHalf)
    fig, ax = plt.subplots(2, 1, figsize=(9 * 1.5, 9 * 1.8), sharex=True)
    fig.subplots_adjust(right=0.8, wspace=0.05, hspace=0.05)
    xSurface, phiTotalSurface, phiScaleTotal = get_Surface_Potentials(
        survey, src, total_field)
    xSurface, phiPrimSurface, phiScalePrim = get_Surface_Potentials(
        survey, src, primary_field)
    ylim = np.r_[-1., 1.] * np.max(np.abs(phiTotalSurface))
    xlim = np.array([-40, 40])
    # Look up measured potentials at M (and N for dipole receivers).
    if(survey == "Dipole-Pole" or survey == "Pole-Pole"):
        MInd = np.where(xSurface == M)
        N = []
        VM = phiTotalSurface[MInd[0]]
        VN = 0.
        VMprim = phiPrimSurface[MInd[0]]
        VNprim = 0.
    else:
        MInd = np.where(xSurface == M)
        NInd = np.where(xSurface == N)
        VM = phiTotalSurface[MInd[0]]
        VN = phiTotalSurface[NInd[0]]
        VMprim = phiPrimSurface[MInd[0]]
        VNprim = phiPrimSurface[NInd[0]]
    # 2D geometric factor: calibrates the apparent resistivity so the
    # primary (halfspace) response reproduces rhoHalf exactly.
    G2D = rhoHalf / (calculateRhoA(survey, VMprim, VNprim, A, B, M, N))
    # ---- Top panel: surface potential profiles ----
    ax[0].plot(xSurface, phiTotalSurface, color=[0.1, 0.5, 0.1], linewidth=2)
    ax[0].plot(xSurface, phiPrimSurface,
               linestyle='dashed', linewidth=0.5, color='k')
    ax[0].grid(which='both', linestyle='-', linewidth=0.5,
               color=[0.2, 0.2, 0.2], alpha=0.5)
    if(survey == "Pole-Dipole" or survey == "Pole-Pole"):
        ax[0].plot(A, 0, '+', markersize=12,
                   markeredgewidth=3, color=[1., 0., 0])
    else:
        ax[0].plot(A, 0, '+', markersize=12,
                   markeredgewidth=3, color=[1., 0., 0])
        ax[0].plot(B, 0, '_', markersize=12,
                   markeredgewidth=3, color=[0., 0., 1.])
    ax[0].set_ylabel('Potential, (V)', fontsize=14)
    ax[0].set_xlabel('x (m)', fontsize=14)
    ax[0].set_xlim(xlim)
    ax[0].set_ylim(ylim)
    # Annotate measured potentials, clamped into the visible y range.
    if(survey == "Dipole-Pole" or survey == "Pole-Pole"):
        ax[0].plot(M, VM, 'o', color='k')
        xytextM = (
            M + 0.5, np.max([np.min([VM, ylim.max()]), ylim.min()]) + 0.5)
        ax[0].annotate('%2.1e' % (VM), xy=xytextM,
                       xytext=xytextM, fontsize=labelsize)
    else:
        ax[0].plot(M, VM, 'o', color='k')
        ax[0].plot(N, VN, 'o', color='k')
        xytextM = (
            M + 0.5, np.max([np.min([VM, ylim.max()]), ylim.min()]) + 1.)
        xytextN = (
            N + 0.5, np.max([np.min([VN, ylim.max()]), ylim.min()]) + 1.)
        ax[0].annotate('%2.1e' % (VM), xy=xytextM,
                       xytext=xytextM, fontsize=labelsize)
        ax[0].annotate('%2.1e' % (VN), xy=xytextN,
                       xytext=xytextN, fontsize=labelsize)
    ax[0].tick_params(axis='both', which='major', labelsize=ticksize)
    props = dict(boxstyle='round', facecolor='grey', alpha=0.4)
    ax[0].text(xlim.max() + 1, ylim.max() - 0.1 * ylim.max(), '$\\rho_a$ = %2.2f' % (G2D * calculateRhoA(survey, VM, VN, A, B, M, N)),
               verticalalignment='bottom', bbox=props, fontsize=labelsize)
    ax[0].legend(['Model Potential', 'Layered Earth Potential'],
                 loc=3, fontsize=labelsize)
    # ---- Bottom panel: select data, colormap and normalization per Field ----
    if Field == 'Model':
        label = 'Resisitivity (ohm-m)'
        xtype = 'CC'
        view = 'real'
        streamOpts = None
        ind = indCC
        formatter = "%.1e"
        pcolorOpts = {"cmap": "jet_r"}
        if Scale == 'Log':
            pcolorOpts = {'norm': matplotlib.colors.LogNorm(), "cmap": "jet_r"}
        if Type == 'Total':
            u = 1. / (sigmaMap * mtrue)
        elif Type == 'Primary':
            u = 1. / (sigmaMap * mhalf)
        elif Type == 'Secondary':
            # Difference of resistivities can be negative: symlog norm.
            u = 1. / (sigmaMap * mtrue) - 1. / (sigmaMap * mhalf)
            if Scale == 'Log':
                linthresh = 10.
                pcolorOpts = {'norm': matplotlib.colors.SymLogNorm(
                    linthresh=linthresh, linscale=0.2), "cmap": "jet_r"}
    elif Field == 'Potential':
        label = 'Potential (V)'
        xtype = 'CC'
        view = 'real'
        streamOpts = None
        ind = indCC
        formatter = "%.1e"
        pcolorOpts = {"cmap": "viridis"}
        if Scale == 'Log':
            linthresh = 10.
            pcolorOpts = {'norm': matplotlib.colors.SymLogNorm(
                linthresh=linthresh, linscale=0.2), "cmap": "viridis"}
        if Type == 'Total':
            # formatter = LogFormatter(10, labelOnlyBase=False)
            # pcolorOpts = {'norm':matplotlib.colors.SymLogNorm(linthresh=10, linscale=0.1)}
            u = total_field[src, 'phi'] - phiScaleTotal
        elif Type == 'Primary':
            # formatter = LogFormatter(10, labelOnlyBase=False)
            # pcolorOpts = {'norm':matplotlib.colors.SymLogNorm(linthresh=10, linscale=0.1)}
            u = primary_field[src, 'phi'] - phiScalePrim
        elif Type == 'Secondary':
            # formatter = None
            # pcolorOpts = {"cmap":"viridis"}
            uTotal = total_field[src, 'phi'] - phiScaleTotal
            uPrim = primary_field[src, 'phi'] - phiScalePrim
            u = uTotal - uPrim
    elif Field == 'E':
        label = 'Electric Field (V/m)'
        xtype = 'F'
        view = 'vec'
        streamOpts = {'color': 'w'}
        ind = indF
        # formatter = LogFormatter(10, labelOnlyBase=False)
        pcolorOpts = {"cmap": "viridis"}
        if Scale == 'Log':
            pcolorOpts = {'norm': matplotlib.colors.LogNorm(),
                          "cmap": "viridis"}
        formatter = "%.1e"
        if Type == 'Total':
            u = total_field[src, 'e']
        elif Type == 'Primary':
            u = primary_field[src, 'e']
        elif Type == 'Secondary':
            uTotal = total_field[src, 'e']
            uPrim = primary_field[src, 'e']
            u = uTotal - uPrim
    elif Field == 'J':
        label = 'Current density ($A/m^2$)'
        xtype = 'F'
        view = 'vec'
        streamOpts = {'color': 'w'}
        ind = indF
        # formatter = LogFormatter(10, labelOnlyBase=False)
        pcolorOpts = {"cmap": "viridis"}
        if Scale == 'Log':
            pcolorOpts = {'norm': matplotlib.colors.LogNorm(),
                          "cmap": "viridis"}
        formatter = "%.1e"
        if Type == 'Total':
            u = total_field[src, 'j']
        elif Type == 'Primary':
            u = primary_field[src, 'j']
        elif Type == 'Secondary':
            uTotal = total_field[src, 'j']
            uPrim = primary_field[src, 'j']
            u = uTotal - uPrim
    elif Field == 'Charge':
        label = 'Charge Density ($C/m^2$)'
        xtype = 'CC'
        view = 'real'
        streamOpts = None
        ind = indCC
        # formatter = LogFormatter(10, labelOnlyBase=False)
        pcolorOpts = {"cmap": "RdBu_r"}
        if Scale == 'Log':
            linthresh = 1e-12
            pcolorOpts = {'norm': matplotlib.colors.SymLogNorm(
                linthresh=linthresh, linscale=0.2), "cmap": "RdBu_r"}
        formatter = "%.1e"
        if Type == 'Total':
            u = total_field[src, 'charge']
        elif Type == 'Primary':
            u = primary_field[src, 'charge']
        elif Type == 'Secondary':
            uTotal = total_field[src, 'charge']
            uPrim = primary_field[src, 'charge']
            u = uTotal - uPrim
    elif Field == 'Sensitivity':
        label = 'Sensitivity'
        xtype = 'CC'
        view = 'real'
        streamOpts = None
        ind = indCC
        # formatter = None
        # pcolorOpts = {"cmap":"viridis"}
        # formatter = LogFormatter(10, labelOnlyBase=False)
        pcolorOpts = {"cmap": "viridis"}
        if Scale == 'Log':
            linthresh = 1.
            pcolorOpts = {'norm': matplotlib.colors.SymLogNorm(
                linthresh=linthresh, linscale=0.2), "cmap": "viridis"}
        # formatter = formatter = "$10^{%.1f}$"
        formatter = "%.1e"
        if Type == 'Total':
            u = getSensitivity(survey, A, B, M, N, mtrue)
        elif Type == 'Primary':
            u = getSensitivity(survey, A, B, M, N, mhalf)
        elif Type == 'Secondary':
            uTotal = getSensitivity(survey, A, B, M, N, mtrue)
            uPrim = getSensitivity(survey, A, B, M, N, mhalf)
            u = uTotal - uPrim
    # Small positive offset keeps log normalization away from exact zeros.
    if Scale == 'Log':
        eps = 1e-16
    else:
        eps = 0.
    dat = meshcore.plotImage(u[ind] + eps, vType=xtype, ax=ax[1], grid=False, view=view,
                             streamOpts=streamOpts, pcolorOpts=pcolorOpts)  # gridOpts={'color':'k', 'alpha':0.5}
    # Get cylinder outline
    cylinderPoints = getCylinderPoints(xc, zc, r)
    if(rhoTarget != rhoHalf):
        ax[1].plot(cylinderPoints[:, 0], cylinderPoints[
                   :, 1], linestyle='dashed', color='k')
    if(rhoLayer != rhoHalf):
        # Dashed lines marking the top and bottom of the layer.
        layerX = np.arange(xmin, xmax + 1)
        layerTopY = (zcLayer + dzLayer / 2.) * np.ones_like(layerX)
        layerBottomY = (zcLayer - dzLayer / 2.) * np.ones_like(layerX)
        ax[1].plot(layerX, layerTopY, linestyle='dashed', color='k')
        ax[1].plot(layerX, layerBottomY, linestyle='dashed', color='k')
    # Annotate the summed secondary charges around the cylinder.
    if (Field == 'Charge') and (Type != 'Primary') and (Type != 'Total'):
        qTotal = total_field[src, 'charge']
        qPrim = primary_field[src, 'charge']
        qSecondary = qTotal - qPrim
        qPosSum, qNegSum, qPosAvgLoc, qNegAvgLoc = sumCylinderCharges(
            xc, zc, r, qSecondary)
        ax[1].plot(qPosAvgLoc[0], qPosAvgLoc[1], marker='.',
                   color='black', markersize=labelsize)
        ax[1].plot(qNegAvgLoc[0], qNegAvgLoc[1], marker='.',
                   color='black', markersize=labelsize)
        # Place the labels on the outward side of each charge centroid.
        if(qPosAvgLoc[0] > qNegAvgLoc[0]):
            xytext_qPos = (qPosAvgLoc[0] + 1., qPosAvgLoc[1] - 0.5)
            xytext_qNeg = (qNegAvgLoc[0] - 15., qNegAvgLoc[1] - 0.5)
        else:
            xytext_qPos = (qPosAvgLoc[0] - 15., qPosAvgLoc[1] - 0.5)
            xytext_qNeg = (qNegAvgLoc[0] + 1., qNegAvgLoc[1] - 0.5)
        ax[1].annotate('+Q = %2.1e' % (qPosSum), xy=xytext_qPos,
                       xytext=xytext_qPos, fontsize=labelsize)
        ax[1].annotate('-Q = %2.1e' % (qNegSum), xy=xytext_qNeg,
                       xytext=xytext_qNeg, fontsize=labelsize)
    ax[1].set_xlabel('x (m)', fontsize=labelsize)
    ax[1].set_ylabel('z (m)', fontsize=labelsize)
    # Mark the electrode positions used by this survey type.
    if(survey == "Dipole-Dipole"):
        ax[1].plot(A, 1., marker='v', color='red', markersize=labelsize)
        ax[1].plot(B, 1., marker='v', color='blue', markersize=labelsize)
        ax[1].plot(M, 1., marker='^', color='yellow', markersize=labelsize)
        ax[1].plot(N, 1., marker='^', color='green', markersize=labelsize)
        xytextA1 = (A - 0.5, 2.5)
        xytextB1 = (B - 0.5, 2.5)
        xytextM1 = (M - 0.5, 2.5)
        xytextN1 = (N - 0.5, 2.5)
        ax[1].annotate('A', xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
        ax[1].annotate('B', xy=xytextB1, xytext=xytextB1, fontsize=labelsize)
        ax[1].annotate('M', xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
        ax[1].annotate('N', xy=xytextN1, xytext=xytextN1, fontsize=labelsize)
    elif(survey == "Pole-Dipole"):
        ax[1].plot(A, 1., marker='v', color='red', markersize=labelsize)
        ax[1].plot(M, 1., marker='^', color='yellow', markersize=labelsize)
        ax[1].plot(N, 1., marker='^', color='green', markersize=labelsize)
        xytextA1 = (A - 0.5, 2.5)
        xytextM1 = (M - 0.5, 2.5)
        xytextN1 = (N - 0.5, 2.5)
        ax[1].annotate('A', xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
        ax[1].annotate('M', xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
        ax[1].annotate('N', xy=xytextN1, xytext=xytextN1, fontsize=labelsize)
    elif(survey == "Dipole-Pole"):
        ax[1].plot(A, 1., marker='v', color='red', markersize=labelsize)
        ax[1].plot(B, 1., marker='v', color='blue', markersize=labelsize)
        ax[1].plot(M, 1., marker='^', color='yellow', markersize=labelsize)
        xytextA1 = (A - 0.5, 2.5)
        xytextB1 = (B - 0.5, 2.5)
        xytextM1 = (M - 0.5, 2.5)
        ax[1].annotate('A', xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
        ax[1].annotate('B', xy=xytextB1, xytext=xytextB1, fontsize=labelsize)
        ax[1].annotate('M', xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
    elif(survey == "Pole-Pole"):
        ax[1].plot(A, 1., marker='v', color='red', markersize=labelsize)
        ax[1].plot(M, 1., marker='^', color='yellow', markersize=labelsize)
        xytextA1 = (A - 0.5, 2.5)
        xytextM1 = (M - 0.5, 2.5)
        ax[1].annotate('A', xy=xytextA1, xytext=xytextA1, fontsize=labelsize)
        ax[1].annotate('M', xy=xytextM1, xytext=xytextM1, fontsize=labelsize)
    ax[1].tick_params(axis='both', which='major', labelsize=ticksize)
    # ---- Colorbar: tick placement depends on scale/field/type ----
    cbar_ax = fig.add_axes([0.8, 0.05, 0.08, 0.5])
    cbar_ax.axis('off')
    vmin, vmax = dat[0].get_clim()
    if Scale == 'Log':
        if (Field == 'E') or (Field == 'J'):
            cb = plt.colorbar(dat[0], ax=cbar_ax, format=formatter, ticks=np.logspace(
                np.log10(vmin), np.log10(vmax), 5))
        elif (Field == 'Model'):
            if (Type == 'Secondary'):
                cb = plt.colorbar(dat[0], ax=cbar_ax, format=formatter, ticks=np.r_[
                    np.minimum(0., vmin), np.maximum(0., vmax)])
            else:
                cb = plt.colorbar(dat[0], ax=cbar_ax, format=formatter, ticks=np.logspace(
                    np.log10(vmin), np.log10(vmax), 5))
        else:
            # Symlog scale: negative decades, zero, positive decades.
            cb = plt.colorbar(dat[0], ax=cbar_ax, format=formatter, ticks=np.r_[-1. * np.logspace(
                np.log10(-vmin - eps), np.log10(linthresh), 3)[:-1], 0., np.logspace(np.log10(linthresh), np.log10(vmax), 3)[1:]])
    else:
        if (Field == 'Model') and (Type == 'Secondary'):
            cb = plt.colorbar(dat[0], ax=cbar_ax, format=formatter, ticks=np.r_[
                np.minimum(0., vmin), np.maximum(0., vmax)])
        else:
            cb = plt.colorbar(
                dat[0], ax=cbar_ax, format=formatter, ticks=np.linspace(vmin, vmax, 5))
    cb.ax.tick_params(labelsize=ticksize)
    cb.set_label(label, fontsize=labelsize)
    ax[1].set_xlim([-40., 40.])
    ax[1].set_ylim([-40., 8.])
    ax[1].set_aspect('equal')
    plt.show()
def ResLayer_app():
    """Build the interactive ipywidgets app wrapping plot_Surface_Potentials.

    Slider ranges mirror the module-level mesh/plotting extents; electrode
    positions are offset by 0.25 m so they land on cell centers of the
    0.5 m core mesh.
    """
    app = widgetify(plot_Surface_Potentials,
                    survey=ToggleButtons(options=[
                        'Dipole-Dipole', 'Dipole-Pole', 'Pole-Dipole', 'Pole-Pole'], value='Dipole-Dipole'),
                    zcLayer=FloatSlider(min=-10., max=0., step=1., value=-10.,
                                        continuous_update=False, description="$zc_{layer}$"),
                    dzLayer=FloatSlider(min=0.5, max=5., step=0.5, value=1.,
                                        continuous_update=False, description="$dz_{layer}$"),
                    rhoLayer=FloatText(
                        min=1e-8, max=1e8, value=5000., continuous_update=False, description='$\\rho_{2}$'),
                    xc=FloatSlider(min=-30., max=30., step=1.,
                                   value=0., continuous_update=False),
                    zc=FloatSlider(min=-30., max=-15., step=0.5,
                                   value=-25., continuous_update=False),
                    r=FloatSlider(min=1., max=10., step=0.5,
                                  value=5., continuous_update=False),
                    rhoHalf=FloatText(
                        min=1e-8, max=1e8, value=500., continuous_update=False, description='$\\rho_{1}$'),
                    rhoTarget=FloatText(
                        min=1e-8, max=1e8, value=500., continuous_update=False, description='$\\rho_{3}$'),
                    A=FloatSlider(min=-30.25, max=30.25, step=0.5,
                                  value=-30.25, continuous_update=False),
                    B=FloatSlider(min=-30.25, max=30.25, step=0.5,
                                  value=30.25, continuous_update=False),
                    M=FloatSlider(min=-30.25, max=30.25, step=0.5,
                                  value=-10.25, continuous_update=False),
                    N=FloatSlider(min=-30.25, max=30.25, step=0.5,
                                  value=10.25, continuous_update=False),
                    Field=ToggleButtons(
                        options=['Model', 'Potential', 'E', 'J', 'Charge', 'Sensitivity'], value='Model'),
                    Type=ToggleButtons(
                        options=['Total', 'Primary', 'Secondary'], value='Total'),
                    Scale=ToggleButtons(
                        options=['Linear', 'Log'], value='Linear')
                    )
    return app
| mit |
fraser-lab/EMRinger | Figures/S5/S5.py | 1 | 12942 | #! /usr/bin/env phenix.python
# Rotamer distribution analysis tool for validation of models generated from cryoEM data.
# Written by Benjamin Barad
# Written for use with Ringer's (http://bl831.als.lbl.gov/ringer/) output.
#
# Ringer Reference:
# Lang PT, Ng HL, Fraser JS, Corn JE, Echols N, Sales M, Holton JM, Alber T.
# Automated electron-density sampling reveals widespread conformational
# polymorphism in proteins. Protein Sci. 2010 Jul;19(7):1420-31. PubMed PMID:
# 20499387
########################################################################
# Package imports
import math
import numpy as np
import os
from libtbx import easy_pickle, adopt_init_args
from emringer import *
import matplotlib.pyplot as plt
import argparse
from collections import OrderedDict
from matplotlib import rcParams
# rcParams['figure.autolayout'] = True
# rcParams['xtick.labelsize'] = 16
# rcParams['ytick.labelsize'] = 16
# rcParams['axes.labelsize'] = 24
# rcParams['axes.titlesize'] = 24
########################################################################
# Argument Parsing
def Parse_stuff():
    """Build and evaluate the command-line interface for this script.

    Returns the parsed argparse.Namespace with attributes:
    filenames, first_rotamer, chi_angle, sampling_angle.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i", "--files", dest="filenames", nargs='*',
        default=['/5778.ent_ringer.pkl'],
        help='Filenames (including path if not in current directory) of pkl')
    parser.add_argument(
        "-f", "--firstrotamer", dest="first_rotamer", nargs='?', type=int,
        default=60,
        help='First rotamer angle (between 0 and 120, rounds down to multiple of 5)')
    parser.add_argument(
        "-x", "--chi_angle", nargs="?", type=int, default=1,
        help='Chi angles to be tested. Default is chi1.')
    parser.add_argument(
        "-s", "--Sampling_Angle", dest="sampling_angle", nargs='?', default=5,
        help="Don't mess with this unless you've also made the corresponding change in ringer. By default it is 5, which is identical to the default in ringer.")
    return parser.parse_args()
########################################################################
# Classes and Statics- these may eventually want to be moved to ringer.
class Peak:
    # A single density peak found along one chi dihedral of a residue.
    # (Should eventually get moved into ringer.)
    def __init__(self, resname, resid, chain_id, n_chi, chi_value, rho_value):
        self.resname = resname
        self.resid = resid
        self.chain_id = chain_id
        self.n_chi = n_chi
        self.rho_value = rho_value
        # Stored wrapped into [0, 360).
        self.chi_value = chi_value % 360
    def __repr__(self):
        # NOTE(review): chi_value*5 converts a grid index to degrees and
        # assumes the default 5-degree sampling.
        fields = (self.resname, self.resid, self.chain_id, self.n_chi,
                  self.chi_value * 5, self.rho_value)
        return "\n%s\t%s\t%s\t%s\t%d\t%f" % fields
class Peaklist:
    # Right now this is just a slightly specialized list. I may add
    # functionality later, however.
    def __init__(self):
        self.peaks = []
    def sorted(self, key=None):
        # Return a sorted copy of the peaks. BUG FIX: the key is now
        # forwarded as a keyword argument; the old `sorted(self.peaks, *key)`
        # passed it positionally, which raises TypeError on Python 3 where
        # sorted()'s key argument is keyword-only.
        return sorted(self.peaks, key=key)
    def append_lists(self, other_peaklist):
        # Concatenate another Peaklist's peaks onto this one (in place).
        self.peaks = self.peaks + other_peaklist.peaks
    def add_new(self, resname, resid, chain_id, n_chi, chi_value, rho_value):
        self.peaks.append(Peak(resname, resid, chain_id, n_chi, chi_value, rho_value))
    def get_peaks(self):
        return self.peaks
    def __len__(self):
        return len(self.peaks)
    def __repr__(self):
        return str(sorted(self.peaks, key=lambda peak: peak.chi_value))
# Residue types whose chi-angle peaks are analyzed.
Residue_codes = ["ARG","ASN","ASP","CYS","GLU","GLN","HIS",
    "LEU","LYS","MET","PHE","SER","TRP","TYR","SEC","PYL"]
# Residue types excluded from the analysis.
Ignored_codes = ["ALA","GLY","PRO","THR","ILE","VAL"]
########################################################################
# Child Functions
def statistic(binned_peaks):
    """Normal approximation to the binomial test on rotameric peaks.

    *binned_peaks* holds six 60-degree bins; the even-indexed bins are the
    rotameric ones. Returns (zscore, rotamer_ratio).
    """
    n_rotameric = sum(binned_peaks[0::2])
    n_total = sum(binned_peaks)
    # Binomial(n, 0.5) normal approximation: mean n/2, stdev sqrt(n)/2.
    sigma = 0.5 * math.sqrt(n_total)
    mu = n_total / 2
    # Tiny epsilons guard against division by zero when no peaks exist.
    frac_rotameric = n_rotameric / (n_total + 0.000000000000000000001)
    z = (n_rotameric - mu) / (sigma + 0.000000000000000000001)
    if (z > 0):
        # Two closed-form approximations of the one-sided p-value; computed
        # for reference only and not returned.
        pscore_approx1 = 0.5 - 0.5 * (math.erf(z / math.sqrt(2)))
        pscore_approx2 = 1.0 / 12 * math.exp(-z * z / 2) + 1.0 / 4 * math.exp(-z * z * 2 / 3)
    return z, frac_rotameric
def RMSD_statistic(peak_list):
    """Root-mean-square deviation of peak angles from the nearest ideal
    rotamer position (60, 180 or 300 degrees).

    Note: angular deviations are heavily sample-size dependent (outliers
    are overweighted), so interpret with care.
    """
    rotamer_centers = (60, 180, 300)
    sq_devs = [
        min((center - peak.chi_value) ** 2 for center in rotamer_centers)
        for peak in peak_list
    ]
    return (sum(sq_devs) / len(sq_devs)) ** 0.5
def calculate_peaks(ringer, threshold, args):
    """Find the global-maximum density peak(s) along the requested chi angle.

    Returns a Peaklist holding the grid index/indices where the density
    equals its maximum, provided that maximum exceeds *threshold* (several
    indices are reported only on exact ties).
    """
    new_peaks = Peaklist()
    # Renamed from `list` (shadowed the builtin).
    densities = ringer._angles[args.chi_angle].densities
    # Hoisted: max() used to be recomputed on every loop iteration.
    peak_height = max(densities)
    if peak_height > threshold:
        for i, rho in enumerate(densities):
            if rho == peak_height:
                new_peaks.add_new(ringer.resname, ringer.resid,
                                  ringer.chain_id, args.chi_angle, i, rho)
    return new_peaks
def parse_pickle(filename, args):
    # All processes that require reading the pickle. Involves reading out the angles and calculating the thresholds.
    """Load a Ringer result pickle and select usable residues.

    Returns (waves, thresholds): the residue results that carry the
    requested chi angle and a supported residue type, plus the list of
    map-value thresholds to scan.
    """
    chi = args.chi_angle
    waves = []
    averages = []
    maxima = []
    ringer_things = easy_pickle.load(filename)
    for i in ringer_things:
        if chi in i._angles.keys() and i.resname in Residue_codes:
            waves.append(i)
            maxima.append(max(i._angles[chi].densities))
            averages.append(np.average(i._angles[chi].densities))
    # NOTE(review): max_max and avg_avg are computed but currently unused;
    # the thresholds are hard-coded rather than derived from them.
    max_max = max(maxima)
    avg_avg = np.average(averages)
    thresholds = [4, 8, 12, 16]
    return waves, thresholds
def calculate_binned_counts(peak_count, first=60, binsize=12, n_angles=72):
    """Re-bin a per-grid-point peak histogram into rotamer-centered bins.

    peak_count: histogram over n_angles grid points (5-degree sampling by
        default, so 72 points cover 360 degrees).
    first: angle in degrees of the first rotamer center; bins are centered
        on the rotamer positions and wrap around the end of the histogram.
    binsize: number of grid points per bin.
    n_angles: total number of grid points in peak_count.

    Returns a list of n_angles // binsize summed bins.
    """
    first_loc = int(first / 5)
    n_bins = int(n_angles / binsize)
    binned_output = [0] * n_bins
    for b in range(n_bins):
        for offset in range(binsize):
            # BUG FIX: wrap modulo n_angles instead of the hard-coded 72 so
            # non-default histogram sizes are binned correctly.
            idx = int(first_loc + b * binsize - binsize / 2 + offset) % n_angles
            binned_output[b] += peak_count[idx]
    return binned_output
def calc_ratio(count_list, args):
    """Rotamer ratio and z-score for one residue type's peak histogram.

    Same statistic as statistic(), but bins the raw count_list on the fly
    using args.sampling_angle and args.first_rotamer.
    Returns (rotamer_ratio, zscore).
    """
    total_angles = 360 / args.sampling_angle
    n_grid = int(total_angles)
    binsize = int(total_angles / 6)
    first_loc = args.first_rotamer / args.sampling_angle
    binned_list = [0] * 6
    for b in range(6):
        for offset in range(binsize):
            # BUG FIX: wrap modulo the actual grid size instead of the
            # hard-coded 72 so non-default sampling angles work.
            binned_list[b] += count_list[
                int(first_loc + b * binsize - binsize / 2 + offset) % n_grid]
    rotamer_count = sum(binned_list[0::2])
    total_count = sum(binned_list)
    # Binomial(n, 0.5) normal approximation, as in statistic().
    stdev = 0.5 * math.sqrt(total_count)
    mean = total_count / 2
    # Epsilons guard against division by zero when there are no peaks.
    rotamer_ratio = rotamer_count / (total_count + 0.000000000000000000001)
    zscore = (rotamer_count - mean) / (stdev + 0.000000000000000000001)
    return rotamer_ratio, zscore
def make_dir(f):
    """Create directory `f` (and any missing parents) if it does not exist.

    Uses exist_ok to avoid the check-then-create race of the original
    (another process could create the directory between the exists() test and
    makedirs()).  Unlike the original, this raises FileExistsError if `f`
    exists but is a regular file, instead of silently doing nothing.
    """
    os.makedirs(f, exist_ok=True)
########################################################################
# Main Run
def main(args):
    # Drive the full analysis for every input pickle: compute peaks at each
    # density threshold, accumulate per-residue statistics and plot results.
    for file in args.filenames:  # NOTE(review): `file` shadows a builtin name
        make_dir(file+'.output')
        Weird_residues=OrderedDict()     # threshold -> Peaklist of outlier peaks
        peak_count=OrderedDict()         # threshold -> per-angle histogram (72 x 5-degree bins)
        residue_peak_count={}            # resname -> threshold -> per-angle histogram
        rotamer_ratios_residues={}
        zscores_residues={}
        for i in Residue_codes:
            residue_peak_count[i]={}
            rotamer_ratios_residues[i]=[]
            zscores_residues[i]=[]
        binned_peaks={}
        zscores=[]
        rotamer_ratios=[]
        non_zero_thresholds=[]
        waves, thresholds = parse_pickle(file, args)
        peaks=OrderedDict()
        # calculate peaks and histogram
        for threshold in thresholds:
            peaks[threshold]=Peaklist()
            Weird_residues[threshold]=Peaklist()
            peak_count[threshold] = [0]*72
            for i in Residue_codes:
                residue_peak_count[i][threshold]=[0]*72
            for i in waves:
                peaks[threshold].append_lists(calculate_peaks(i, threshold, args))
            for peak in peaks[threshold].get_peaks():
                peak_count[threshold][peak.chi_value]+=1
                residue_peak_count[peak.resname][threshold][peak.chi_value]+=1
                # peaks falling outside the three expected rotamer windows are outliers
                if ((peak.chi_value<6) or (peak.chi_value>18 and peak.chi_value<30) or (peak.chi_value>42 and peak.chi_value<54) or (peak.chi_value>66)):
                    Weird_residues[threshold].peaks.append(peak)
            # Calculate the binned peaks and ratios
            binned_peaks[threshold] = calculate_binned_counts(peak_count[threshold], args.first_rotamer)
            # print "For threshold %.3f" % threshold
            # print "Sample size = %d" % sum(binned_peaks[threshold])
            # NOTE(review): assumes statistic() returns (zscore, ratio) in
            # that order -- confirm against its definition
            zscore_n, rotamer_ratio_n = statistic(binned_peaks[threshold])
            if rotamer_ratio_n==0:
                # no peaks at this threshold; higher thresholds only get emptier
                break
            for i in Residue_codes:
                rotamer_ratios_residues_n, zscores_n = calc_ratio(residue_peak_count[i][threshold], args)
                rotamer_ratios_residues[i].append(rotamer_ratios_residues_n)
                zscores_residues[i].append(zscores_n)
            non_zero_thresholds.append(threshold)
            zscores.append(zscore_n)
            rotamer_ratios.append(rotamer_ratio_n)
            # plot_peaks(peak_count[threshold], file, threshold, args.first_rotamer, RMSD_statistic(peaks[threshold].peaks))
            # plot_rotamers(binned_peaks[threshold], file, threshold, args.first_rotamer)
            # print "Outliers at threshold %.2f: %s" % (threshold, str(Weird_residues[threshold]))
        # plot_progression(non_zero_thresholds, rotamer_ratios, file, zscores)
        plot_stacked_bar(peak_count, file, args.first_rotamer)
        # for i in Residue_codes:
        # plot_progression(non_zero_thresholds, rotamer_ratios_residues[i], file, zscores_residues[i], i)
        # easy_pickle.dump(file+'.output/Outliers.pkl',Weird_residues)
########################################################################
# GUI and Output
# def plot_rotamers(binned_output, filename, threshold, first):
# # Binned Histogram
# plt.figure(1)
# plt.clf()
# colors=['blue','red']*3
# angles = range(6)
# bin_angles = [(i*60+first)%360 for i in angles]
# plt.bar(bin_angles, binned_output, align='center', color=colors, width=60)
# plt.savefig('%s.output/%.3f.Phenixed_Histogram.png' % (filename,threshold))
# # print 'Wrote '+filename+'/%.3f.Phenixed_Histogram.png' % threshold
# def plot_peaks(peak_count, filename, threshold, first, title=0):
# plt.figure(2)
# rcParams.update({'figure.autolayout': True})
# plt.clf()
# plt.axvspan((first-30), first+30, color='0.5', alpha=0.5)
# plt.axvspan(first+90, first+150, color='0.5', alpha=0.5)
# plt.axvspan(first+210, (first+270), color='0.5', alpha=0.5)
# angles=range(0,72)
# angles = [i*5 for i in angles]
# plt.bar(angles,peak_count, width=5, align='center', color='b', alpha=0.9)
# plt.title('Peak Counts - Threshold %.3f' % (threshold))
# plt.xlim(-2.5,357.5)
# plt.xlabel(r'$\chi$1 Angle ($\degree$)')
# plt.ylabel("Peak Count")
# plt.savefig('%s.output/%.3f.Phenix_allpeaks.png' % (filename,threshold))
# # print 'RMSD at threshold %.3f is %.1f' % (threshold,title)
# # print 'Wrote '+filename+'/%.3f.Phenix_allpeaks.png' % threshold
# plt.clf()
def plot_stacked_bar(peaklist, filename, first):
    """Plot overlaid per-angle peak-count histograms, one bar set per threshold.

    Parameters
    ----------
    peaklist : mapping threshold -> list of 72 per-angle peak counts
    filename : str
        Base name of the input file.  NOTE(review): currently unused -- the
        figure is written to the hard-coded 'S5.png'; parameter kept for API
        compatibility with the sibling plotting helpers.
    first : int
        Centre in degrees of the first rotamer window.
    """
    colors = ['#F15854', '#FAA43A', '#60BD68', '#5DA5DA', '#B276B2']
    fig, ax = plt.subplots(figsize=(6, 4.5))
    # Shade the three 60-degree rotamer windows.
    ax.axvspan((first - 30), first + 30, color='0.5', alpha=0.5, linewidth=0)
    ax.axvspan(first + 90, first + 150, color='0.5', alpha=0.5, linewidth=0)
    ax.axvspan(first + 210, (first + 270), color='0.5', alpha=0.5, linewidth=0)
    angles = np.arange(0, 365, 5)  # 73 points: 0..360 inclusive
    for i, (threshold, counts) in enumerate(peaklist.items()):
        # Copy before appending the wrap-around point at 360 degrees: the
        # original appended to the caller's list, growing the shared
        # histograms by one element on every call.
        wrapped = list(counts) + [counts[0]]
        ax.bar(angles, wrapped, width=5, align="center",
               # cycle colours instead of raising IndexError for >5 thresholds
               color=colors[i % len(colors)],
               label="Threshold: %.1f" % threshold)
    ax.set_xlim([0, 360])
    ax.set_xticks([i * 60 for i in range(7)])
    ax.set_xlabel(r"Chi1 Angle ($\degree$)", labelpad=10)
    ax.set_ylabel("Peak Count", labelpad=10)
    ax.yaxis.set_ticks_position('left')  # this one is optional but I still recommend it...
    ax.xaxis.set_ticks_position('bottom')
    # ax.set_title("Histogram of ringer analysis\nat different thresholds", y=1.05)
    ax.legend(loc=2, fontsize=10)
    fig.savefig('S5.png')
    fig.clf()
# def plot_progression(non_zero_thresholds, rotamer_ratios, file, zscores, i="Total"):
# fig = plt.figure(2)
# ax1 = plt.subplot()
# ax1.plot(non_zero_thresholds, zscores, 'b-', linewidth=3.0, alpha=0.7)
# ax1.set_xlabel('Electron Potential Threshold', fontsize=24)
# # Make the y-axis label and tick labels match the line color.
# ax1.set_ylabel('Statistical Significance', color='b', fontsize=24)
# for tl in ax1.get_yticklabels():
# tl.set_color('b')
# tl.set_fontsize(16)
# for label in ax1.get_xticklabels():
# label.set_fontsize(16)
# ax1.set_ylim([-21,21])
# ax1.axhspan(-0.5,0.5,color='b',alpha=0.1)
# ax2 = ax1.twinx()
# ax2.plot(non_zero_thresholds, rotamer_ratios, 'r-', label = i, linewidth=3.0, alpha=0.7)
# ax2.set_ylim([0,1])
# ax2.set_ylabel(r'% Rotameric Residues', color='r', fontsize=24)
# # ax2.set_xlim([0.005,0.03])
# for tl in ax2.get_yticklabels():
# tl.set_color('r')
# tl.set_fontsize(16)
# if i != "Total":
# plt.title("Threshold Scan - %s" % i) # % i, fontsize=22)
# else:
# plt.title("Threshold Scan")
# plt.savefig('%s.output/%s.threshold_scan.png' % (file, i))
# # print 'Wrote '+file+'/threshold_scan.png'
# plt.clf()
if __name__ == "__main__":
    # Parse command-line options (project helper) and run the full analysis.
    args = Parse_stuff()
    main(args)
| bsd-3-clause |
smcantab/pele | pele/amber/amberSystem.py | 4 | 26626 | """
System class for biomolecules using AMBER ff.
Set up using prmtop and inpcrd files used in Amber GMIN and Optim.
Potential parameters (e.g. non-bonded cut-offs) are set in
TODO:
Parameters
----------
prmtopFname : str
prmtop file name
inpcrdFname : str
inpcrd file name
See Also
--------
BaseSystem
"""
# utils
import tempfile
import os
import shutil
import numpy as np
# pele
from pele.systems import BaseSystem
from pele.mindist import ExactMatchAtomicCluster, MinPermDistAtomicCluster
from pele.transition_states import orthogopt
from pele.landscape import smooth_path
from pele.systems import BaseParameters
from pele.utils.elements import elements
from pele.systems.spawn_OPTIM import SpawnOPTIM
from read_amber import parse_topology_file
__all__ = ["AMBERSystem"]
class AMBERSystem(BaseSystem):
    def __init__(self, prmtopFname, inpcrdFname):
        """Set up the system from AMBER topology (prmtop) and coordinate (inpcrd) files."""
        super(AMBERSystem, self).__init__()
        self.prmtopFname = prmtopFname
        self.inpcrdFname = inpcrdFname
        self.parse_prmtop()
        # self.potential = self.get_potential()
        self.set_params(self.params)
        # self.natoms = self.potential.prmtop.topology._numAtoms
        self.params.database.accuracy = 1e-3
        self.params.basinhopping["temperature"] = 1.
        self.params.takestep_random_displacement = BaseParameters()
        self.params.takestep_random_displacement.stepsize = 2.
        self.params.basinhopping.insert_rejected = False
        # structural sanity checks (cis/trans peptide bonds, CA chirality) are
        # disabled by default
        # self.sanitycheck = True # todo: this should be part of params and show up in GUI
        self.sanitycheck = False
        if self.sanitycheck:
            # self.params.basinhopping.confCheck = [self.check_cistrans_wrapper, self.check_CAchirality_wrapper]
            self.params.basinhopping.confCheck = [self.check_CAchirality_wrapper]
            self.params.double_ended_connect.conf_checks = [self.check_cistrans_wrapper_kwargs,
                                                            self.check_CAchirality_wrapper_kwargs]
def parse_prmtop(self):
self.prmtop_parsed = parse_topology_file(self.prmtopFname)
atoms = self.prmtop_parsed.atoms.nodes()
atoms = sorted(atoms, key=lambda a: a.index)
self.atom_names = [a.element for a in atoms]
self.bonds = [(a1.index, a2.index) for a1, a2 in
self.prmtop_parsed.atoms.edges_iter()]
# def get_minimizer(self, **kwargs):
# """return a function to minimize the structure"""
# # overriding the C++ minimizer which is giving an error with openmm potential
# pot = self.get_potential()
# # kwargs = dict_copy_update(self.params["structural_quench_params"], kwargs)
# # return lambda coords: lbfgs_cpp(coords, pot, **kwargs)
# from pele.optimize import lbfgs_py
# return lambda coords: lbfgs_py(coords, pot, **kwargs)
def get_ndof(self):
return 3. * len(self.atom_names)
    def set_params(self, params):
        """set default parameters for the system (NEB and transition-state search)"""
        # set NEBparams
        NEBparams = params.double_ended_connect.local_connect_params.NEBparams
        NEBparams.NEBquenchParams = BaseParameters()
        # NEBquenchParams = NEBparams.NEBquenchParams
        NEBparams.iter_density = 15.
        NEBparams.image_density = 3.5
        NEBparams.max_images = 50
        NEBparams.k = 100.
        NEBparams.adjustk_freq = 5
        if False: # use fire
            from pele.optimize import fire
            NEBparams.quenchRoutine = fire
        else: # use lbfgs
            NEBparams.NEBquenchParams.maxErise = 100.5
            NEBparams.NEBquenchParams.maxstep = .1
            NEBparams.NEBquenchParams.tol = 1e-2
        NEBparams.reinterpolate = 50
        NEBparams.adaptive_niter = True
        NEBparams.adaptive_nimages = True
        # NOTE(review): overrides the value 5 assigned a few lines above
        NEBparams.adjustk_freq = 50
        # set transition state search params
        tsSearchParams = params.double_ended_connect.local_connect_params.tsSearchParams
        tsSearchParams.nsteps = 200
        tsSearchParams.lowestEigenvectorQuenchParams["nsteps"] = 100
        tsSearchParams.lowestEigenvectorQuenchParams["tol"] = 0.001
        tsSearchParams.tangentSpaceQuenchParams["maxstep"] = .1
        tsSearchParams.nfail_max = 1000
        tsSearchParams.nsteps_tangent1 = 5
        tsSearchParams.nsteps_tangent2 = 100
        tsSearchParams.max_uphill_step = .3
        # control the output
        tsSearchParams.verbosity = 0
        NEBparams.NEBquenchParams.iprint = 50
        tsSearchParams.lowestEigenvectorQuenchParams["iprint"] = -50
        tsSearchParams.tangentSpaceQuenchParams["iprint"] = -5
        tsSearchParams["iprint"] = 10
    def __call__(self):
        # calling the instance simply returns it, so the object can be used
        # wherever a system factory callable is expected
        return self
def get_potential(self):
""" First attempts to get the potential from GMIN, then from OpenMM. If both fail, sets it to None """
if hasattr(self, 'potential'):
if self.potential is not None:
return self.potential
# default is None
self.potential = None
# get potential from GMIN
if os.path.exists('min.in') and os.path.exists('data'):
print '\nFiles min.in and data found. trying to import ambgmin_ now ..'
try:
import ambgmin_
import gmin_potential
self.potential = gmin_potential.GMINAmberPotential(self.prmtopFname, self.inpcrdFname)
print '\namberSystem> Using GMIN Amber potential ..'
return self.potential
except ImportError:
# using OpenMM because ambgmin_ could not be imported
print '\namberSystem> could not import ambgmin_. Will try OpenMM .. '
# get potential from OpenMM
try:
import openmm_potential
self.potential = openmm_potential.OpenMMAmberPotential(self.prmtopFname, self.inpcrdFname)
print '\namberSystem> Using OpenMM amber potential ..'
# check for openmm version
# data structures changed between openmm4 and 5
# crude check - todo
if hasattr(self.potential.prmtop.topology._bonds, 'index'):
self.OpenMMVer = 5
else:
self.OpenMMVer = 4
return self.potential
except AttributeError:
print '\namberSystem> could not import openmm_potential ..'
if self.potenial == None:
print '\namberSystem> potential not set. Could not import GMIN or OpenMM potential.'
    def get_random_configuration(self):
        """set coordinates before calling BH etc."""
        """ returns a 1-D numpy array of length 3xNatoms """
        # Despite the name this is not random: it loads the starting
        # coordinates from the inpcrd file.
        # using pele.amber.read_amber and inpcrd
        from pele.amber.read_amber import read_amber_coords
        coords = read_amber_coords(self.inpcrdFname)
        print "amberSystem> Number of coordinates:", len(coords)
        # NOTE(review): the third positional argument of np.reshape is the
        # `order` parameter, which expects 'C'/'F', not the integer 1 --
        # confirm this works with the numpy version in use
        coords = np.reshape(np.transpose(coords), len(coords), 1)
        # -- OpenMM
        # from simtk.unit import angstrom as openmm_angstrom
        ## using pdb
        # from simtk.openmm.app import pdbfile as openmmpdbReader
        # pdb = openmmpdbReader.PDBFile('coords.pdb') # todo: coords.pdb is hardcoded
        # coords = pdb.getPositions() / openmm_angstrom
        # coords = np.reshape(np.transpose(coords), 3*len(coords),1 )
        ## using input inpcrd
        # from simtk.openmm.app import AmberInpcrdFile
        # inpcrd = AmberInpcrdFile( self.inpcrdFname )
        # coords = inpcrd.getPositions() / openmm_angstrom
        # coords = np.reshape(np.transpose(coords), 3*len(coords),1 )
        return coords
    def get_metric_tensor(self, coords):
        """metric tensor for all masses m_i=1.0 """
        # Builds a diagonal matrix of *inverse* atomic masses for
        # mass-weighting normal modes.
        # NOTE(review): only diagonal entries [atomNum][atomNum] for
        # atomNum < Natoms are overwritten, while the matrix is
        # coords.size x coords.size (3*Natoms); the remaining diagonal stays
        # at 1.0 -- confirm this is the intended weighting.
        print 'amberSystem> setting up mass matrix for normal modes'
        # return np.identity(coords.size)
        massMatrix_tmp = np.identity(coords.size)
        # get masses from 'elements' file
        for i in self.potential.prmtop.topology.atoms():
            atomNum = i.index
            atomElem = i.name[0] # assuming elements corresponding to first character of atom name
            m = elements[atomElem]['mass']
            massMatrix_tmp[atomNum][atomNum] = 1 / m
        return massMatrix_tmp
    def get_permlist(self):
        """Return groups of permutable atom indices, built from coordsModTerm.pdb if present.

        Returns an empty list (no permutations) when the file is missing.
        """
        import pdb2permlist
        # return [[0, 2, 3], [11, 12, 13], [19, 20, 21] ] # aladipep
        # return [[0, 2, 3], [11, 12, 13], [21, 22, 23], [31, 32, 33], [41, 42, 43], [49,50,51]] # tetraala
        if os.path.exists('coordsModTerm.pdb'):
            print '\namberSystem> constructing perm list from coordsModTerm.pdb'
            print ' (see comments in amberPDB_to_permList.py)'
            plist = pdb2permlist.pdb2permList('coordsModTerm.pdb')
            print '\namberSystem> Groups of permutable atoms (atom numbers start at 0) = '
            for i in plist:
                print i
            return plist
        else:
            print 'amberSystem> coordsModTerm.pdb not found. permlist could not be created.'
            return []
def get_mindist(self):
permlist = self.get_permlist()
return MinPermDistAtomicCluster(permlist=permlist, niter=10, can_invert=False)
    def get_orthogonalize_to_zero_eigenvectors(self):
        # orthogopt projects out the zero modes (overall translation/rotation)
        return orthogopt
def get_compare_exact(self, **kwargs):
permlist = self.get_permlist()
return ExactMatchAtomicCluster(permlist=permlist, **kwargs)
def smooth_path(self, path, **kwargs):
mindist = self.get_mindist()
return smooth_path(path, mindist, **kwargs)
    def drawCylinder(self, X1, X2):
        """Draw a thin cylinder (a bond) from point X1 to point X2 using OpenGL."""
        from OpenGL import GL, GLU
        z = np.array([0., 0., 1.]) # default cylinder orientation
        p = X2 - X1 # desired cylinder orientation
        r = np.linalg.norm(p)
        t = np.cross(z, p) # angle about which to rotate
        a = np.arccos(np.dot(z, p) / r) # rotation angle
        a *= (180. / np.pi) # change units to angles
        GL.glPushMatrix()
        GL.glTranslate(X1[0], X1[1], X1[2])
        GL.glRotate(a, t[0], t[1], t[2])
        g = GLU.gluNewQuadric()
        GLU.gluCylinder(g, .1, 0.1, r, 30, 30) # I can't seem to draw a cylinder
        GL.glPopMatrix()
    def draw(self, coordsl, index):
        """Draw the molecule (spheres for atoms, cylinders for bonds), centred on its centre of mass.

        index selects a colour override so two structures can be told apart on screen.
        """
        from pele.systems._opengl_tools import draw_sphere
        coords = coordsl.reshape([-1, 3])
        com = np.mean(coords, axis=0)
        # draw atoms as spheres
        for i, name in enumerate(self.atom_names): # in self.potential.prmtop.topology.atoms():
            x = coords[i, :] - com
            col = elements[name]['color']
            if index == 2:
                col = [0.5, 1.0, .5]
            rad = elements[name]['radius'] / 5
            draw_sphere(x, radius=rad, color=col)
        # draw bonds
        for atomPairs in self.bonds: # self.potential.prmtop.topology.bonds():
            # note that atom numbers in topology start at 0
            xyz1 = coords[atomPairs[0]] - com
            xyz2 = coords[atomPairs[1]] - com
            self.drawCylinder(xyz1, xyz2)
    def load_coords_pymol(self, coordslist, oname, index=1):
        """load the coords into pymol
        the new object must be named oname so we can manipulate it later
        Parameters
        ----------
        coordslist : list of arrays
        oname : str
            the new pymol object must be named oname so it can be manipulated
            later
        index : int
            we can have more than one molecule on the screen at one time. index tells
            which one to draw. They are viewed at the same time, so should be
            visually distinct, e.g. different colors. accepted values are 1 or 2
        Notes
        -----
        the implementation here is a bit hacky. we create a temporary xyz file from coords
        and load the molecule in pymol from this file.
        """
        # pymol is imported here so you can do, e.g. basinhopping without installing pymol
        import pymol
        # create the temporary file (it must stay open until pymol has loaded it)
        suffix = ".pdb"
        f = tempfile.NamedTemporaryFile(mode="w", suffix=suffix)
        fname = f.name
        from simtk.openmm.app import pdbfile as openmmpdb
        # write the coords into pdb file, one model per structure
        from pele.mindist import CoMToOrigin
        ct = 0
        for coords in coordslist:
            ct += 1
            coords = CoMToOrigin(coords.copy())
            self.potential.copyToLocalCoords(coords)
            from simtk.unit import angstrom as openmm_angstrom
            # openmmpdb.PDBFile.writeFile(self.potential.prmtop.topology , self.potential.localCoords * openmm_angstrom , file=sys.stdout, modelIndex=1)
            openmmpdb.PDBFile.writeModel(self.potential.prmtop.topology, self.potential.localCoords * openmm_angstrom,
                                         file=f, modelIndex=ct)
        print "closing file"
        f.flush()
        # load the molecule from the temporary file
        pymol.cmd.load(fname)
        # get name of the object just created and change it to oname
        objects = pymol.cmd.get_object_list()
        objectname = objects[-1]
        pymol.cmd.set_name(objectname, oname)
        # set the representation
        pymol.cmd.hide("everything", oname)
        pymol.cmd.show("lines", oname)
        # # set the color according to index
        # if index == 1:
        #     pymol.cmd.color("red", oname)
        # else:
        #     pymol.cmd.color("blue", oname)
    def get_optim_spawner(self, coords1, coords2):
        """Create a spawner running the external AMBER OPTIM binary on two endpoints."""
        import os
        from pele.config import config
        # path to the OPTIM executable is read from the pele config file
        optim = config.get("exec", "AMBOPTIM")
        optim = os.path.expandvars(os.path.expanduser(optim))
        print "optim executable", optim
        return AmberSpawnOPTIM(coords1, coords2, self, OPTIM=optim, tempdir=False)
def populate_peptideAtomList(self):
listofC = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "C"]
listofO = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "O"]
listofN = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "N"]
listofH = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "H"]
# atom numbers of peptide bond
self.peptideBondAtoms = []
for i in listofC:
if listofO.__contains__(i + 1) and listofN.__contains__(i + 2) and listofH.__contains__(i + 3):
self.peptideBondAtoms.append([i, i + 1, i + 2, i + 3])
print '\namberSystem> Peptide bond atom numbers (C,O,N,H, in order): '
for i in self.peptideBondAtoms:
print i
def populate_CAneighborList(self):
listofCA = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "CA"]
listofC = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "C"]
listofN = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "N"]
listofCB = [i.index for i in self.potential.prmtop.topology.atoms() if i.name == "CB"]
# atom numbers of peptide bond
self.CAneighborList = []
for i in listofCA:
# find atoms bonded to CA
neighborlist = []
for b in self.potential.prmtop.topology.bonds():
# print b
if b[0] == i:
neighborlist.append(b[1])
if b[1] == i:
neighborlist.append(b[0])
# Commented, since this stuff doesn't seem to work at the moment...
# if self.OpenMMVer == 5 :
# # openmm5
# if b[0].index == i:
# neighborlist.append(b[1].index)
# if b[1].index == i:
# neighborlist.append(b[0].index)
# else: # openmm4
# if b[0].index == i:
# neighborlist.append(b[1].index)
# if b[1].index == i:
# neighborlist.append(b[0].index)
# print '---bonds = ', b[0].index , b[1].index
# print '---amberSystem> atoms bonded to CA ',i, ' = ', neighborlist
nn = [i]
# append C (=O)
for n in neighborlist:
if listofC.__contains__(n):
nn.append(n)
# append CB
for n in neighborlist:
if listofCB.__contains__(n):
nn.append(n)
# append N
for n in neighborlist:
if listofN.__contains__(n):
nn.append(n)
self.CAneighborList.append(nn)
# atoms numbers start at 0
print '\namberSystem> CA neighbors atom numbers (CA,C(=O),CB, N, in order): '
for i in self.CAneighborList:
print i
    def check_cistrans_wrapper_kwargs(self, coords=None, **kwargs):
        # keyword-argument adapter used by double_ended_connect conf_checks
        print 'in check_cistrans_wrapper_kwargs'
        return self.check_cistrans(coords)
    def check_cistrans_wrapper(self, energy, coords, **kwargs):
        # (energy, coords) adapter used by basinhopping confCheck
        return self.check_cistrans(coords)
    def check_cistrans(self, coords):
        """
        Sanity check on the isomer state of peptide bonds
        Returns False if the check fails i.e. if any of the peptide bond is CIS
        """
        if not hasattr(self, "peptideBondAtoms"):
            # atom numbers of peptide bonds (lazy, computed once)
            self.populate_peptideAtomList()
        import measure
        m = measure.Measure()
        isTrans = True
        for i in self.peptideBondAtoms:
            # gather Cartesian positions of the C, O, N, H atoms of this bond
            atNum = i[0]
            rC = np.array([coords[3 * atNum], coords[3 * atNum + 1], coords[3 * atNum + 2]])
            atNum = i[1]
            rO = np.array([coords[3 * atNum], coords[3 * atNum + 1], coords[3 * atNum + 2]])
            atNum = i[2]
            rN = np.array([coords[3 * atNum], coords[3 * atNum + 1], coords[3 * atNum + 2]])
            atNum = i[3]
            rH = np.array([coords[3 * atNum], coords[3 * atNum + 1], coords[3 * atNum + 2]])
            # compute O-C-N-H torsion angle
            rad, deg = m.torsion(rO, rC, rN, rH)
            # print 'peptide torsion (deg) ', i, ' = ', deg
            # check cis: a trans bond has this torsion near 180 degrees
            if deg < 90 or deg > 270:
                isTrans = False
                print 'CIS peptide bond between atoms ', i, ' torsion (deg) = ', deg
        return isTrans
    def check_CAchirality_wrapper_kwargs(self, coords=None, **kwargs):
        # keyword-argument adapter used by double_ended_connect conf_checks
        return self.check_CAchirality(coords)
    def check_CAchirality_wrapper(self, energy, coords, **kwargs):
        # (energy, coords) adapter used by basinhopping confCheck
        return self.check_CAchirality(coords)
    def check_CAchirality(self, coords):
        """
        Sanity check on the CA to check if it is L of D
        Returns False if the check fails i.e. if any D-amino acid is present
        """
        if not hasattr(self, "CAneighborList"):
            # atom numbers of CA neighbors (lazy, computed once)
            self.populate_CAneighborList()
        # print 'in check CA chirality'
        import measure
        m = measure.Measure()
        isL = True
        for i in self.CAneighborList:
            # gather Cartesian positions of CA and its C, CB, N neighbours
            atNum = i[0]
            rCA = np.array([coords[3 * atNum], coords[3 * atNum + 1], coords[3 * atNum + 2]])
            atNum = i[1]
            rC = np.array([coords[3 * atNum], coords[3 * atNum + 1], coords[3 * atNum + 2]])
            atNum = i[2]
            rCB = np.array([coords[3 * atNum], coords[3 * atNum + 1], coords[3 * atNum + 2]])
            atNum = i[3]
            rN = np.array([coords[3 * atNum], coords[3 * atNum + 1], coords[3 * atNum + 2]])
            # compute improper torsion angle between C-CA-CB and CA-CB-N
            rad, deg = m.torsion(rC, rCA, rCB, rN)
            # check cis
            if deg < 180:
                # this condition was found by inspection of structures todo
                isL = False
                print 'chiral state of CA atom ', i[0], ' is D'
                print 'CA improper torsion (deg) ', i, ' = ', deg
        return isL
    def test_potential(self, pdbfname):
        """ tests amber potential for pdbfname
        Input
        -----
        pdbfname = full path to pdb file
        """
        # read a conformation from pdb file
        print 'reading conformation from coords.pdb'
        from simtk.openmm.app import pdbfile as openmmpdb
        from simtk.unit import angstrom as openmm_angstrom
        pdb = openmmpdb.PDBFile(pdbfname)
        coords = pdb.getPositions() / openmm_angstrom
        coords = np.reshape(np.transpose(coords), 3 * len(coords), 1)
        self.potential = self.get_potential()
        e = self.potential.getEnergy(coords)
        print 'Energy (kJ/mol) = '
        print e
        # compare analytic gradient against a numerical finite-difference one
        e, g = self.potential.getEnergyGradient(coords)
        gnum = self.potential.NumericalDerivative(coords, eps=1e-6)
        print 'Energy (kJ/mol) = '
        print e
        print 'Analytic Gradient = '
        print g[1:3]
        print 'Numerical Gradient = '
        print gnum[1:3]
        print 'Num vs Analytic Gradient ='
        print np.max(np.abs(gnum - g)), np.max(np.abs(gnum))
        print np.max(np.abs(gnum - g)) / np.max(np.abs(gnum))
def test_connect(self, database):
# connect the all minima to the lowest minimum
minima = database.minima()
min1 = minima[0]
for min2 in minima[1:]:
connect = self.get_double_ended_connect(min1, min2, database)
connect.connect()
    def test_disconn_graph(self, database):
        """Build and display a disconnectivity graph for the stored minima/transition states."""
        from pele.utils.disconnectivity_graph import DisconnectivityGraph
        from pele.landscape import TSGraph
        import matplotlib.pyplot as plt
        graph = TSGraph(database).graph
        dg = DisconnectivityGraph(graph, nlevels=3, center_gmin=True)
        dg.calculate()
        dg.plot()
        plt.show()
    def test_BH_group_rotation(self, db, nsteps, parameters):
        """Run a short basinhopping test using group-rotation moves."""
        from playground.group_rotation.group_rotation import GroupRotation
        take_step_gr = GroupRotation(parameters)
        self.params.basinhopping["temperature"] = 10.0
        bh = self.get_basinhopping(database=db, takestep=take_step_gr)
        print "Running BH with group rotation ..."
        bh.run(nsteps)
        print "Number of minima found = ", len(db.minima())
        min0 = db.minima()[0]
        print "lowest minimum found has energy = ", min0.energy
    def test_BH(self, db, nsteps):
        """Run a short basinhopping test with adaptive random-displacement steps."""
        self.potential = self.get_potential()
        from pele.takestep import RandomDisplacement, AdaptiveStepsizeTemperature
        takeStepRnd = RandomDisplacement(stepsize=2)
        tsAdaptive = AdaptiveStepsizeTemperature(takeStepRnd, interval=10, verbose=True)
        self.params.basinhopping["temperature"] = 10.0
        # todo - how do you save N lowest?
        # NOTE(review): the first assignment is immediately overwritten --
        # only the adaptive-step basinhopping object is actually run
        bh = self.get_basinhopping(database=db, takestep=takeStepRnd)
        bh = self.get_basinhopping(database=db, takestep=tsAdaptive)
        print 'Running BH .. '
        bh.run(nsteps)
        print "Number of minima found = ", len(db.minima())
        min0 = db.minima()[0]
        print "lowest minimum found has energy = ", min0.energy
def test_mindist(self, db):
m1, m2 = db.minima()[:2]
mindist = sys.get_mindist()
dist, c1, c2 = mindist(m1.coords, m2.coords)
print "distance", dist
class AmberSpawnOPTIM(SpawnOPTIM):
    """Spawns an external AMBER OPTIM run to connect two endpoint structures."""
    def __init__(self, coords1, coords2, sys, **kwargs):
        super(AmberSpawnOPTIM, self).__init__(coords1, coords2, **kwargs)
        # keep a reference to the AMBERSystem for file names and the permlist
        self.sys = sys
    def write_odata_coords(self, coords, fout):
        # nothing to write: the starting coordinates go into a separate "start" file
        pass
    def write_perm_allow(self, fname):
        # write perm.allow describing the groups of permutable atoms
        permallow = self.make_permallow_from_permlist(self.sys.get_permlist())
        with open(fname, "w") as fout:
            fout.write(permallow)
    def write_additional_input_files(self, rundir, coords1, coords2):
        # write start
        with open(rundir + "/start", "w") as fout:
            for xyz in coords1.reshape(-1, 3):
                fout.write("%f %f %f\n" % tuple(xyz))
        # write coords.prmtop and coords.inpcrd
        shutil.copyfile(self.sys.prmtopFname, rundir + "/coords.prmtop")
        shutil.copyfile(self.sys.inpcrdFname, rundir + "/coords.inpcrd")
        # minimal sander-style input for a single minimisation step
        min_in = """
STOP
&cntrl
imin = 1,
ncyc = 1,
maxcyc = 1,
igb = 0,
ntb = 0,
cut = 999.99,
rgbmax = 25.0,
ifswitch = 1
/
"""
        with open(rundir + "/min.in", "w") as fout:
            fout.write(min_in)
    def write_odata(self, fout):
        # OPTIM keyword (odata) file controlling the double-ended connect run
        odatastr = """
DUMPALLPATHS
UPDATES 6000
NEWCONNECT 15 3 2.0 20.0 30 0.5
CHECKCHIRALITY
comment PATH dumps intermediate conformations along the path
PATH 100 1.0D-2
COMMENT NEWNEB 30 500 0.01
NEBK 10.0
comment DUMPNEBXYZ
AMBERIC
comment AMBERSTEP
DIJKSTRA EXP
DUMPALLPATHS
REOPTIMISEENDPOINTS
COMMENT MAXTSENERGY -4770.0
EDIFFTOL 1.0D-4
MAXERISE 1.0D-4 1.0D0
GEOMDIFFTOL 0.05D0
BFGSTS 500 10 100 0.01 100
NOIT
BFGSMIN 1.0D-6
PERMDIST
MAXSTEP 0.1
TRAD 0.2
MAXMAX 0.3
BFGSCONV 1.0D-6
PUSHOFF 0.1
STEPS 800
BFGSSTEPS 2000
MAXBFGS 0.1
NAB start
"""
        fout.write(odatastr)
        fout.write("\n")
# ============================ MAIN ================================
if __name__ == "__main__":
    # Smoke tests for the AMBER system; each section exercises one feature.
    # create new amber system
    sysAmb = AMBERSystem('../../examples/amber/aladipep/coords.prmtop', '../../examples/amber/aladipep/coords.inpcrd')
    # load existing database
    from pele.storage import Database
    dbcurr = Database(db="../../examples/amber/aladipep/aladipep.db")
    coords = sysAmb.get_random_configuration()
    # aa = sysAmb.get_metric_tensor(coords)
    # ------- TEST gui
    from pele.gui import run as gr
    gr.run_gui(sysAmb)
    # ------ Test potential
    sysAmb.test_potential("../../examples/amber/aladipep/coords.pdb")
    # ------ BH
    nsteps = 100
    sysAmb.test_BH(dbcurr, nsteps)
    exit()
    # NOTE(review): everything below is unreachable because of the exit() above
    # ------- Connect runs
    sysAmb.test_connect(dbcurr)
    # ------- Disconn graph
    sysAmb.test_disconn_graph(dbcurr)
    # ------- Test mindist
    sysAmb.test_mindist(dbcurr)
| gpl-3.0 |
cuemacro/finmarketpy | finmarketpy/curve/fxoptionscurve.py | 1 | 39487 | __author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import numpy as np
import pandas as pd
from pandas.tseries.offsets import CustomBusinessDay, CustomBusinessMonthEnd
from findatapy.market import Market, MarketDataRequest
from findatapy.timeseries import Calculations, Calendar, Filter
from findatapy.util.dataconstants import DataConstants
from findatapy.util.fxconv import FXConv
from finmarketpy.curve.volatility.fxoptionspricer import FXOptionsPricer
from finmarketpy.curve.volatility.fxvolsurface import FXVolSurface
from finmarketpy.util.marketconstants import MarketConstants
# Module-level configuration singletons shared by the classes below.
data_constants = DataConstants()
market_constants = MarketConstants()
class FXOptionsCurve(object):
    """Constructs continuous time series total return indices from underlying FX options contracts.
    """
    def __init__(self, market_data_generator=None,
                 fx_vol_surface=None,
                 enter_trading_dates=None,
                 fx_options_trading_tenor=market_constants.fx_options_trading_tenor,
                 roll_days_before=market_constants.fx_options_roll_days_before,
                 roll_event=market_constants.fx_options_roll_event, construct_via_currency='no',
                 fx_options_tenor_for_interpolation=market_constants.fx_options_tenor_for_interpolation,
                 base_depos_tenor=data_constants.base_depos_tenor,
                 roll_months=market_constants.fx_options_roll_months,
                 cum_index=market_constants.fx_options_cum_index,
                 strike=market_constants.fx_options_index_strike,
                 contract_type=market_constants.fx_options_index_contract_type,
                 premium_output=market_constants.fx_options_index_premium_output,
                 position_multiplier=1,
                 depo_tenor_for_option=market_constants.fx_options_depo_tenor,
                 freeze_implied_vol=market_constants.fx_options_freeze_implied_vol,
                 tot_label='',
                 cal=None,
                 output_calculation_fields=market_constants.output_calculation_fields):
        """Initializes FXOptionsCurve
        Parameters
        ----------
        market_data_generator : MarketDataGenerator
            Used for downloading market data
        fx_vol_surface : FXVolSurface
            We can specify the FX vol surface beforehand if we want
        fx_options_trading_tenor : str
            What is primary options contract being used to trade (default - '1M')
        roll_days_before : int
            Number of days before roll event to enter into a new contract
        roll_event : str
            What constitutes a roll event? ('month-end', 'quarter-end', 'year-end', 'expiry')
        cum_index : str
            In total return index, do we compute in additive or multiplicative way ('add' or 'mult')
        construct_via_currency : str
            What currency should we construct the options index via? Eg. if we asked for AUDJPY we can construct it via
            AUDUSD & JPYUSD, as opposed to AUDJPY directly (default - 'no')
        fx_options_tenor_for_interpolation : str(list)
            Which option tenors should we use for interpolation
        base_depos_tenor : str(list)
            Which base deposits tenors do we need (this is only necessary if we want to start inferring depos)
        roll_months : int
            After how many months should we initiate a roll. Typically for trading 1M this should 1, 3M this should be 3
            etc.
        tot_label : str
            Postfix for the total returns field
        cal : str
            Calendar to use for expiry (if None, uses that of FX pair)
        output_calculation_fields : bool
            Also output additional data should option expiries etc. alongside total returns indices
        """
        self._market_data_generator = market_data_generator
        self._calculations = Calculations()
        self._calendar = Calendar()
        self._filter = Filter()
        self._fx_vol_surface = fx_vol_surface
        self._enter_trading_dates = enter_trading_dates
        self._fx_options_trading_tenor = fx_options_trading_tenor
        self._roll_days_before = roll_days_before
        self._roll_event = roll_event
        self._construct_via_currency = construct_via_currency
        self._fx_options_tenor_for_interpolation = fx_options_tenor_for_interpolation
        self._base_depos_tenor = base_depos_tenor
        self._roll_months = roll_months
        self._cum_index = cum_index
        # NOTE(review): '_contact_type' is a long-standing typo for
        # '_contract_type'; kept because fetch_continuous_time_series reads
        # this exact attribute name
        self._contact_type = contract_type
        self._strike = strike
        self._premium_output = premium_output
        self._position_multiplier = position_multiplier
        self._depo_tenor_for_option = depo_tenor_for_option
        self._freeze_implied_vol = freeze_implied_vol
        self._tot_label = tot_label
        self._cal = cal
        self._output_calculation_fields = output_calculation_fields
def generate_key(self):
from findatapy.market.ioengine import SpeedCache
# Don't include any "large" objects in the key
return SpeedCache().generate_key(self, ['_market_data_generator', '_calculations', '_calendar', '_filter'])
def fetch_continuous_time_series(self, md_request, market_data_generator, fx_vol_surface=None, enter_trading_dates=None,
fx_options_trading_tenor=None,
roll_days_before=None, roll_event=None,
construct_via_currency=None, fx_options_tenor_for_interpolation=None, base_depos_tenor=None,
roll_months=None, cum_index=None,
strike=None, contract_type=None, premium_output=None,
position_multiplier=None,
depo_tenor_for_option=None,
freeze_implied_vol=None,
tot_label=None, cal=None,
output_calculation_fields=None):
if fx_vol_surface is None: fx_vol_surface = self._fx_vol_surface
if enter_trading_dates is None: enter_trading_dates = self._enter_trading_dates
if market_data_generator is None: market_data_generator = self._market_data_generator
if fx_options_trading_tenor is None: fx_options_trading_tenor = self._fx_options_trading_tenor
if roll_days_before is None: roll_days_before = self._roll_days_before
if roll_event is None: roll_event = self._roll_event
if construct_via_currency is None: construct_via_currency = self._construct_via_currency
if fx_options_tenor_for_interpolation is None: fx_options_tenor_for_interpolation = self._fx_options_tenor_for_interpolation
if base_depos_tenor is None: base_depos_tenor = self._base_depos_tenor
if roll_months is None: roll_months = self._roll_months
if strike is None: strike = self._strike
if contract_type is None: contract_type = self._contact_type
if premium_output is None: premium_output = self._premium_output
if position_multiplier is None: position_multiplier = self._position_multiplier
if depo_tenor_for_option is None: depo_tenor_for_option = self._depo_tenor_for_option
if freeze_implied_vol is None: freeze_implied_vol = self._freeze_implied_vol
if tot_label is None: tot_label = self._tot_label
if cal is None: cal = self._cal
if output_calculation_fields is None: output_calculation_fields = self._output_calculation_fields
# Eg. we construct EURJPY via EURJPY directly (note: would need to have sufficient options/forward data for this)
if construct_via_currency == 'no':
if fx_vol_surface is None:
# Download FX spot, FX forwards points and base depos etc.
market = Market(market_data_generator=market_data_generator)
md_request_download = MarketDataRequest(md_request=md_request)
fx_conv = FXConv()
# CAREFUL: convert the tickers to correct notation, eg. USDEUR => EURUSD, because our data
# should be fetched in correct convention
md_request_download.tickers = [fx_conv.correct_notation(x) for x in md_request.tickers]
md_request_download.category = 'fx-vol-market'
md_request_download.fields = 'close'
md_request_download.abstract_curve = None
md_request_download.fx_options_tenor = fx_options_tenor_for_interpolation
md_request_download.base_depos_tenor = base_depos_tenor
# md_request_download.base_depos_currencies = []
forwards_market_df = market.fetch_market(md_request_download)
else:
forwards_market_df = None
# Now use the original tickers
return self.construct_total_return_index(md_request.tickers, forwards_market_df,
fx_vol_surface=fx_vol_surface,
enter_trading_dates=enter_trading_dates,
fx_options_trading_tenor=fx_options_trading_tenor,
roll_days_before=roll_days_before, roll_event=roll_event,
fx_options_tenor_for_interpolation=fx_options_tenor_for_interpolation,
roll_months=roll_months, cum_index=cum_index,
strike=strike, contract_type=contract_type,
premium_output=premium_output,
position_multiplier=position_multiplier,
freeze_implied_vol=freeze_implied_vol,
depo_tenor_for_option=depo_tenor_for_option,
tot_label=tot_label, cal=cal,
output_calculation_fields=output_calculation_fields)
else:
# eg. we calculate via your domestic currency such as USD, so returns will be in your domestic currency
# Hence AUDJPY would be calculated via AUDUSD and JPYUSD (subtracting the difference in returns)
total_return_indices = []
for tick in md_request.tickers:
base = tick[0:3]
terms = tick[3:6]
md_request_base = MarketDataRequest(md_request=md_request)
md_request_base.tickers = base + construct_via_currency
md_request_terms = MarketDataRequest(md_request=md_request)
md_request_terms.tickers = terms + construct_via_currency
# Construct the base and terms separately (ie. AUDJPY => AUDUSD & JPYUSD)
base_vals = self.fetch_continuous_time_series(md_request_base, market_data_generator,
fx_vol_surface=fx_vol_surface,
enter_trading_dates=enter_trading_dates,
fx_options_trading_tenor=fx_options_trading_tenor,
roll_days_before=roll_days_before, roll_event=roll_event,
fx_options_tenor_for_interpolation=fx_options_tenor_for_interpolation,
base_depos_tenor=base_depos_tenor,
roll_months=roll_months, cum_index=cum_index,
strike=strike, contract_type=contract_type,
premium_output=premium_output,
position_multiplier=position_multiplier,
depo_tenor_for_option=depo_tenor_for_option,
freeze_implied_vol=freeze_implied_vol,
tot_label=tot_label, cal=cal,
output_calculation_fields=output_calculation_fields,
construct_via_currency='no')
terms_vals = self.fetch_continuous_time_series(md_request_terms, market_data_generator,
fx_vol_surface=fx_vol_surface,
enter_trading_dates=enter_trading_dates,
fx_options_trading_tenor=fx_options_trading_tenor,
roll_days_before=roll_days_before, roll_event=roll_event,
fx_options_tenor_for_interpolation=fx_options_tenor_for_interpolation,
base_depos_tenor=base_depos_tenor,
roll_months=roll_months, cum_index=cum_index,
strike=strike, contract_type=contract_type,
position_multiplier=position_multiplier,
depo_tenor_for_option=depo_tenor_for_option,
freeze_implied_vol=freeze_implied_vol,
tot_label=tot_label, cal=cal,
output_calculation_fields=output_calculation_fields,
construct_via_currency='no')
# Special case for USDUSD case (and if base or terms USD are USDUSD
if base + terms == construct_via_currency + construct_via_currency:
base_rets = self._calculations.calculate_returns(base_vals)
cross_rets = pd.DataFrame(0, index=base_rets.index, columns=base_rets.columns)
elif base + construct_via_currency == construct_via_currency + construct_via_currency:
cross_rets = -self._calculations.calculate_returns(terms_vals)
elif terms + construct_via_currency == construct_via_currency + construct_via_currency:
cross_rets = self._calculations.calculate_returns(base_vals)
else:
base_rets = self._calculations.calculate_returns(base_vals)
terms_rets = self._calculations.calculate_returns(terms_vals)
cross_rets = base_rets.sub(terms_rets.iloc[:, 0], axis=0)
# First returns of a time series will by NaN, given we don't know previous point
cross_rets.iloc[0] = 0
cross_vals = self._calculations.create_mult_index(cross_rets)
cross_vals.columns = [tick + '-option-tot.close']
total_return_indices.append(cross_vals)
return self._calculations.join(total_return_indices, how='outer')
    def unhedged_asset_fx(self, assets_df, asset_currency, home_curr, start_date, finish_date, spot_df=None):
        """Placeholder - not implemented for this asset class.

        (Presumably intended to value an asset in the home currency WITHOUT
        currency hedging, mirroring sibling implementations - TODO confirm.)
        """
        pass
    def hedged_asset_fx(self, assets_df, asset_currency, home_curr, start_date, finish_date, spot_df=None,
                        total_return_indices_df=None):
        """Placeholder - not implemented for this asset class.

        (Presumably intended to value an asset in the home currency WITH
        currency hedging, mirroring sibling implementations - TODO confirm.)
        """
        pass
def get_day_count_conv(self, currency):
if currency in market_constants.currencies_with_365_basis:
return 365.0
return 360.0
    def construct_total_return_index(self, cross_fx, market_df,
                                     fx_vol_surface=None,
                                     enter_trading_dates=None,
                                     fx_options_trading_tenor=None,
                                     roll_days_before=None,
                                     roll_event=None,
                                     roll_months=None,
                                     cum_index=None,
                                     strike=None,
                                     contract_type=None,
                                     premium_output=None,
                                     position_multiplier=None,
                                     fx_options_tenor_for_interpolation=None,
                                     freeze_implied_vol=None,
                                     depo_tenor_for_option=None,
                                     tot_label=None,
                                     cal=None,
                                     output_calculation_fields=None):
        """Constructs total return indices for rolling FX option strategies, with delta hedging P&L.

        For each cross an option is priced on every horizon date of `market_df`. New
        contracts are entered either on a rolling schedule ('month-end' or
        'expiry-date' based, see `roll_event`) or only on the user supplied
        `enter_trading_dates`. Option returns and delta hedging returns are then
        cumulated (multiplicatively or additively, see `cum_index`) into indices
        based at 100.

        Parameters left as None fall back to the defaults given at construction time.

        Parameters
        ----------
        cross_fx : str (list)
            Currency pair(s), eg. 'EURUSD'
        market_df : DataFrame
            FX vol market data (spot, vol quotes, depos); may be None when a
            populated `fx_vol_surface` is supplied instead

        Returns
        -------
        DataFrame
            One '-option-tot.close' column per cross, plus intermediate
            calculation fields when `output_calculation_fields` is True
        """
        # Fall back to the defaults supplied in the constructor
        if fx_vol_surface is None: fx_vol_surface = self._fx_vol_surface
        if enter_trading_dates is None: enter_trading_dates = self._enter_trading_dates
        if fx_options_trading_tenor is None: fx_options_trading_tenor = self._fx_options_trading_tenor
        if roll_days_before is None: roll_days_before = self._roll_days_before
        if roll_event is None: roll_event = self._roll_event
        if roll_months is None: roll_months = self._roll_months
        if cum_index is None: cum_index = self._cum_index
        if strike is None: strike = self._strike
        if contract_type is None: contract_type = self._contact_type
        if premium_output is None: premium_output = self._premium_output
        if position_multiplier is None: position_multiplier = self._position_multiplier
        if fx_options_tenor_for_interpolation is None: fx_options_tenor_for_interpolation = self._fx_options_tenor_for_interpolation
        if freeze_implied_vol is None: freeze_implied_vol = self._freeze_implied_vol
        if depo_tenor_for_option is None: depo_tenor_for_option = self._depo_tenor_for_option
        if tot_label is None: tot_label = self._tot_label
        if cal is None: cal = self._cal
        if output_calculation_fields is None: output_calculation_fields = self._output_calculation_fields

        if not (isinstance(cross_fx, list)):
            cross_fx = [cross_fx]

        total_return_index_df_agg = []

        # Remove columns where there is no data (because these vols typically aren't quoted)
        if market_df is not None:
            market_df = market_df.dropna(how='all', axis=1)

        fx_options_pricer = FXOptionsPricer(premium_output=premium_output)

        def get_roll_date(horizon_d, expiry_d, asset_hols, month_adj=0):
            # Computes the roll date for a contract entered on horizon_d, either on a
            # business-month-end rule or at the option's expiry, optionally a number
            # of business days beforehand
            if roll_event == 'month-end':
                roll_d = horizon_d + CustomBusinessMonthEnd(roll_months + month_adj, holidays=asset_hols)

                # Special case so always rolls on month end date, if specify 0 days
                if roll_days_before > 0:
                    return (roll_d - CustomBusinessDay(n=roll_days_before, holidays=asset_hols))

            elif roll_event == 'expiry-date':
                roll_d = expiry_d

                # Special case so always rolls on expiry date, if specify 0 days
                if roll_days_before > 0:
                    return (roll_d - CustomBusinessDay(n=roll_days_before, holidays=asset_hols))

            # NOTE(review): if roll_event is neither 'month-end' nor 'expiry-date',
            # roll_d is unbound here (NameError) - confirm callers restrict roll_event
            return roll_d

        for cross in cross_fx:

            # NOTE(review): cal (and fx_vol_surface below) are rebound on the first
            # pass of this loop and then reused for every subsequent cross - for a
            # multi-cross list all later crosses inherit the first cross's calendar
            # and vol surface; confirm this is intended
            if cal is None:
                cal = cross

            # Eg. if we specify USDUSD
            if cross[0:3] == cross[3:6]:
                total_return_index_df_agg.append(
                    pd.DataFrame(100, index=market_df.index, columns=[cross + "-option-tot.close"]))
            else:
                # Is the FX cross in the correct convention
                old_cross = cross

                cross = FXConv().correct_notation(cross)

                # TODO also specification of non-standard crosses like USDGBP
                if old_cross != cross:
                    pass

                if fx_vol_surface is None:
                    fx_vol_surface = FXVolSurface(market_df=market_df, asset=cross,
                                                  tenors=fx_options_tenor_for_interpolation,
                                                  depo_tenor=depo_tenor_for_option)

                    # Note: market_df is rebound to the surface's own market data here
                    market_df = fx_vol_surface.get_all_market_data()

                horizon_date = market_df.index

                # Per-horizon-date bookkeeping arrays for the rolling strategy
                expiry_date = np.zeros(len(horizon_date), dtype=object)
                roll_date = np.zeros(len(horizon_date), dtype=object)

                new_trade = np.full(len(horizon_date), False, dtype=bool)
                exit_trade = np.full(len(horizon_date), False, dtype=bool)
                has_position = np.full(len(horizon_date), False, dtype=bool)

                asset_holidays = self._calendar.get_holidays(cal=cross)

                # If no entry dates specified, assume we just keep rolling
                if enter_trading_dates is None:
                    # Get first expiry date
                    expiry_date[0] = self._calendar.get_expiry_date_from_horizon_date(pd.DatetimeIndex([horizon_date[0]]),
                        fx_options_trading_tenor, cal=cal, asset_class='fx-vol')[0]

                    # For first month want it to expire within that month (for consistency), hence month_adj=0 ONLY here
                    roll_date[0] = get_roll_date(horizon_date[0], expiry_date[0], asset_holidays, month_adj=0)

                    # New trade => entry at beginning AND on every roll
                    new_trade[0] = True
                    exit_trade[0] = False
                    has_position[0] = True

                    # Get all the expiry dates and roll dates
                    # At each "roll/trade" day we need to reset them for the new contract
                    for i in range(1, len(horizon_date)):
                        has_position[i] = True

                        # If the horizon date has reached the roll date (from yesterday), we're done, and we have a
                        # new roll/trade
                        if (horizon_date[i] - roll_date[i - 1]).days >= 0:
                            new_trade[i] = True
                        else:
                            new_trade[i] = False

                        # If we're entering a new trade/contract (and exiting an old trade) we need to get new expiry and roll dates
                        if new_trade[i]:
                            exp = self._calendar.get_expiry_date_from_horizon_date(pd.DatetimeIndex([horizon_date[i]]),
                                fx_options_trading_tenor, cal=cal, asset_class='fx-vol')[0]

                            # Make sure we don't expire on a date in the history where there isn't market data
                            # It is ok for future values to expire after market data (just not in the backtest!)
                            if exp not in market_df.index:
                                exp_index = market_df.index.searchsorted(exp)

                                if exp_index < len(market_df.index):
                                    # NOTE(review): this min() is a no-op given the guard
                                    # above; likely intended min(exp_index, len - 1)
                                    exp_index = min(exp_index, len(market_df.index))

                                    exp = market_df.index[exp_index]

                            expiry_date[i] = exp

                            roll_date[i] = get_roll_date(horizon_date[i], expiry_date[i], asset_holidays)
                            exit_trade[i] = True
                        else:
                            if horizon_date[i] <= expiry_date[i-1]:
                                # Otherwise use previous expiry and roll dates, because we're still holding same contract
                                expiry_date[i] = expiry_date[i-1]
                                roll_date[i] = roll_date[i-1]
                                exit_trade[i] = False
                            else:
                                exit_trade[i] = True
                else:
                    # Only trade on the explicitly supplied entry dates (no rolling)
                    new_trade[horizon_date.searchsorted(enter_trading_dates)] = True
                    has_position[horizon_date.searchsorted(enter_trading_dates)] = True

                    # Get first expiry date
                    #expiry_date[0] = \
                    #    self._calendar.get_expiry_date_from_horizon_date(pd.DatetimeIndex([horizon_date[0]]),
                    #                                                     fx_options_trading_tenor, cal=cal,
                    #                                                     asset_class='fx-vol')[0]

                    # For first month want it to expire within that month (for consistency), hence month_adj=0 ONLY here
                    #roll_date[0] = get_roll_date(horizon_date[0], expiry_date[0], asset_holidays, month_adj=0)

                    # New trade => entry at beginning AND on every roll
                    #new_trade[0] = True
                    #exit_trade[0] = False
                    #has_position[0] = True

                    # Get all the expiry dates and roll dates
                    # At each "roll/trade" day we need to reset them for the new contract
                    for i in range(0, len(horizon_date)):

                        # If we're entering a new trade/contract (and exiting an old trade) we need to get new expiry and roll dates
                        if new_trade[i]:
                            exp = \
                                self._calendar.get_expiry_date_from_horizon_date(pd.DatetimeIndex([horizon_date[i]]),
                                                                                 fx_options_trading_tenor, cal=cal,
                                                                                 asset_class='fx-vol')[0]

                            # Make sure we don't expire on a date in the history where there isn't market data
                            # It is ok for future values to expire after market data (just not in the backtest!)
                            if exp not in market_df.index:
                                exp_index = market_df.index.searchsorted(exp)

                                if exp_index < len(market_df.index):
                                    # NOTE(review): no-op min(), as in the rolling branch above
                                    exp_index = min(exp_index, len(market_df.index))

                                    exp = market_df.index[exp_index]

                            expiry_date[i] = exp

                            # roll_date[i] = get_roll_date(horizon_date[i], expiry_date[i], asset_holidays)

                            # if i > 0:
                            # Makes the assumption we aren't rolling contracts
                            exit_trade[i] = False
                        else:
                            if i > 0:
                                # Check there's valid expiry on previous day (if not then we're not in an option trade here!)
                                if expiry_date[i-1] == 0:
                                    has_position[i] = False
                                else:
                                    if horizon_date[i] <= expiry_date[i - 1]:
                                        # Otherwise use previous expiry and roll dates, because we're still holding same contract
                                        expiry_date[i] = expiry_date[i - 1]
                                        # roll_date[i] = roll_date[i - 1]
                                        has_position[i] = True

                                    if horizon_date[i] == expiry_date[i]:
                                        exit_trade[i] = True
                                    else:
                                        exit_trade[i] = False

                # Note: may need to add discount factor when marking to market option

                # Per-horizon-date pricing outputs
                mtm = np.zeros(len(horizon_date))
                calculated_strike = np.zeros(len(horizon_date))
                interpolated_option = np.zeros(len(horizon_date))
                implied_vol = np.zeros(len(horizon_date))
                delta = np.zeros(len(horizon_date))

                # For debugging
                df_temp = pd.DataFrame()

                df_temp['expiry-date'] = expiry_date
                df_temp['horizon-date'] = horizon_date
                df_temp['roll-date'] = roll_date
                df_temp['new-trade'] = new_trade
                df_temp['exit-trade'] = exit_trade
                df_temp['has-position'] = has_position

                if has_position[0]:
                    # Special case: for first day of history (given have no previous positions)
                    option_values_, spot_, strike_, vol_, delta_, expiry_date_, intrinsic_values_ = \
                        fx_options_pricer.price_instrument(cross, horizon_date[0], strike, expiry_date[0],
                            contract_type=contract_type,
                            tenor=fx_options_trading_tenor,
                            fx_vol_surface=fx_vol_surface,
                            return_as_df=False)

                    interpolated_option[0] = option_values_
                    calculated_strike[0] = strike_
                    implied_vol[0] = vol_

                    # NOTE(review): delta_ is not stored on day 0, so delta[0] stays 0
                    # and the first day's hedging P&L is implicitly zero - confirm
                    mtm[0] = 0

                # Now price options for rest of history
                # On rolling dates: MTM will be the previous option contract (interpolated)
                # On non-rolling dates: it will be the current option contract
                for i in range(1, len(horizon_date)):
                    if exit_trade[i]:
                        # Price option trade being exited
                        option_values_, spot_, strike_, vol_, delta_, expiry_date_, intrinsic_values_ = \
                            fx_options_pricer.price_instrument(cross, horizon_date[i], calculated_strike[i-1], expiry_date[i-1],
                                contract_type=contract_type,
                                tenor=fx_options_trading_tenor,
                                fx_vol_surface=fx_vol_surface,
                                return_as_df=False)

                        # Store as MTM
                        mtm[i] = option_values_
                        delta[i] = 0 # Note: this will get overwritten if there's a new trade
                        calculated_strike[i] = calculated_strike[i-1] # Note: this will get overwritten if there's a new trade

                    if new_trade[i]:
                        # Price new option trade being entered
                        option_values_, spot_, strike_, vol_, delta_, expiry_date_, intrinsic_values_ = \
                            fx_options_pricer.price_instrument(cross, horizon_date[i], strike, expiry_date[i],
                                contract_type=contract_type,
                                tenor=fx_options_trading_tenor,
                                fx_vol_surface=fx_vol_surface,
                                return_as_df=False)

                        calculated_strike[i] = strike_ # option_output[cross + '-strike.close'].values
                        implied_vol[i] = vol_
                        interpolated_option[i] = option_values_
                        delta[i] = delta_
                    elif has_position[i] and not(exit_trade[i]):
                        # Price current option trade
                        # - strike/expiry the same as yesterday
                        # - other market inputs taken live, closer to expiry
                        calculated_strike[i] = calculated_strike[i-1]

                        if freeze_implied_vol:
                            frozen_vol = implied_vol[i-1]
                        else:
                            frozen_vol = None

                        option_values_, spot_, strike_, vol_, delta_, expiry_date_, intrinsic_values_ = \
                            fx_options_pricer.price_instrument(cross, horizon_date[i], calculated_strike[i],
                                expiry_date[i],
                                vol=frozen_vol,
                                contract_type=contract_type,
                                tenor=fx_options_trading_tenor,
                                fx_vol_surface=fx_vol_surface,
                                return_as_df=False)

                        interpolated_option[i] = option_values_
                        implied_vol[i] = vol_
                        mtm[i] = interpolated_option[i]
                        delta[i] = delta_

                # Calculate delta hedging P&L
                spot_rets = (market_df[cross + ".close"] / market_df[cross + ".close"].shift(1) - 1).values

                if tot_label == '':
                    tot_rets = spot_rets
                else:
                    tot_rets = (market_df[cross + "-" + tot_label + ".close"]
                                / market_df[cross + "-" + tot_label + ".close"].shift(1) - 1).values

                # Remember to take the inverted sign, eg. if call is +20%, we need to -20% of spot to flatten delta
                # Also invest for whether we are long or short the option
                delta_hedging_pnl = -np.roll(delta, 1) * tot_rets * position_multiplier
                delta_hedging_pnl[0] = 0

                # Calculate options P&L (given option premium is already percentage, only need to subtract)
                # Again need to invert if we are short option
                option_rets = (mtm - np.roll(interpolated_option, 1)) * position_multiplier
                option_rets[0] = 0

                # Calculate option + delta hedging P&L
                option_delta_rets = delta_hedging_pnl + option_rets

                # NOTE(review): if cum_index is neither 'mult' nor 'add' the cum_*
                # variables are unbound below (NameError)
                if cum_index == 'mult':
                    cum_rets = 100 * np.cumprod(1.0 + option_rets)
                    cum_delta_rets = 100 * np.cumprod(1.0 + delta_hedging_pnl)
                    cum_option_delta_rets = 100 * np.cumprod(1.0 + option_delta_rets)
                elif cum_index == 'add':
                    cum_rets = 100 + 100 * np.cumsum(option_rets)
                    cum_delta_rets = 100 + 100 * np.cumsum(delta_hedging_pnl)
                    cum_option_delta_rets = 100 + 100 * np.cumsum(option_delta_rets)

                total_return_index_df = pd.DataFrame(index=horizon_date, columns=[cross + "-option-tot.close"])
                total_return_index_df[cross + "-option-tot.close"] = cum_rets

                if output_calculation_fields:
                    total_return_index_df[cross + '-interpolated-option.close'] = interpolated_option
                    total_return_index_df[cross + '-mtm.close'] = mtm
                    total_return_index_df[cross + ".close"] = market_df[cross + ".close"].values
                    total_return_index_df[cross + '-implied-vol.close'] = implied_vol
                    total_return_index_df[cross + '-new-trade.close'] = new_trade
                    total_return_index_df[cross + '.roll-date'] = roll_date
                    total_return_index_df[cross + '-exit-trade.close'] = exit_trade
                    total_return_index_df[cross + '.expiry-date'] = expiry_date
                    total_return_index_df[cross + '-calculated-strike.close'] = calculated_strike
                    total_return_index_df[cross + '-option-return.close'] = option_rets
                    total_return_index_df[cross + '-spot-return.close'] = spot_rets
                    total_return_index_df[cross + '-tot-return.close'] = tot_rets
                    total_return_index_df[cross + '-delta.close'] = delta
                    total_return_index_df[cross + '-delta-pnl-return.close'] = delta_hedging_pnl
                    total_return_index_df[cross + '-delta-pnl-index.close'] = cum_delta_rets
                    total_return_index_df[cross + '-option-delta-return.close'] = option_delta_rets
                    total_return_index_df[cross + '-option-delta-tot.close'] = cum_option_delta_rets

                total_return_index_df_agg.append(total_return_index_df)

        return self._calculations.join(total_return_index_df_agg, how='outer')
def apply_tc_signals_to_total_return_index(self, cross_fx, total_return_index_orig_df, option_tc_bp, spot_tc_bp, signal_df=None, cum_index=None):
# TODO signal not implemented yet
if cum_index is None: cum_index = self._cum_index
total_return_index_df_agg = []
if not (isinstance(cross_fx, list)):
cross_fx = [cross_fx]
option_tc = option_tc_bp / (2 * 100 * 100)
spot_tc = spot_tc_bp / (2 * 100 * 100)
total_return_index_df = total_return_index_orig_df.copy()
for cross in cross_fx:
# p = abs(total_return_index_df[cross + '-roll.close'].shift(1)) * option_tc
# q = abs(total_return_index_df[cross + '-delta.close'] - total_return_index_df[cross + '-delta.close'].shift(1)) * spot_tc
# Additional columns to include P&L with transaction costs
total_return_index_df[cross + '-option-return-with-tc.close'] = \
total_return_index_df[cross + '-option-return.close'] - abs(total_return_index_df[cross + '-new-trade.close'].shift(1)) * option_tc
total_return_index_df[cross + '-delta-pnl-return-with-tc.close'] = \
total_return_index_df[cross + '-delta-pnl-return.close'] \
- abs(total_return_index_df[cross + '-delta.close'] - total_return_index_df[cross + '-delta.close'].shift(1)) * spot_tc
total_return_index_df[cross + '-option-return-with-tc.close'][0] = 0
total_return_index_df[cross + '-delta-pnl-return-with-tc.close'][0] = 0
total_return_index_df[cross + '-option-delta-return-with-tc.close'] = \
total_return_index_df[cross + '-option-return-with-tc.close'] + total_return_index_df[cross + '-delta-pnl-return-with-tc.close']
if cum_index == 'mult':
cum_rets = 100 * np.cumprod(1.0 + total_return_index_df[cross + '-option-return-with-tc.close'].values)
cum_delta_rets = 100 * np.cumprod(1.0 + total_return_index_df[cross + '-delta-pnl-return-with-tc.close'].values)
cum_option_delta_rets = 100 * np.cumprod(
1.0 + total_return_index_df[cross + '-option-delta-return-with-tc.close'].values)
elif cum_index == 'add':
cum_rets = 100 + 100 * np.cumsum(total_return_index_df[cross + '-option-return-with-tc.close'].values)
cum_delta_rets = 100 + 100 * np.cumsum(
total_return_index_df[cross + '-delta-pnl-return-with-tc.close'].values)
cum_option_delta_rets = 100 + 100 * np.cumsum(
total_return_index_df[cross + '-option-delta-return-with-tc.close'].values)
total_return_index_df[cross + "-option-tot-with-tc.close"] = cum_rets
total_return_index_df[cross + '-delta-pnl-index-with-tc.close'] = cum_delta_rets
total_return_index_df[cross + '-option-delta-tot-with-tc.close'] = cum_option_delta_rets
total_return_index_df_agg.append(total_return_index_df)
return self._calculations.join(total_return_index_df_agg, how='outer')
| apache-2.0 |
ashhher3/pylearn2 | pylearn2/scripts/train.py | 34 | 8573 | #!/usr/bin/env python
"""
Script implementing the logic for training pylearn2 models.
This is a "driver" that we recommend using for all but the most unusual
training experiments.
Basic usage:
.. code-block:: none
train.py yaml_file.yaml
The YAML file should contain a pylearn2 YAML description of a
`pylearn2.train.Train` object (or optionally, a list of Train objects to
run sequentially).
See `doc/yaml_tutorial` for a description of how to write the YAML syntax.
The following environment variables will be locally defined and available
for use within the YAML file:
- `PYLEARN2_TRAIN_BASE_NAME`: the name of the file within the directory
(`foo/bar.yaml` -> `bar.yaml`)
- `PYLEARN2_TRAIN_DIR`: the directory containing the YAML file
(`foo/bar.yaml` -> `foo`)
- `PYLEARN2_TRAIN_FILE_FULL_STEM`: the filepath with the file extension
stripped off.
`foo/bar.yaml` -> `foo/bar`)
- `PYLEARN2_TRAIN_FILE_STEM`: the stem of `PYLEARN2_TRAIN_BASE_NAME`
(`foo/bar.yaml` -> `bar`)
- `PYLEARN2_TRAIN_PHASE` : set to `phase0`, `phase1`, etc. during iteration
through a list of Train objects. Not defined for a single train object.
These environment variables are especially useful for setting the save
path. For example, to make sure that `foo/bar.yaml` saves to `foo/bar.pkl`,
use
.. code-block:: none
save_path: "${PYLEARN2_TRAIN_FILE_FULL_STEM}.pkl"
This way, if you copy `foo/bar.yaml` to `foo/bar2.yaml`, the output of
`foo/bar2.yaml` won't overwrite `foo/bar.pkl`, but will automatically save
to foo/bar2.pkl.
For example configuration files that are consumable by this script, see
- `pylearn2/scripts/tutorials/grbm_smd`
- `pylearn2/scripts/tutorials/dbm_demo`
- `pylearn2/scripts/papers/maxout`
Use `train.py -h` to see an auto-generated description of advanced options.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow", "David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
# Standard library imports
import argparse
import gc
import logging
import os
# Third-party imports
import numpy as np
# Disable the display for the plot extension to work
# An alternative is to create another training script
if os.getenv('DISPLAY') is None:
try:
import matplotlib
matplotlib.use('Agg')
except:
pass
# Local imports
from pylearn2.utils import serial
from pylearn2.utils.logger import (
CustomStreamHandler, CustomFormatter, restore_defaults
)
class FeatureDump(object):
    """
    Pseudo-train object that encodes a dataset and dumps the resulting
    features to disk as a ``.npy`` file.

    It exposes the same ``main_loop`` entry point as ``pylearn2.train.Train``
    so it can appear as a phase in a YAML experiment list.

    Parameters
    ----------
    encoder : object
        Object with a ``perform(data)`` method mapping an array of examples
        to an array of features.
    dataset : Dataset
        Dataset to encode; must support ``get_topological_view`` /
        ``get_design_matrix`` and ``iterator``.
    path : str
        Output filename the features are written to with ``numpy.save``.
    batch_size : int, optional
        If None, the whole dataset is encoded in a single call; otherwise it
        is processed in sequential minibatches of this size.
    topo : bool, optional
        If True, feed the encoder the topological view of the data rather
        than the design-matrix view.
    """
    def __init__(self, encoder, dataset, path, batch_size=None, topo=False):
        """
        Store the encoding configuration; no work is done until
        ``main_loop`` is called.
        """
        self.encoder = encoder
        self.dataset = dataset
        self.path = path
        self.batch_size = batch_size
        self.topo = topo
    def main_loop(self, **kwargs):
        """
        Encode the dataset with ``self.encoder`` and save the resulting
        feature array to ``self.path`` with ``numpy.save``.

        Parameters
        ----------
        **kwargs : dict, optional
            Ignored; accepted for interface compatibility with
            ``Train.main_loop`` (e.g. ``time_budget``).
        """
        if self.batch_size is None:
            # Encode everything at once
            if self.topo:
                data = self.dataset.get_topological_view()
            else:
                data = self.dataset.get_design_matrix()
            output = self.encoder.perform(data)
        else:
            # Encode minibatch by minibatch, then concatenate the results
            myiterator = self.dataset.iterator(mode='sequential',
                                               batch_size=self.batch_size,
                                               topo=self.topo)
            chunks = []
            for data in myiterator:
                chunks.append(self.encoder.perform(data))
            output = np.concatenate(chunks)
        np.save(self.path, output)
def make_argument_parser():
    """
    Creates an ArgumentParser to read the options for this script from
    sys.argv
    """
    # Everything after the first line of the module docstring becomes the
    # --help epilog
    doc_lines = __doc__.strip().split('\n')
    epilog_text = '\n'.join(doc_lines[1:]).strip()

    arg_parser = argparse.ArgumentParser(
        description="Launch an experiment from a YAML configuration file.",
        epilog=epilog_text,
        formatter_class=argparse.RawTextHelpFormatter
    )

    # Logging presentation flags
    arg_parser.add_argument('--level-name', '-L',
                            action='store_true',
                            help='Display the log level (e.g. DEBUG, INFO) '
                                 'for each logged message')
    arg_parser.add_argument('--timestamp', '-T',
                            action='store_true',
                            help='Display human-readable timestamps for '
                                 'each logged message')
    arg_parser.add_argument('--time-budget', '-t', type=int,
                            help='Time budget in seconds. Stop training at '
                                 'the end of an epoch if more than this '
                                 'number of seconds has elapsed.')
    arg_parser.add_argument('--verbose-logging', '-V',
                            action='store_true',
                            help='Display timestamp, log level and source '
                                 'logger for every logged message '
                                 '(implies -T).')
    arg_parser.add_argument('--debug', '-D',
                            action='store_true',
                            help='Display any DEBUG-level log messages, '
                                 'suppressed by default.')

    # Required positional: the experiment description
    arg_parser.add_argument('config', action='store',
                            choices=None,
                            help='A YAML configuration file specifying the '
                                 'training procedure')

    return arg_parser
def train(config, level_name=None, timestamp=None, time_budget=None,
          verbose_logging=None, debug=None):
    """
    Trains a given YAML file.

    Parameters
    ----------
    config : str
        A YAML configuration file specifying the
        training procedure.
    level_name : bool, optional
        Display the log level (e.g. DEBUG, INFO)
        for each logged message.
    timestamp : bool, optional
        Display human-readable timestamps for
        each logged message.
    time_budget : int, optional
        Time budget in seconds. Stop training at
        the end of an epoch if more than this
        number of seconds has elapsed.
    verbose_logging : bool, optional
        Display timestamp, log level and source
        logger for every logged message
        (implies timestamp and level_name are True).
    debug : bool, optional
        Display any DEBUG-level log messages,
        False by default.
    """
    # The YAML file may describe a single Train object or an iterable of them
    train_obj = serial.load_train_file(config)
    try:
        iter(train_obj)
        iterable = True
    except TypeError:
        iterable = False
    # Undo our custom logging setup.
    restore_defaults()
    # Set up the root logger with a custom handler that logs stdout for INFO
    # and DEBUG and stderr for WARNING, ERROR, CRITICAL.
    root_logger = logging.getLogger()
    if verbose_logging:
        formatter = logging.Formatter(fmt="%(asctime)s %(name)s %(levelname)s "
                                          "%(message)s")
        handler = CustomStreamHandler(formatter=formatter)
    else:
        if timestamp:
            prefix = '%(asctime)s '
        else:
            prefix = ''
        formatter = CustomFormatter(prefix=prefix, only_from='pylearn2')
        handler = CustomStreamHandler(formatter=formatter)
    root_logger.addHandler(handler)
    # Set the root logger level.
    if debug:
        root_logger.setLevel(logging.DEBUG)
    else:
        root_logger.setLevel(logging.INFO)
    if iterable:
        for number, subobj in enumerate(iter(train_obj)):
            # Publish a variable indicating the training phase.
            # NOTE(review): phases are numbered from 1 here, while the module
            # docstring describes PYLEARN2_TRAIN_PHASE as phase0, phase1, ...
            # -- confirm which numbering is intended.
            phase_variable = 'PYLEARN2_TRAIN_PHASE'
            phase_value = 'phase%d' % (number + 1)
            os.environ[phase_variable] = phase_value
            # Execute this training phase.
            subobj.main_loop(time_budget=time_budget)
            # Clean up, in case there's a lot of memory used that's
            # necessary for the next phase.
            del subobj
            gc.collect()
    else:
        train_obj.main_loop(time_budget=time_budget)
if __name__ == "__main__":
"""
See module-level docstring for a description of the script.
"""
parser = make_argument_parser()
args = parser.parse_args()
train(args.config, args.level_name, args.timestamp, args.time_budget,
args.verbose_logging, args.debug)
| bsd-3-clause |
choderalab/perses | perses/analysis/analyse_sams_convergence.py | 1 | 1762 | import matplotlib.pyplot as plt
import os
import sys
from glob import glob
from perses.analysis import utils
if __name__ == '__main__':
    # Usage: python analyse_sams_convergence.py <directory>
    # Plots SAMS convergence diagnostics from the .nc result files in <directory>
    # and saves the figure as '<directory>.png'
    directory = sys.argv[1]
    files = sorted(glob(os.path.join(os.getcwd(), directory, '*.nc')))
    # Skip checkpoint files, keep only primary result files
    files = [x for x in files if 'checkpoint' not in x]
    # One row per file; columns: logZ history, replica state mixing, gamma history
    # NOTE(review): the fixed 2x3 grid assumes exactly two .nc files -- confirm
    f, axarr = plt.subplots(2, 3, sharex=False, sharey=False, figsize=(16, 8))
    for i, filename in enumerate(files):
        # Filenames look like '<prefix>-<phase>.nc'; extract the phase label
        phase = filename.split('-')[1].rstrip('.nc')
        ncfile = utils.open_netcdf(filename)
        # t0 marks the end of the initial stage of the online analysis; drawn as
        # a dashed vertical line on every panel
        t0 = ncfile.groups['online_analysis'].variables['t0']
        # Column 0: per-state free-energy estimates (logZ) vs iteration
        logZ = ncfile.groups['online_analysis'].variables['logZ_history']
        n_iterations, n_states = logZ.shape
        axarr[i, 0].plot(logZ, '.')
        axarr[i, 0].set_xlabel('iteration')
        axarr[i, 0].set_ylabel('logZ / kT')
        axarr[i, 0].set_title('%s_%s' % (phase, directory))
        ymin, ymax = axarr[i, 0].get_ylim()
        axarr[i,0].vlines(t0,ymin,ymax,linestyles='--',color='grey')
        # Column 1: which thermodynamic state each replica visits (mixing)
        states = ncfile.variables['states']
        n_iterations, n_replicas = states.shape
        axarr[i, 1].plot(states, '.')
        axarr[i, 1].set_xlabel('iteration')
        axarr[i, 1].set_ylabel('thermodynamic state')
        axarr[i, 1].axis([0, n_iterations, 0, n_states])
        ymin, ymax = axarr[i, 1].get_ylim()
        axarr[i,1].vlines(t0,ymin,ymax,linestyles='--',color='grey')
        # Column 2: SAMS gain factor (gamma) history
        gamma = ncfile.groups['online_analysis'].variables['gamma_history']
        axarr[i, 2].plot(gamma, '.')
        axarr[i, 2].set_xlabel('iteration')
        axarr[i, 2].set_ylabel('gamma')
        ymin, ymax = axarr[i, 2].get_ylim()
        axarr[i,2].vlines(t0,ymin,ymax,linestyles='--',color='grey')
    f.tight_layout()
    f.savefig('%s.png' % directory, dpi=300)
| mit |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/testing/__init__.py | 10 | 3767 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
from contextlib import contextmanager
from matplotlib.cbook import is_string_like, iterable
from matplotlib import rcParams, rcdefaults, use
def _is_list_like(obj):
    """Return True if *obj* is iterable but not string-like."""
    if is_string_like(obj):
        return False
    return iterable(obj)
# stolen from pandas
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
                            clear=None):
    """
    Context manager for running code that expects to raise (or not raise)
    warnings.  Checks that code raises the expected warning and only the
    expected warning.  Pass ``False`` or ``None`` to check that it does *not*
    raise a warning.  Defaults to ``exception.Warning``, baseclass of all
    Warnings. (basically a wrapper around ``warnings.catch_warnings``).

    >>> import warnings
    >>> with assert_produces_warning():
    ...     warnings.warn(UserWarning())
    ...
    >>> with assert_produces_warning(False):
    ...     warnings.warn(RuntimeWarning())
    ...
    Traceback (most recent call last):
        ...
    AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
    >>> with assert_produces_warning(UserWarning):
    ...     warnings.warn(RuntimeWarning())
    Traceback (most recent call last):
        ...
    AssertionError: Did not see expected warning of class 'UserWarning'.

    ..warn:: This is *not* thread-safe.
    """
    with warnings.catch_warnings(record=True) as w:
        if clear is not None:
            # make sure that we are clearing these warnings
            # if they have happened before
            # to guarantee that we will catch them
            if not _is_list_like(clear):
                clear = [clear]
            for m in clear:
                try:
                    m.__warningregistry__.clear()
                except AttributeError:
                    # m has not registered any warning yet; nothing to
                    # clear.  (Previously a bare ``except:``, which also
                    # swallowed KeyboardInterrupt/SystemExit.)
                    pass
        saw_warning = False
        warnings.simplefilter(filter_level)
        yield w
        # Everything below runs after the managed block has finished.
        extra_warnings = []
        for actual_warning in w:
            if (expected_warning and issubclass(actual_warning.category,
                                                expected_warning)):
                saw_warning = True
            else:
                extra_warnings.append(actual_warning.category.__name__)
        if expected_warning:
            assert saw_warning, ("Did not see expected warning of class %r."
                                 % expected_warning.__name__)
        assert not extra_warnings, ("Caused unexpected warning(s): %r."
                                    % extra_warnings)
def set_font_settings_for_testing():
    """Pin font-related rcParams so text renders identically across
    platforms during image-comparison tests."""
    for key, value in (('font.family', 'DejaVu Sans'),
                       ('text.hinting', False),
                       ('text.hinting_factor', 8)):
        rcParams[key] = value
def setup():
    """Prepare the process for matplotlib's image-comparison tests:
    pin the locale, force the Agg backend, and reset rcParams to the
    hardcoded values the baseline images were generated with."""
    # The baseline images are created in this locale, so we should use
    # it during all of the tests.
    import locale
    import warnings
    # Imported for their side effects: ensure these backends are importable
    # before any test tries to use them.
    from matplotlib.backends import backend_agg, backend_pdf, backend_svg

    try:
        locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
    except locale.Error:
        try:
            locale.setlocale(locale.LC_ALL, str('English_United States.1252'))
        except locale.Error:
            # Non-fatal: only date-formatting tests depend on the locale.
            warnings.warn(
                "Could not set locale to English/United States. "
                "Some date-related tests may fail")

    use('Agg', warn=False)  # use Agg backend for these tests

    # These settings *must* be hardcoded for running the comparison
    # tests and are not necessarily the default values as specified in
    # rcsetup.py
    rcdefaults()  # Start with all defaults
    set_font_settings_for_testing()
| apache-2.0 |
dr-guangtou/hs_galspec | manga/cframe/mgCFrameRead.py | 1 | 2529 | #!/usr/bin/env python
# Filename : mgCFrameRead.py
import numpy
import os
from astropy.io import fits
from matplotlib import pyplot as plt
"""
Data model for mgCFrame file:
HDU[0] = Empty, only used to store header
HDU[1] = Flux [NPixels,NFiber], in Unit of 10^(-17) erg/s/cm^2/Ang/Fiber
HDU[2] = InverseVariance of the flux; [NPixels,NFiber][=1/sigma^2]
HDU[3] = Pixel Mask [NPixels,NFiber]
HDU[4] = Wavelength [NPixels] [Vacuum]
HDU[5] = WavelengthDispersion [NPixels,NFiber]
HDU[6] = SlitMap
HDU[7] = SkyFlux: [NPixels,Nfiber]
Filename: mgCFrame-00177379-LIN.fits.gz
No. Name Type Cards Dimensions Format
0 PRIMARY PrimaryHDU 173 ()
1 FLUX ImageHDU 12 (6732, 1423) float32
2 IVAR ImageHDU 12 (6732, 1423) float32
3 MASK ImageHDU 12 (6732, 1423) int32
4 WAVE ImageHDU 11 (6732,) float64
5 DISP ImageHDU 12 (6732, 1423) float64
6 SLITMAP BinTableHDU 175 1423R x 33C [12A, J, J, 8A, J, J, 8A, J, J, J, E, J, K, J, 5A, J, 3A, J, J, J, D, D, D, D, D, D, E, E, E, J, J, 5E, 9A]
7 SKY ImageHDU 12 (6732, 1423) float32
"""
class CFrame:
    """Reader for a MaNGA mgCFrame calibrated-frame FITS file.

    HDU layout (see the module docstring for details):
      0: primary header only, 1: FLUX, 2: IVAR, 3: MASK, 4: WAVE,
      5: DISP, 6: SLITMAP, 7: SKY.
    """

    def __init__(self, name):
        """Open the mgCFrame file at *name*; raise if it does not exist."""
        if not os.path.exists(name):
            raise Exception("Can not find the mgCFrame file: " + name)
        self.name = name.strip()
        self.hdulist = fits.open(self.name)
        # Cached metadata; -1 means "not read yet".  Filled in lazily by
        # getFlux() / getWave().
        self.npixel = -1
        self.nfiber = -1
        self.minwave = -1
        self.maxwave = -1

    def listHDU(self):
        """Print a summary table of the HDUs in the file."""
        self.hdulist.info()

    def listHeader(self):
        """Print and return the primary header."""
        header = self.hdulist[0].header
        # print(header) is valid on both Python 2 and 3 for a single
        # argument (the original used the Python-2-only statement form).
        print(header)
        return header

    def getHeader(self):
        """Return the primary header without printing it."""
        return self.hdulist[0].header

    def getWave(self):
        """Return the vacuum wavelength array; caches min/max wavelength."""
        wave = self.hdulist[4].data
        self.minwave = wave.min()
        self.maxwave = wave.max()
        return wave

    def getFlux(self):
        """Return the flux array; caches npixel/nfiber from its shape.

        The numpy array is (nfiber, npixel), i.e. axis 0 runs over fibers.
        """
        flux = self.hdulist[1].data
        self.npixel = flux.shape[1]
        self.nfiber = flux.shape[0]
        return flux

    def getIvar(self):
        """Return the inverse-variance array (1 / sigma**2)."""
        return self.hdulist[2].data

    def getError(self):
        """Return 1-sigma errors derived from the inverse variance.

        NOTE(review): pixels with zero inverse variance yield inf here;
        callers are expected to mask them via getMask() -- confirm.
        """
        return numpy.sqrt(1.0 / self.hdulist[2].data)

    def getMask(self):
        """Return the pixel-mask array."""
        return self.hdulist[3].data

    def getWdisp(self):
        """Return the wavelength-dispersion array."""
        return self.hdulist[5].data

    def getSlitMap(self):
        """Return the slit-map binary table."""
        return self.hdulist[6].data

    def getSky(self):
        """Return the subtracted sky-flux array."""
        return self.hdulist[7].data

    def getFluxOri(self):
        """Return the original (sky-unsubtracted) flux: FLUX + SKY."""
        return (self.hdulist[1].data + self.hdulist[7].data)
| bsd-3-clause |
MSeifert04/astropy | astropy/visualization/wcsaxes/tests/test_formatter_locator.py | 7 | 22451 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from matplotlib import rc_context
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError
from astropy.visualization.wcsaxes.formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
class TestAngleFormatterLocator:
    """Tests for AngleFormatterLocator: tick-value location and tick-label
    formatting for angular coordinates on WCS axes.

    Expected strings use '\\xb0' for the degree sign.  The typo "specifed"
    below is intentional: it matches the library's actual error message.
    """

    def test_no_options(self):
        fl = AngleFormatterLocator()
        assert fl.values is None
        assert fl.number == 5
        assert fl.spacing is None

    def test_too_many_options(self):
        # values / number / spacing are mutually exclusive.
        with pytest.raises(ValueError) as exc:
            AngleFormatterLocator(values=[1., 2.], number=5)
        assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
        with pytest.raises(ValueError) as exc:
            AngleFormatterLocator(values=[1., 2.], spacing=5. * u.deg)
        assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
        with pytest.raises(ValueError) as exc:
            AngleFormatterLocator(number=5, spacing=5. * u.deg)
        assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
        with pytest.raises(ValueError) as exc:
            AngleFormatterLocator(values=[1., 2.], number=5, spacing=5. * u.deg)
        assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"

    def test_values(self):
        fl = AngleFormatterLocator(values=[0.1, 1., 14.] * u.degree)
        assert fl.values.to_value(u.degree).tolist() == [0.1, 1., 14.]
        assert fl.number is None
        assert fl.spacing is None
        values, spacing = fl.locator(34.3, 55.4)
        assert_almost_equal(values.to_value(u.degree), [0.1, 1., 14.])

    def test_number(self):
        fl = AngleFormatterLocator(number=7)
        assert fl.values is None
        assert fl.number == 7
        assert fl.spacing is None
        values, spacing = fl.locator(34.3, 55.4)
        assert_almost_equal(values.to_value(u.degree), [35., 40., 45., 50., 55.])
        values, spacing = fl.locator(34.3, 36.1)
        assert_almost_equal(values.to_value(u.degree), [34.5, 34.75, 35., 35.25, 35.5, 35.75, 36.])
        fl.format = 'dd'
        values, spacing = fl.locator(34.3, 36.1)
        assert_almost_equal(values.to_value(u.degree), [35., 36.])

    def test_spacing(self):
        with pytest.raises(TypeError) as exc:
            AngleFormatterLocator(spacing=3.)
        assert exc.value.args[0] == "spacing should be an astropy.units.Quantity instance with units of angle"
        fl = AngleFormatterLocator(spacing=3. * u.degree)
        assert fl.values is None
        assert fl.number is None
        assert fl.spacing == 3. * u.degree
        values, spacing = fl.locator(34.3, 55.4)
        assert_almost_equal(values.to_value(u.degree), [36., 39., 42., 45., 48., 51., 54.])
        fl.spacing = 30. * u.arcmin
        values, spacing = fl.locator(34.3, 36.1)
        assert_almost_equal(values.to_value(u.degree), [34.5, 35., 35.5, 36.])
        with pytest.warns(UserWarning, match=r'Spacing is too small'):
            fl.format = 'dd'
        values, spacing = fl.locator(34.3, 36.1)
        assert_almost_equal(values.to_value(u.degree), [35., 36.])

    def test_minor_locator(self):
        fl = AngleFormatterLocator()
        values, spacing = fl.locator(34.3, 55.4)
        minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4)
        assert_almost_equal(minor_values.to_value(u.degree), [36., 37., 38.,
                            39., 41., 42., 43., 44., 46., 47., 48., 49., 51.,
                            52., 53., 54.])
        minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
        assert_almost_equal(minor_values.to_value(u.degree), [37.5, 42.5, 47.5, 52.5])
        fl.values = [0.1, 1., 14.] * u.degree
        values, spacing = fl.locator(34.3, 36.1)
        minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
        assert_almost_equal(minor_values.to_value(u.degree), [])

    @pytest.mark.parametrize(('format', 'string'), [('dd', '15\xb0'),
                                                    ('dd:mm', '15\xb024\''),
                                                    ('dd:mm:ss', '15\xb023\'32"'),
                                                    ('dd:mm:ss.s', '15\xb023\'32.0"'),
                                                    ('dd:mm:ss.ssss', '15\xb023\'32.0316"'),
                                                    ('hh', '1h'),
                                                    ('hh:mm', '1h02m'),
                                                    ('hh:mm:ss', '1h01m34s'),
                                                    ('hh:mm:ss.s', '1h01m34.1s'),
                                                    ('hh:mm:ss.ssss', '1h01m34.1354s'),
                                                    ('d', '15\xb0'),
                                                    ('d.d', '15.4\xb0'),
                                                    ('d.dd', '15.39\xb0'),
                                                    ('d.ddd', '15.392\xb0'),
                                                    ('m', '924\''),
                                                    ('m.m', '923.5\''),
                                                    ('m.mm', '923.53\''),
                                                    ('s', '55412"'),
                                                    ('s.s', '55412.0"'),
                                                    ('s.ss', '55412.03"'),
                                                    ])
    def test_format(self, format, string):
        fl = AngleFormatterLocator(number=5, format=format)
        assert fl.formatter([15.392231] * u.degree, None, format='ascii')[0] == string

    @pytest.mark.parametrize(('separator', 'format', 'string'), [(('deg', "'", '"'), 'dd', '15deg'),
                                                                 (('deg', "'", '"'), 'dd:mm', '15deg24\''),
                                                                 (('deg', "'", '"'), 'dd:mm:ss', '15deg23\'32"'),
                                                                 ((':', "-", 's'), 'dd:mm:ss.s', '15:23-32.0s'),
                                                                 (':', 'dd:mm:ss.s', '15:23:32.0'),
                                                                 ((':', ":", 's'), 'hh', '1:'),
                                                                 (('-', "-", 's'), 'hh:mm:ss.ssss', '1-01-34.1354s'),
                                                                 (('d', ":", '"'), 'd', '15\xb0'),
                                                                 (('d', ":", '"'), 'd.d', '15.4\xb0'),
                                                                 ])
    def test_separator(self, separator, format, string):
        fl = AngleFormatterLocator(number=5, format=format)
        fl.sep = separator
        assert fl.formatter([15.392231] * u.degree, None)[0] == string

    def test_latex_format(self):
        fl = AngleFormatterLocator(number=5, format="dd:mm:ss")
        assert fl.formatter([15.392231] * u.degree, None)[0] == '15\xb023\'32"'
        with rc_context(rc={'text.usetex': True}):
            assert fl.formatter([15.392231] * u.degree, None)[0] == "$15^\\circ23{}^\\prime32{}^{\\prime\\prime}$"

    @pytest.mark.parametrize(('format'), ['x.xxx', 'dd.ss', 'dd:ss', 'mdd:mm:ss'])
    def test_invalid_formats(self, format):
        fl = AngleFormatterLocator(number=5)
        with pytest.raises(ValueError) as exc:
            fl.format = format
        assert exc.value.args[0] == "Invalid format: " + format

    @pytest.mark.parametrize(('format', 'base_spacing'), [('dd', 1. * u.deg),
                                                          ('dd:mm', 1. * u.arcmin),
                                                          ('dd:mm:ss', 1. * u.arcsec),
                                                          ('dd:mm:ss.ss', 0.01 * u.arcsec),
                                                          ('hh', 15. * u.deg),
                                                          ('hh:mm', 15. * u.arcmin),
                                                          ('hh:mm:ss', 15. * u.arcsec),
                                                          ('hh:mm:ss.ss', 0.15 * u.arcsec),
                                                          ('d', 1. * u.deg),
                                                          ('d.d', 0.1 * u.deg),
                                                          ('d.dd', 0.01 * u.deg),
                                                          ('d.ddd', 0.001 * u.deg),
                                                          ('m', 1. * u.arcmin),
                                                          ('m.m', 0.1 * u.arcmin),
                                                          ('m.mm', 0.01 * u.arcmin),
                                                          ('s', 1. * u.arcsec),
                                                          ('s.s', 0.1 * u.arcsec),
                                                          ('s.ss', 0.01 * u.arcsec),
                                                          ])
    def test_base_spacing(self, format, base_spacing):
        fl = AngleFormatterLocator(number=5, format=format)
        assert fl.base_spacing == base_spacing

    def test_incorrect_spacing(self):
        fl = AngleFormatterLocator()
        fl.spacing = 0.032 * u.deg
        with pytest.warns(UserWarning, match=r'Spacing is not a multiple of base spacing'):
            fl.format = 'dd:mm:ss'
        assert_almost_equal(fl.spacing.to_value(u.arcsec), 115.)

    def test_decimal_values(self):
        # Regression test for a bug that meant that the spacing was not
        # determined correctly for decimal coordinates
        fl = AngleFormatterLocator()
        fl.format = 'd.dddd'
        assert_quantity_allclose(fl.locator(266.9730, 266.9750)[0],
                                 [266.9735, 266.9740, 266.9745, 266.9750] * u.deg)
        fl = AngleFormatterLocator(decimal=True, format_unit=u.hourangle, number=4)
        assert_quantity_allclose(fl.locator(266.9730, 266.9750)[0],
                                 [17.79825, 17.79830] * u.hourangle)

    def test_values_unit(self):
        # Make sure that the intrinsic unit and format unit are correctly
        # taken into account when using the locator
        fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.arcsec, decimal=True)
        assert_quantity_allclose(fl.locator(850, 2150)[0],
                                 [1000., 1200., 1400., 1600., 1800., 2000.] * u.arcsec)
        fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.degree, decimal=False)
        assert_quantity_allclose(fl.locator(850, 2150)[0],
                                 [15., 20., 25., 30., 35.] * u.arcmin)
        fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.hourangle, decimal=False)
        assert_quantity_allclose(fl.locator(850, 2150)[0],
                                 [60., 75., 90., 105., 120., 135.] * (15 * u.arcsec))
        fl = AngleFormatterLocator(unit=u.arcsec)
        fl.format = 'dd:mm:ss'
        assert_quantity_allclose(fl.locator(0.9, 1.1)[0], [1] * u.arcsec)
        fl = AngleFormatterLocator(unit=u.arcsec, spacing=0.2 * u.arcsec)
        assert_quantity_allclose(fl.locator(0.3, 0.9)[0], [0.4, 0.6, 0.8] * u.arcsec)

    @pytest.mark.parametrize(('spacing', 'string'), [(2 * u.deg, '15\xb0'),
                                                     (2 * u.arcmin, '15\xb024\''),
                                                     (2 * u.arcsec, '15\xb023\'32"'),
                                                     (0.1 * u.arcsec, '15\xb023\'32.0"')])
    def test_formatter_no_format(self, spacing, string):
        # Without an explicit format, precision is inferred from the spacing.
        fl = AngleFormatterLocator()
        assert fl.formatter([15.392231] * u.degree, spacing)[0] == string

    @pytest.mark.parametrize(('format_unit', 'decimal', 'show_decimal_unit', 'spacing', 'ascii', 'latex'),
                             [(u.degree, False, True, 2 * u.degree, '15\xb0', r'$15^\circ$'),
                              (u.degree, False, True, 2 * u.arcmin, '15\xb024\'', r'$15^\circ24{}^\prime$'),
                              (u.degree, False, True, 2 * u.arcsec, '15\xb023\'32"', r'$15^\circ23{}^\prime32{}^{\prime\prime}$'),
                              (u.degree, False, True, 0.1 * u.arcsec, '15\xb023\'32.0"', r'$15^\circ23{}^\prime32.0{}^{\prime\prime}$'),
                              (u.hourangle, False, True, 15 * u.degree, '1h', r'$1^\mathrm{h}$'),
                              (u.hourangle, False, True, 15 * u.arcmin, '1h02m', r'$1^\mathrm{h}02^\mathrm{m}$'),
                              (u.hourangle, False, True, 15 * u.arcsec, '1h01m34s', r'$1^\mathrm{h}01^\mathrm{m}34^\mathrm{s}$'),
                              (u.hourangle, False, True, 1.5 * u.arcsec, '1h01m34.1s', r'$1^\mathrm{h}01^\mathrm{m}34.1^\mathrm{s}$'),
                              (u.degree, True, True, 15 * u.degree, '15\xb0', r'$15\mathrm{^\circ}$'),
                              (u.degree, True, True, 0.12 * u.degree, '15.39\xb0', r'$15.39\mathrm{^\circ}$'),
                              (u.degree, True, True, 0.0036 * u.arcsec, '15.392231\xb0', r'$15.392231\mathrm{^\circ}$'),
                              (u.arcmin, True, True, 15 * u.degree, '924\'', r'$924\mathrm{^\prime}$'),
                              (u.arcmin, True, True, 0.12 * u.degree, '923.5\'', r'$923.5\mathrm{^\prime}$'),
                              (u.arcmin, True, True, 0.1 * u.arcmin, '923.5\'', r'$923.5\mathrm{^\prime}$'),
                              (u.arcmin, True, True, 0.0002 * u.arcmin, '923.5339\'', r'$923.5339\mathrm{^\prime}$'),
                              (u.arcsec, True, True, 0.01 * u.arcsec, '55412.03"', r'$55412.03\mathrm{^{\prime\prime}}$'),
                              (u.arcsec, True, True, 0.001 * u.arcsec, '55412.032"', r'$55412.032\mathrm{^{\prime\prime}}$'),
                              (u.mas, True, True, 0.001 * u.arcsec, '55412032mas', r'$55412032\mathrm{mas}$'),
                              (u.degree, True, False, 15 * u.degree, '15', '15'),
                              (u.degree, True, False, 0.12 * u.degree, '15.39', '15.39'),
                              (u.degree, True, False, 0.0036 * u.arcsec, '15.392231', '15.392231'),
                              (u.arcmin, True, False, 15 * u.degree, '924', '924'),
                              (u.arcmin, True, False, 0.12 * u.degree, '923.5', '923.5'),
                              (u.arcmin, True, False, 0.1 * u.arcmin, '923.5', '923.5'),
                              (u.arcmin, True, False, 0.0002 * u.arcmin, '923.5339', '923.5339'),
                              (u.arcsec, True, False, 0.01 * u.arcsec, '55412.03', '55412.03'),
                              (u.arcsec, True, False, 0.001 * u.arcsec, '55412.032', '55412.032'),
                              (u.mas, True, False, 0.001 * u.arcsec, '55412032', '55412032'),
                              # Make sure that specifying None defaults to
                              # decimal for non-degree or non-hour angles
                              (u.arcsec, None, True, 0.01 * u.arcsec, '55412.03"', r'$55412.03\mathrm{^{\prime\prime}}$')])
    def test_formatter_no_format_with_units(self, format_unit, decimal, show_decimal_unit, spacing, ascii, latex):
        # Check the formatter works when specifying the default units and
        # decimal behavior to use.
        fl = AngleFormatterLocator(unit=u.degree, format_unit=format_unit, decimal=decimal, show_decimal_unit=show_decimal_unit)
        assert fl.formatter([15.392231] * u.degree, spacing, format='ascii')[0] == ascii
        assert fl.formatter([15.392231] * u.degree, spacing, format='latex')[0] == latex

    def test_incompatible_unit_decimal(self):
        with pytest.raises(UnitsError) as exc:
            AngleFormatterLocator(unit=u.arcmin, decimal=False)
        assert exc.value.args[0] == 'Units should be degrees or hours when using non-decimal (sexagesimal) mode'
class TestScalarFormatterLocator:
    """Tests for ScalarFormatterLocator: tick location and label formatting
    for non-angular (scalar, unit-bearing) coordinates on WCS axes.

    The typo "specifed" below is intentional: it matches the library's
    actual error message.
    """

    def test_no_options(self):
        fl = ScalarFormatterLocator(unit=u.m)
        assert fl.values is None
        assert fl.number == 5
        assert fl.spacing is None

    def test_too_many_options(self):
        # values / number / spacing are mutually exclusive.
        with pytest.raises(ValueError) as exc:
            ScalarFormatterLocator(values=[1., 2.] * u.m, number=5)
        assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
        with pytest.raises(ValueError) as exc:
            ScalarFormatterLocator(values=[1., 2.] * u.m, spacing=5. * u.m)
        assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
        with pytest.raises(ValueError) as exc:
            ScalarFormatterLocator(number=5, spacing=5. * u.m)
        assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"
        with pytest.raises(ValueError) as exc:
            ScalarFormatterLocator(values=[1., 2.] * u.m, number=5, spacing=5. * u.m)
        assert exc.value.args[0] == "At most one of values/number/spacing can be specifed"

    def test_values(self):
        fl = ScalarFormatterLocator(values=[0.1, 1., 14.] * u.m, unit=u.m)
        assert fl.values.value.tolist() == [0.1, 1., 14.]
        assert fl.number is None
        assert fl.spacing is None
        values, spacing = fl.locator(34.3, 55.4)
        assert_almost_equal(values.value, [0.1, 1., 14.])

    def test_number(self):
        fl = ScalarFormatterLocator(number=7, unit=u.m)
        assert fl.values is None
        assert fl.number == 7
        assert fl.spacing is None
        values, spacing = fl.locator(34.3, 55.4)
        assert_almost_equal(values.value, np.linspace(36., 54., 10))
        values, spacing = fl.locator(34.3, 36.1)
        assert_almost_equal(values.value, np.linspace(34.4, 36, 9))
        fl.format = 'x'
        values, spacing = fl.locator(34.3, 36.1)
        assert_almost_equal(values.value, [35., 36.])

    def test_spacing(self):
        fl = ScalarFormatterLocator(spacing=3. * u.m)
        assert fl.values is None
        assert fl.number is None
        assert fl.spacing == 3. * u.m
        values, spacing = fl.locator(34.3, 55.4)
        assert_almost_equal(values.value, [36., 39., 42., 45., 48., 51., 54.])
        fl.spacing = 0.5 * u.m
        values, spacing = fl.locator(34.3, 36.1)
        assert_almost_equal(values.value, [34.5, 35., 35.5, 36.])
        with pytest.warns(UserWarning, match=r'Spacing is too small'):
            fl.format = 'x'
        values, spacing = fl.locator(34.3, 36.1)
        assert_almost_equal(values.value, [35., 36.])

    def test_minor_locator(self):
        fl = ScalarFormatterLocator(unit=u.m)
        values, spacing = fl.locator(34.3, 55.4)
        minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4)
        assert_almost_equal(minor_values.value, [36., 37., 38., 39., 41., 42.,
                            43., 44., 46., 47., 48., 49., 51., 52., 53., 54.])
        minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
        assert_almost_equal(minor_values.value, [37.5, 42.5, 47.5, 52.5])
        fl.values = [0.1, 1., 14.] * u.m
        values, spacing = fl.locator(34.3, 36.1)
        minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
        assert_almost_equal(minor_values.value, [])

    @pytest.mark.parametrize(('format', 'string'), [('x', '15'),
                                                    ('x.x', '15.4'),
                                                    ('x.xx', '15.39'),
                                                    ('x.xxx', '15.392'),
                                                    ('%g', '15.3922'),
                                                    ('%f', '15.392231'),
                                                    ('%.2f', '15.39'),
                                                    ('%.3f', '15.392')])
    def test_format(self, format, string):
        fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
        assert fl.formatter([15.392231] * u.m, None)[0] == string

    @pytest.mark.parametrize(('format', 'string'), [('x', '1539'),
                                                    ('x.x', '1539.2'),
                                                    ('x.xx', '1539.22'),
                                                    ('x.xxx', '1539.223')])
    def test_format_unit(self, format, string):
        fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
        fl.format_unit = u.cm
        assert fl.formatter([15.392231] * u.m, None)[0] == string

    @pytest.mark.parametrize(('format'), ['dd', 'dd:mm', 'xx:mm', 'mx.xxx'])
    def test_invalid_formats(self, format):
        fl = ScalarFormatterLocator(number=5, unit=u.m)
        with pytest.raises(ValueError) as exc:
            fl.format = format
        assert exc.value.args[0] == "Invalid format: " + format

    @pytest.mark.parametrize(('format', 'base_spacing'), [('x', 1. * u.m),
                                                          ('x.x', 0.1 * u.m),
                                                          ('x.xxx', 0.001 * u.m)])
    def test_base_spacing(self, format, base_spacing):
        fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
        assert fl.base_spacing == base_spacing

    def test_incorrect_spacing(self):
        fl = ScalarFormatterLocator(unit=u.m)
        fl.spacing = 0.032 * u.m
        with pytest.warns(UserWarning, match=r'Spacing is not a multiple of base spacing'):
            fl.format = 'x.xx'
        assert_almost_equal(fl.spacing.to_value(u.m), 0.03)

    def test_values_unit(self):
        # Make sure that the intrinsic unit and format unit are correctly
        # taken into account when using the locator
        fl = ScalarFormatterLocator(unit=u.cm, format_unit=u.m)
        assert_quantity_allclose(fl.locator(850, 2150)[0],
                                 [1000., 1200., 1400., 1600., 1800., 2000.] * u.cm)
        fl = ScalarFormatterLocator(unit=u.cm, format_unit=u.m)
        fl.format = 'x.x'
        assert_quantity_allclose(fl.locator(1, 19)[0], [10] * u.cm)
| bsd-3-clause |
jss-emr/openerp-7-src | openerp/addons/resource/faces/timescale.py | 15 | 3899 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
############################################################################
import faces.pcalendar as pcal
import matplotlib.cbook as cbook
import datetime
import sys
class TimeScale(object):
    """Maps dates from a data calendar onto a padded "chart" calendar in
    which every weekday has the same total length, so time renders evenly
    on a Gantt-style axis.

    NOTE: this is Python 2 code -- it relies on ``sys.maxint``,
    list-returning ``map``/``filter``/``zip``, and integer division.
    """

    def __init__(self, calendar):
        self.data_calendar = calendar
        self._create_chart_calendar()
        self.now = self.to_num(self.data_calendar.now)

    def to_datetime(self, xval):
        # Convert a chart-axis value back to a datetime.
        return xval.to_datetime()

    def to_num(self, date):
        # Convert a data-calendar date into a chart-calendar working date
        # (the numeric value plotted on the axis).
        return self.chart_calendar.WorkingDate(date)

    def is_free_slot(self, value):
        # A slot is "free" (non-working) when the chart calendar and the
        # data calendar disagree about the slot's start time.
        dt1 = self.chart_calendar.to_starttime(value)
        dt2 = self.data_calendar.to_starttime\
              (self.data_calendar.from_datetime(dt1))
        return dt1 != dt2

    def is_free_day(self, value):
        # Same idea as is_free_slot, but at whole-day granularity.
        dt1 = self.chart_calendar.to_starttime(value)
        dt2 = self.data_calendar.to_starttime\
              (self.data_calendar.from_datetime(dt1))
        return dt1.date() != dt2.date()

    def _create_chart_calendar(self):
        # Build a synthetic calendar whose days all have identical total
        # length: the real working slots plus padded non-working slots.
        dcal = self.data_calendar
        ccal = self.chart_calendar = pcal.Calendar()
        ccal.minimum_time_unit = 1
        #pad worktime slots of calendar (all days should be equally long)
        slot_sum = lambda slots: sum(map(lambda slot: slot[1] - slot[0], slots))
        day_sum = lambda day: slot_sum(dcal.get_working_times(day))
        max_work_time = max(map(day_sum, range(7)))
        #working_time should have 2/3
        sum_time = 3 * max_work_time / 2
        #now create timeslots for ccal
        def create_time_slots(day):
            # Return the padded slot list for one weekday: original working
            # slots plus non-working gaps shrunk/grown so the day's total
            # equals sum_time.
            src_slots = dcal.get_working_times(day)
            slots = [0, src_slots, 24*60]
            slots = tuple(cbook.flatten(slots))
            slots = zip(slots[:-1], slots[1:])
            #balance non working slots
            work_time = slot_sum(src_slots)
            non_work_time = sum_time - work_time
            non_slots = filter(lambda s: s not in src_slots, slots)
            non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
            non_slots.sort()
            slots = []
            i = 0
            for l, s in non_slots:
                # Distribute the remaining padding evenly over the
                # remaining gaps, never exceeding a gap's real length.
                delta = non_work_time / (len(non_slots) - i)
                delta = min(l, delta)
                non_work_time -= delta
                slots.append((s[0], s[0] + delta))
                i += 1
            slots.extend(src_slots)
            slots.sort()
            return slots
        min_delta = sys.maxint
        for i in range(7):
            slots = create_time_slots(i)
            ccal.working_times[i] = slots
            min_delta = min(min_delta, min(map(lambda s: s[1] - s[0], slots)))
        ccal._recalc_working_time()
        # Cached axis granularities: smallest slot, one day, one week.
        self.slot_delta = min_delta
        self.day_delta = sum_time
        self.week_delta = ccal.week_time
# Module-level singleton: the default time scale built from the package's
# default calendar, used when callers do not supply their own.
_default_scale = TimeScale(pcal._default_calendar)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mhdella/deeppy | setup.py | 16 | 2509 | #!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed deterministically
    (the original left the handle open until garbage collection).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Runtime dependencies: one requirement per line in requirements.txt.
with open('requirements.txt') as f:
    install_requires = [l.strip() for l in f]

# Extract __version__ from deeppy/__init__.py by regex, without importing
# the package (importing could fail before its dependencies are installed).
version = None
regex = re.compile(r'''^__version__ = ['"]([^'"]*)['"]''')
with open(os.path.join('deeppy', '__init__.py')) as f:
    for line in f:
        mo = regex.search(line)
        if mo is not None:
            version = mo.group(1)
            break
if version is None:
    raise RuntimeError('Could not find version number')
class PyTest(TestCommand):
    """``python setup.py test``: run the test suite with py.test.

    Extra arguments can be forwarded via ``--pytest-args``.
    """
    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        # Extra command-line arguments forwarded to py.test.
        self.pytest_args = []

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        import subprocess
        # Propagate py.test's exit status so that test failures fail the
        # build (the return value was previously ignored, making
        # ``setup.py test`` always succeed).
        errno = subprocess.call(['py.test'] + self.pytest_args + ['test'])
        raise SystemExit(errno)
class Coverage(Command):
    # setup.py command: run the test suite under ``coverage`` and emit a
    # report ('report' for terminal output, 'html' for an HTML tree).
    description = 'Generate a test coverage report.'
    user_options = [('report=', 'r', 'Report type (report/html)')]

    def initialize_options(self):
        # Default report style; overridden by --report=html etc.
        self.report = 'report'

    def finalize_options(self):
        pass

    def run(self):
        import subprocess
        # NOTE(review): both subprocess return codes are ignored, so a
        # failing test run still reports success -- confirm this
        # best-effort behavior is intentional.
        subprocess.call(['coverage', 'run', '--source=deeppy', '-m', 'py.test',
                         'test'])
        subprocess.call(['coverage', self.report])
# Declarative package metadata; cmdclass wires in the custom PyTest and
# Coverage commands.
setup(
    name='deeppy',
    version=version,
    author='Anders Boesen Lindbo Larsen',
    author_email='abll@dtu.dk',
    description='Deep learning in Python',
    license='MIT',
    url='http://compute.dtu.dk/~abll',
    packages=find_packages(exclude=['doc', 'examples', 'test']),
    install_requires=install_requires,
    long_description=read('README.md'),
    cmdclass={
        'test': PyTest,
        'coverage': Coverage,
    },
    extras_require={
        'test': ['pytest', 'sklearn'],
        'coverage': ['pytest', 'sklearn', 'coverage'],
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering',
    ],
)
| mit |
bvnayak/image_recognition | recognition/classification.py | 1 | 2384 | import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import *
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import utils
import math
# histogram intersection kernel
def histogramIntersection(M, N):
    """Histogram-intersection kernel matrix between two sets of histograms.

    M: (m, d) array of histograms; N: (n, d) array of histograms.
    Returns an (m, n) array where entry (i, j) = sum_k min(M[i, k], N[j, k]).

    The inner loop is vectorized with NumPy (sum of element-wise minima
    against all rows of N at once), replacing the original O(m*n) pure
    Python double loop; results are identical.
    """
    M = np.asarray(M)
    N = np.asarray(N)
    result = np.zeros((M.shape[0], N.shape[0]))
    for i in range(M.shape[0]):
        # Broadcast row M[i] against every row of N simultaneously.
        result[i] = np.minimum(M[i], N).sum(axis=1)
    return result
# classify using SVM
def SVM_Classify(trainDataPath, trainLabelPath, testDataPath, testLabelPath, kernelType):
    """Train an SVC on the training data and report test-set accuracy.

    Despite the *Path names, the first four parameters are used directly
    as feature matrices / label sequences, not read from disk.
    kernelType is either "HI" (precomputed histogram-intersection kernel)
    or any kernel name accepted by sklearn.svm.SVC.
    Side effects: prints accuracy and writes Confusion_matrix.png via
    createConfusionMatrix().
    """
    trainData = np.array(trainDataPath)
    trainLabels = trainLabelPath
    testData = np.array(testDataPath)
    testLabels = testLabelPath
    if kernelType == "HI":
        # Custom kernel: precompute the Gram matrix for fit and predict.
        gramMatrix = histogramIntersection(trainData, trainData)
        clf = SVC(kernel='precomputed')
        clf.fit(gramMatrix, trainLabels)
        predictMatrix = histogramIntersection(testData, trainData)
        SVMResults = clf.predict(predictMatrix)
        correct = sum(1.0 * (SVMResults == testLabels))
        accuracy = correct / len(testLabels)
        # print() with a single argument is valid on both Python 2 and 3
        # (the original used the Python-2-only statement form).
        # NOTE(review): this branch reports accuracy * 100 while the other
        # branch reports the raw fraction -- confirm which is intended.
        print("SVM (Histogram Intersection): " + str(accuracy * 100) + "% (" + str(int(correct)) + "/" + str(len(testLabels)) + ")")
    else:
        clf = SVC(kernel=kernelType)
        clf.fit(trainData, trainLabels)
        SVMResults = clf.predict(testData)
        correct = sum(1.0 * (SVMResults == testLabels))
        accuracy = correct / len(testLabels)
        print("SVM (" + kernelType + "): " + str(accuracy) + " (" + str(int(correct)) + "/" + str(len(testLabels)) + ")")
    createConfusionMatrix(SVMResults, testLabels, kernelType)
def createConfusionMatrix(results, labels, kernel_type):
    """Compute, print, and plot the confusion matrix for the predictions.

    Saves a row-normalized heat map to Confusion_matrix.png.
    kernel_type is currently accepted but unused in the body.
    """
    cm = confusion_matrix(labels, results)
    np.set_printoptions(precision=2)
    print('Confusion matrix')
    # print(cm) works on both Python 2 and 3 (the original used the
    # Python-2-only ``print cm`` statement).
    print(cm)
    # Normalize each row so cells show per-class prediction fractions.
    cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.figure()
    plt.imshow(cm_norm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title("Confusion_matrix")
    plt.colorbar()
    unique_labels = np.unique(labels)
    tick_marks = np.arange(len(unique_labels))
    plt.xticks(tick_marks, unique_labels, rotation=45)
    plt.yticks(tick_marks, unique_labels)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig("Confusion_matrix.png", format="png", dpi=600)
| mit |
CforED/Machine-Learning | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity

#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
# Sample sizes must be integers: 0.3 * N is a float, which modern NumPy
# rejects outright (it only triggered a deprecation warning historically).
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]

X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)

fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)

# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")

# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")

# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")

# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")

# Rug plot of the sample points plus shared axis limits.
for axi in ax.ravel():
    axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
    axi.set_xlim(-4, 9)
    axi.set_ylim(-0.02, 0.34)

for axi in ax[:, 0]:
    axi.set_ylabel('Normalized Density')

for axi in ax[1, :]:
    axi.set_xlabel('x')

#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))

fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
    """Tick formatter: label tick positions as multiples of the bandwidth h.

    0 is labelled '0', +/-1 become 'h'/'-h', every other position n is
    rendered as 'nh'.  The ``loc`` argument is required by Matplotlib's
    FuncFormatter protocol but is not used.
    """
    special = {0: '0', 1: 'h', -1: '-h'}
    if x in special:
        return special[x]
    return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
                            'exponential', 'linear', 'cosine']):
    axi = ax.ravel()[i]
    log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
    axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
    axi.text(-2.6, 0.95, kernel)
    # x ticks in units of the bandwidth h, no y ticks
    axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
    axi.xaxis.set_major_locator(plt.MultipleLocator(1))
    axi.yaxis.set_major_locator(plt.NullLocator())
    axi.set_ylim(0, 1.05)
    axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')

# ----------------------------------------------------------------------
# Plot a 1D density example: three KDEs against the true mixture density
N = 100
np.random.seed(1)
# int() casts: np.random.normal requires an integer sample size on
# modern NumPy (the original float arguments raise a TypeError).
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]

# analytic density of the generating mixture, used as the reference curve
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
             + 0.7 * norm(5, 1).pdf(X_plot[:, 0]))

fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
        label='input distribution')

for kernel in ['gaussian', 'tophat', 'epanechnikov']:
    kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
    log_dens = kde.score_samples(X_plot)
    ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
            label="kernel = '{0}'".format(kernel))

ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
# jittered rug plot of the observations below the axis
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
alpenwasser/laborjournal | versuche/skineffect/python/hohlzylinder_cu_frequenzabhaengig_approx2.py | 1 | 12509 | #!/usr/bin/env python3
from sympy import *
from mpmath import *
from matplotlib.pyplot import *
import numpy as np
#init_printing() # make things prettier when we print stuff for debugging.
# ************************************************************************** #
# Magnetic field inside copper coil with hollow copper cylinder #
# High-frequency approximation #
# ************************************************************************** #
# All values are in standard SI units unless otherwise noted.
# ---------------------------------------------------------#
# Init, Define Variables and Constants                     #
# ---------------------------------------------------------#
mu0 = 4*pi*1e-7                  # vacuum permeability [N/A^2]
rho_kuchling = 0.0172e-6         # resistivity Kuchling 17th edition, p.649, tab. 45
sigma_kuchling = 1/rho_kuchling  # conductivity derived from the tabulated resistivity
sigma_abs = 53e6                 # conductivity used for the |B| fit; de.wikipedia.org/wiki/Kupfer: 58.1e6
sigma_arg = 53e6                 # conductivity used for the phase fit; de.wikipedia.org/wiki/Kupfer: 58.1e6
r1 = 30e-3                       # inner radius of copper cylinder [m]
r2 = 35e-3                       # outer radius of copper cylinder [m]
r_avg = (r1+r2)/2                # average radius of cylinder
d_rohr = r2 - r1                 # wall thickness of copper cylinder
N0 = 574                         # number of turns of copper coil
l = 500e-3                       # length of copper coil [m]
B0 = 6.9e-2                      # field amplitude scale -- NOTE(review): units not stated here, confirm against lab notes
npts = 1e3                       # number of frequency samples on the fit curve
fmin = 1                         # lower end of plotted frequency range [Hz]
fmax = 2500                      # upper end of plotted frequency range [Hz]
# -----------------------------------------------------#
# NOTE: According to formula 29 on p.16, the B-Field   #
# inside the cylinder (r<r1) is equal to the B-Field   #
# at the inner boundary of the copper cylinder         #
# (B(r1)), therefore we set r to r1 for further        #
# calculations.                                        #
# -----------------------------------------------------#
r = r1
# -----------------------------------------------------#
# Create a list for convenient printing of vars to     #
# file, add LaTeX where necessary.                     #
# -----------------------------------------------------#
# Each entry is one row of the LaTeX parameter table written out at the
# end of the script; the \textcolor{red} rows mark the fitted values.
params = [
    ' ' + r'\textcolor{red}{$\sigma_{Fit,|\hat{B}|}' + r'$} & \textcolor{red}{$' + '\SI{' + str(sigma_abs) + r'}{\ampere\per\volt\per\meter}' + r'$}\\' + "\n",
    ' ' + r'\textcolor{red}{$\sigma_{Fit,\angle\hat{B}}' + r'$} & \textcolor{red}{$' + '\SI{' + str(sigma_arg) + r'}{\ampere\per\volt\per\meter}' + r'$}\\' + "\n",
    ' ' + r'\textcolor{red}{$\sigma_{Kuch}' + r'$} & \textcolor{red}{$' + '\SI{' + str(sigma_kuchling) + r'}{\ampere\per\volt\per\meter}' + r'$}\\' + "\n",
    ' ' + '$\mu_0' + '$ & $' + '\SI{' + str(mu0) + r'}{\newton\per\ampere\squared}' + r'$\\' + "\n",
    ' ' + '$r' + '$ & $' + '\SI{' + str(r) + r'}{\meter}' + r'$\\' + "\n",
    ' ' + '$r_1' + '$ & $' + '\SI{' + str(r1) + r'}{\meter}' + r'$\\' + "\n",
    ' ' + '$r_2' + '$ & $' + '\SI{' + str(r2) + r'}{\meter}' + r'$\\' + "\n",
    ' ' + '$r_{avg}' + '$ & $' + '\SI{' + str(r_avg) + r'}{\meter}' + r'$\\' + "\n",
    ' ' + '$d_{Rohr}' + '$ & $' + '\SI{' + str(d_rohr) + r'}{\meter}' + r'$\\' + "\n",
    ' ' + '$N_0' + '$ & $' + r'\num{' + str(N0) + r'}' + r'$\\' + "\n",
    ' ' + '$l' + '$ & $' + '\SI{' + str(l) + r'}{\meter}' + r'$\\' + "\n",
    ' ' + '$NPTS' + '$ & $' + r'\num{' + str(npts) + '}' + r'$\\' + "\n",
    ' ' + '$f_{min}' + '$ & $' + '\SI{' + str(fmin) + r'}{\hertz}' + r'$\\' + "\n",
    ' ' + '$f_{max}' + '$ & $' + '\SI{' + str(fmax) + r'}{\hertz}' + r'$\\' + "\n",
]
# Font settings for axis labels and annotations (tick label sizes are
# set separately via tick_params below).
font = {
    'family': 'serif',
    'color': 'black',
    'weight': 'normal',
    'size': 9,
}
# Slightly larger font for the subplot titles.
titlefont = {
    'family': 'serif',
    'color': 'black',
    'weight': 'normal',
    'size': 10,
}
plot_legend_fontsize = 9
plot_color_fit = 'blue'
plot_color_ratio = 'magenta'
plot_color_measurements = 'black'
plot_label_measurements = 'Messwerte'          # German: "measured values"
plot_size_measurements = 16
plot_scale_x = 'log'                           # frequency axis is logarithmic
plot_label_fit = r"Fit-Funktion (N\"aherung)"  # German: "fit function (approximation)"
plot_label_ratio = r"$\displaystyle \frac{d_{Rohr}}{s_{skin}}$"
plot_label_ratio_y = r"$\displaystyle d_{Rohr} \div s_{skin}$"
plot_label_x = 'Frequenz (Hz)'
plot_1_label_y = 'gemessene Spannung (mV)'
plot_2_label_y = 'Phase (Grad)'
plot_1_title = r"N\"aherungsl\"osung f\"ur numerische Probleme: Betrag Magnetfeld, Spule mit Kupferrohr"
plot_2_title = r"N\"aherungsl\"osung f\"ur numerische Probleme: Phase Magnetfeld, Spule mit Kupferrohr"
# ---------------------------------------------------- #
# Current in copper coil. This is a scaling parameter, #
# not the measured value. measured value was: 200 mA   #
# This is due to the fact that the measurement values  #
# are voltages representing the B-Field, not the       #
# actual B-Field itself.                               #
# ---------------------------------------------------- #
I0 = 48.5
# ---------------------------------------------------------#
# Functions #
# #
# See formula 29 on p.16 of script for experiment. #
# #
# NOTE: We use frequency f instead of angular frequency #
# omega since that is what we actually set on the function #
# generator. #
# NOTE: We evaluate B_abs and B_arg based on two different #
# values for sigma, which allows to fit each of the curves #
# more accurately. #
# ---------------------------------------------------------#
def k_abs(f):
    """Complex wave number for the magnitude branch (uses sigma_abs)."""
    return sqrt((2 * np.pi * f * mu0 * sigma_abs) / 2) * mpc(1, -1)

def k_arg(f):
    """Complex wave number for the phase branch (uses sigma_arg)."""
    return sqrt((2 * np.pi * f * mu0 * sigma_arg) / 2) * mpc(1, -1)

def u1_abs(f):
    """Dimensionless argument i*k*r1, magnitude branch."""
    return mpc(0, 1) * k_abs(f) * r1

def u1_arg(f):
    """Dimensionless argument i*k*r1, phase branch."""
    return mpc(0, 1) * k_arg(f) * r1

def u2_abs(f):
    """Dimensionless argument i*k*r2, magnitude branch."""
    return mpc(0, 1) * k_abs(f) * r2

def u2_arg(f):
    """Dimensionless argument i*k*r2, phase branch."""
    return mpc(0, 1) * k_arg(f) * r2

def u_abs(f):
    """Dimensionless argument i*k*r at the evaluation radius, magnitude branch."""
    return mpc(0, 1) * k_abs(f) * r

def u_arg(f):
    """Dimensionless argument i*k*r at the evaluation radius, phase branch."""
    return mpc(0, 1) * k_arg(f) * r

def enum_abs(f):
    """Numerator of the field ratio evaluated at r, magnitude branch."""
    u1, u = u1_abs(f), u_abs(f)
    return (u1 / 2 + 1) * exp(u - u1) - (u1 / 2 - 1) * exp(u1 - u)

def denom_abs(f):
    """Denominator of the field ratio evaluated at r2, magnitude branch."""
    u1, u2 = u1_abs(f), u2_abs(f)
    return (u1 / 2 + 1) * exp(u2 - u1) - (u1 / 2 - 1) * exp(u1 - u2)

def enum_arg(f):
    """Numerator of the field ratio evaluated at r, phase branch."""
    u1, u = u1_arg(f), u_arg(f)
    return (u1 / 2 + 1) * exp(u - u1) - (u1 / 2 - 1) * exp(u1 - u)

def denom_arg(f):
    """Denominator of the field ratio evaluated at r2, phase branch."""
    u1, u2 = u1_arg(f), u2_arg(f)
    return (u1 / 2 + 1) * exp(u2 - u1) - (u1 / 2 - 1) * exp(u1 - u2)

def B_abs(f):
    """Magnitude of the B field at frequency f, scaled by B0."""
    return abs(enum_abs(f) / denom_abs(f) * B0)

def B_arg(f):
    """Phase of the B field at frequency f."""
    return arg(enum_arg(f) / denom_arg(f) * B0)
# ---------------------------------------------------------#
# Generate points for frequency axis                       #
# ---------------------------------------------------------#
# np.linspace requires an integer sample count on modern NumPy, so cast
# npts (kept as a float above for the LaTeX parameter dump) explicitly.
n = np.linspace(1, npts, int(npts))
expufunc = np.frompyfunc(exp, 1, 1)
# Logarithmically spaced frequencies from fmin up to (roughly) fmax.
# NOTE(review): log(fmax - fmin) looks like it was meant to be
# log(fmax / fmin); the two coincide here only because fmin == 1.
frequency_vector = fmin*expufunc(n*log(fmax-fmin)/npts)
# ---------------------------------------------------------#
# Numerically evaluate functions                           #
# ---------------------------------------------------------#
# frompyfunc lets the mpmath-based scalar functions broadcast over the
# whole frequency vector (results are object arrays of mpmath numbers).
Babsufunc = np.frompyfunc(B_abs, 1, 1)
B_abs_num = Babsufunc(frequency_vector)
Bargufunc = np.frompyfunc(B_arg, 1, 1)
B_arg_num = Bargufunc(frequency_vector)
# ---------------------------------------------------------#
# The arg() function only returns principal values in      #
# (-pi, pi], which, while correct, produces 2*pi jumps     #
# that are unsuitable for pretty plotting; np.unwrap       #
# removes those jumps so the phase is a continuous curve.  #
# ---------------------------------------------------------#
B_arg_num = np.unwrap(B_arg_num)
# ---------------------------------------------------------#
# Measurement Values from experiment                       #
# ---------------------------------------------------------#
# Frequencies in Hz; phases recorded in degrees (entered positive here
# and negated at plot time); voltages in volts, scaled to mV below.
frequencies_measured = np.array([ 1, 10, 20, 40, 80, 120, 160, 200, 400, 600, 800, 1000, 1200, 1500])
phases_degrees = np.array([ 2, 19.2, 35.2, 56.7, 76.7, 87, 94, 100, 121, 140, 155, 170, 180, 200])
voltages = np.array([ 7e-2, 6.6e-2, 5.78e-2, 4.18e-2, 2.44e-2, 1.69e-2, 1.27e-2, 1e-2, 4.8e-3, 2.9e-3, 1.9e-3, 1.4e-3, 1e-3, 7e-4])
# ---------------------------------------------------------#
# Scale values for improved legibility in plot             #
# ---------------------------------------------------------#
B_abs_num = 1e3 * B_abs_num      # factor 1e3 to match the mV voltage axis
voltages = 1e3 * voltages        # V -> mV
B_arg_num = 180/np.pi*B_arg_num  # rad -> degrees
# ---------------------------------------------------------#
# Plot the Things                                          #
# ---------------------------------------------------------#
matplotlib.pyplot.rc('text', usetex=True)   # labels contain LaTeX markup
matplotlib.pyplot.rc('font', family='serif')
fig = figure(1)
# Top subplot: |B| fit curve against the measured voltages.
axes1 = fig.add_subplot(211)
axes1.plot(frequency_vector, B_abs_num, color=plot_color_fit, label=plot_label_fit)
axes1.scatter(frequencies_measured,
              voltages,
              color=plot_color_measurements,
              s=plot_size_measurements,
              label=plot_label_measurements
              )
axes1.set_xlim([fmin*0.9, fmax*1.1])   # small margin around the data range
axes1.set_xscale(plot_scale_x)
axes1.set_xlabel(plot_label_x, fontdict=font)
axes1.set_ylabel(plot_1_label_y, fontdict=font)
axes1.set_title(plot_1_title, fontdict=titlefont)
axes1.legend(fontsize=plot_legend_fontsize)
axes1.tick_params(labelsize=9)
# Bottom subplot: phase fit against the measured phases.  The measured
# values are negated here -- presumably they were recorded with the
# opposite sign convention; NOTE(review): confirm against the lab notes.
axes2 = fig.add_subplot(212)
axes2.plot(frequency_vector, B_arg_num, color=plot_color_fit, label=plot_label_fit)
axes2.scatter(frequencies_measured,
              -phases_degrees,
              color=plot_color_measurements,
              s=plot_size_measurements,
              label=plot_label_measurements
              )
axes2.set_xlim([fmin*0.9, fmax*1.1])
axes2.set_xscale(plot_scale_x)
axes2.set_xlabel(plot_label_x, fontdict=font)
axes2.set_ylabel(plot_2_label_y, fontdict=font)
axes2.set_title(plot_2_title, fontdict=titlefont)
axes2.legend(fontsize=plot_legend_fontsize, loc='center left')
axes2.tick_params(labelsize=9)
fig.subplots_adjust(bottom=0.1, left=0.1, right=0.9, top=0.95, hspace=0.5)
# Export twice: PGF for LaTeX inclusion, PDF for standalone viewing.
fig.savefig('plots-pgf/hollow--cu--freq--approx2.pgf')
fig.savefig('plots-pdf/hollow--cu--freq--approx2.pdf')
# ---------------------------------------------------------#
# Save listing to file                                     #
# ---------------------------------------------------------#
# LaTeX preamble/postamble for the parameter table; the rows themselves
# come from the ``params`` list assembled near the top of the script.
table_opening = r"""
{%
\begin{center}
\captionof{table}{%
Parameterwerte f\"ur Fit-Funktion in Abbildung~\ref{fig:cu:freq:approx2}
}
\label{tab:fitparams:cu:freq:approx2}
\sisetup{%
%math-rm=\mathtt,
scientific-notation=engineering,
table-format = +3.2e+2,
round-precision = 3,
round-mode = figures,
}
\begin{tabular}{lr}
\toprule
"""
table_closing = r"""
\bottomrule
\end{tabular}
\end{center}
}
"""
# Use a context manager so the file is always closed, even if a write
# fails.  writelines() accepts the list of rows directly -- no explicit
# per-line loop is needed.
with open('listings/hollow--cu--freq--approx2.tex', 'w') as dumpfile:
    dumpfile.write(table_opening)
    dumpfile.writelines(params)
    dumpfile.write(table_closing)
# ---------------------------------------------------------#
# Save Value of sigma to file for error analysis           #
# ---------------------------------------------------------#
np.savetxt('numpy-txt/hollow--cu--freq--approx2.txt', ([sigma_abs, sigma_arg]))
| mit |
carrillo/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
# Shared fixture for the tests below: the diabetes regression data,
# standardized and restricted to five columns.
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]

# Univariate F-scores: the reference feature ranking that the
# randomized-l1 feature scores are expected to reproduce.
F, _ = f_regression(X, y)
def test_lasso_stability_path():
    """Smoke-test lasso_stability_path: the three features with the
    highest summed stability scores must match the three highest
    univariate F-scores."""
    _, stability_scores = lasso_stability_path(X, y, scaling=0.3,
                                               random_state=42,
                                               n_resampling=30)
    expected_top3 = np.argsort(F)[-3:]
    observed_top3 = np.argsort(np.sum(stability_scores, axis=1))[-3:]
    assert_array_equal(expected_top3, observed_top3)
def test_randomized_lasso():
    """Check RandomizedLasso feature scores, the transform round-trip,
    and validation of the ``scaling`` parameter."""
    scaling = 0.3
    threshold = 0.5
    top3 = np.argsort(F)[-3:]

    # single alpha value
    clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
                          scaling=scaling,
                          selection_threshold=threshold)
    scores = clf.fit(X, y).scores_
    assert_array_equal(top3, np.argsort(scores)[-3:])

    # several alpha values at once
    clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
                          scaling=scaling,
                          selection_threshold=threshold)
    scores = clf.fit(X, y).scores_
    assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
    assert_array_equal(top3, np.argsort(scores)[-3:])

    # transform keeps only features scoring above the threshold;
    # inverse_transform restores the original shape
    X_reduced = clf.transform(X)
    X_restored = clf.inverse_transform(X_reduced)
    assert_equal(X_reduced.shape[1], np.sum(scores > threshold))
    assert_equal(X_restored.shape, X.shape)

    # alpha selected by information criterion
    clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
                          scaling=scaling)
    scores = clf.fit(X, y).scores_
    assert_array_equal(scores, X.shape[1] * [1.])

    # scaling outside (0, 1) must be rejected
    clf = RandomizedLasso(verbose=False, scaling=-0.1)
    assert_raises(ValueError, clf.fit, X, y)
    clf = RandomizedLasso(verbose=False, scaling=1.1)
    assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
    """Randomized sparse logistic regression must rank features like the
    univariate F-test and must not modify its input array."""
    iris = load_iris()
    keep = iris.target != 2          # restrict to a binary problem
    X_bin = iris.data[:, [0, 2]][keep]
    y_bin = iris.target[keep]
    F_bin, _ = f_classif(X_bin, y_bin)

    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=0.3, n_resampling=50,
                                       tol=1e-3)
    X_before = X_bin.copy()
    scores = clf.fit(X_bin, y_bin).scores_
    assert_array_equal(X_bin, X_before)  # fit does not modify X
    assert_array_equal(np.argsort(F_bin), np.argsort(scores))

    # same check with a list of C values
    clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
                                       random_state=42, scaling=0.3,
                                       n_resampling=50, tol=1e-3)
    scores = clf.fit(X_bin, y_bin).scores_
    assert_array_equal(np.argsort(F_bin), np.argsort(scores))
def test_randomized_logistic_sparse():
    """Dense and sparse inputs must yield identical feature scores."""
    iris = load_iris()
    keep = iris.target != 2          # restrict to a binary problem
    X_bin = iris.data[:, [0, 2]][keep]
    y_bin = iris.target[keep]

    # center here because sparse matrices are usually not centered
    X_bin, y_bin, _, _, _ = center_data(X_bin, y_bin, True, True)
    X_sp = sparse.csr_matrix(X_bin)
    # univariate scores computed as in the dense test (result unused here)
    F_bin, _ = f_classif(X_bin, y_bin)

    def make_clf():
        # identical estimators so the dense and sparse fits are independent
        return RandomizedLogisticRegression(verbose=False, C=1.,
                                            random_state=42, scaling=0.3,
                                            n_resampling=50, tol=1e-3)

    dense_scores = make_clf().fit(X_bin, y_bin).scores_
    sparse_scores = make_clf().fit(X_sp, y_bin).scores_
    assert_array_equal(dense_scores, sparse_scores)
| bsd-3-clause |
MJuddBooth/pandas | pandas/core/sparse/frame.py | 1 | 37597 | """
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
from __future__ import division
import warnings
import numpy as np
from pandas._libs.sparse import BlockIndex, get_blocks
import pandas.compat as compat
from pandas.compat import lmap
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender
from pandas.core.dtypes.cast import find_common_type, maybe_upcast
from pandas.core.dtypes.common import ensure_platform_int, is_scipy_sparse
from pandas.core.dtypes.missing import isna, notna
import pandas.core.algorithms as algos
from pandas.core.arrays.sparse import SparseArray, SparseDtype
import pandas.core.common as com
from pandas.core.frame import DataFrame
import pandas.core.generic as generic
from pandas.core.index import Index, MultiIndex, ensure_index
import pandas.core.indexes.base as ibase
from pandas.core.internals import (
BlockManager, create_block_manager_from_arrays)
from pandas.core.internals.construction import extract_index, prep_ndarray
import pandas.core.ops as ops
from pandas.core.series import Series
from pandas.core.sparse.series import SparseSeries
# pylint: disable=E1101,E1103,W0231,E0202

# Substitution values shared by Appender-decorated docstring templates
# in this module.
_shared_doc_kwargs = dict(klass='SparseDataFrame')
class SparseDataFrame(DataFrame):
"""
DataFrame containing sparse floating point data in the form of SparseSeries
objects
Parameters
----------
data : same types as can be passed to DataFrame or scipy.sparse.spmatrix
.. versionchanged :: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : array-like, optional
column : array-like, optional
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries
(default: nan). Will not override SparseSeries passed in.
"""
_subtyp = 'sparse_frame'
def __init__(self, data=None, index=None, columns=None, default_kind=None,
default_fill_value=None, dtype=None, copy=False):
# pick up the defaults from the Sparse structures
if isinstance(data, SparseDataFrame):
if index is None:
index = data.index
if columns is None:
columns = data.columns
if default_fill_value is None:
default_fill_value = data.default_fill_value
if default_kind is None:
default_kind = data.default_kind
elif isinstance(data, (SparseSeries, SparseArray)):
if index is None:
index = data.index
if default_fill_value is None:
default_fill_value = data.fill_value
if columns is None and hasattr(data, 'name'):
columns = [data.name]
if columns is None:
raise Exception("cannot pass a series w/o a name or columns")
data = {columns[0]: data}
if default_fill_value is None:
default_fill_value = np.nan
if default_kind is None:
default_kind = 'block'
self._default_kind = default_kind
self._default_fill_value = default_fill_value
if is_scipy_sparse(data):
mgr = self._init_spmatrix(data, index, columns, dtype=dtype,
fill_value=default_fill_value)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, index, columns, dtype=dtype)
elif isinstance(data, SparseDataFrame):
mgr = self._init_mgr(data._data,
dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, DataFrame):
mgr = self._init_dict(data, data.index, data.columns, dtype=dtype)
elif isinstance(data, Series):
mgr = self._init_dict(data.to_frame(), data.index,
columns=None, dtype=dtype)
elif isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif data is None:
data = DataFrame()
if index is None:
index = Index([])
else:
index = ensure_index(index)
if columns is None:
columns = Index([])
else:
for c in columns:
data[c] = SparseArray(self._default_fill_value,
index=index, kind=self._default_kind,
fill_value=self._default_fill_value)
mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
else:
msg = ('SparseDataFrame called with unknown type "{data_type}" '
'for data argument')
raise TypeError(msg.format(data_type=type(data).__name__))
generic.NDFrame.__init__(self, mgr)
@property
def _constructor(self):
return SparseDataFrame
_constructor_sliced = SparseSeries
def _init_dict(self, data, index, columns, dtype=None):
# pre-filter out columns if we passed it
if columns is not None:
columns = ensure_index(columns)
data = {k: v for k, v in compat.iteritems(data) if k in columns}
else:
keys = com.dict_keys_to_ordered_list(data)
columns = Index(keys)
if index is None:
index = extract_index(list(data.values()))
def sp_maker(x):
return SparseArray(x, kind=self._default_kind,
fill_value=self._default_fill_value,
copy=True, dtype=dtype)
sdict = {}
for k, v in compat.iteritems(data):
if isinstance(v, Series):
# Force alignment, no copy necessary
if not v.index.equals(index):
v = v.reindex(index)
if not isinstance(v, SparseSeries):
v = sp_maker(v.values)
elif isinstance(v, SparseArray):
v = v.copy()
else:
if isinstance(v, dict):
v = [v.get(i, np.nan) for i in index]
v = sp_maker(v)
if index is not None and len(v) != len(index):
msg = "Length of passed values is {}, index implies {}"
raise ValueError(msg.format(len(v), len(index)))
sdict[k] = v
if len(columns.difference(sdict)):
# TODO: figure out how to handle this case, all nan's?
# add in any other columns we want to have (completeness)
nan_arr = np.empty(len(index), dtype='float64')
nan_arr.fill(np.nan)
nan_arr = SparseArray(nan_arr, kind=self._default_kind,
fill_value=self._default_fill_value,
copy=False)
sdict.update((c, nan_arr) for c in columns if c not in sdict)
return to_manager(sdict, columns, index)
def _init_matrix(self, data, index, columns, dtype=None):
"""
Init self from ndarray or list of lists.
"""
data = prep_ndarray(data, copy=False)
index, columns = self._prep_index(data, index, columns)
data = {idx: data[:, i] for i, idx in enumerate(columns)}
return self._init_dict(data, index, columns, dtype)
def _init_spmatrix(self, data, index, columns, dtype=None,
fill_value=None):
"""
Init self from scipy.sparse matrix.
"""
index, columns = self._prep_index(data, index, columns)
data = data.tocoo()
N = len(index)
# Construct a dict of SparseSeries
sdict = {}
values = Series(data.data, index=data.row, copy=False)
for col, rowvals in values.groupby(data.col):
# get_blocks expects int32 row indices in sorted order
rowvals = rowvals.sort_index()
rows = rowvals.index.values.astype(np.int32)
blocs, blens = get_blocks(rows)
sdict[columns[col]] = SparseSeries(
rowvals.values, index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, blocs, blens))
# Add any columns that were empty and thus not grouped on above
sdict.update({column: SparseSeries(index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, [], []))
for column in columns
if column not in sdict})
return self._init_dict(sdict, index, columns, dtype)
def _prep_index(self, data, index, columns):
N, K = data.shape
if index is None:
index = ibase.default_index(N)
if columns is None:
columns = ibase.default_index(K)
if len(columns) != K:
raise ValueError('Column length mismatch: {columns} vs. {K}'
.format(columns=len(columns), K=K))
if len(index) != N:
raise ValueError('Index length mismatch: {index} vs. {N}'
.format(index=len(index), N=N))
return index, columns
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.20.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. By numpy.find_common_type convention, mixing int64 and
and uint64 will result in a float64 dtype.
"""
try:
from scipy.sparse import coo_matrix
except ImportError:
raise ImportError('Scipy is not installed')
dtype = find_common_type(self.dtypes)
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
cols, rows, datas = [], [], []
for col, name in enumerate(self):
s = self[name]
row = s.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
datas.append(s.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
datas = np.concatenate(datas)
return coo_matrix((datas, (rows, cols)), shape=self.shape)
def __array_wrap__(self, result):
return self._constructor(
result, index=self.index, columns=self.columns,
default_kind=self._default_kind,
default_fill_value=self._default_fill_value).__finalize__(self)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
_default_fill_value=self._default_fill_value,
_default_kind=self._default_kind)
def _unpickle_sparse_frame_compat(self, state):
"""
Original pickle format
"""
series, cols, idx, fv, kind = state
if not isinstance(cols, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
columns = _unpickle_array(cols)
else:
columns = cols
if not isinstance(idx, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
index = _unpickle_array(idx)
else:
index = idx
series_dict = DataFrame()
for col, (sp_index, sp_values) in compat.iteritems(series):
series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,
fill_value=fv)
self._data = to_manager(series_dict, columns, index)
self._default_fill_value = fv
self._default_kind = kind
def to_dense(self):
"""
Convert to dense DataFrame
Returns
-------
df : DataFrame
"""
data = {k: v.to_dense() for k, v in compat.iteritems(self)}
return DataFrame(data, index=self.index, columns=self.columns)
def _apply_columns(self, func):
"""
Get new SparseDataFrame applying func to each columns
"""
new_data = {col: func(series)
for col, series in compat.iteritems(self)}
return self._constructor(
data=new_data, index=self.index, columns=self.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
def astype(self, dtype):
return self._apply_columns(lambda x: x.astype(dtype))
def copy(self, deep=True):
"""
Make a copy of this SparseDataFrame
"""
result = super(SparseDataFrame, self).copy(deep=deep)
result._default_fill_value = self._default_fill_value
result._default_kind = self._default_kind
return result
@property
def default_fill_value(self):
return self._default_fill_value
@property
def default_kind(self):
return self._default_kind
@property
def density(self):
"""
Ratio of non-sparse points to total (dense) data points
represented in the frame
"""
tot_nonsparse = sum(ser.sp_index.npoints
for _, ser in compat.iteritems(self))
tot = len(self.index) * len(self.columns)
return tot_nonsparse / float(tot)
def fillna(self, value=None, method=None, axis=0, inplace=False,
limit=None, downcast=None):
new_self = super(SparseDataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast)
if not inplace:
self = new_self
# set the fill value if we are filling as a scalar with nothing special
# going on
if (value is not None and value == value and method is None and
limit is None):
self._default_fill_value = value
if not inplace:
return self
# ----------------------------------------------------------------------
# Support different internal representation of SparseDataFrame
def _sanitize_column(self, key, value, **kwargs):
"""
Creates a new SparseArray from the input value.
Parameters
----------
key : object
value : scalar, Series, or array-like
kwargs : dict
Returns
-------
sanitized_column : SparseArray
"""
def sp_maker(x, index=None):
return SparseArray(x, index=index,
fill_value=self._default_fill_value,
kind=self._default_kind)
if isinstance(value, SparseSeries):
clean = value.reindex(self.index).as_sparse_array(
fill_value=self._default_fill_value, kind=self._default_kind)
elif isinstance(value, SparseArray):
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = value
elif hasattr(value, '__iter__'):
if isinstance(value, Series):
clean = value.reindex(self.index)
if not isinstance(value, SparseSeries):
clean = sp_maker(clean)
else:
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = sp_maker(value)
# Scalar
else:
clean = sp_maker(value, self.index)
# always return a SparseArray!
return clean
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable is True:
series = self._iget_item_cache(col)
else:
series = self._get_item_cache(col)
return series._get_value(index, takeable=takeable)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Notes
-----
This method *always* returns a new object. It is currently not
particularly efficient (and potentially very expensive) but is provided
for API compatibility with DataFrame
Returns
-------
frame : DataFrame
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
dense = self.to_dense()._set_value(
index, col, value, takeable=takeable)
return dense.to_sparse(kind=self._default_kind,
fill_value=self._default_fill_value)
_set_value.__doc__ = set_value.__doc__
def _slice(self, slobj, axis=0, kind=None):
if axis == 0:
new_index = self.index[slobj]
new_columns = self.columns
else:
new_index = self.index
new_columns = self.columns[slobj]
return self.reindex(index=new_index, columns=new_columns)
def xs(self, key, axis=0, copy=False):
"""
Returns a row (cross-section) from the SparseDataFrame as a Series
object.
Parameters
----------
key : some index contained in the index
Returns
-------
xs : Series
"""
if axis == 1:
data = self[key]
return data
i = self.index.get_loc(key)
data = self.take([i]).get_values()[0]
return Series(data, index=self.columns)
# ----------------------------------------------------------------------
# Arithmetic-related methods
def _combine_frame(self, other, func, fill_value=None, level=None):
if level is not None:
raise NotImplementedError("'level' argument is not supported")
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
if self.empty and other.empty:
return self._constructor(index=new_index).__finalize__(self)
new_data = {}
if fill_value is not None:
# TODO: be a bit more intelligent here
for col in new_columns:
if col in this and col in other:
dleft = this[col].to_dense()
dright = other[col].to_dense()
result = dleft._binop(dright, func, fill_value=fill_value)
result = result.to_sparse(fill_value=this[col].fill_value)
new_data[col] = result
else:
for col in new_columns:
if col in this and col in other:
new_data[col] = func(this[col], other[col])
new_fill_value = self._get_op_result_fill_value(other, func)
return self._constructor(data=new_data, index=new_index,
columns=new_columns,
default_fill_value=new_fill_value
).__finalize__(self)
def _combine_match_index(self, other, func, level=None):
    """Combine with a Series aligned against the row index."""
    new_data = {}

    if level is not None:
        raise NotImplementedError("'level' argument is not supported")

    this, other = self.align(other, join='outer', axis=0, level=level,
                             copy=False)

    # Apply func between each column's values and the aligned series.
    for col, series in compat.iteritems(this):
        new_data[col] = func(series.values, other.values)

    fill_value = self._get_op_result_fill_value(other, func)

    return self._constructor(
        new_data, index=this.index, columns=self.columns,
        default_fill_value=fill_value).__finalize__(self)
def _combine_match_columns(self, other, func, level=None):
    """Combine with a Series aligned against the columns."""
    # patched version of DataFrame._combine_match_columns to account for
    # NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series,
    # where 3.0 is numpy.float64 and series is a SparseSeries. Still
    # possible for this to happen, which is bothersome

    if level is not None:
        raise NotImplementedError("'level' argument is not supported")

    left, right = self.align(other, join='outer', axis=1, level=level,
                             copy=False)
    assert left.columns.equals(right.index)

    new_data = {}
    for col in left.columns:
        # float() forces the scalar code path (see note above).
        new_data[col] = func(left[col], float(right[col]))

    return self._constructor(
        new_data, index=left.index, columns=left.columns,
        default_fill_value=self.default_fill_value).__finalize__(self)
def _combine_const(self, other, func):
    """Apply ``func(column, other)`` to every column for a scalar operand."""
    def scalar_op(col):
        return func(col, other)
    return self._apply_columns(scalar_op)
def _get_op_result_fill_value(self, other, func):
    """Choose the ``default_fill_value`` for the result of a binary op.

    Parameters
    ----------
    other : DataFrame or SparseSeries
        Right-hand operand of the operation.
    func : callable
        The binary operator being applied.

    Returns
    -------
    scalar or None
        The fill value for the result frame (``None`` when the two
        frames disagree on non-NaN fill values).
    """
    own_default = self.default_fill_value

    if isinstance(other, DataFrame):
        # i.e. called from _combine_frame
        other_default = getattr(other, 'default_fill_value', np.nan)

        # If both fill values agree, propagate them.  NaN != NaN, so the
        # both-NaN case must be tested explicitly (this was the bug noted
        # by the former TODO: it used to fall through to ``None``).
        if own_default == other_default or (
                np.isnan(own_default) and np.isnan(other_default)):
            fill_value = own_default
        elif np.isnan(own_default) and not np.isnan(other_default):
            fill_value = other_default
        elif not np.isnan(own_default) and np.isnan(other_default):
            fill_value = own_default
        else:
            # Two different non-NaN fill values: no sensible common one.
            fill_value = None

    elif isinstance(other, SparseSeries):
        # i.e. called from _combine_match_index
        # fill_value is a function of our operator
        if isna(other.fill_value) or isna(own_default):
            fill_value = np.nan
        else:
            fill_value = func(np.float64(own_default),
                              np.float64(other.fill_value))

    else:
        raise NotImplementedError(type(other))

    return fill_value
def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
                   limit=None, takeable=False):
    """Conform the frame to a new row index.

    Raises
    ------
    TypeError
        If ``level`` is given; level-wise reindex is unsupported here.
    """
    if level is not None:
        raise TypeError('Reindex by level not supported for sparse')

    if self.index.equals(index):
        # Nothing to conform; honour the copy flag.
        if copy:
            return self.copy()
        else:
            return self

    if len(self.index) == 0:
        return self._constructor(
            index=index, columns=self.columns).__finalize__(self)

    indexer = self.index.get_indexer(index, method, limit=limit)
    indexer = ensure_platform_int(indexer)
    # -1 marks target labels absent from the current index.
    mask = indexer == -1
    need_mask = mask.any()

    new_series = {}
    for col, series in self.iteritems():
        if mask.all():
            # No target label matched: drop the column entirely.
            continue

        values = series.values
        # .take returns SparseArray
        new = values.take(indexer)
        if need_mask:
            new = new.values

            # convert integer to float if necessary. need to do a lot
            # more than that, handle boolean etc also
            new, fill_value = maybe_upcast(new, fill_value=fill_value)
            np.putmask(new, mask, fill_value)

        new_series[col] = new

    return self._constructor(
        new_series, index=index, columns=self.columns,
        default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_columns(self, columns, method, copy, level, fill_value=None,
                     limit=None, takeable=False):
    """Conform the frame to a new column index (subset selection only;
    fill/method/limit options are rejected for sparse frames)."""
    if level is not None:
        raise TypeError('Reindex by level not supported for sparse')

    if notna(fill_value):
        raise NotImplementedError("'fill_value' argument is not supported")

    if limit:
        raise NotImplementedError("'limit' argument is not supported")

    if method is not None:
        raise NotImplementedError("'method' argument is not supported")

    # TODO: fill value handling
    # Keep only the requested columns; new labels appear all-sparse.
    sdict = {k: v for k, v in compat.iteritems(self) if k in columns}
    return self._constructor(
        sdict, index=self.index, columns=columns,
        default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_with_indexers(self, reindexers, method=None, fill_value=None,
                           limit=None, copy=False, allow_dups=False):
    """Low-level reindex given precomputed ``{axis: (labels, indexer)}``."""
    if method is not None or limit is not None:
        raise NotImplementedError("cannot reindex with a method or limit "
                                  "with sparse")

    if fill_value is None:
        fill_value = np.nan

    # Normalize axis keys to axis numbers.
    reindexers = {self._get_axis_number(a): val
                  for (a, val) in compat.iteritems(reindexers)}

    index, row_indexer = reindexers.get(0, (None, None))
    columns, col_indexer = reindexers.get(1, (None, None))

    if columns is None:
        columns = self.columns

    new_arrays = {}
    for col in columns:
        if col not in self:
            # Missing columns are left to the constructor to create.
            continue
        if row_indexer is not None:
            new_arrays[col] = algos.take_1d(self[col].get_values(),
                                            row_indexer,
                                            fill_value=fill_value)
        else:
            new_arrays[col] = self[col]

    return self._constructor(new_arrays, index=index,
                             columns=columns).__finalize__(self)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
                 sort=False):
    """Join with another frame on the index; ``on`` is not supported."""
    if on is not None:
        raise NotImplementedError("'on' keyword parameter is not yet "
                                  "implemented")
    return self._join_index(other, how, lsuffix, rsuffix)
def _join_index(self, other, how, lsuffix, rsuffix):
    """Index join; a named Series is promoted to a one-column frame."""
    if isinstance(other, Series):
        if other.name is None:
            raise ValueError('Other Series must have a name')

        other = SparseDataFrame(
            {other.name: other},
            default_fill_value=self._default_fill_value)

    join_index = self.index.join(other.index, how=how)

    this = self.reindex(join_index)
    other = other.reindex(join_index)

    # Disambiguate overlapping column names before concatenation.
    this, other = this._maybe_rename_join(other, lsuffix, rsuffix)

    from pandas import concat
    return concat([this, other], axis=1, verify_integrity=True)
def _maybe_rename_join(self, other, lsuffix, rsuffix):
    """Suffix overlapping column names prior to a join.

    Raises
    ------
    ValueError
        If columns overlap but neither suffix was supplied.
    """
    to_rename = self.columns.intersection(other.columns)
    if len(to_rename) > 0:
        if not lsuffix and not rsuffix:
            raise ValueError('columns overlap but no suffix specified: '
                             '{to_rename}'.format(to_rename=to_rename))

        def lrenamer(x):
            if x in to_rename:
                return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix)
            return x

        def rrenamer(x):
            if x in to_rename:
                return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix)
            return x

        this = self.rename(columns=lrenamer)
        other = other.rename(columns=rrenamer)
    else:
        # No overlap: left side is returned unchanged.
        this = self

    return this, other
def transpose(self, *args, **kwargs):
    """
    Returns a DataFrame with the rows/columns switched.
    """
    nv.validate_transpose(args, kwargs)
    # Densify via .values, transpose, and rebuild with swapped axes
    # while keeping the sparse defaults.
    return self._constructor(
        self.values.T, index=self.columns, columns=self.index,
        default_fill_value=self._default_fill_value,
        default_kind=self._default_kind).__finalize__(self)

T = property(transpose)
@Appender(DataFrame.count.__doc__)
def count(self, axis=0, **kwds):
    if axis is None:
        axis = self._stat_axis_number

    # Delegate per-column/row counting to SparseSeries.count via apply.
    return self.apply(lambda x: x.count(), axis=axis)
def cumsum(self, axis=0, *args, **kwargs):
    """
    Return SparseDataFrame of cumulative sums over requested axis.

    Parameters
    ----------
    axis : {0, 1}
        0 for row-wise, 1 for column-wise

    Returns
    -------
    y : SparseDataFrame
    """
    # Reject numpy-compat positional/keyword arguments.
    nv.validate_cumsum(args, kwargs)

    if axis is None:
        axis = self._stat_axis_number

    return self.apply(lambda x: x.cumsum(), axis=axis)
@Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
    return self._apply_columns(lambda x: x.isna())
# ``isnull`` is an alias of ``isna``.
isnull = isna
@Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
    return self._apply_columns(lambda x: x.notna())
# ``notnull`` is an alias of ``notna``.
notnull = notna
def apply(self, func, axis=0, broadcast=None, reduce=None,
          result_type=None):
    """
    Analogous to DataFrame.apply, for SparseDataFrame

    Parameters
    ----------
    func : function
        Function to apply to each column
    axis : {0, 1, 'index', 'columns'}
    broadcast : bool, default False
        For aggregation functions, return object of same size with values
        propagated

        .. deprecated:: 0.23.0
           This argument will be removed in a future version, replaced
           by result_type='broadcast'.

    reduce : boolean or None, default None
        Try to apply reduction procedures. If the DataFrame is empty,
        apply will use reduce to determine whether the result should be a
        Series or a DataFrame. If reduce is None (the default), apply's
        return value will be guessed by calling func an empty Series (note:
        while guessing, exceptions raised by func will be ignored). If
        reduce is True a Series will always be returned, and if False a
        DataFrame will always be returned.

        .. deprecated:: 0.23.0
           This argument will be removed in a future version, replaced
           by result_type='reduce'.

    result_type : {'expand', 'reduce', 'broadcast, None}
        These only act when axis=1 {columns}:

        * 'expand' : list-like results will be turned into columns.
        * 'reduce' : return a Series if possible rather than expanding
          list-like results. This is the opposite to 'expand'.
        * 'broadcast' : results will be broadcast to the original shape
          of the frame, the original index & columns will be retained.

        The default behaviour (None) depends on the return value of the
        applied function: list-like results will be returned as a Series
        of those. However if the apply function returns a Series these
        are expanded to columns.

        .. versionadded:: 0.23.0

    Returns
    -------
    applied : Series or SparseDataFrame
    """
    if not len(self.columns):
        return self
    axis = self._get_axis_number(axis)

    if isinstance(func, np.ufunc):
        # Fast path: a ufunc is applied to each sparse column's stored
        # values and to its fill_value, so sparsity is preserved.
        new_series = {}
        for k, v in compat.iteritems(self):
            applied = func(v)
            applied.fill_value = func(v.fill_value)
            new_series[k] = applied
        return self._constructor(
            new_series, index=self.index, columns=self.columns,
            default_fill_value=self._default_fill_value,
            default_kind=self._default_kind).__finalize__(self)

    # General path: defer to the shared frame_apply machinery.
    from pandas.core.apply import frame_apply
    op = frame_apply(self,
                     func=func,
                     axis=axis,
                     reduce=reduce,
                     broadcast=broadcast,
                     result_type=result_type)
    return op.get_result()
def applymap(self, func):
    """
    Apply a function to a DataFrame that is intended to operate
    elementwise, i.e. like doing map(func, series) for each series in the
    DataFrame

    Parameters
    ----------
    func : function
        Python function, returns a single value from a single value

    Returns
    -------
    applied : DataFrame
    """
    # Each column is mapped elementwise via lmap (list(map(...))).
    return self.apply(lambda x: lmap(func, x))
def to_manager(sdf, columns, index):
    """ create and return the block manager from a dataframe of series,
    columns, index
    """
    # from BlockManager perspective
    # BlockManager stores axes as [columns, rows].
    axes = [ensure_index(columns), ensure_index(index)]

    return create_block_manager_from_arrays(
        [sdf[c] for c in columns], columns, axes)
def stack_sparse_frame(frame):
    """
    Only makes sense when fill_value is NaN

    Stack all stored (non-fill) points of a sparse frame into a single
    one-column DataFrame indexed by a (row label, column label)
    MultiIndex.
    """
    lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
    nobs = sum(lengths)

    # this is pretty fast
    minor_codes = np.repeat(np.arange(len(frame.columns)), lengths)

    inds_to_concat = []
    vals_to_concat = []
    # TODO: Figure out whether this can be reached.
    # I think this currently can't be reached because you can't build a
    # SparseDataFrame with a non-np.NaN fill value (fails earlier).
    for _, series in compat.iteritems(frame):
        if not np.isnan(series.fill_value):
            raise TypeError('This routine assumes NaN fill value')

        int_index = series.sp_index.to_int_index()
        inds_to_concat.append(int_index.indices)
        vals_to_concat.append(series.sp_values)

    major_codes = np.concatenate(inds_to_concat)
    stacked_values = np.concatenate(vals_to_concat)

    # One MultiIndex entry per stored point.
    index = MultiIndex(levels=[frame.index, frame.columns],
                       codes=[major_codes, minor_codes],
                       verify_integrity=False)

    lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,
                   columns=['foo'])
    return lp.sort_index(level=0)
def homogenize(series_dict):
    """
    Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
    corresponding to the locations where they all have data

    Parameters
    ----------
    series_dict : dict or DataFrame

    Notes
    -----
    Using the dumbest algorithm I could think of. Should put some more thought
    into this

    Returns
    -------
    homogenized : dict of SparseSeries
    """
    index = None

    need_reindex = False

    # First pass: intersect all sparse indexes to find common locations.
    for _, series in compat.iteritems(series_dict):
        if not np.isnan(series.fill_value):
            raise TypeError('this method is only valid with NaN fill values')

        if index is None:
            index = series.sp_index
        elif not series.sp_index.equals(index):
            need_reindex = True
            index = index.intersect(series.sp_index)

    if need_reindex:
        # Second pass: conform every series to the common index.
        output = {}
        for name, series in compat.iteritems(series_dict):
            if not series.sp_index.equals(index):
                series = series.sparse_reindex(index)

            output[name] = series
    else:
        output = series_dict

    return output
# use unaccelerated ops for sparse objects
# Register the flex (add/sub/...) and special (__add__/__sub__/...)
# arithmetic methods on SparseDataFrame at import time.
ops.add_flex_arithmetic_methods(SparseDataFrame)
ops.add_special_arithmetic_methods(SparseDataFrame)
| bsd-3-clause |
paztronomer/kepler_tools | UncertSine_mcmc_v01.py | 1 | 14619 | # Script to estimate uncertainties in sine fit to a Kepler light curve
# If use/modify/distribute, refer to: Francisco Paz-Chinchon,
# francisco at dfte.ufrn.br
# DFTE, UFRN, Brazil.
import emcee
from pandas import *
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq #least squares optimization
import scipy.optimize as op # optimization
from sys import exit
import time
import os
import gc
#
# F u n c t i o n s
#
# Add prefix to kic
def Kname(idk):
    """Return the Kepler identifier 'kplr' + KIC number zero-padded to
    nine digits.

    Parameters
    ----------
    idk : int or str
        KIC number.

    Returns
    -------
    str
        e.g. Kname(8462852) -> 'kplr008462852'; ids already nine or more
        characters long are returned unpadded, as before.
    """
    # str.zfill replaces the original manual while-loop padding.
    return 'kplr' + str(idk).zfill(9)
# Read LC and estimate sigma_{LC-ppalSignal}
def Read(gname, vargs):
    """Read a light curve and estimate the scatter of the residuals
    around the best-fit sine of known period and amplitude.

    Parameters
    ----------
    gname : str
        Path to a whitespace-separated two-column (time, normalized
        flux) file.
    vargs : sequence
        (prot, ampl, nsLev): rotation period, sine amplitude, noise
        level to add to the residual scatter.

    Returns
    -------
    tuple
        (time array, flux array, total sigma, fitted phase, fitted
        offset).
    """
    prot, ampl, nsLev = vargs # expand
    # read lc
    TS = read_csv(gname, sep=' ', names=['time','nflux'])
    tjd, pdc = TS['time'].values, TS['nflux'].values
    TS = None
    # estimate sigma_{lc-ppalSignal}
    # 1st) optimize parameters
    # Only phase (x[0]) and offset (x[1]) are fitted; period and
    # amplitude are held fixed at the supplied values.
    guess_a, guess_b = 0.0, np.mean(pdc)
    optimize_func = lambda x: ampl*np.sin(tjd*2.0*np.pi*(1./prot)+x[0]) + x[1] - pdc
    est_a, est_b = leastsq(optimize_func, [guess_a, guess_b])[0]
    phi, off = est_a, est_b
    est_a, est_b, guess_a, guess_b = None, None, None, None
    # 2nd) estimate sigma
    dd = pdc - (ampl*np.sin(tjd*2.0*np.pi*(1./prot)+phi) + off); SS = np.std(dd)
    dd = None
    # 3rd) add the noise contribution
    SS = SS + nsLev
    return tjd, pdc, SS, phi, off
#.......................................................
#
# C o r p u s
#
print '\n\tstarting!....'
global_t = time.time()
# Path to light curves
# path_lc = '/var/run/media/fj/Data/tesis.phd/2014/KOI-Dec13/pdc_s05_UNtreat/'
path_lc = '/Users/fj/Work/tesis.phd/2014/ConfirPlanet-May20/pdcM20/s01tos04_treat/'
# define number of walkers and number of iterations
Nit = 1000
Wks = 200
THREADS = 1
# L o a d tables
#
# 1) Table of uncertainties
# 01_KIC,02_pdc_err,03_normpdc_err,04_time_corr
# this table has the error in flux for sample s04 + sample KOI Dec2013... so all stars that we
# possible need
e_tab = read_csv('s04d13.r05_ErrorFITS.csv')
#
e_tab['05_kic_number'] = map(lambda x: int(x[ x.find('kplr')+4: ]), e_tab['01_KIC'].values)
# 2) Table of period and other info
# 01_kic,02_period,03_amplitude,04_power,05_peak,06_noiseLev,07_snr
# info_tab = read_csv('final_KOInonPlan_all_v03.csv')
#
# 01_kic,02_period,03_amplitude,04_power,05_peak,06_noiseLev,07_snr,08_Msun,09_Rsun,10_teff
info_tab = read_csv('final_Plan_fromKCPl_v02.csv')
# List to harbor acceptance fraction
# Note: the acceptance fraction is like a diagnosis, which must be have values between
# 0.2 and 0.5
af = [ [],[],[] ]
# List to harbor varios parameters
comp=[[],[],[],[],[], [],[],[],[],[], [],[],[],[]]
# Walk through ids
for ind_kic,kic in enumerate( info_tab['01_kic'].values ):
start_time = time.time()
# free memory
gc.collect()
# get values from tables for the actual KIC
period = info_tab['02_period'].values[ind_kic]
ampl = info_tab['03_amplitude'].values[ind_kic]
nsLev = info_tab['06_noiseLev'].values[ind_kic]
e_flux = e_tab.loc[ (e_tab['05_kic_number'] == kic), '03_normpdc_err'].values[0]
corr_time = e_tab.loc[ (e_tab['05_kic_number'] == kic), '04_time_corr'].values[0]
# Walk through files up to match light curve id
for (path, dirs, files) in os.walk( path_lc ):
for ind_FF, FF in enumerate(files):
kID = map(Kname, [kic])[0]
if ('.dat' in FF) and ( kID in FF ):
print '\n.........................'
print '\t{0:^12}'.format(kID)
# returns: time_arr, flux_arr, sigma_total, phase, offset
tjd, flux, ss_total, ph, delta = Read( path_lc+FF, [period,ampl,nsLev])
# M a x i m u m l i k e l i h o o d
# making the chi-squared statistics as small as possible
# optimize.minimize(function, x0, args=(extra_arguments_to_function), ...)
# chi2 returns -2.*lnlike
# Likelihood function
def lnlike(vargs, x, y, yerr):
A, freq, lnf = vargs
#phi, offset = rho
model = A * np.sin(2.0*np.pi*freq*x + ph) + delta
inv_sigma2 = 1.0/(yerr**2 + np.exp(2*lnf))
# note that exp(2 ln f)==f**2
return -0.5*(np.sum( (y-model)**2*inv_sigma2 - np.log(inv_sigma2) ))
print 'fix it!!!! : np.log(ss_total)----------------------- it must be a ratio, not the total sigma'
exit(0)
chi2 = lambda *args: -2.0 * lnlike(*args)
vaux = [ampl, 1./period, np.log(ss_total)] # list
# ASI FUNCIONA BIEN MINIMIZE
result = op.minimize( chi2, vaux, args=(tjd, flux, e_flux), \
options={'disp': True}, method = 'Nelder-Mead')
# Nelder-Mead: unconstrained minimization using Simplex algorithm (no 1st or 2nd derivate)
# The outputs of maximum likelihood are passed as a list: 'OptimizeResult' with various
# methods: x::shows results; sucess::whether or not optim sucess; message::descript of
# cause of termination...
ampl_ml, freq_ml, lnf_ml = result.x
print '\t\t::: elapsed time : {0} min'.format((time.time()-start_time)/60.)
# Diagnosis plot: fit vs ML
if False:
plt.close('all')
fig = plt.figure(); ax1 = fig.add_subplot(211); ax2 = fig.add_subplot(212)
ax1.plot(tjd, flux,'b', color='0.7')
ax1.plot(tjd, ampl*np.sin(2.0*np.pi*(1./period)*tjd+ph)+delta, 'r-', label='Fit')
ax1.plot(tjd, ampl_ml*np.sin(2.0*np.pi*freq_ml*tjd+ph)+delta, 'g-', label='ML')
ax2.plot(tjd, (ampl*np.sin(2.0*np.pi*(1./period)*tjd+ph))-\
(ampl_ml*np.sin(2.0*np.pi*freq_ml*tjd+ph)), 'k,')
plt.show()
# END: maximum likelihood
# Prior-probability function
def lnprior(vargs):
# lnprior=0 means prior=1.0
# note: np.log(0.00005)=-9.9; np.log(1.0)=0.0
A, freq, lnf = vargs
if (0.5*ampl < A < 2.0*ampl) and (0.5*(1./period) < freq < 2.0*(1./period)) and ((np.log(0.5)+np.log(ss_total)) < lnf < (np.log(1.5)+np.log(ss_total))): #(phi-np.pi < phi < phi+np.pi)
return 0.0
else:
return -np.inf
# Full-probability function
def lnprob(vargs, x, y, yerr):
lp = lnprior(vargs) # prior
# if prior is infinite, the lnprob (full log-probability) will
# be infinite (negative), so probability zero.
if not np.isfinite(lp):
return -np.inf
# if the prior is finite, the full log-probability is the prior
# plus the likelihood. Remember is a log-prob.
else:
return lp + lnlike(vargs, x, y, yerr)
# M o n t e C M C
# 1) Initialize the walkers in a tiny Gaussian ball around the ML result
ndim = 3 # number of variables to use: amplitude, period and ss_total
perturb = e_flux+ss_total # perturbation added to gaussian random noise
pos = [ result.x + perturb*np.random.randn(ndim) for i in range(Wks) ]
# briefly...
# result.x + e_flux*np.random.randn(ndim) : array of shape (ndim,)
# pos is a list of arrays with length=ndim. pos=[ array[x,y,z], array[xx,yy,zz],...]
# 2) Setup the modified Metropolis-Hasting sampler
# with: N of walkers, ndim, full-log-prob, light-curve-data
sampler = emcee.EnsembleSampler( Wks, ndim, lnprob, \
args=(tjd, flux, e_flux), threads=THREADS )
# Arguments:
# nwalkers (Goodman&Weare walkers), dim, lnpostfn (log-posterior probability),
# a=2.0 (proposal scale parameter), args=[] (list of extra positional arguments for
# lnpostfn, it will called as lnpostfn(p,*args,**kwargs)), kwargs={} (list of extra
# arguments for lnpostfn, it will called as lnpostfn(p,*args,**kwargs)), postargs=None,
# threads=1 (number of threads used in parallel calls to lnpostfn), pool=None,
# live_dangerously=False, runtime_sortingfn=None
#
# Methods:
# - acceptance_fraction (array length:Wks, of the fraction of steps accepted for each walker)
# - acor (estimate of autocorr time for each parameter, length:ndim)
# - chain (Markov chain array of shape(Wks, iterations, ndim))
# - get_autocorr_time(window=50, fast=False) (estimate of autocorr time for each
# parameter, legth:ndim. window--size of the windowing function, equivalent to
# the maximum number of lags to use)
# - get_lnprob(p) (return log-probability at the given position)
# - run_mcmc(pos0,N,rstate0=None,lnprob0=None,**kwargs) (iterate sample() for N
# iterations and return result. pos0:initial posit vector, N:number of steps,
# lnprob0:the log-posterior proba at p0, if not, the initial value is called, ...)
# - sample(p0, lnprob0=None, rstate0=None, blobs0=None, iterations=1, thin=1,
# storechain=True, mh_proposal=None) (advance the chain iterations steps as a generator)
# ...and more....
sampler.run_mcmc(pos, Nit) # tiny gaussian ball is the zero-position
# Reshape samples array
cut_point= 0
# remember: [ nwalkers, ndim, lnprob ]
samples = sampler.chain[:, cut_point:, :].reshape((-1, ndim))
print '\n--- Finished MCMC core.\n\t\t::: elapsed time (global): {0:^15} min \n\t\t\t({1:^15} min)'.format((time.time()-start_time)/60., (time.time()-global_t)/60.)
print 'Median/Mean of acceptance fraction: {0} / {1}'.format(np.median(sampler.acceptance_fraction), np.mean(sampler.acceptance_fraction))
# Save results:
# - numpy array of period mcmc samples for each KIC
# - median and mean of acceptance fraction
# - few statistics
# Save samples of rotational period and amplitude of variation
fn_prot = 'npy_files/prot_'+kID+'_N'+str(int(Nit))+'Walk'+str(int(Wks))+'.npy'
np.save(fn_prot, 1.0/samples[:,1])
#
fn_ampl = 'npy_files/ampl_'+kID+'_N'+str(int(Nit))+'Walk'+str(int(Wks))+'.npy'
np.save(fn_ampl, samples[:,0])
# kic , median and mean of acceptance fraction
af[0].append(kic)
af[1].append(np.median(sampler.acceptance_fraction))
af[2].append(np.mean(sampler.acceptance_fraction))
# samples:: Ampl, Freq, lnf
samples[:, 2] = np.exp(samples[:, 2])
# 1st and 3rd quartile statistics
tmp = zip(*np.percentile(samples, [25,50,75], axis=0))
A_mc, freq_mc, ss_mc = map(lambda x: (x[1]-x[0], x[1], x[2]-x[1]), tmp)
prot_tmp = 1.0/samples[:,1]
aux_prot = np.percentile(prot_tmp, [25,50,75], axis=0)
# stacking...
comp[0].append(kic)
comp[1].append(period)
comp[2].append(aux_prot[1]) # median of mcmc period
comp[3].append(aux_prot[0]) # 1st quartile of mcmc period
comp[4].append(aux_prot[2]) # 3rd quartile of mcmc period
comp[5].append(prot_tmp.std()) # stdev of mcmc period
comp[6].append(ampl )
comp[7].append(A_mc[1]) # median of mcmc amplitude
comp[8].append(A_mc[0]) # 1st quartile of mcmc amplitude
comp[9].append(A_mc[2]) # 3rd quartile of mcmc amplitude
comp[10].append(samples[:,0].std()) #stdev of mcmc amplitude
comp[11].append(ss_total)
comp[12].append(ss_mc[1]) # median of mcmc LC noise
comp[13].append(samples[:,2].std()) # stdev of mcmc LC noise
print '\n\t....passing to the next LC\n_____________________'
# CLOSE: walk through files
# CLOSE: walk through kics
# Write results to file
# wr_dict = {'01_kic':comp[0][:],'02_prot':comp[1][:],'03_prot_mc':comp[2][:],'04_prot_q1':comp[3][:],\
# '05_prot_q3':comp[4][:],'06_prot_std':comp[5][:], '07_ampl':comp[6][:],'08_ampl_mc':comp[7][:],\
# '09_ampl_q1':comp[8][:],'10_ampl_q3':comp[9][:],'11_ampl_std':comp[10][:],'12_noise':comp[11][:],\
# '13_noise_mc':comp[12][:],'14_noise_std':comp[13][:],'15_af_median':af[1][:],'16_af_mean':af[2][:]}
# DataFrame(wr_dict).to_csv('final_mcmcErr_KOI_v02_N'+str(int(Nit))+'Walk'+str(int(Wks))+'.csv', index=False, header=True)
wr_dict = {'01_kic':comp[0][:],'02_prot':comp[1][:],'03_prot_mc':comp[2][:],'04_prot_q1':comp[3][:],\
'05_prot_q3':comp[4][:],'06_prot_std':comp[5][:], '07_ampl':comp[6][:],'08_ampl_mc':comp[7][:],\
'09_ampl_q1':comp[8][:],'10_ampl_q3':comp[9][:],'11_ampl_std':comp[10][:],'12_noise':comp[11][:],\
'13_noise_mc':comp[12][:],'14_noise_std':comp[13][:],'15_af_median':af[1][:],'16_af_mean':af[2][:]}
DataFrame(wr_dict).to_csv('final_mcmcErr_Plan_v02_N'+str(int(Nit))+'Walk'+str(int(Wks))+'.csv', index=False, header=True)
print '\n\n\t succesful finished! :-)'
| mit |
altairpearl/scikit-learn | benchmarks/bench_isolation_forest.py | 46 | 3782 | """
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
np.random.seed(1)
datasets = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
fig_roc, ax_roc = plt.subplots(1, 1, figsize=(8, 5))
for dat in datasets:
# loading and vectorization
print('loading data')
if dat in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True)
X = dataset.data
y = dataset.target
if dat == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
X, y = sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dat == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dat == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dat == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dat == 'http' or dat == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = X.shape
n_samples_train = n_samples // 2
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
print('IsolationForest processing...')
model = IsolationForest(n_jobs=-1)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = - model.decision_function(X_test) # the lower, the more normal
# Show score histograms
fig, ax = plt.subplots(3, sharex=True, sharey=True)
bins = np.linspace(-0.5, 0.5, 200)
ax[0].hist(scoring, bins, color='black')
ax[0].set_title('decision function for %s dataset' % dat)
ax[0].legend(loc="lower right")
ax[1].hist(scoring[y_test == 0], bins, color='b',
label='normal data')
ax[1].legend(loc="lower right")
ax[2].hist(scoring[y_test == 1], bins, color='r',
label='outliers')
ax[2].legend(loc="lower right")
# Show ROC Curves
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
label = ('%s (area: %0.3f, train-time: %0.2fs, '
'test-time: %0.2fs)' % (dat, AUC, fit_time, predict_time))
ax_roc.plot(fpr, tpr, lw=1, label=label)
ax_roc.set_xlim([-0.05, 1.05])
ax_roc.set_ylim([-0.05, 1.05])
ax_roc.set_xlabel('False Positive Rate')
ax_roc.set_ylabel('True Positive Rate')
ax_roc.set_title('Receiver operating characteristic (ROC) curves')
ax_roc.legend(loc="lower right")
fig_roc.tight_layout()
plt.show()
| bsd-3-clause |
xbenjox/CryptoTrade | dataui.py | 1 | 3760 | from tkinter import *
import matplotlib
import numpy as np
from os import listdir
from os.path import isfile, join
from lxml import etree as ET
import math
class DataUI(Toplevel):
    """Top-level window for collecting and caching market OHLC data.

    Market metadata is read from ``Data/markets.xml``; per-market
    day-interval history is cached in
    ``Data/Markets/markets_day_hist.xml``.
    """

    # Class-level caches (shared across instances).
    markets = list()
    doge_data = list()

    def __init__(self, parent, c_api):
        """Build the window.

        Parameters
        ----------
        parent : tkinter widget
            Parent window.
        c_api : object
            Exchange API client; must provide ``markets()`` and
            ``market_ohlc()``.
        """
        Toplevel.__init__(self, parent)
        self.c = c_api
        self.market_data = self.c.markets()
        self.transient(parent)
        self.geometry("800x600+50+50")
        self.get_markets_xml()
        self.get_history_xml()
        self.createWidgets()
        self.updateWidgets()
        return

    def createWidgets(self):
        """Create one label/button row per market plus control buttons."""
        mroot = self.markettree.getroot()

        # NOTE: this attribute is reassigned to the real Close button
        # below; kept as-is to preserve the original attribute layout.
        self.Close = Button(self)
        self.Close["text"] = "Get Markets"
        self.Close["command"] = lambda: self.GetMarkets()
        self.Close.grid({"row": "0", "columnspan": "1"})

        x = 1
        for market in mroot.findall('market'):
            self.lblDogeData = Label(self)
            self.lblDogeData['text'] = market.text
            self.lblDogeData.grid({"row": x})

            self.btnDogeCollect = Button(self)
            self.btnDogeCollect['text'] = "Collect Data"
            # Bug fix: bind the market id as a default argument.  A bare
            # ``lambda: self.CollectData(market.get('id'), "day")``
            # late-binds the loop variable, so every button would have
            # collected data for the *last* market only.
            self.btnDogeCollect['command'] = \
                lambda mid=market.get('id'): self.CollectData(mid, "day")
            self.btnDogeCollect.grid({"row": x, "column": "1"})
            x += 1

        self.Close = Button(self)
        self.Close["text"] = "Close"
        self.Close["fg"] = "red"
        self.Close["command"] = self._quit
        self.Close.grid({"row": "5", "columnspan": "1"})
        return

    def updateWidgets(self):
        """Placeholder for refreshing widget state."""
        return

    def on_key_event(self, event):
        """Debug handler: print the pressed key."""
        print('you pressed %s' % event.key)
        return

    def _quit(self):
        """Close this window."""
        self.destroy()
        return

    def GetMarkets(self):
        """Fetch the current market list from the API and print it."""
        markets = self.c.markets()
        print(markets['data'])
        return

    def CollectData(self,
                    mid: int,
                    t: str):
        """Download OHLC samples for market ``mid`` at interval ``t``
        and rewrite that market's entries in the XML history cache."""
        root = self.tree.getroot()
        for market in root.findall('market'):
            if market.get('id') == str(mid):
                data = self.c.market_ohlc(mid, interval=t)
                # clear old data before appending the fresh samples
                for time in market.findall('time'):
                    market.remove(time)
                for sample in data['data']:
                    print(sample)
                    date = sample['date']
                    timestamp = sample['timestamp']
                    high = sample['high']
                    low = sample['low']
                    open = sample['open']
                    close = sample['close']
                    volume = sample['volume']
                    element = ET.Element('date')
                    element.set('date', date)
                    ET.SubElement(element, 'timestamp').text = str(timestamp)
                    ET.SubElement(element, 'high').text = str(high)
                    ET.SubElement(element, 'low').text = str(low)
                    ET.SubElement(element, 'open').text = str(open)
                    ET.SubElement(element, 'close').text = str(close)
                    ET.SubElement(element, 'volume').text = str(volume)
                    market.append(element)
        self.tree.write('Data/Markets/markets_day_hist.xml', pretty_print=True)
        return

    def get_history_xml(self):
        """Load the cached per-market history into ``self.tree``."""
        parser = ET.XMLParser(remove_blank_text=True)
        self.tree = ET.parse('Data/Markets/markets_day_hist.xml', parser)
        root = self.tree.getroot()
        for market in root.findall('market'):
            print(market.get('label'))
        return

    def get_markets_xml(self):
        """Load the market metadata into ``self.markettree``."""
        parser = ET.XMLParser(remove_blank_text=True)
        self.markettree = ET.parse('Data/markets.xml', parser)
        root = self.markettree.getroot()
        for market in root.findall('market'):
            print(market.get('id'))
        return
ligovirgo/gwdetchar | gwdetchar/scattering/tests/test_plot.py | 1 | 1814 | # -*- coding: utf-8 -*-
# Copyright (C) Alex Urban (2019)
#
# This file is part of the GW DetChar python package.
#
# GW DetChar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GW DetChar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gwdetchar. If not, see <http://www.gnu.org/licenses/>.
"""Tests for `gwdetchar.scattering.plot`
"""
import os
import numpy
import shutil
from gwpy.timeseries import TimeSeries
from matplotlib import use
use('Agg')
# backend-dependent import
from .. import plot # noqa: E402
__author__ = 'Alex Urban <alexander.urban@ligo.org>'
# global test objects
TWOPI = 2 * numpy.pi
TIMES = numpy.arange(0, 16384 * 64)
NOISE = TimeSeries(
numpy.random.normal(loc=1, scale=.5, size=16384 * 64),
sample_rate=16384, epoch=-32).zpk([], [0], 1)
FRINGE = TimeSeries(
numpy.cos(TWOPI * TIMES), sample_rate=16384, epoch=-32)
DATA = NOISE.inject(FRINGE)
QSPECGRAM = DATA.q_transform(logf=True)
# -- make sure plots run end-to-end -------------------------------------------
def test_spectral_comparison(tmpdir):
    # Smoke test: both scattering plot helpers should run end-to-end
    # and write their PNG outputs without raising.
    outdir = str(tmpdir)
    plot1 = os.path.join(outdir, 'test1.png')
    plot2 = os.path.join(outdir, 'test2.png')
    # test plotting
    plot.spectral_comparison(0, QSPECGRAM, FRINGE, plot1)
    plot.spectral_overlay(0, QSPECGRAM, FRINGE, plot2)
    # clean up
    shutil.rmtree(outdir, ignore_errors=True)
| gpl-3.0 |
nhejazi/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 70 | 7808 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
    """Center and normalise ``x`` **in place** along ``axis``.

    Parameters
    -----------
    x: ndarray
        Array with an axis of observations (statistical units) measured on
        random variables.
    axis: int, optional
        Axis along which the mean and variance are calculated.
    """
    # rollaxis returns a view, so in-place ops below mutate the caller's array
    view = np.rollaxis(x, axis)
    view -= view.mean(axis=0)
    view /= view.std(axis=0)
def test_gs():
    """Check Gram-Schmidt decorrelation against a random orthogonal basis."""
    rng = np.random.RandomState(0)
    # the left factor of an SVD is a random orthogonal matrix
    basis, _, _ = np.linalg.svd(rng.randn(10, 10))
    vec = rng.randn(10)
    # decorrelating against the full basis leaves (numerically) nothing
    _gs_decorrelation(vec, basis, 10)
    assert_less(np.sum(vec ** 2), 1.e-10)
    vec = rng.randn(10)
    # decorrelating against the first 5 vectors removes those components only
    projected = np.dot(_gs_decorrelation(vec, basis, 5), basis.T)
    assert_less(np.sum(projected[:5] ** 2), 1.e-10)
def test_fastica_simple(add_noise=False):
    """Exercise fastica/FastICA on a 2-source toy mixture over every
    algorithm / non-linearity / whitening combination and check that the
    original sources are recovered."""
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(0)
    # scipy.stats uses the global RNG:
    np.random.seed(0)
    n_samples = 1000
    # Generate two sources: a square wave and a heavy-tailed t-distributed one
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)],
                       [np.sin(phi), -np.cos(phi)]])
    m = np.dot(mixing, s)

    if add_noise:
        m += 0.1 * rng.randn(2, 1000)

    center_and_norm(m)

    # function as fun arg: returns g(x) and the mean of g'(x)
    def g_test(x):
        return x ** 3, (3 * x ** 2).mean(axis=-1)

    algos = ['parallel', 'deflation']
    nls = ['logcosh', 'exp', 'cube', g_test]
    whitening = [True, False]
    for algo, nl, whiten in itertools.product(algos, nls, whitening):
        if whiten:
            k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
            # a callable that does not follow the (g, g') contract must raise
            assert_raises(ValueError, fastica, m.T, fun=np.tanh,
                          algorithm=algo)
        else:
            # pre-whiten manually when fastica is asked not to
            X = PCA(n_components=2, whiten=True).fit_transform(m.T)
            k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
            assert_raises(ValueError, fastica, X, fun=np.tanh,
                          algorithm=algo)
        s_ = s_.T
        # Check that the mixing model described in the docstring holds:
        if whiten:
            assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))

        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        # resolve ICA's sign indeterminacy before comparing to the truth
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))

        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
        else:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)

        # Test FastICA class: the estimator API must match the function API
        _, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
        ica = FastICA(fun=nl, algorithm=algo, random_state=0)
        sources = ica.fit_transform(m.T)
        assert_equal(ica.components_.shape, (2, 2))
        assert_equal(sources.shape, (1000, 2))
        assert_array_almost_equal(sources_fun, sources)
        assert_array_almost_equal(sources, ica.transform(m.T))
        assert_equal(ica.mixing_.shape, (2, 2))

        # invalid `fun` values must also be rejected on fit
        for fn in [np.tanh, "exp(-.5(x^2))"]:
            ica = FastICA(fun=fn, algorithm=algo, random_state=0)
            assert_raises(ValueError, ica.fit, m.T)

    # a completely wrong `fun` type raises TypeError
    assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
    """Non-regression test for issue #697: fitting with whiten=False."""
    data = [[0, 1], [1, 0]]
    estimator = FastICA(n_components=1, whiten=False, random_state=0)
    # n_components is ignored without whitening, which should warn
    assert_warns(UserWarning, estimator.fit, data)
    assert_true(hasattr(estimator, 'mixing_'))
def test_non_square_fastica(add_noise=False):
    """Recover 2 sources from 6 mixtures (non-square mixing matrix)."""
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(0)

    n_samples = 1000
    # Generate two sources: a sine and a ceiled sine (square-ish wave)
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing matrix
    mixing = rng.randn(6, 2)
    m = np.dot(mixing, s)

    if add_noise:
        m += 0.1 * rng.randn(6, n_samples)

    center_and_norm(m)

    k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
    s_ = s_.T

    # Check that the mixing model described in the docstring holds:
    assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))

    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    # resolve ICA's sign indeterminacy before comparing to the truth
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))

    # Check that we have estimated the original sources
    if not add_noise:
        assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
        assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
    """FastICA.fit_transform must agree with fit(...) then transform(...)."""
    rng = np.random.RandomState(0)
    data = rng.random_sample((100, 10))
    for whiten, n_components in [[True, 5], [False, None]]:
        expected_components = (n_components if n_components is not None
                               else data.shape[1])
        # one-shot path
        est = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        sources = est.fit_transform(data)
        assert_equal(est.components_.shape, (expected_components, 10))
        assert_equal(sources.shape, (100, expected_components))
        # two-step path with the same seed must give identical results
        est = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        est.fit(data)
        assert_equal(est.components_.shape, (expected_components, 10))
        sources2 = est.transform(data)
        assert_array_almost_equal(sources, sources2)
def test_inverse_transform():
    """Check mixing_ shapes and the fit_transform/inverse_transform round-trip."""
    # Test FastICA.inverse_transform
    n_features = 10
    n_samples = 100
    n1, n2 = 5, 10
    rng = np.random.RandomState(0)
    X = rng.random_sample((n_samples, n_features))
    # expected mixing_ shape per (whiten, n_components) pair;
    # NOTE(review): without whitening both n1 and n2 map to (n_features, n2)
    # -- presumably because n_components is ignored when whiten=False;
    # confirm against FastICA's documented behaviour
    expected = {(True, n1): (n_features, n1),
                (True, n2): (n_features, n2),
                (False, n1): (n_features, n2),
                (False, n2): (n_features, n2)}
    for whiten in [True, False]:
        for n_components in [n1, n2]:
            n_components_ = (n_components if n_components is not None else
                             X.shape[1])
            ica = FastICA(n_components=n_components, random_state=rng,
                          whiten=whiten)
            with warnings.catch_warnings(record=True):
                # catch "n_components ignored" warning
                Xt = ica.fit_transform(X)
            expected_shape = expected[(whiten, n_components_)]
            assert_equal(ica.mixing_.shape, expected_shape)
            X2 = ica.inverse_transform(Xt)
            assert_equal(X.shape, X2.shape)

            # reversibility test in non-reduction case
            if n_components == X.shape[1]:
                assert_array_almost_equal(X, X2)
| bsd-3-clause |
jupsal/schmies-jTEM | jeremy/Test4Plotting.py | 1 | 1876 | ###########################################################################
# This file holds the plotting routine for the KP solution from the Schmies
# thesis. There is no timestepping; it plots only the initial condition.
############################################################################
import matplotlib.pyplot as plt
from plotting.plotKPSoln import createPlot
from parseKPData import *
import os
# Choose the local filestructure based on what computer we are using. This
# should point to the main folder for jTEM stuff/jeremy (because of the way the
# following command works it's easier to do this)
#
globalFileStructure = os.path.dirname(os.path.realpath(__file__))
globalFileStructure += '/'
# For example, for the desktop in office, this should give
# # globalFileStructure = ('/home/jeremy/Documents/research/'
# # 'RiemannSurfaces/jTEM-Jeremy/jeremy/')
## We should only have to change the next two parameters for each test.
# localFileStructure tells us where the data is
localFileStructure = ('plotting/data/Test4/')
# Number of examples
numExamples = 1;
def main():
    """Render one figure per Test4 example: load the saved data files,
    parse coordinates/solution, and write an .eps plot for each example."""
    # Loop over all examples
    # NOTE(review): xrange(0, numExamples+1) visits examples 0..numExamples
    # inclusive (numExamples+1 plots) -- confirm that example 0 exists.
    for exampleNum in xrange(0,numExamples+1):
        print exampleNum # to keep track of progress
        # resolve the three data-file names for this example
        cFileName, sFileName, gFileName = defFileNames( globalFileStructure,
                localFileStructure, exampleNum );
        coordData, solnData, groupData = loadData( cFileName, sFileName,
                gFileName )
        x, y, z = parseSolnData( coordData, solnData );
        # output file lives next to the input data
        plotFilename = ( globalFileStructure + localFileStructure +
                'ExampleNum' + str(exampleNum) + '.eps')
        createPlot( x, y, z, groupData, plotFilename,
                exampleNum = str(exampleNum) )
    # Show all the plots at the end.
    plt.show()

if __name__ == '__main__':
    main()
| bsd-2-clause |
Thomsen22/MissingMoney | Peak Load Reserve - EU system/PLR_optclass.py | 1 | 8145 | # Python standard modules
import numpy as np
import gurobipy as gb
import networkx as nx
from collections import defaultdict
import pandas as pd
# Own modules
import loaddataEU as data
import PLR_opt as plrmodel
class expando(object):
    '''
    Generic empty namespace: attributes are attached to instances at runtime.

    NOTE(review): the original docstring said "a class for capacity market
    clearing", but this is only an attribute container used to group the
    market model's data, variables and constraints.
    '''
    pass
class PLRMarket:
    """Peak-Load-Reserve (capacity) market clearing built as a Gurobi model.

    Input data, decision variables and constraints are grouped on ``expando``
    containers; ``optimize()`` solves the assembled model.
    """

    def __init__(self):
        # containers for input data, Gurobi variables and constraints
        self.data = expando()
        self.variables = expando()
        self.constraints = expando()
        self._load_data()
        self._build_model()

    def optimize(self):
        """Solve the assembled Gurobi model."""
        self.model.optimize()

    def _load_data(self):
        """Load generator/network/consumption data and derive the zonal
        quantities (consumption, peak load, PLR demand) used by the model."""
        self.data.generators = pd.read_csv('generators.csv', sep=',', encoding='latin-1').set_index('ID')
        self.data.consumption = data.load()
        self.data.network = data.load_network()
        self.data.nodes = self.data.network.nodes()
        self.data.df_lines = data.load_lines()
        self.data.approach = 'Approach1' # Energinet = Approach1
        self.data.model = 'Swedish' # 'Swedish'
        # PLR demand as a fraction of each zone's peak load
        self.data.plrdemand = 0.25
        self.data.times = np.arange(len(self.data.consumption.index))
        self.data.country = self.data.generators['country'].unique().tolist()
        # zones are identified with countries (one price area per country)
        self.data.zones = self.data.country
        self.data.timeperiod = 'Year' #'Weak'
        self.data.BidType = 'Fixed'
        self.data.reservemargin = 1.15
        self.data.windreserve = 0.06
        self.data.lines = list(self.data.df_lines.index)
        self.data.flow, self.data.df_price_DA, self.data.df_windprod, self.data.df_solarprod = plrmodel.DayAheadMarket() # Runs DA market clearing
        self.data.cost = plrmodel.PLR(self.data.timeperiod, self.data.approach) # Finds the PLR bids
        # Assigning each node to a country (price-area, zone)
        country = nx.get_node_attributes(self.data.network, 'country')
        country = pd.Series(country, name='Zone')
        country = country.reset_index()
        country = country.rename(columns={'index': 'Node'})
        self.data.countries = country
        # Using defaultdict
        # NOTE(review): each pair is [Zone, Node] but is unpacked as
        # (Node, Zone), so the mapping is in fact keyed by zone with the
        # zone's nodes as values -- this matches how it is used below.
        zones_nodes = country[['Zone','Node']].values.tolist()
        self.data.nodes_for_zones = defaultdict(list)
        for Node,Zone in zones_nodes:
            self.data.nodes_for_zones[Node].append(Zone)
        # Connection between zones (tells which zones are connected to each other, like defaultdict)
        self.data.zonecons = {}
        for z in self.data.zones:
            for l in self.data.lines:
                if l[0] == z:
                    self.data.zonecons.setdefault(z,[]).append(l[1])
                elif l[1] == z:
                    self.data.zonecons.setdefault(z,[]).append(l[0])
        # local aliases used by the loops below
        times = self.data.times
        nodes = self.data.nodes
        consumption = self.data.consumption
        zones = self.data.zones
        # Assigning load to each node and time (and zonal consumption)
        self.data.nodalconsumption = {}
        for t in times:
            for n in np.arange(len(nodes)):
                self.data.nodalconsumption[consumption.columns[n], t] = consumption.ix[consumption.index[t], consumption.columns[n]]
        # zonal consumption = sum of nodal consumption over the zone's nodes
        self.data.zonalconsumption = {}
        for t in times:
            for z in zones:
                self.data.zonalconsumption[z,t] = sum(self.data.nodalconsumption[n,t] for n in self.data.nodes_for_zones[z])
        self.data.df_zonalconsumption = pd.DataFrame(index = times, data = {z: [self.data.zonalconsumption[z,t] for t in times] for z in zones})
        # zonal peak load over the whole horizon
        self.data.peakload = {}
        for z in zones:
            self.data.peakload[z] = self.data.df_zonalconsumption[z].max()
        # Assigning generators to specific zone
        country_generator = self.data.generators[['country','name']].values.tolist()
        self.data.gens_for_country = defaultdict(list)
        for country, generator in country_generator:
            self.data.gens_for_country[country].append(generator)
        # Demand for PLR using the Swedish data
        self.data.zonaldemand = {}
        for z in zones:
            if self.data.model == 'Swedish':
                self.data.zonaldemand[z] = (self.data.peakload[z] * self.data.plrdemand)

    def _build_model(self):
        """Create the Gurobi model and populate variables, objective and
        constraints (in that order)."""
        self.model = gb.Model()
        self._build_variables()
        self._build_objective()
        self._build_constraints()

    def _build_variables(self):
        """Declare decision variables: procured capacity, PLR demand, binary
        dispatch status, inter-zonal line flows and zonal exports."""
        m = self.model
        generators = self.data.generators
        zones = self.data.zones
        lines = self.data.lines
        lineinfo = self.data.df_lines
        # Capacity variable (mothballed generators also participates)
        self.variables.gcap = {}
        for g in generators.index:
            self.variables.gcap[g] = m.addVar(lb = 0, ub = generators['capacity'][g])
        # PLR demand (bounded below by the zone's required reserve)
        self.variables.demand = {}
        for z in zones:
            self.variables.demand[z] = m.addVar(lb = self.data.zonaldemand[z], ub = gb.GRB.INFINITY)
        # Dispatched status of generators
        self.variables.dispatch = {}
        for g in generators.index:
            self.variables.dispatch[g] = m.addVar(vtype = gb.GRB.BINARY)
        # The linelimits between zones are inserted
        # (only half of each line's capacity is made available here)
        self.variables.linelimit = {}
        for l in lines:
            self.variables.linelimit[l] = m.addVar(lb=-lineinfo['capacity'][l]*0.5, ub=lineinfo['capacity'][l]*0.5)
        # Export variable from each zone (free; tied to flows in constraints)
        self.variables.export = {}
        for z in zones:
            self.variables.export[z] = m.addVar(lb = -gb.GRB.INFINITY, ub = gb.GRB.INFINITY)
        m.update()

    def _build_objective(self):
        """Objective: minimise the total cost of the accepted PLR bids."""
        dispatch = self.variables.dispatch
        df_cost = self.data.cost
        self.model.setObjective(
            gb.quicksum(df_cost['PLRbid'][g] * dispatch[g] for g in df_cost.index)
            ,gb.GRB.MINIMIZE)

    def _build_constraints(self):
        """Add power-balance, dispatch-coupling, export-definition and
        hydro-exclusion constraints to the model."""
        m = self.model
        zones = self.data.zones
        gens_for_zones = self.data.gens_for_country
        gcap = self.variables.gcap
        demand = self.variables.demand
        generators = self.data.generators
        dispatch = self.variables.dispatch
        export = self.variables.export
        lines = self.data.lines
        linelimit = self.variables.linelimit
        # Power Balance constraint in each zone
        self.constraints.powerbalance = {}
        for z in zones:
            self.constraints.powerbalance[z] = m.addConstr(
                gb.quicksum(gcap[g] for g in gens_for_zones[z])
                ,gb.GRB.EQUAL,
                demand[z] + export[z])
        # Dispatched status for each generator
        # (all-or-nothing: procured capacity equals capacity * dispatch)
        self.constraints.dispatch = {}
        for g in generators.index:
            self.constraints.dispatch[g] = m.addConstr(
                generators.capacity[g] * dispatch[g],
                gb.GRB.EQUAL, gcap[g])
        # Export constraint from each zone
        # (net export = outgoing line flows minus incoming line flows)
        self.constraints.exporting = {}
        for z in zones:
            self.constraints.exporting[z] = m.addConstr(
                export[z], gb.GRB.EQUAL,
                gb.quicksum(linelimit[l] for l in lines if l[0] == z) - gb.quicksum(linelimit[l] for l in lines if l[1] == z))
        # Hydro constraint (cannot be a PLR)
        self.constraints.hydro = {}
        for g in generators.index:
            if generators['primaryfuel'][g] == 'Hydro':
                self.constraints.hydro[g] = m.addConstr(
                    generators.capacity[g] * dispatch[g],
                    gb.GRB.EQUAL, 0)
| gpl-3.0 |
ClinicalGraphics/scikit-image | doc/examples/edges/plot_circular_elliptical_hough_transform.py | 6 | 4826 | """
========================================
Circular and Elliptical Hough Transforms
========================================
The Hough transform in its simplest form is a `method to detect
straight lines <http://en.wikipedia.org/wiki/Hough_transform>`__
but it can also be used to detect circles or ellipses.
The algorithm assumes that the edge is detected and it is robust against
noise or missing points.
Circle detection
================
In the following example, the Hough transform is used to detect
coin positions and match their edges. We provide a range of
plausible radii. For each radius, two circles are extracted and
we finally keep the five most prominent candidates.
The result shows that coin positions are well-detected.
Algorithm overview
------------------
Given a black circle on a white background, we first guess its
radius (or a range of radii) to construct a new circle.
This circle is applied on each black pixel of the original picture
and the coordinates of this circle are voting in an accumulator.
From this geometrical construction, the original circle center
position receives the highest score.
Note that the accumulator size is built to be larger than the
original picture in order to detect centers outside the frame.
Its size is extended by two times the larger radius.
"""
import numpy as np
import matplotlib.pyplot as plt

from skimage import data, color
from skimage.transform import hough_circle
from skimage.feature import peak_local_max, canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte

# Load picture and detect edges
image = img_as_ubyte(data.coins()[0:95, 70:370])
edges = canny(image, sigma=3, low_threshold=10, high_threshold=50)

fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(5, 2))

# Detect two radii: one accumulator image per candidate radius
hough_radii = np.arange(15, 30, 2)
hough_res = hough_circle(edges, hough_radii)

centers = []
accums = []
radii = []

for radius, h in zip(hough_radii, hough_res):
    # For each radius, extract two circles (the two strongest peaks)
    num_peaks = 2
    peaks = peak_local_max(h, num_peaks=num_peaks)
    centers.extend(peaks)
    # accumulator values at the peaks rank circle candidates globally
    accums.extend(h[peaks[:, 0], peaks[:, 1]])
    radii.extend([radius] * num_peaks)

# Draw the most prominent 5 circles
image = color.gray2rgb(image)
for idx in np.argsort(accums)[::-1][:5]:
    center_x, center_y = centers[idx]
    radius = radii[idx]
    cx, cy = circle_perimeter(center_y, center_x, radius)
    # paint the circle outline in red on the RGB image
    image[cy, cx] = (220, 20, 20)

ax.imshow(image, cmap=plt.cm.gray)
"""
.. image:: PLOT2RST.current_figure
Ellipse detection
=================
In this second example, the aim is to detect the edge of a coffee cup.
Basically, this is a projection of a circle, i.e. an ellipse.
The problem to solve is much more difficult because five parameters have to be
determined, instead of three for circles.
Algorithm overview
------------------
The algorithm takes two different points belonging to the ellipse. It assumes
that it is the main axis. A loop on all the other points determines how much
an ellipse passes to them. A good match corresponds to high accumulator values.
A full description of the algorithm can be found in reference [1]_.
References
----------
.. [1] Xie, Yonghong, and Qiang Ji. "A new efficient ellipse detection
method." Pattern Recognition, 2002. Proceedings. 16th International
Conference on. Vol. 2. IEEE, 2002
"""
import matplotlib.pyplot as plt

from skimage import data, color
from skimage.feature import canny
from skimage.transform import hough_ellipse
from skimage.draw import ellipse_perimeter

# Load picture, convert to grayscale and detect edges
image_rgb = data.coffee()[0:220, 160:420]
image_gray = color.rgb2gray(image_rgb)
edges = canny(image_gray, sigma=2.0,
              low_threshold=0.55, high_threshold=0.8)

# Perform a Hough Transform
# The accuracy corresponds to the bin size of a major axis.
# The value is chosen in order to get a single high accumulator.
# The threshold eliminates low accumulators
result = hough_ellipse(edges, accuracy=20, threshold=250,
                       min_size=100, max_size=120)
# sort ascending by accumulator score; the best candidate ends up last
result.sort(order='accumulator')

# Estimated parameters for the ellipse
# (record layout: accumulator, yc, xc, a, b, orientation)
best = list(result[-1])
yc, xc, a, b = [int(round(x)) for x in best[1:5]]
orientation = best[5]

# Draw the ellipse on the original image
cy, cx = ellipse_perimeter(yc, xc, a, b, orientation)
image_rgb[cy, cx] = (0, 0, 255)
# Draw the edge (white) and the resulting ellipse (red)
edges = color.gray2rgb(edges)
edges[cy, cx] = (250, 0, 0)

fig2, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(8, 4), sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})

ax1.set_title('Original picture')
ax1.imshow(image_rgb)

ax2.set_title('Edge (white) and result (red)')
ax2.imshow(edges)

plt.show()
"""
.. image:: PLOT2RST.current_figure
"""
| bsd-3-clause |
Michal-Fularz/decision_tree | decision_trees/datasets/digits_raw.py | 1 | 4649 | from typing import Tuple
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn import svm, metrics
from decision_trees.datasets.dataset_base import DatasetBase
def sample_from_scikit():
    """Reproduce the scikit-learn digits tutorial: train an SVC on half of
    the digits, report metrics on the other half, and show example images."""
    # The digits dataset
    digits = datasets.load_digits()

    # The data that we are interested in is made of 8x8 images of digits, let's
    # have a look at the first 4 images, stored in the `images` attribute of the
    # dataset. If we were working from image files, we could load them using
    # matplotlib.pyplot.imread. Note that each image must have the same size. For these
    # images, we know which digit they represent: it is given in the 'target' of
    # the dataset.
    images_and_labels = list(zip(digits.images, digits.target))
    for index, (image, label) in enumerate(images_and_labels[:4]):
        plt.subplot(2, 4, index + 1)
        plt.axis('off')
        plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
        plt.title('Training: %i' % label)

    # To apply a classifier on this data, we need to flatten the image, to
    # turn the data in a (samples, feature) matrix:
    n_samples = len(digits.images)
    data = digits.data.reshape((n_samples, -1))

    # We learn the digits on the first half of the digits
    classifier = svm.SVC(gamma=0.001)
    classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])

    # Now predict the value of the digit on the second half:
    expected = digits.target[n_samples // 2:]
    predicted = classifier.predict(data[n_samples // 2:])

    print('Classification report for classifier %s:\n%s\n'
          % (classifier, metrics.classification_report(expected, predicted)))
    print('Confusion matrix:\n%s' % metrics.confusion_matrix(expected, predicted))

    # show the first four test images alongside their predictions
    images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
    for index, (image, prediction) in enumerate(images_and_predictions[:4]):
        plt.subplot(2, 4, index + 5)
        plt.axis('off')
        plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
        plt.title('Prediction: %i' % prediction)

    plt.show()
class DigitsRaw(DatasetBase):
    """Scikit-learn digits dataset (8x8 images) split into train/test parts."""

    def __init__(self, number_of_train_samples: int, number_of_test_samples: int):
        self._number_of_train_samples = number_of_train_samples
        self._number_of_test_samples = number_of_test_samples

    def load_data(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Load, flatten, normalise and split the digits data.

        Returns (train_data, train_target, test_data, test_target).
        """
        digits = datasets.load_digits()
        # flatten each 8x8 image into a 64-element feature vector
        features = self._normalise(digits.data.reshape((len(digits.data), -1)))
        split_start = self._number_of_train_samples
        split_end = split_start + self._number_of_test_samples
        return (
            features[:split_start],
            digits.target[:split_start],
            features[split_start:split_end],
            digits.target[split_start:split_end],
        )

    @staticmethod
    def _normalise(data: np.ndarray):
        # each digits feature is in range 0-16, so one division rescales to [0, 1]
        return data / 16
def test_digits_raw():
    """Exercise DigitsRaw as a classifier for tree depths 1..8.

    Raises
    ------
    ValueError
        If the configured train-sample count is inconsistent with the
        dataset size.
    """
    #####################################
    # SET THE FOLLOWING PARAMETERS
    # DIGITS DATABASE
    # total number of samples: 1797 (each is 8x8)
    total_number_of_samples = 1797
    number_of_train_samples = 1700
    number_of_test_samples = total_number_of_samples - number_of_train_samples
    # END OF PARAMETERS SETTING

    # sanity check -- the old check compared train+test against the total,
    # which could never fail because test was derived from the total, and it
    # only printed a message before carrying on; fail loudly instead
    if not 0 < number_of_train_samples <= total_number_of_samples:
        raise ValueError('too many samples set!')
    #####################################

    d = DigitsRaw(number_of_train_samples, number_of_test_samples)
    for i in range(1, 9):
        d.test_as_classifier(i, './../../data/vhdl/')

    assert True
def main():
    """Placeholder entry point; the demonstration code below is kept for
    reference but disabled."""
    pass
    # d = DigitsRaw(1500, 297)
    #
    # train_data, train_target, test_data, test_target = d.load_data()
    # print(f'train_data.shape: {train_data.shape}')
    # print(f'np.unique(test_target): {np.unique(test_target)}')
    #
    # d.test_as_classifier(1, './../../data/vhdl/')

if __name__ == '__main__':
    # sample_from_scikit()
    # test_digits_raw()
    main()
| mit |
hippke/TTV-TDV-exomoons | create_figures/create_figure_4a.py | 1 | 7168 | """n-body simulator to derive TDV+TTV diagrams of planet-moon configurations.
Credit for part of the source is given to
https://github.com/akuchling/50-examples/blob/master/gravity.rst
Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
"""
import numpy
import math
import matplotlib.pylab as plt
from modified_turtle import Turtle
from phys_const import *
class Body(Turtle):
    """Subclass of Turtle representing a gravitationally-acting body.

    Attributes
    ----------
    name : str
        Label of the body; the simulation records the body named 'planet'.
    vx, vy : float
        Velocity components in m/s.
    px, py : float
        Position components in m.
    """

    name = 'Body'
    vx = vy = 0.0 # velocities in m/s
    px = py = 0.0 # positions in m

    def attraction(self, other):
        """(Body): (fx, fy) Returns the force exerted upon this body by the other body"""
        # Distance of the other body
        sx, sy = self.px, self.py
        ox, oy = other.px, other.py
        dx = (ox-sx)
        dy = (oy-sy)
        d = math.sqrt(dx**2 + dy**2)
        # Guard against coincident bodies: the inverse-square force is
        # undefined at zero separation, so fail with a clear message
        # instead of an opaque ZeroDivisionError.
        if d == 0:
            raise ValueError('Collision between objects %r and %r'
                             % (self.name, other.name))
        # Newtonian gravity: magnitude and direction towards the other body
        f = G * self.mass * other.mass / (d**2)
        theta = math.atan2(dy, dx)
        # decompose the force along the x/y axes
        fx = math.cos(theta) * f
        fy = math.sin(theta) * f
        return fx, fy
def loop(bodies, orbit_duration):
    """([Body]) Loops and updates the positions of all the provided bodies.

    Integrates the n-body system with explicit Euler steps and appends the
    planet's velocity/position samples to the module-level ``tdv_list`` and
    ``ttv_list``.
    """
    # Calculate the duration of our simulation: One full orbit of the outer moon
    seconds_per_day = 24*60*60
    timesteps_per_day = 10000
    # NOTE(review): under Python 2 this is integer division (8 s/step);
    # under Python 3 it is 8.64 s/step -- confirm the intended interpreter.
    timestep = seconds_per_day / timesteps_per_day
    total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day)
    #print total_steps, orbit_duration / 24 / 60 / 60

    for body in bodies:
        body.penup()
        body.hideturtle()

    for step in range(total_steps):
        for body in bodies:
            if body.name == 'planet':
                # Add current position and velocity to our list
                tdv_list.append(body.vx)
                ttv_list.append(body.px)
        force = {}
        for body in bodies:
            # Add up all of the forces exerted on 'body'
            total_fx = total_fy = 0.0
            for other in bodies:
                # Don't calculate the body's attraction to itself
                if body is other:
                    continue
                fx, fy = body.attraction(other)
                total_fx += fx
                total_fy += fy
            # Record the total force exerted
            force[body] = (total_fx, total_fy)
        # Update velocities based upon on the force
        for body in bodies:
            fx, fy = force[body]
            body.vx += fx / body.mass * timestep
            body.vy += fy / body.mass * timestep
            # Update positions
            body.px += body.vx * timestep
            body.py += body.vy * timestep
            #body.goto(body.px*SCALE, body.py*SCALE)
            #body.dot(3)
def run_sim(R_star, transit_duration, bodies):
    """Run 3-body sim and convert results to TTV + TDV values in [minutes].

    Relies on the module-level globals ``orbit_duration``, ``tdv_list``,
    ``ttv_list``, ``gravity_firstmoon``, ``gravity_secondmoon`` and ``planet``
    defined in the main routine below.
    """
    # Run 3-body sim for one full orbit of the outermost moon
    loop(bodies, orbit_duration)

    # Move resulting data from lists to numpy arrays
    # NOTE(review): the empty-array assignments are immediately overwritten
    # by the list references on the next lines.
    ttv_array = numpy.array([])
    ttv_array = ttv_list
    tdv_array = numpy.array([])
    tdv_array = tdv_list

    # Zeropoint correction: center positions around zero, convert m -> km
    middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array)
    ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
    ttv_array = numpy.divide(ttv_array, 1000)  # km/s

    # Compensate for barycenter offset of planet at start of simulation:
    planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
    stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
    ttv_array = numpy.divide(ttv_array, stretch_factor)

    # Convert to time units, TTV
    ttv_array = numpy.divide(ttv_array, R_star)
    ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24)  # minutes

    # Convert to time units, TDV
    oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60  # m/sec
    newspeed = oldspeed - numpy.amax(tdv_array)
    difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
    conversion_factor = difference / numpy.amax(tdv_array)
    tdv_array = numpy.multiply(tdv_array, conversion_factor)

    return ttv_array, tdv_array
"""Main routine"""
# Set variables and constants. Do not change these!
G = 6.67428e-11 # Gravitational constant G
SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim
tdv_list = []
ttv_list = []
R_star = 6.96 * 10**5 # [km], solar radius
transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0
print(transit_duration)
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
# Define parameters
firstmoon = Body()
firstmoon.mass = M_io
firstmoon.px = 0.4218 * 10**9
secondmoon = Body()
secondmoon.mass = M_eur
secondmoon.px = 0.66956576 * 10**9
# Calculate start velocities
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *secondmoon.px ** 3) / (G * (secondmoon.mass + planet.mass)))
orbit_duration = orbit_duration * 1.08
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon])
# Output information
print('TTV amplitude =', numpy.amax(ttv_array), \
'[min] = ', numpy.amax(ttv_array) * 60, '[sec]')
print('TDV amplitude =', numpy.amax(tdv_array), \
'[min] = ', numpy.amax(tdv_array) * 60, '[sec]')
ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.ylim([numpy.amin(tdv_array) * 1.2, numpy.amax(tdv_array) * 1.2])
plt.xlim([numpy.amin(ttv_array) * 1.2, numpy.amax(ttv_array) * 1.2])
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)
# Fix axes for comparisons
plt.xlim(-0.07, +0.07)
plt.ylim(-0.2, +0.2)
plt.annotate(r"2:1", xy=(-0.065, +0.16), size=16)
#plt.show()
plt.savefig("fig_4a.eps", bbox_inches = 'tight')
| mit |
sergiy-evision/math-algorithms | sf-crime/main.py | 1 | 3238 | from __future__ import division
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import KFold
from sklearn.cross_validation import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pandas
def get_k_score(x, cv, y):
    """Cross-validate a k-NN classifier over a range of k values and return
    the rounded mean CV score per k as an array.

    NOTE(review): range(14, 15) currently tries only k=14 -- widen the range
    to actually scan neighbour counts.
    """
    res = []
    for k in range(14, 15):
        neigh = KNeighborsClassifier(n_neighbors=k)
        arr = cross_val_score(neigh, x, y, cv=cv)
        # mean score over folds, rounded to 2 decimals
        a = round(arr.sum() / len(arr), 2)
        print arr
        res.append(a)
    return np.array(res)
def cross_score_validate(classifier, x, y):
    """Print the mean K-fold cross-validation score of ``classifier``."""
    # kf = KFold(len(y), n_folds=5, shuffle=True, random_state=42)
    kf = KFold(len(y))
    arr = cross_val_score(classifier, x, y, cv=kf)
    # mean score over folds, rounded to 2 decimals
    a = round(arr.sum() / len(arr), 2)
    print a
def columns_to_dictionary(src, column_names):
    """Replace each named categorical column of ``src`` with integer codes
    (in place) and return ``src`` for convenience."""
    for column_name in column_names:
        # map every unique value to its first-appearance index
        dates = {v: k for k, v in dict(enumerate(pandas.unique(src[column_name].ravel()))).items()}
        print 'total {} {}'.format(column_name, len(dates))
        src[column_name] = src[column_name].map(dates)
    return src
def pre_process_data(src, transform=True):
    """Encode categorical columns and derive calendar features from ``src``.

    Mutates ``src`` in place; returns the feature matrix and the (possibly
    empty) label vector.
    """
    labels = []
    if 'Category' in src.columns:
        # training data carries the target column; encode it as integers
        columns_to_dictionary(src, ['Category'])
        labels = src['Category']
    # src.Dates = src.Dates.str.split(' ').str.get(1)
    columns_to_dictionary(src, ['Address', 'PdDistrict', 'DayOfWeek'])
    # print src.head()
    # expand the timestamp into separate calendar features
    timestamps = src['Dates'].dt
    src['Day'] = timestamps.day
    src['Month'] = timestamps.month
    src['Year'] = timestamps.year
    src['Hour'] = timestamps.hour
    src['WeekOfYear'] = timestamps.weekofyear
    feature_columns = ['Address', 'PdDistrict', 'DayOfWeek', 'Day', 'Month',
                       'Year', 'Hour', 'WeekOfYear']
    features = src[feature_columns]
    if transform:
        features = StandardScaler().fit_transform(features)
    return features, labels
def k_fold(x, y):
    """Run the k-NN scan with 5-fold shuffled CV and print the best score
    and its index."""
    kf = KFold(len(y), n_folds=5, shuffle=True, random_state=42)
    ans_scaled = get_k_score(x, kf, y)
    print ans_scaled.max(), ans_scaled.argmax()
def k_tree(x, y):
clf = DecisionTreeClassifier(random_state=241)
clf.fit(x, y)
print clf.feature_importances_
def support_vector_classification(x, y):
    """Fit and return a linear SVM with a very large regularization constant."""
    model = SVC(C=100000, kernel='linear', random_state=241)
    model.fit(x, y)
    return model
def k_neighbors(x, y):
    """Fit and return a 40-neighbour k-NN classifier."""
    model = KNeighborsClassifier(n_neighbors=40)
    model.fit(x, y)
    return model
def random_forest(x, y):
    """Fit and return a 10-tree random forest classifier."""
    model = RandomForestClassifier(n_estimators=10)
    model.fit(x, y)
    return model
if __name__ == '__main__':
    # Load train/test sets, parsing the timestamp column up front.
    crimes_train = pandas.read_csv('train.csv', parse_dates=['Dates'])
    crimes_test = pandas.read_csv('test.csv', parse_dates=['Dates'])
    # Feature extraction without scaling (tree ensembles do not need it).
    x_train, y_train = pre_process_data(crimes_train, transform=False)
    x_test, y_test = pre_process_data(crimes_test, transform=False)
    # Train a random forest and report its cross-validation score.
    algo = random_forest(x_train, y_train)
    cross_score_validate(algo, x_train, y_train)
    # res = algo.predict(x_test)
    # x_test = pre_process_data(crimes_test)
    # k_tree(x_train, y_train)
    # support_vector_classification(x_train, y_train)
    # k_fold(x_train, y_train)
| mit |
dismalpy/dismalpy | dismalpy/model.py | 1 | 6371 | """
Model
Author: Chad Fulton
License: Simplified-BSD
"""
import numpy as np
import pandas as pd
class Model(object):
    """
    Base container associating endogenous data with names and dimensions.

    Parameters
    ----------
    endog : str, iterable of str, array_like, pd.Series, or pd.DataFrame
        Endogenous data, or just the variable name(s) if data will be
        bound later via `bind`.
    nobs : int, optional
        Number of observations, if known before data is bound.

    Notes
    -----
    `endog` is one of:

    - a name (str):
        k_endog = 1, nobs = None, names = [name]; no data is bound
    - iterable of names (str):
        k_endog = len(iterable), nobs = None; no data is bound
    - iterable or ndarray of data, shape (nobs, k_endog) or (nobs,):
        dimensions taken from the shape; names auto-generated from id()
    - pandas Series:
        k_endog = 1, nobs = len(series); name taken from the series
        (falls back to id()); a DatetimeIndex is required when binding
    - pandas DataFrame:
        k_endog = n columns, nobs = n rows, names = column labels;
        a DatetimeIndex is required when binding
    """

    # Wide-format (k_endog x nobs) data array; set by `bind`.
    endog = None
    # Number of observations; may be known before any data is bound.
    nobs = None
    # DatetimeIndex of the data; set by `bind` for pandas input.
    _dates = None

    def __init__(self, endog, nobs=None, *args, **kwargs):
        # Single endogenous variable; name provided
        if type(endog) == str:
            self._endog_names = [endog]
            self.k_endog = 1
            self.nobs = None
            endog = None
        # Many endogenous variables; names and data (pandas DataFrame) provided
        elif isinstance(endog, pd.DataFrame):
            self._endog_names = endog.columns.tolist()
            self.nobs, self.k_endog = endog.shape
        # Single endogenous variable; name and data (pandas Series) provided
        elif isinstance(endog, pd.Series):
            self._endog_names = [endog.name] if endog.name is not None else [id(endog)]
            self.k_endog = 1
            self.nobs = endog.shape[0]
        # Other provided; assumed to be an iterable
        else:
            # Coerce to an array
            endog = np.asarray(endog)
            # Many endogenous variables; names provided
            if np.issubdtype(endog.dtype, str):
                self._endog_names = list(endog)
                self.k_endog = len(self._endog_names)
                self.nobs = None
                endog = None
            # Many endogenous variables; data provided, names auto-generated
            else:
                endog_id = id(endog)
                if endog.ndim == 1:
                    self.nobs = endog.shape[0]
                    self.k_endog = 1
                else:
                    self.nobs, self.k_endog = endog.shape
                self._endog_names = ['%s_%s' % (endog_id, i)
                                     for i in range(self.k_endog)]

        # We may already know `nobs`, even if we don't want to bind to data yet
        if nobs is not None:
            if self.nobs is not None and self.nobs != nobs:
                raise ValueError('Provided `nobs` is inconsistent with given'
                                 ' endogenous array. Got %d and %d,'
                                 ' respectively' % (self.nobs, nobs))
            self.nobs = nobs

        # If we were actually given data, bind the data to this instance
        if endog is not None:
            self.bind(endog)

    def bind(self, endog, long_format=True):
        """
        Bind endogenous data to this instance

        Parameters
        ----------
        endog : array_like
            Array of endogenous data.
        long_format : boolean
            Whether or not the array is in long format (nobs x k_endog)

        Notes
        -----
        This method sets the `endog`, `nobs`, and possibly the `_dates`
        attributes.

        After the call, `self.endog` is a wide-format (k_endog x nobs),
        Fortran-ordered ndarray.
        """
        # If we were given a Pandas object, check for:
        # - Names match
        # - Date index
        if isinstance(endog, pd.Series):
            if endog.name is not None and not endog.name == self._endog_names[0]:
                raise ValueError('Name of the provided endogenous array does'
                                 ' not match the given endogenous name.'
                                 ' Got %s, required %s'
                                 % (endog.name, self._endog_names[0]))
            if not isinstance(endog.index, pd.DatetimeIndex):
                raise ValueError("Given a pandas object and the index does "
                                 "not contain dates")
            self._dates = endog.index
        elif isinstance(endog, pd.DataFrame):
            # Keep only the required columns, and in the required order
            endog = endog[self._endog_names]
            if not isinstance(endog.index, pd.DatetimeIndex):
                raise ValueError("Given a pandas object and the index does "
                                 "not contain dates")
            self._dates = endog.index

        # Explicitly copy / convert to a new ndarray
        # Note: typically the given endog array is in long format
        # (nobs x k_endog), but _statespace assumes it is in wide format
        # (k_endog x nobs). Thus we create the array in long format as order
        # "C" and then transpose to get order "F".
        if np.ndim(endog) == 1 or not long_format:
            endog = np.array(endog, ndmin=2, order="F", copy=True)
        else:
            endog = np.array(endog, order="C", copy=True).T

        # Check that this fits the k_endog dimension that we previously had
        if not endog.shape[0] == self.k_endog:
            # BUG FIX: message previously read "does has the required".
            raise ValueError('Provided endogenous array does not have the'
                             ' required number of columns. Got %d, required %d'
                             % (endog.shape[0], self.k_endog))

        # If we were provided a strict nobs in construction, make sure it
        # matches
        if self.nobs is not None and not endog.shape[1] == self.nobs:
            raise ValueError('Provided endogenous array is inconsistent with'
                             ' given `nobs`. Got %d and %d, respectively'
                             % (endog.shape[1], self.nobs))

        # Set the new dimension data
        self.nobs = endog.shape[1]
        # Set the new data
        self.endog = endog
| bsd-2-clause |
FluidityStokes/fluidity | examples/backward_facing_step_3d/postprocessor_3d.py | 1 | 10081 | #!/usr/bin/env python3
import glob
import sys
import os
import vtktools
import numpy
import pylab
import re
import extract_data
from math import log
def get_filelist(sample, start):
    """Return the sorted subset of vtu dump files to process.

    Files in the current directory matching ``*vtu`` (excluding checkpoints)
    are sorted by their trailing dump number.  Starting after dump `start`,
    every file whose dump number is a multiple of `sample` is kept; the
    final dump is also appended when it would otherwise leave a long
    unsampled tail.

    Parameters
    ----------
    sample : int
        Keep every `sample`-th dump.
    start : int
        Ignore dumps with numbers <= start.

    Returns
    -------
    list of str
    """

    def key(s):
        # Dump number encoded as "<name>_<number>.vtu".
        return int(s.split('_')[-1].split('.')[0])

    files = [f for f in glob.glob("*vtu") if 'checkpoint' not in f]
    # BUG FIX: the original sorted with `zip(...).sort()` and `apply`, both
    # of which fail on Python 3; sort by dump number directly instead.
    files.sort(key=key)
    vtu_nos = [float(f.split('_')[-1].split('.')[0]) for f in files]

    shortlist = []
    for file in files:
        try:
            os.stat(file)
        except OSError:
            # BUG FIX: was writing to an undefined `f_log` with an undefined
            # `files` variable in the message.
            sys.stderr.write("No such file: %s\n" % file)
            sys.exit(1)
        ##### Start at the (start+1)th file.
        ##### Add every nth file by taking integer multiples of n.
        vtu_no = float(file.split('_')[-1].split('.')[0])
        if vtu_no > start:
            if vtu_no % sample == 0:
                shortlist.append(file)
            ##### Append final file if a large number of files remain.
            elif vtu_no == len(vtu_nos) - 1 and (max(vtu_nos) - sample / 4.0) > vtu_no:
                shortlist.append(file)
    return shortlist
#### taken from http://www.codinghorror.com/blog/archives/001018.html #######
def tryint(s):
    """Return ``int(s)`` when possible, otherwise return *s* unchanged.

    Building block for natural ("human") sort keys: numeric chunks compare
    as integers, everything else as-is.
    """
    try:
        return int(s)
    except (ValueError, TypeError):
        # Narrowed from a bare `except`, which would also have swallowed
        # KeyboardInterrupt/SystemExit.
        return s
def alphanum_key(s):
    """Split *s* into alternating text and integer chunks.

    "z23a" -> ["z", 23, "a"], which yields natural sort order when used
    as a sort key.
    """
    chunks = re.split('([0-9]+)', s)
    return [tryint(chunk) for chunk in chunks]
def sort_nicely(l):
    """Sort *l* in place in natural (human-expected) order."""
    l.sort(key=alphanum_key)
##############################################################################
# There are shorter and more elegant version of the above, but this works
# on CX1, where this test might be run...
###################################################################
# Reattachment length:
def reatt_length(filelist, zarray):
    """Compute the spanwise-averaged reattachment point for each vtu dump.

    For every file, x-velocity is probed on points just above the bottom
    wall (offset 0.01) for x in [0, 25.5) in steps of 0.5.  A reattachment
    point is where u changes sign from negative to positive, linearly
    interpolated between neighbouring probe points; points with x <= 1 are
    discarded as spurious corner values.

    Parameters
    ----------
    filelist : list of str
        vtu files to process (naturally sorted before use).
    zarray : array_like
        Spanwise coordinates averaged over.

    Returns
    -------
    list of [avg_reattachment_point, time] pairs, skipping dumps with
    negative time.
    """
    print("Calculating reattachment point locations using change of x-velocity sign\n")
    # NOTE(review): `nums` is assigned but never used.
    nums=[]; results=[]; files = []
    ##### check for no files
    if (len(filelist) == 0):
        print("No files!")
        sys.exit(1)
    for file in filelist:
        try:
            os.stat(file)
        except:
            print("No such file: %s" % file)
            sys.exit(1)
        files.append(file)
    sort_nicely(files)
    for file in files:
        ##### Read in data from vtu
        datafile = vtktools.vtu(file)
        ##### Get time for plot:
        t = min(datafile.GetScalarField("Time"))
        print(file, ', elapsed time = ', t)
        if(t<0.):
            continue
        else:
            print("extracting data...")
            ##### points near bottom surface, 0 < x < 25
            x2array=[]; pts=[]; no_pts = 52; offset = 0.01
            x = 0.0
            for i in range(1, no_pts):
                x2array.append(x)
                for j in range(len(zarray)):
                    pts.append((x, zarray[j], offset))
                x += 0.5
            x2array = numpy.array(x2array)
            pts = numpy.array(pts)
            ##### Get x-velocity on bottom boundary
            uvw = datafile.ProbeData(pts, "AverageVelocity")
            u = uvw[:,0]
            u = u.reshape([x2array.size,zarray.size])
            pts=pts.reshape([x2array.size,zarray.size,3])
            ##### Find all potential reattachment points:
            points = []
            for j in range(len(u[0,:])):
                for i in range(len(u[:,0])-1):
                    ##### Hack to ignore division by zero entries in u.
                    ##### All u should be nonzero away from boundary!
                    if((u[i,j] / u[i+1,j]) < 0. and u[i+1,j] > 0. and not numpy.isinf(u[i,j] / u[i+1,j])):
                        ##### interpolate between nodes
                        p = x2array[i] + (x2array[i+1]-x2array[i]) * (0.0-u[i,j]) / (u[i+1,j]-u[i,j])
                        ##### Ignore spurious corner points
                        if(p>1.0):
                            points.append(p)
                        ##### We have our first point on this plane so...
                        break
            ##### This is the spanwise-averaged reattachment point:
            if (len(points)>0):
                avpt = sum(points) / len(points)
            else:
                avpt = 0.0
            print('spanwise averaged reattachment point: ', avpt)
            ##### Get time for plot:
            t = min(datafile.GetScalarField("Time"))
            results.append([avpt,t])
    return results
#########################################################################
# Velocity profiles:
def velo(filelist, xarray, zarray, yarray):
    """Extract spanwise-averaged mean U-velocity profiles from the last dump.

    Probes "AverageVelocity" at the grid of (x, z, y) points built from the
    input coordinate arrays, normalises by umax = 1.55, and averages over
    the spanwise (z) direction.

    Returns
    -------
    numpy.ndarray of shape (len(xarray), len(yarray))
    """
    print("\nRunning mean velocity profile script on files at times...\n")
    ##### check for no files
    # BUG FIX: the guard was `len(filelist) < 0`, which can never be true;
    # the intended empty-list check (as in reatt_length) is `== 0`.
    if (len(filelist) == 0):
        print("No files!")
        sys.exit(1)
    ##### create array of points
    pts=[]
    for i in range(len(xarray)):
        for j in range(len(zarray)):
            for k in range(len(yarray)):
                pts.append([xarray[i], zarray[j], yarray[k]])
    pts=numpy.array(pts)
    ##### Create output array of correct shape
    profiles=numpy.zeros([xarray.size, yarray.size], float)
    # Only the final dump is used (earlier dumps were once looped over).
    file = filelist[-1]
    datafile = vtktools.vtu(file)
    # Get time
    t = min(datafile.GetScalarField("Time"))
    print(file, ', elapsed time = ', t)
    ##### Get x-velocity
    uvw = datafile.ProbeData(pts, "AverageVelocity")
    umax = 1.55
    u = uvw[:,0]/umax
    u = u.reshape([xarray.size,zarray.size,yarray.size])
    ##### Spanwise averaging
    usum = numpy.zeros([xarray.size,yarray.size],float)
    usum = numpy.array(usum)
    for i in range(len(zarray)):
        uav = u[:,i,:]
        uav = numpy.array(uav)
        usum += uav
    usum = usum / len(zarray)
    profiles[:,:] = usum
    print("\n...Finished extracting data.\n")
    return profiles
#########################################################################
def plot_length(rl):
    """Plot the reattachment-length time series against the DNS reference
    value and save it to ``reatt_len_3d.pdf``."""
    n = len(rl[:, 0])
    mean_length = sum(rl[:, 0]) / n
    mean_series = numpy.zeros([n])
    mean_series[:] = mean_length
    reference = numpy.zeros([n])
    reference[:] = 6.28  # Le-Moin-Kim DNS reattachment length
    pylab.figure()
    pylab.title("Time series of reattachment length")
    pylab.xlabel('Time (s)')
    pylab.ylabel('Reattachment Length (L/h)')
    pylab.plot(rl[:, 1], rl[:, 0], marker='o', markerfacecolor='white',
               markersize=6, markeredgecolor='black', linestyle="solid")
    pylab.plot(rl[:, 1], reference, linestyle="dashed")
    pylab.legend(("Fluidity", "Le-Moin-Kim DNS"), loc="best")
    pylab.axis([min(rl[:, 1]), max(rl[:, 1]),
                min(rl[:, 0]) - 0.5, max(rl[:, 0]) + 0.5])
    pylab.savefig("reatt_len_3d.pdf")
    return
#########################################################################
def plot_velo(vprofiles,xarray,yarray):
    """Plot mean U-velocity profiles at the four x/h stations in *xarray*,
    comparing Fluidity against Le&Moin DNS and Jovic&Driver experiment,
    and save the figure to ``velo_profiles_3d.pdf``."""
    # get profiles from ERCOFTAC data
    y4,U4,y6,U6,y10,U10,y19,U19 = extract_data.ercoftacvelocityprofiles()
    # get profiles from Le&Moin data
    Le_y4,Le_u4,jd_y4,jd_u4,Le_y6,Le_u6,jd_y6,jd_u6,Le_y10,Le_u10,jd_y10,jd_u10,Le_y19,Le_u19,jd_y19,jd_u19 = extract_data.velocityprofileslemoin()
    ##### Plot velocity profiles at different points behind step using pylab(matplotlib)
    plot1 = pylab.figure(figsize = (16.5, 8.5))
    pylab.suptitle("Evolution of mean U-velocity", fontsize=20)
    size = 15
    ax = pylab.subplot(141)
    ax.plot(vprofiles[0,:],yarray, linestyle="solid")
    ax.plot(U4,y4, linestyle="dashed")
    ax.plot(jd_u4,jd_y4, linestyle="none",marker='o',color='black')
    ax.set_title('(a) x/h='+str(xarray[0]), fontsize=16)
    pylab.legend(('Fluidity',"Le&Moin DNS","Jovic&Driver expt"),loc="upper left")
    #ax.grid("True")
    for tick in ax.xaxis.get_major_ticks():
        tick.label1.set_fontsize(size)
    for tick in ax.yaxis.get_major_ticks():
        tick.label1.set_fontsize(size)
    bx = pylab.subplot(142, sharex=ax, sharey=ax)
    bx.plot(vprofiles[1,:],yarray, linestyle="solid")
    bx.plot(U6,y6, linestyle="dashed")
    bx.plot(jd_u6,jd_y6, linestyle="none",marker='o',color='black')
    # NOTE(review): every subplot title says '(a)'; (b)-(d) were probably
    # intended for the later stations.
    bx.set_title('(a) x/h='+str(xarray[1]), fontsize=16)
    #bx.grid("True")
    for tick in bx.xaxis.get_major_ticks():
        tick.label1.set_fontsize(size)
    pylab.setp(bx.get_yticklabels(), visible=False)
    cx = pylab.subplot(143, sharex=ax, sharey=ax)
    cx.plot(vprofiles[2,:],yarray, linestyle="solid")
    cx.plot(U10,y10, linestyle="dashed")
    cx.plot(jd_u10,jd_y10, linestyle="none",marker='o',color='black')
    cx.set_title('(a) x/h='+str(xarray[2]), fontsize=16)
    #bx.grid("True")
    for tick in cx.xaxis.get_major_ticks():
        tick.label1.set_fontsize(size)
    pylab.setp(cx.get_yticklabels(), visible=False)
    dx = pylab.subplot(144, sharex=ax, sharey=ax)
    dx.plot(vprofiles[3,:],yarray, linestyle="solid")
    dx.plot(U19,y19, linestyle="dashed")
    dx.plot(jd_u19,jd_y19, linestyle="none",marker='o',color='black')
    dx.set_title('(a) x/h='+str(xarray[3]), fontsize=16)
    #bx.grid("True")
    for tick in dx.xaxis.get_major_ticks():
        tick.label1.set_fontsize(size)
    pylab.setp(dx.get_yticklabels(), visible=False)
    pylab.axis([-0.25, 1., 0., 3.])
    bx.set_xlabel('Normalised mean U-velocity (U/Umax)', fontsize=24)
    ax.set_ylabel('y/h', fontsize=24)
    pylab.savefig("velo_profiles_3d.pdf")
    return
#########################################################################
def main():
    """Drive the post-processing: reattachment length, then velocity profiles."""
    ##### Only process every nth file by taking integer multiples of n:
    filelist = get_filelist(sample=1, start=0)
    ##### Points to generate profiles:
    xarray = numpy.array([4.0, 6.0, 10.0, 19.0])
    zarray = numpy.linspace(0.0,4.0,41)
    yarray = numpy.array([0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1,0.11,0.12,0.13,0.14,0.15,0.16,0.17,0.18,0.19,0.2,0.21,0.22,0.23,0.24,0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0,3.1,3.2,3.3,3.4,3.5,3.6,3.7,3.8,3.9,4.0,4.1,4.2,4.3,4.4,4.5,4.6,4.7,4.8,4.9,5.0])
    ##### Call reattachment_length function
    reattachment_length = numpy.array(reatt_length(filelist, zarray))
    numpy.save("reatt_length", reattachment_length)
    plot_length(reattachment_length)
    ##### Call velo function
    # Velocity profiles are extracted on the mid-plane (z = 2.0) only.
    zarray = numpy.array([2.0])
    vprofiles = velo(filelist, xarray, zarray, yarray)
    numpy.save("velo_profiles", vprofiles)
    print("Generating plot of velocity profiles.")
    plot_velo(vprofiles,xarray,yarray)
    print("\nAll done.\n")
# Script entry point: propagate main()'s return code to the shell.
if __name__ == "__main__":
    sys.exit(main())
| lgpl-2.1 |
Barmaley-exe/scikit-learn | examples/cluster/plot_cluster_comparison.py | 12 | 4718 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that needs this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)

# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
                                      noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None

# Repeating colour cycle so every cluster label gets a colour.
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)

clustering_names = [
    'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
    'SpectralClustering', 'Ward', 'AgglomerativeClustering',
    'DBSCAN', 'Birch'
]

plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
                    hspace=.01)

plot_num = 1

# NOTE: this rebinds the imported `datasets` module name to a list.
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
    X, y = dataset
    # normalize dataset for easier parameter selection
    X = StandardScaler().fit_transform(X)

    # estimate bandwidth for mean shift
    bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)

    # connectivity matrix for structured Ward
    connectivity = kneighbors_graph(X, n_neighbors=10)
    # make connectivity symmetric
    connectivity = 0.5 * (connectivity + connectivity.T)

    # create clustering estimators
    ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
    two_means = cluster.MiniBatchKMeans(n_clusters=2)
    ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
                                           connectivity=connectivity)
    spectral = cluster.SpectralClustering(n_clusters=2,
                                          eigen_solver='arpack',
                                          affinity="nearest_neighbors")
    dbscan = cluster.DBSCAN(eps=.2)
    affinity_propagation = cluster.AffinityPropagation(damping=.9,
                                                       preference=-200)
    average_linkage = cluster.AgglomerativeClustering(linkage="average",
                                                      affinity="cityblock",
                                                      n_clusters=2,
                                                      connectivity=connectivity)
    birch = cluster.Birch(n_clusters=2)
    clustering_algorithms = [
        two_means, affinity_propagation, ms, spectral, ward, average_linkage,
        dbscan, birch
    ]

    for name, algorithm in zip(clustering_names, clustering_algorithms):
        # predict cluster memberships
        t0 = time.time()
        algorithm.fit(X)
        t1 = time.time()
        if hasattr(algorithm, 'labels_'):
            # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin `int` is the documented replacement.
            y_pred = algorithm.labels_.astype(int)
        else:
            y_pred = algorithm.predict(X)

        # plot
        plt.subplot(4, len(clustering_algorithms), plot_num)
        if i_dataset == 0:
            plt.title(name, size=18)
        plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)

        if hasattr(algorithm, 'cluster_centers_'):
            centers = algorithm.cluster_centers_
            center_colors = colors[:len(centers)]
            plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
        plt.xlim(-2, 2)
        plt.ylim(-2, 2)
        plt.xticks(())
        plt.yticks(())
        plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
                 transform=plt.gca().transAxes, size=15,
                 horizontalalignment='right')
        plot_num += 1

plt.show()
| bsd-3-clause |
cowlicks/blaze | blaze/server/tests/test_server.py | 2 | 20838 | from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('flask')
from base64 import b64encode
from contextlib import contextmanager
from copy import copy
import datashape
from datashape.util.testing import assert_dshape_equal
import numpy as np
from odo import odo, convert
from datetime import datetime
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
from toolz import pipe
from blaze.dispatch import dispatch
from blaze.expr import Expr
from blaze.utils import example
from blaze import (discover, symbol, by, CSV, compute, join, into, resource,
Data)
from blaze.server.client import mimetype
from blaze.server.server import Server, to_tree, from_tree
from blaze.server.serialization import all_formats
# Small in-memory fixtures shared by the whole test module.
accounts = DataFrame([['Alice', 100], ['Bob', 200]],
                     columns=['name', 'amount'])
cities = DataFrame([['Alice', 'NYC'], ['Bob', 'LA']],
                   columns=['name', 'city'])
events = DataFrame([[1, datetime(2000, 1, 1, 12, 0, 0)],
                    [2, datetime(2000, 1, 2, 12, 0, 0)]],
                   columns=['value', 'when'])
db = resource('sqlite:///' + example('iris.db'))


class DumbResource(object):
    # Stand-in resource whose conversion requires an extra keyword argument,
    # used to exercise compute kwargs plumbing through the server.
    df = DataFrame({
        'a': np.arange(5),
        'b': np.arange(5, 10),
    })

    class NoResource(Exception):
        pass


@convert.register(DataFrame, DumbResource)
def dumb_to_df(d, return_df=None, **kwargs):
    # Conversion deliberately fails unless `return_df` is forwarded.
    if return_df is None:
        raise DumbResource.NoResource('return_df must be passed')
    to_return = odo(return_df, DataFrame, dshape=discover(d))
    assert_frame_equal(to_return, DumbResource.df)
    return to_return


@dispatch(Expr, DumbResource)
def compute_down(expr, d, **kwargs):
    return dumb_to_df(d, **kwargs)


@discover.register(DumbResource)
def _discover_dumb(d):
    return discover(DumbResource.df)


# Dataset dictionary served by the test Server instances below.
data = {
    'accounts': accounts,
    'cities': cities,
    'events': events,
    'db': db,
    'dumb': DumbResource(),
}
@pytest.fixture(scope='module')
def server():
    # Module-scoped Server over the shared `data` dict.
    s = Server(data, all_formats)
    s.app.testing = True
    return s


@contextmanager
def temp_server(data=None):
    """For when we want to mutate the server"""
    # Copies `data` so mutations don't leak into other tests.
    s = Server(copy(data), formats=all_formats)
    s.app.testing = True
    yield s.app.test_client()


@pytest.yield_fixture
def test(server):
    # Flask test client bound to the module-scoped server.
    with server.app.test_client() as c:
        yield c


@pytest.yield_fixture
def empty_server():
    # Server started with no datasets; tests add data via the /add endpoint.
    s = Server(formats=all_formats)
    s.app.testing = True
    with s.app.test_client() as c:
        yield c


@pytest.yield_fixture
def iris_server():
    # Server exposing the iris example CSV directly (not wrapped in a dict).
    iris = CSV(example('iris.csv'))
    s = Server(iris, all_formats)
    s.app.testing = True
    with s.app.test_client() as c:
        yield c
def test_datasets(test):
    # /datashape must report the discovered shape of the served data.
    response = test.get('/datashape')
    assert_dshape_equal(
        datashape.dshape(response.data.decode('utf-8')),
        datashape.dshape(discover(data))
    )


@pytest.mark.parametrize('serial', all_formats)
def test_bad_responses(test, serial):
    # Malformed payloads and unknown datasets must not return OK.
    assert 'OK' not in test.post(
        '/compute/accounts.{name}'.format(name=serial.name),
        data=serial.dumps(500),
    ).status
    assert 'OK' not in test.post(
        '/compute/non-existent-table.{name}'.format(name=serial.name),
        data=serial.dumps(0),
    ).status
    assert 'OK' not in test.post(
        '/compute/accounts.{name}'.format(name=serial.name),
    ).status


def test_to_from_json():
    # Expression tree serialization must round-trip.
    t = symbol('t', 'var * {name: string, amount: int}')
    assert from_tree(to_tree(t)).isidentical(t)
    assert from_tree(to_tree(t.amount + 1)).isidentical(t.amount + 1)


def test_to_tree():
    # Exact wire format of a summed-field expression tree.
    t = symbol('t', 'var * {name: string, amount: int32}')
    expr = t.amount.sum()
    expected = {'op': 'sum',
                'args': [{'op': 'Field',
                          'args':
                          [
                              {'op': 'Symbol',
                               'args': [
                                   't',
                                   'var * {name: string, amount: int32}',
                                   None
                               ]
                               },
                              'amount'
                          ]
                          }, [0], False]
                }
    assert to_tree(expr) == expected


@pytest.mark.parametrize('serial', all_formats)
def test_to_tree_slice(serial):
    # Slices survive a full serialize/deserialize round-trip.
    t = symbol('t', 'var * {name: string, amount: int32}')
    expr = t[:5]
    expr2 = pipe(expr, to_tree, serial.dumps, serial.loads, from_tree)
    assert expr.isidentical(expr2)


def test_to_from_tree_namespace():
    # Named leaves serialize to their given name and resolve back.
    t = symbol('t', 'var * {name: string, amount: int32}')
    expr = t.name
    tree = to_tree(expr, names={t: 't'})
    assert tree == {'op': 'Field', 'args': ['t', 'name']}
    new = from_tree(tree, namespace={'t': t})
    assert new.isidentical(expr)


def test_from_tree_is_robust_to_unnecessary_namespace():
    t = symbol('t', 'var * {name: string, amount: int32}')
    expr = t.amount + 1
    tree = to_tree(expr)  # don't use namespace
    assert from_tree(tree, {'t': t}).isidentical(expr)
# Leaf symbol over the full served dataset; reused by many tests below.
t = symbol('t', discover(data))


@pytest.mark.parametrize('serial', all_formats)
def test_compute(test, serial):
    expr = t.accounts.amount.sum()
    query = {'expr': to_tree(expr)}
    expected = 300
    response = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    assert 'OK' in response.status
    # NOTE(review): this local `data` shadows the module-level `data` dict.
    data = serial.loads(response.data)
    assert data['data'] == expected
    assert data['names'] == ['amount_sum']


@pytest.mark.parametrize('serial', all_formats)
def test_get_datetimes(test, serial):
    # Datetime values must survive the serialize/compute round-trip.
    expr = t.events
    query = {'expr': to_tree(expr)}
    response = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    assert 'OK' in response.status
    data = serial.loads(response.data)
    ds = datashape.dshape(data['datashape'])
    result = into(np.ndarray, data['data'], dshape=ds)
    assert into(list, result) == into(list, events)
    assert data['names'] == events.columns.tolist()


@pytest.mark.parametrize('serial', all_formats)
def dont_test_compute_with_namespace(test, serial):
    # NOTE(review): the `dont_` prefix keeps pytest from collecting this
    # test — presumably disabled deliberately; confirm before re-enabling.
    query = {'expr': {'op': 'Field',
                      'args': ['accounts', 'name']}}
    expected = ['Alice', 'Bob']
    response = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    assert 'OK' in response.status
    data = serial.loads(response.data)
    assert data['data'] == expected
    assert data['names'] == ['name']
# CSV handle used by the iris_server-based tests below.
iris = CSV(example('iris.csv'))


@pytest.mark.parametrize('serial', all_formats)
def test_compute_with_variable_in_namespace(iris_server, serial):
    # Free variables can be supplied via the request's `namespace` mapping.
    test = iris_server
    t = symbol('t', discover(iris))
    pl = symbol('pl', 'float32')
    expr = t[t.petal_length > pl].species
    tree = to_tree(expr, {pl: 'pl'})
    blob = serial.dumps({'expr': tree, 'namespace': {'pl': 5}})
    resp = test.post(
        '/compute',
        data=blob,
        headers=mimetype(serial)
    )
    assert 'OK' in resp.status
    data = serial.loads(resp.data)
    result = data['data']
    expected = list(compute(expr._subs({pl: 5}), {t: iris}))
    assert result == expected
    assert data['names'] == ['species']


@pytest.mark.parametrize('serial', all_formats)
def test_compute_by_with_summary(iris_server, serial):
    # Grouped summaries round-trip; float columns compared approximately.
    test = iris_server
    t = symbol('t', discover(iris))
    expr = by(
        t.species,
        max=t.petal_length.max(),
        sum=t.petal_width.sum(),
    )
    tree = to_tree(expr)
    blob = serial.dumps({'expr': tree})
    resp = test.post(
        '/compute',
        data=blob,
        headers=mimetype(serial)
    )
    assert 'OK' in resp.status
    data = serial.loads(resp.data)
    result = DataFrame(data['data']).values
    expected = compute(expr, iris).values
    np.testing.assert_array_equal(result[:, 0], expected[:, 0])
    np.testing.assert_array_almost_equal(result[:, 1:], expected[:, 1:])
    assert data['names'] == ['species', 'max', 'sum']


@pytest.mark.parametrize('serial', all_formats)
def test_compute_column_wise(iris_server, serial):
    # Element-wise boolean selections evaluate server-side.
    test = iris_server
    t = symbol('t', discover(iris))
    subexpr = ((t.petal_width / 2 > 0.5) &
               (t.petal_length / 2 > 0.5))
    expr = t[subexpr]
    tree = to_tree(expr)
    blob = serial.dumps({'expr': tree})
    resp = test.post(
        '/compute',
        data=blob,
        headers=mimetype(serial)
    )
    assert 'OK' in resp.status
    data = serial.loads(resp.data)
    result = data['data']
    expected = compute(expr, iris)
    assert list(map(tuple, result)) == into(list, expected)
    assert data['names'] == t.fields
@pytest.mark.parametrize('serial', all_formats)
def test_multi_expression_compute(test, serial):
    # Joins across two served datasets compute server-side.
    s = symbol('s', discover(data))
    expr = join(s.accounts, s.cities)
    resp = test.post(
        '/compute',
        data=serial.dumps(dict(expr=to_tree(expr))),
        headers=mimetype(serial)
    )
    assert 'OK' in resp.status
    respdata = serial.loads(resp.data)
    result = respdata['data']
    expected = compute(expr, {s: data})
    assert list(map(tuple, result)) == into(list, expected)
    assert respdata['names'] == expr.fields


@pytest.mark.parametrize('serial', all_formats)
def test_leaf_symbol(test, serial):
    # The reserved ':leaf' name refers to the server's root dataset.
    query = {'expr': {'op': 'Field', 'args': [':leaf', 'cities']}}
    resp = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    data = serial.loads(resp.data)
    a = data['data']
    b = into(list, cities)
    assert list(map(tuple, a)) == b
    assert data['names'] == cities.columns.tolist()


@pytest.mark.parametrize('serial', all_formats)
def test_sqlalchemy_result(test, serial):
    # Results backed by SQL come back as serializable rows.
    expr = t.db.iris.head(5)
    query = {'expr': to_tree(expr)}
    response = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    assert 'OK' in response.status
    data = serial.loads(response.data)
    result = data['data']
    assert all(isinstance(item, (tuple, list)) for item in result)
    assert data['names'] == t.db.iris.fields


def test_server_accepts_non_nonzero_ables():
    # An empty DataFrame (falsy) must still be accepted as data.
    Server(DataFrame())


@pytest.mark.parametrize('serial', all_formats)
def test_server_can_compute_sqlalchemy_reductions(test, serial):
    expr = t.db.iris.petal_length.sum()
    query = {'expr': to_tree(expr)}
    response = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    assert 'OK' in response.status
    respdata = serial.loads(response.data)
    result = respdata['data']
    assert result == odo(compute(expr, {t: data}), int)
    assert respdata['names'] == ['petal_length_sum']
@pytest.mark.parametrize('serial', all_formats)
def test_serialization_endpoints(test, serial):
    # Every registered serialization format handles /compute.
    expr = t.db.iris.petal_length.sum()
    query = {'expr': to_tree(expr)}
    response = test.post(
        '/compute',
        data=serial.dumps(query),
        headers=mimetype(serial)
    )
    assert 'OK' in response.status
    respdata = serial.loads(response.data)
    result = respdata['data']
    assert result == odo(compute(expr, {t: data}), int)
    assert respdata['names'] == ['petal_length_sum']


@pytest.fixture
def has_bokeh():
    # Skip CORS tests when bokeh (which supplies the crossdomain decorator)
    # is not installed.
    try:
        from bokeh.server.crossdomain import crossdomain
    except ImportError as e:
        pytest.skip(str(e))


@pytest.mark.parametrize('serial', all_formats)
def test_cors_compute(test, serial, has_bokeh):
    expr = t.db.iris.petal_length.sum()
    res = test.post(
        '/compute',
        data=serial.dumps(dict(expr=to_tree(expr))),
        headers=mimetype(serial)
    )
    assert res.status_code == 200
    assert res.headers['Access-Control-Allow-Origin'] == '*'
    assert 'HEAD' in res.headers['Access-Control-Allow-Methods']
    assert 'OPTIONS' in res.headers['Access-Control-Allow-Methods']
    assert 'POST' in res.headers['Access-Control-Allow-Methods']
    # we don't allow gets because we're always sending data
    assert 'GET' not in res.headers['Access-Control-Allow-Methods']


@pytest.mark.parametrize('method',
                         ['get',
                          pytest.mark.xfail('head', raises=AssertionError),
                          pytest.mark.xfail('options', raises=AssertionError),
                          pytest.mark.xfail('post', raises=AssertionError)])
def test_cors_datashape(test, method, has_bokeh):
    res = getattr(test, method)('/datashape')
    assert res.status_code == 200
    assert res.headers['Access-Control-Allow-Origin'] == '*'
    assert 'HEAD' not in res.headers['Access-Control-Allow-Methods']
    assert 'OPTIONS' not in res.headers['Access-Control-Allow-Methods']
    assert 'POST' not in res.headers['Access-Control-Allow-Methods']
    # we only allow GET requests
    assert 'GET' in res.headers['Access-Control-Allow-Methods']
@pytest.fixture(scope='module')
def username():
    return 'blaze-dev'


@pytest.fixture(scope='module')
def password():
    return 'SecretPassword123'


@pytest.fixture(scope='module')
def server_with_auth(username, password):
    # Server requiring HTTP Basic credentials matching the fixtures above.
    def auth(a):
        return a and a.username == username and a.password == password

    s = Server(data, all_formats, authorization=auth)
    s.app.testing = True
    return s


@pytest.yield_fixture
def test_with_auth(server_with_auth):
    with server_with_auth.app.test_client() as c:
        yield c


def basic_auth(username, password):
    # Build an HTTP Basic "Authorization" header value (bytes).
    return (
        b'Basic ' + b64encode(':'.join((username, password)).encode('utf-8'))
    )


@pytest.mark.parametrize('serial', all_formats)
def test_auth(test_with_auth, username, password, serial):
    # Correct credentials -> 200; wrong credentials -> 401 for both routes.
    expr = t.accounts.amount.sum()
    query = {'expr': to_tree(expr)}
    r = test_with_auth.get(
        '/datashape',
        headers={'authorization': basic_auth(username, password)},
    )
    assert r.status_code == 200
    headers = mimetype(serial)
    headers['authorization'] = basic_auth(username, password)
    s = test_with_auth.post(
        '/compute',
        data=serial.dumps(query),
        headers=headers,
    )
    assert s.status_code == 200
    u = test_with_auth.get(
        '/datashape',
        headers={'authorization': basic_auth(username + 'a', password + 'a')},
    )
    assert u.status_code == 401
    headers['authorization'] = basic_auth(username + 'a', password + 'a')
    v = test_with_auth.post(
        '/compute',
        data=serial.dumps(query),
        headers=headers,
    )
    assert v.status_code == 401
@pytest.mark.parametrize('serial', all_formats)
def test_minute_query(test, serial):
    """A datetime ``.minute`` accessor expression round-trips through
    /compute with the expected data, field name, and datashape."""
    expr = t.events.when.minute
    query = {'expr': to_tree(expr)}
    result = test.post(
        '/compute',
        headers=mimetype(serial),
        data=serial.dumps(query)
    )
    expected = {
        'data': [0, 0],
        'names': ['when_minute'],
        'datashape': '2 * int64'
    }
    assert result.status_code == 200
    assert expected == serial.loads(result.data)
@pytest.mark.parametrize('serial', all_formats)
def test_isin(test, serial):
    """An ``isin`` expression round-trips through /compute and yields a
    boolean column."""
    expr = t.events.value.isin(frozenset([1]))
    query = {'expr': to_tree(expr)}
    result = test.post(
        '/compute',
        headers=mimetype(serial),
        data=serial.dumps(query)
    )
    expected = {
        'data': [True, False],
        'names': ['value'],
        'datashape': '2 * bool',
    }
    assert result.status_code == 200
    assert expected == serial.loads(result.data)
@pytest.mark.parametrize('serial', all_formats)
def test_add_data_to_empty_server(empty_server, serial):
    """Adding a dataset to an empty server via /add makes it visible in
    /datashape and computable through /compute.

    NOTE(review): the ``temp_server()`` context manager binds ``test`` but
    it is never used -- every request below goes through ``empty_server``.
    The wrapper looks vestigial; confirm and remove.
    """
    # add data
    with temp_server() as test:
        iris_path = example('iris.csv')
        blob = serial.dumps({'iris': iris_path})
        response1 = empty_server.post(
            '/add',
            headers=mimetype(serial),
            data=blob,
        )
        assert 'OK' in response1.status
        assert response1.status_code == 200
        # check for expected server datashape
        response2 = empty_server.get('/datashape')
        expected2 = str(discover({'iris': resource(iris_path)}))
        assert response2.data.decode('utf-8') == expected2
        # compute on added data
        t = Data({'iris': resource(iris_path)})
        expr = t.iris.petal_length.sum()
        response3 = empty_server.post(
            '/compute',
            data=serial.dumps({'expr': to_tree(expr)}),
            headers=mimetype(serial)
        )
        result3 = serial.loads(response3.data)['data']
        expected3 = compute(expr, {'iris': resource(iris_path)})
        assert result3 == expected3
@pytest.mark.parametrize('serial', all_formats)
def test_add_data_to_server(serial):
    """Adding a dataset via /add extends the datashape of a server that
    already serves data, and the new data is computable through /compute."""
    with temp_server(data) as test:
        # add data
        initial_datashape = datashape.dshape(test.get('/datashape').data.decode('utf-8'))
        iris_path = example('iris.csv')
        blob = serial.dumps({'iris': iris_path})
        response1 = test.post(
            '/add',
            headers=mimetype(serial),
            data=blob,
        )
        assert 'OK' in response1.status
        assert response1.status_code == 200
        # check for expected server datashape
        new_datashape = datashape.dshape(test.get('/datashape').data.decode('utf-8'))
        data2 = data.copy()
        data2.update({'iris': resource(iris_path)})
        expected2 = datashape.dshape(discover(data2))
        # (removed an unused `from pprint import pprint as pp` debug import)
        assert_dshape_equal(new_datashape, expected2)
        assert new_datashape.measure.fields != initial_datashape.measure.fields
        # compute on added data
        t = Data({'iris': resource(iris_path)})
        expr = t.iris.petal_length.sum()
        response3 = test.post(
            '/compute',
            data=serial.dumps({'expr': to_tree(expr)}),
            headers=mimetype(serial)
        )
        result3 = serial.loads(response3.data)['data']
        expected3 = compute(expr, {'iris': resource(iris_path)})
        assert result3 == expected3
@pytest.mark.parametrize('serial', all_formats)
def test_cant_add_data_to_server(iris_server, serial):
    """POSTing to /add on the iris server is rejected with 422
    (presumably the fixture server is not configured to accept new
    datasets -- confirm against the ``iris_server`` fixture)."""
    # try adding more data to server
    iris_path = example('iris.csv')
    blob = serial.dumps({'iris': iris_path})
    response1 = iris_server.post(
        '/add',
        headers=mimetype(serial),
        data=blob,
    )
    assert response1.status_code == 422
@pytest.mark.parametrize('serial', all_formats)
def test_bad_add_payload(empty_server, serial):
    """/add rejects a payload that is not a mutable mapping with 422."""
    # try adding more data to server
    blob = serial.dumps('This is not a mutable mapping.')
    response1 = empty_server.post(
        '/add',
        headers=mimetype(serial),
        data=blob,
    )
    assert response1.status_code == 422
@pytest.mark.parametrize('serial', all_formats)
def test_odo_kwargs(test, serial):
    """``odo_kwargs`` in the /compute payload are forwarded to odo: without
    the kwarg required by DumbResource the request fails (500); with it the
    computation succeeds and the DataFrame round-trips."""
    expr = t.dumb
    bad_query = {'expr': to_tree(expr)}
    result = test.post(
        '/compute',
        headers=mimetype(serial),
        data=serial.dumps(bad_query),
    )
    # missing kwarg: the resource's error surfaces as a 500
    assert result.status_code == 500
    assert b'return_df must be passed' in result.data
    good_query = {
        'expr': to_tree(expr),
        'odo_kwargs': {
            'return_df': odo(DumbResource.df, list),
        },
    }
    result = test.post(
        '/compute',
        headers=mimetype(serial),
        data=serial.dumps(good_query)
    )
    assert result.status_code == 200
    data = serial.loads(result.data)
    dshape = discover(DumbResource.df)
    assert_dshape_equal(
        datashape.dshape(data['datashape']),
        dshape,
    )
    assert_frame_equal(
        odo(data['data'], DataFrame, dshape=dshape),
        DumbResource.df,
    )
@pytest.mark.parametrize('serial', all_formats)
def test_compute_kwargs(test, serial):
    """``compute_kwargs`` in the /compute payload are forwarded to compute:
    without the kwarg required by DumbResource the request fails (500);
    with it the computation succeeds and the DataFrame round-trips."""
    expr = t.dumb.sort()
    bad_query = {'expr': to_tree(expr)}
    result = test.post(
        '/compute',
        headers=mimetype(serial),
        data=serial.dumps(bad_query),
    )
    # missing kwarg: the resource's error surfaces as a 500
    assert result.status_code == 500
    assert b'return_df must be passed' in result.data
    good_query = {
        'expr': to_tree(expr),
        'compute_kwargs': {
            'return_df': odo(DumbResource.df, list),
        },
    }
    result = test.post(
        '/compute',
        headers=mimetype(serial),
        data=serial.dumps(good_query)
    )
    assert result.status_code == 200
    data = serial.loads(result.data)
    dshape = discover(DumbResource.df)
    assert_dshape_equal(
        datashape.dshape(data['datashape']),
        dshape,
    )
    assert_frame_equal(
        odo(data['data'], DataFrame, dshape=dshape),
        DumbResource.df,
    )
| bsd-3-clause |
calatre/epidemics_network | models/SIR_non_spacial.py | 1 | 2531 | # Universidade de Aveiro - Physics Department
# 2016/2017 Project - Andre Calatre, 73207
# "Simulation of an epidemic" - 7/6/2017
# Simulation of a (non-spacial) SIR Epidemic Model
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib import colors
# Possible status of a person - Dead, Healthy (but susceptible), Sick
DEAD, HEALTH, SICK = 0, 1, 2
# Colours for visualization: black = dead, green = healthy, red = sick
# Note that apparently for the colormap to work, this list and the bounds list
# must be one larger than the number of different values in the array.
colors_list = ['k', 'g', 'r', 'b', 'b']
cmap = colors.ListedColormap(colors_list)
bounds = [0,1,2,3,4]
norm = colors.BoundaryNorm(bounds, cmap.N)
#Defining the main function:
# Running total of infections; read and updated in-place by iterate_ns()
# via `global inf` (it scales the effective contagion probability).
inf = 1
def iterate_ns(X, cval, rval):
    """Iterate the map one step according to the epidemic rules.

    Parameters
    ----------
    X : ndarray, shape (ny, nx)
        Current grid of cell states (DEAD=0, HEALTH=1, SICK=2; module
        globals).
    cval : float
        Base contagion probability; the effective infection chance is
        ``cval * inf`` where ``inf`` is the module-global running count
        of infections (non-spacial model: position does not matter).
    rval : float
        Per-step probability that a sick cell dies (is removed).

    Returns
    -------
    ndarray
        The next grid.  Cells not explicitly set remain DEAD (0), so a
        sick cell that fails its survival roll dies.
    """
    global inf
    turninf = 0  # infections occurring during this step
    X1 = np.zeros((ny, nx))
    for ix in range(nx):
        for iy in range(ny):
            rem = np.random.random()
            con = np.random.random()
            # if sick, probability (1-r) of surviving one more turn
            if X[iy, ix] == SICK and rem >= rval:
                X1[iy, ix] = SICK
            # There's a chance cval*inf of contagion, no matter the position
            elif X[iy, ix] == HEALTH:
                if con <= cval * inf:
                    X1[iy, ix] = SICK
                    turninf += 1  # this one adds to the turn's statistics
                else:
                    # BUG FIX: the original used two independent `if`s with
                    # `<=` and `>=`, so at con == cval*inf the cell was
                    # counted as infected yet left healthy.  The branches
                    # are now mutually exclusive.
                    X1[iy, ix] = HEALTH
    print('this turn:' + str(turninf))
    inf += turninf
    print('total inf:' + str(inf))
    return X1
# base probabilities: people born, diseases appear, chance of contagion
cval = 0.0001
# transition probabilities: if sick, subject _s_urvives < dies
rval = 0.01
# map size (number of cells in x and y directions).
nx, ny = 100, 100
# Initialize the map grid: every cell starts HEALTH (= 1) ...
X = np.ones((ny, nx))
# ... except a single SICK seed at the centre of the grid.
X[int(nx/2),int(ny/2)] = SICK
#plotting a single frame
fig = plt.figure(figsize=(25/3, 6.25))
ax = fig.add_subplot(111)
ax.set_axis_off()
im = ax.imshow(X, cmap=cmap, norm=norm)#, interpolation='nearest')
# The animation function: called to produce a frame for each generation.
def animate(i):
    # Display the current grid, then advance it one epidemic step for the
    # next frame.  The grid is stored on the function object itself
    # (animate.X), bound below.
    im.set_data(animate.X)
    animate.X = iterate_ns(animate.X, cval, rval)
# Bind our grid to the identifier X in the animate function's namespace.
animate.X = X
# Interval between frames (ms).
interval = 2000
# NOTE: keep a reference to `anim` -- matplotlib documents that the
# animation stops if the FuncAnimation return value is garbage-collected.
anim = animation.FuncAnimation(fig, animate, interval = interval)
plt.show()
| apache-2.0 |
openpathsampling/openpathsampling | openpathsampling/analysis/tis/crossing_probability.py | 3 | 5290 | import collections
import openpathsampling as paths
from openpathsampling.netcdfplus import StorableNamedObject
from openpathsampling.numerics import LookupFunction
import pandas as pd
import numpy as np
from .core import EnsembleHistogrammer, MultiEnsembleSamplingAnalyzer
class FullHistogramMaxLambdas(EnsembleHistogrammer):
    """Histogram of the full max-lambda function (one way to get the TCP).

    For each ensemble of the transition, this histograms the maximum value
    of lambda.  One of these objects is made per transition.

    Parameters
    ----------
    transition: :class:`.TISTransition`
        the transition to be analyzed
    hist_parameters: dict
        Histogram parameters to use with this collective variable: allowed
        keys are 'bin_width' and 'bin_range'; value for 'bin_width' is a
        float; for 'bin_range' is a tuple with `(left_edge, right_edge)`
        (only left edge is used)
    max_lambda_func: callable
        function to use to map the trajectories to a histogram; default is
        `None`, which uses the maximum value of the order parameter
        associated with the interface set. Overriding this can be used if
        either (a) the interface set does not have an order parameter
        associated with it, or (b) you want to calculate the values along
        some other order parameter
    """
    def __init__(self, transition, hist_parameters, max_lambda_func=None):
        self.transition = transition
        if max_lambda_func is None:
            # fall back to the order parameter of the interface set
            try:
                max_lambda_func = transition.interfaces.cv_max
            except AttributeError:
                pass  # leave max_lambda_func as None
        if max_lambda_func is None:
            raise RuntimeError("Can't identify function to determine max "
                               + "value of order parameter.")
        # TODO: is this used?
        ensembles = transition.ensembles
        self.lambdas = dict(zip(ensembles, transition.interfaces.lambdas))
        super(FullHistogramMaxLambdas, self).__init__(
            ensembles=ensembles,
            f=max_lambda_func,
            hist_parameters=hist_parameters
        )
#class PerEnsembleMaxLambdas(EnsembleHistogrammer):
# TODO: this just maps the count to the ensemble, not the full histogram
#def __init__(self, transition):
#interfaces_lambdas = transition.interfaces.lambdas
class TotalCrossingProbability(MultiEnsembleSamplingAnalyzer):
    """
    Calculate the total crossing probability function.
    The total crossing probability function is generated by calculating the
    individual ensemble crossing probability functions (using, e.g.,
    :class:`.FullHistogramMaxLambdas`, and combining them using some
    combining method (default is :class:`.WHAM`). One of these objects is
    instantiated per transition.
    Parameters
    ----------
    max_lambda_calc: :class:`.EnsembleHistogrammer`
        usually :class:`.FullHistogramMaxLambdas`; object that creates the
        max lambda histograms for the ensembles associated with this
        transition.
    combiner: TODO
        class that combines multiple histograms (with restricted sampling)
        into a single result. If `None` (default), uses :class:`.WHAM`
    """
    def __init__(self, max_lambda_calc, combiner=None):
        transition = max_lambda_calc.transition
        super(TotalCrossingProbability, self).__init__(transition.ensembles)
        self.max_lambda_calc = max_lambda_calc
        self.transition = transition
        if combiner is None:
            lambdas = self.transition.interfaces.lambdas
            combiner = paths.numerics.WHAM(interfaces=lambdas)
        self.combiner = combiner
    def from_weighted_trajectories(self, input_dict):
        """Calculate results from a weighted trajectories dictionary.
        Parameters
        ----------
        input_dict : dict of {:class:`.Ensemble`: collections.Counter}
            ensemble as key, and a counter mapping each trajectory
            associated with that ensemble to its counter of time spent in
            the ensemble (output of ``steps_to_weighted_trajectories``)
        Returns
        -------
        :class:`.LookupFunction`
            the total crossing probability function
        """
        hists = self.max_lambda_calc.from_weighted_trajectories(input_dict)
        return self.from_ensemble_histograms(hists)
    def from_ensemble_histograms(self, hists):
        """Calculate results from a dict of ensemble histograms.
        Parameters
        ----------
        hists : dict of {:class:`.Ensemble`: :class:`.numerics.Histogram`}
            histogram for each ensemble (from ``self.max_lambda_calc``)
        Returns
        -------
        :class:`.LookupFunction`
            the total crossing probability function
        """
        # (removed an unused `tcp_results = {}` local left over from an
        # earlier implementation)
        input_hists = [hists[ens] for ens in self.transition.ensembles]
        df = paths.numerics.histograms_to_pandas_dataframe(
            input_hists,
            fcn="reverse_cumulative"
        ).sort_index(axis=1)
        # TODO: remove WHAM-specific name here
        tcp = self.combiner.wham_bam_histogram(df).to_dict()
        return LookupFunction(tcp.keys(), tcp.values())
| mit |
grahesh/Stock-Market-Event-Analysis | quicksim/strategies/bollinger.py | 4 | 3609 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 1, 2011
@author:Drew Bratcher
@contact: dbratcher@gatech.edu
@summary: Contains tutorial for backtester and report.
'''
# bollinger.py
#
# A module which contains a bollinger strategy.
#
#
#python imports
import cPickle
from pylab import *
from pandas import *
import matplotlib.pyplot as plt
import datetime as dt
import os
#qstk imports
from qstkutil import DataAccess as da
import qstkutil.qsdateutil as du
import qstkutil.bollinger as boil
#simple versions
#stateful
def createStatefulStrat(adjclose, timestamps, lookback, highthresh, lowthresh):
    # Stateful bollinger strategy: starting from an all-zero allocation row,
    # for each timestamp buy a stock when its bollinger value drops below
    # `lowthresh` (at most 10 simultaneous holdings) and sell it when the
    # value rises above `highthresh`.  Each held stock gets a fixed 10%
    # allocation.  Returns a DataMatrix with one allocation row per
    # timestamp (legacy pandas API).
    alloc=DataMatrix(index=[timestamps[0]],columns=adjclose.columns, data=[zeros(len(adjclose.columns))])
    bs=boil.calcbvals(adjclose, timestamps, adjclose.columns, lookback)
    hold=[]  # column indices of currently-held stocks
    for i in bs.index[1:]:
        for stock in range(0,len(bs.columns)):
            if(bs.xs(i)[stock]<lowthresh and len(hold)<10):
                hold.append(stock)
            elif(bs.xs(i)[stock]>highthresh):
                if stock in hold:
                    hold.remove(stock)
        vals=zeros(len(adjclose.columns))
        for j in range(0,len(hold)):
            vals[hold[j]]=.1
        alloc=alloc.append(DataMatrix(index=[i],columns=adjclose.columns,data=[vals]))
    return alloc
#stateless
def createStatelessStrat(adjclose, timestamps, lookback, highthresh, lowthresh):
    # Delegate to the generic strategy with: spread = number of columns
    # (no holding cap), 10% bet per holding, and duration = the whole time
    # range (so positions effectively never expire by age).
    return create(adjclose,timestamps,lookback,len(adjclose.columns),highthresh,lowthresh,.1,len(adjclose.index))
#creates an allocation DataMatrix based on bollinger strategy and paramaters
def create(adjclose, timestamps, lookback, spread, high, low, bet, duration):
    """Build an allocation DataMatrix from a bollinger-band strategy.

    Parameters
    ----------
    adjclose : DataMatrix
        adjusted close prices, one column per symbol
    timestamps : list
        trading timestamps; the first seeds the all-zero allocation row
    lookback : int
        lookback window for the bollinger values
    spread : int
        maximum number of simultaneous holdings
    high, low : float
        bollinger-value thresholds for selling / buying
    bet : float
        allocation fraction assigned to each holding
    duration : int
        maximum number of periods a position is held

    Returns
    -------
    DataMatrix
        one allocation row per timestamp
    """
    alloc = DataMatrix(index=[timestamps[0]], columns=adjclose.columns,
                       data=[zeros(len(adjclose.columns))])
    bs = boil.calcbvals(adjclose, timestamps, adjclose.columns, lookback)
    hold = []  # column indices currently held
    time = []  # remaining holding periods, parallel to `hold`
    for i in bs.index[1:]:
        for stock in range(0, len(bs.columns)):
            if bs.xs(i)[stock] < low and len(hold) < spread:
                hold.append(stock)
                time.append(duration)
            elif bs.xs(i)[stock] > high:
                if stock in hold:
                    del time[hold.index(stock)]
                    hold.remove(stock)
        # Age every open position and drop the expired ones.
        # BUG FIX: the original iterated `range(0, len(time))` forwards while
        # deleting in place, which skips the element shifted into the deleted
        # slot (it never gets aged) and can raise IndexError once the list
        # shrinks below the precomputed range.  Iterating in reverse keeps
        # all indices left of `j` stable, so each entry is aged exactly once.
        for j in reversed(range(len(time))):
            time[j] -= 1
            if time[j] <= 0:
                del hold[j]
                del time[j]
        vals = zeros(len(adjclose.columns))
        for j in range(0, len(hold)):
            vals[hold[j]] = bet
        alloc = alloc.append(DataMatrix(index=[i], columns=adjclose.columns,
                                        data=[vals]))
    return alloc
if __name__ == "__main__":
    #Usage: python bollinger.py '1-1-2004' '1-1-2009' 'alloc.pkl'
    # NOTE: Python 2 script (print statements, cPickle, list-returning map);
    # dates are 'M-D-YYYY' strings and the allocation is pickled to argv[3].
    print "Running Bollinger strategy starting "+sys.argv[1]+" and ending "+sys.argv[2]+"."
    #Run S&P500 for thresholds 1 and -1 in simple version for lookback of 10 days
    symbols = list(np.loadtxt(os.environ['QS']+'/quicksim/strategies/S&P500.csv',dtype='str',delimiter=',',comments='#',skiprows=0))
    t=map(int,sys.argv[1].split('-'))
    startday = dt.datetime(t[2],t[0],t[1])
    t=map(int,sys.argv[2].split('-'))
    endday = dt.datetime(t[2],t[0],t[1])
    # 16:00 -- presumably the NYSE close used to stamp daily bars; confirm
    # against qsdateutil.getNYSEdays.
    timeofday=dt.timedelta(hours=16)
    timestamps=du.getNYSEdays(startday,endday,timeofday)
    dataobj=da.DataAccess(da.DataSource.NORGATE)
    # Drop any requested symbols the data source does not know about.
    intersectsyms=list(set(dataobj.get_all_symbols())&set(symbols))
    badsyms=[]
    if size(intersectsyms)<size(symbols):
        badsyms=list(set(symbols)-set(intersectsyms))
        print "bad symms:"
        print badsyms
    for i in badsyms:
        index=symbols.index(i)
        symbols.pop(index)
    historic = dataobj.get_data(timestamps,symbols,"close")
    alloc=createStatelessStrat(historic,timestamps,10,1,-1)
    output=open(sys.argv[3],"wb")
    cPickle.dump(alloc,output)
    output.close()
| bsd-3-clause |
wwf5067/statsmodels | statsmodels/base/model.py | 25 | 76781 | from __future__ import print_function
from statsmodels.compat.python import iterkeys, lzip, range, reduce
import numpy as np
from scipy import stats
from statsmodels.base.data import handle_data
from statsmodels.tools.tools import recipr, nan_dot
from statsmodels.stats.contrast import ContrastResults, WaldTestResults
from statsmodels.tools.decorators import resettable_cache, cache_readonly
import statsmodels.base.wrapper as wrap
from statsmodels.tools.numdiff import approx_fprime
from statsmodels.formula import handle_formula_data
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.base.optimizer import Optimizer
_model_params_doc = """
Parameters
----------
endog : array-like
1-d endogenous response variable. The dependent variable.
exog : array-like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user. See
:func:`statsmodels.tools.add_constant`."""
_missing_param_doc = """\
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no nan
checking is done. If 'drop', any observations with nans are dropped.
If 'raise', an error is raised. Default is 'none.'"""
_extra_param_doc = """
hasconst : None or bool
Indicates whether the RHS includes a user-supplied constant. If True,
a constant is not checked for and k_constant is set to 1 and all
result statistics are calculated as if a constant is present. If
False, a constant is not checked for and k_constant is set to 0.
"""
class Model(object):
    __doc__ = """
    A (predictive) statistical model. Intended to be subclassed not used.
    %(params_doc)s
    %(extra_params_doc)s
    Notes
    -----
    `endog` and `exog` are references to any data provided. So if the data is
    already stored in numpy arrays and it is changed then `endog` and `exog`
    will change as well.
    """ % {'params_doc' : _model_params_doc,
           'extra_params_doc' : _missing_param_doc + _extra_param_doc}
    def __init__(self, endog, exog=None, **kwargs):
        missing = kwargs.pop('missing', 'none')
        hasconst = kwargs.pop('hasconst', None)
        self.data = self._handle_data(endog, exog, missing, hasconst,
                                      **kwargs)
        self.k_constant = self.data.k_constant
        self.exog = self.data.exog
        self.endog = self.data.endog
        self._data_attr = []
        self._data_attr.extend(['exog', 'endog', 'data.exog', 'data.endog'])
        if 'formula' not in kwargs: # won't be able to unpickle without these
            self._data_attr.extend(['data.orig_endog', 'data.orig_exog'])
        # store keys for extras if we need to recreate model instance
        # we don't need 'missing', maybe we need 'hasconst'
        self._init_keys = list(kwargs.keys())
        if hasconst is not None:
            self._init_keys.append('hasconst')
    def _get_init_kwds(self):
        """return dictionary with extra keys used in model.__init__
        """
        kwds = dict(((key, getattr(self, key, None))
                     for key in self._init_keys))
        return kwds
    def _handle_data(self, endog, exog, missing, hasconst, **kwargs):
        data = handle_data(endog, exog, missing, hasconst, **kwargs)
        # kwargs arrays could have changed, easier to just attach here
        for key in kwargs:
            if key in ['design_info', 'formula']: # leave attached to data
                continue
            # pop so we don't start keeping all these twice or references
            try:
                setattr(self, key, data.__dict__.pop(key))
            except KeyError: # panel already pops keys in data handling
                pass
        return data
    @classmethod
    def from_formula(cls, formula, data, subset=None, *args, **kwargs):
        """
        Create a Model from a formula and dataframe.
        Parameters
        ----------
        formula : str or generic Formula object
            The formula specifying the model
        data : array-like
            The data for the model. See Notes.
        subset : array-like
            An array-like object of booleans, integers, or index values that
            indicate the subset of df to use in the model. Assumes df is a
            `pandas.DataFrame`
        args : extra arguments
            These are passed to the model
        kwargs : extra keyword arguments
            These are passed to the model with one exception. The
            ``eval_env`` keyword is passed to patsy. It can be either a
            :class:`patsy:patsy.EvalEnvironment` object or an integer
            indicating the depth of the namespace to use. For example, the
            default ``eval_env=0`` uses the calling namespace. If you wish
            to use a "clean" environment set ``eval_env=-1``.
        Returns
        -------
        model : Model instance
        Notes
        ------
        data must define __getitem__ with the keys in the formula terms
        args and kwargs are passed on to the model instantiation. E.g.,
        a numpy structured or rec array, a dictionary, or a pandas DataFrame.
        """
        #TODO: provide a docs template for args/kwargs from child models
        #TODO: subset could use syntax. issue #469.
        if subset is not None:
            # NOTE(review): .ix is deprecated/removed in modern pandas;
            # presumably mixed label/positional selection was intended here.
            # Confirm before porting to .loc/.iloc.
            data = data.ix[subset]
        eval_env = kwargs.pop('eval_env', None)
        if eval_env is None:
            eval_env = 2
        elif eval_env == -1:
            from patsy import EvalEnvironment
            eval_env = EvalEnvironment({})
        else:
            eval_env += 1 # we're going down the stack again
        missing = kwargs.get('missing', 'drop')
        if missing == 'none': # with patys it's drop or raise. let's raise.
            missing = 'raise'
        tmp = handle_formula_data(data, None, formula, depth=eval_env,
                                  missing=missing)
        ((endog, exog), missing_idx, design_info) = tmp
        kwargs.update({'missing_idx': missing_idx,
                       'missing': missing,
                       'formula': formula, # attach formula for unpckling
                       'design_info': design_info})
        mod = cls(endog, exog, *args, **kwargs)
        mod.formula = formula
        # since we got a dataframe, attach the original
        mod.data.frame = data
        return mod
    @property
    def endog_names(self):
        """Name(s) of the endogenous variable(s), as stored on the data."""
        return self.data.ynames
    @property
    def exog_names(self):
        """Names of the exogenous variables, as stored on the data."""
        return self.data.xnames
    def fit(self):
        """
        Fit a model to data.
        """
        raise NotImplementedError
    def predict(self, params, exog=None, *args, **kwargs):
        """
        After a model has been fit predict returns the fitted values.
        This is a placeholder intended to be overwritten by individual models.
        """
        raise NotImplementedError
class LikelihoodModel(Model):
    """
    Likelihood model is a subclass of Model.
    """
    def __init__(self, endog, exog=None, **kwargs):
        super(LikelihoodModel, self).__init__(endog, exog, **kwargs)
        self.initialize()
    def initialize(self):
        """
        Initialize (possibly re-initialize) a Model instance. For
        instance, the design matrix of a linear model may change
        and some things must be recomputed.
        """
        pass
    # TODO: if the intent is to re-initialize the model with new data then this
    # method needs to take inputs...
    def loglike(self, params):
        """
        Log-likelihood of model.
        """
        raise NotImplementedError
    def score(self, params):
        """
        Score vector of model.
        The gradient of logL with respect to each parameter.
        """
        raise NotImplementedError
    def information(self, params):
        """
        Fisher information matrix of model
        Returns -Hessian of loglike evaluated at params.
        """
        raise NotImplementedError
    def hessian(self, params):
        """
        The Hessian matrix of the model
        """
        raise NotImplementedError
    def fit(self, start_params=None, method='newton', maxiter=100,
            full_output=True, disp=True, fargs=(), callback=None, retall=False,
            skip_hessian=False, **kwargs):
        """
        Fit method for likelihood based models
        Parameters
        ----------
        start_params : array-like, optional
            Initial guess of the solution for the loglikelihood maximization.
            The default is an array of zeros.
        method : str, optional
            The `method` determines which solver from `scipy.optimize`
            is used, and it can be chosen from among the following strings:
            - 'newton' for Newton-Raphson, 'nm' for Nelder-Mead
            - 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
            - 'lbfgs' for limited-memory BFGS with optional box constraints
            - 'powell' for modified Powell's method
            - 'cg' for conjugate gradient
            - 'ncg' for Newton-conjugate gradient
            - 'basinhopping' for global basin-hopping solver
            The explicit arguments in `fit` are passed to the solver,
            with the exception of the basin-hopping solver. Each
            solver has several optional arguments that are not the same across
            solvers. See the notes section below (or scipy.optimize) for the
            available arguments and for the list of explicit arguments that the
            basin-hopping solver supports.
        maxiter : int, optional
            The maximum number of iterations to perform.
        full_output : bool, optional
            Set to True to have all available output in the Results object's
            mle_retvals attribute. The output is dependent on the solver.
            See LikelihoodModelResults notes section for more information.
        disp : bool, optional
            Set to True to print convergence messages.
        fargs : tuple, optional
            Extra arguments passed to the likelihood function, i.e.,
            loglike(x,*args)
        callback : callable callback(xk), optional
            Called after each iteration, as callback(xk), where xk is the
            current parameter vector.
        retall : bool, optional
            Set to True to return list of solutions at each iteration.
            Available in Results object's mle_retvals attribute.
        skip_hessian : bool, optional
            If False (default), then the negative inverse hessian is calculated
            after the optimization. If True, then the hessian will not be
            calculated. However, it will be available in methods that use the
            hessian in the optimization (currently only with `"newton"`).
        kwargs : keywords
            All kwargs are passed to the chosen solver with one exception. The
            following keyword controls what happens after the fit::
                warn_convergence : bool, optional
                    If True, checks the model for the converged flag. If the
                    converged flag is False, a ConvergenceWarning is issued.
        Notes
        -----
        The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
        explicit arguments.
        Optional arguments for solvers (see returned Results.mle_settings)::
            'newton'
                tol : float
                    Relative error in params acceptable for convergence.
            'nm' -- Nelder Mead
                xtol : float
                    Relative error in params acceptable for convergence
                ftol : float
                    Relative error in loglike(params) acceptable for
                    convergence
                maxfun : int
                    Maximum number of function evaluations to make.
            'bfgs'
                gtol : float
                    Stop when norm of gradient is less than gtol.
                norm : float
                    Order of norm (np.Inf is max, -np.Inf is min)
                epsilon
                    If fprime is approximated, use this value for the step
                    size. Only relevant if LikelihoodModel.score is None.
            'lbfgs'
                m : int
                    This many terms are used for the Hessian approximation.
                factr : float
                    A stop condition that is a variant of relative error.
                pgtol : float
                    A stop condition that uses the projected gradient.
                epsilon
                    If fprime is approximated, use this value for the step
                    size. Only relevant if LikelihoodModel.score is None.
                maxfun : int
                    Maximum number of function evaluations to make.
                bounds : sequence
                    (min, max) pairs for each element in x,
                    defining the bounds on that parameter.
                    Use None for one of min or max when there is no bound
                    in that direction.
            'cg'
                gtol : float
                    Stop when norm of gradient is less than gtol.
                norm : float
                    Order of norm (np.Inf is max, -np.Inf is min)
                epsilon : float
                    If fprime is approximated, use this value for the step
                    size. Can be scalar or vector. Only relevant if
                    Likelihoodmodel.score is None.
            'ncg'
                fhess_p : callable f'(x,*args)
                    Function which computes the Hessian of f times an arbitrary
                    vector, p. Should only be supplied if
                    LikelihoodModel.hessian is None.
                avextol : float
                    Stop when the average relative error in the minimizer
                    falls below this amount.
                epsilon : float or ndarray
                    If fhess is approximated, use this value for the step size.
                    Only relevant if Likelihoodmodel.hessian is None.
            'powell'
                xtol : float
                    Line-search error tolerance
                ftol : float
                    Relative error in loglike(params) for acceptable for
                    convergence.
                maxfun : int
                    Maximum number of function evaluations to make.
                start_direc : ndarray
                    Initial direction set.
            'basinhopping'
                niter : integer
                    The number of basin hopping iterations.
                niter_success : integer
                    Stop the run if the global minimum candidate remains the
                    same for this number of iterations.
                T : float
                    The "temperature" parameter for the accept or reject
                    criterion. Higher "temperatures" mean that larger jumps
                    in function value will be accepted. For best results
                    `T` should be comparable to the separation (in function
                    value) between local minima.
                stepsize : float
                    Initial step size for use in the random displacement.
                interval : integer
                    The interval for how often to update the `stepsize`.
                minimizer : dict
                    Extra keyword arguments to be passed to the minimizer
                    `scipy.optimize.minimize()`, for example 'method' - the
                    minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
                    tolerance for termination. Other arguments are mapped from
                    explicit argument of `fit`:
                    - `args` <- `fargs`
                    - `jac` <- `score`
                    - `hess` <- `hess`
        """
        Hinv = None # JP error if full_output=0, Hinv not defined
        if start_params is None:
            if hasattr(self, 'start_params'):
                start_params = self.start_params
            elif self.exog is not None:
                # fails for shape (K,)?
                start_params = [0] * self.exog.shape[1]
            else:
                raise ValueError("If exog is None, then start_params should "
                                 "be specified")
        # TODO: separate args from nonarg taking score and hessian, ie.,
        # user-supplied and numerically evaluated estimate frprime doesn't take
        # args in most (any?) of the optimize function
        nobs = self.endog.shape[0]
        f = lambda params, *args: -self.loglike(params, *args) / nobs
        score = lambda params, *args: -self.score(params, *args) / nobs
        # NOTE(cleanup): this assignment was previously wrapped in a bare
        # try/except that fell back to ``hess = None``.  Defining a lambda
        # cannot raise -- self.hessian is only called later, inside the
        # optimizer -- so the handler was dead code and has been removed.
        # Behavior is unchanged.
        hess = lambda params, *args: -self.hessian(params, *args) / nobs
        if method == 'newton':
            score = lambda params, *args: self.score(params, *args) / nobs
            hess = lambda params, *args: self.hessian(params, *args) / nobs
            #TODO: why are score and hess positive?
        warn_convergence = kwargs.pop('warn_convergence', True)
        optimizer = Optimizer()
        xopt, retvals, optim_settings = optimizer._fit(f, score, start_params,
                                                       fargs, kwargs,
                                                       hessian=hess,
                                                       method=method,
                                                       disp=disp,
                                                       maxiter=maxiter,
                                                       callback=callback,
                                                       retall=retall,
                                                       full_output=full_output)
        #NOTE: this is for fit_regularized and should be generalized
        cov_params_func = kwargs.setdefault('cov_params_func', None)
        if cov_params_func:
            Hinv = cov_params_func(self, xopt, retvals)
        elif method == 'newton' and full_output:
            Hinv = np.linalg.inv(-retvals['Hessian']) / nobs
        elif not skip_hessian:
            try:
                Hinv = np.linalg.inv(-1 * self.hessian(xopt))
            # narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; LinAlgError etc. are still handled.
            except Exception:
                #might want custom warning ResultsWarning? NumericalWarning?
                from warnings import warn
                warndoc = ('Inverting hessian failed, no bse or '
                           'cov_params available')
                warn(warndoc, RuntimeWarning)
                Hinv = None
        if 'cov_type' in kwargs:
            cov_kwds = kwargs.get('cov_kwds', {})
            kwds = {'cov_type':kwargs['cov_type'], 'cov_kwds':cov_kwds}
        else:
            kwds = {}
        if 'use_t' in kwargs:
            kwds['use_t'] = kwargs['use_t']
        #prints for debugging
        #print('kwargs inLikelihoodModel.fit', kwargs)
        #print('kwds inLikelihoodModel.fit', kwds)
        #TODO: add Hessian approximation and change the above if needed
        mlefit = LikelihoodModelResults(self, xopt, Hinv, scale=1., **kwds)
        #TODO: hardcode scale?
        if isinstance(retvals, dict):
            mlefit.mle_retvals = retvals
            if warn_convergence and not retvals['converged']:
                from warnings import warn
                from statsmodels.tools.sm_exceptions import ConvergenceWarning
                warn("Maximum Likelihood optimization failed to converge. "
                     "Check mle_retvals", ConvergenceWarning)
        mlefit.mle_settings = optim_settings
        return mlefit
#TODO: the below is unfinished
class GenericLikelihoodModel(LikelihoodModel):
"""
Allows the fitting of any likelihood function via maximum likelihood.
A subclass needs to specify at least the log-likelihood
If the log-likelihood is specified for each observation, then results that
require the Jacobian will be available. (The other case is not tested yet.)
Notes
-----
Optimization methods that require only a likelihood function are 'nm' and
'powell'
Optimization methods that require a likelihood function and a
score/gradient are 'bfgs', 'cg', and 'ncg'. A function to compute the
Hessian is optional for 'ncg'.
Optimization method that require a likelihood function, a score/gradient,
and a Hessian is 'newton'
If they are not overwritten by a subclass, then numerical gradient,
Jacobian and Hessian of the log-likelihood are caclulated by numerical
forward differentiation. This might results in some cases in precision
problems, and the Hessian might not be positive definite. Even if the
Hessian is not positive definite the covariance matrix of the parameter
estimates based on the outer product of the Jacobian might still be valid.
Examples
--------
see also subclasses in directory miscmodels
import statsmodels.api as sm
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog)
# in this dir
from model import GenericLikelihoodModel
probit_mod = sm.Probit(data.endog, data.exog)
probit_res = probit_mod.fit()
loglike = probit_mod.loglike
score = probit_mod.score
mod = GenericLikelihoodModel(data.endog, data.exog, loglike, score)
res = mod.fit(method="nm", maxiter = 500)
import numpy as np
np.allclose(res.params, probit_res.params)
"""
def __init__(self, endog, exog=None, loglike=None, score=None,
hessian=None, missing='none', extra_params_names=None,
**kwds):
# let them be none in case user wants to use inheritance
if not loglike is None:
self.loglike = loglike
if not score is None:
self.score = score
if not hessian is None:
self.hessian = hessian
self.__dict__.update(kwds)
# TODO: data structures?
#TODO temporary solution, force approx normal
#self.df_model = 9999
#somewhere: CacheWriteWarning: 'df_model' cannot be overwritten
super(GenericLikelihoodModel, self).__init__(endog, exog,
missing=missing)
# this won't work for ru2nmnl, maybe np.ndim of a dict?
if exog is not None:
#try:
self.nparams = (exog.shape[1] if np.ndim(exog) == 2 else 1)
if extra_params_names is not None:
self._set_extra_params_names(extra_params_names)
def _set_extra_params_names(self, extra_params_names):
# check param_names
if extra_params_names is not None:
if self.exog is not None:
self.exog_names.extend(extra_params_names)
else:
self.data.xnames = extra_params_names
self.nparams = len(self.exog_names)
    #this is redundant and not used when subclassing
    def initialize(self):
        """Set numerical-derivative fallbacks and the degrees of freedom.

        Called by ``LikelihoodModel.__init__``.
        """
        # NOTE(review): ``self.score`` is a bound method here and therefore
        # always truthy, so this fallback branch appears to be dead code --
        # confirm before relying on the approx_fprime assignment.
        if not self.score: # right now score is not optional
            self.score = approx_fprime
            if not self.hessian:
                pass
        else: # can use approx_hess_p if we have a gradient
            if not self.hessian:
                pass
        #Initialize is called by
        #statsmodels.model.LikelihoodModel.__init__
        #and should contain any preprocessing that needs to be done for a model
        from statsmodels.tools import tools
        if self.exog is not None:
            # assume constant
            # df_model excludes the constant; df_resid = nobs - rank(exog)
            self.df_model = float(np_matrix_rank(self.exog) - 1)
            self.df_resid = (float(self.exog.shape[0] -
                             np_matrix_rank(self.exog)))
        else:
            self.df_model = np.nan
            self.df_resid = np.nan
        super(GenericLikelihoodModel, self).initialize()
def expandparams(self, params):
'''
expand to full parameter array when some parameters are fixed
Parameters
----------
params : array
reduced parameter array
Returns
-------
paramsfull : array
expanded parameter array where fixed parameters are included
Notes
-----
Calling this requires that self.fixed_params and self.fixed_paramsmask
are defined.
*developer notes:*
This can be used in the log-likelihood to ...
this could also be replaced by a more general parameter
transformation.
'''
paramsfull = self.fixed_params.copy()
paramsfull[self.fixed_paramsmask] = params
return paramsfull
def reduceparams(self, params):
return params[self.fixed_paramsmask]
    def loglike(self, params):
        # total log-likelihood: sum of the per-observation contributions
        return self.loglikeobs(params).sum(0)

    def nloglike(self, params):
        # negative log-likelihood, the objective used by the minimizers
        return -self.loglikeobs(params).sum(0)

    def loglikeobs(self, params):
        # per-observation log-likelihood; subclasses provide nloglikeobs
        return -self.nloglikeobs(params)
def score(self, params):
'''
Gradient of log-likelihood evaluated at params
'''
kwds = {}
kwds.setdefault('centered', True)
return approx_fprime(params, self.loglike, **kwds).ravel()
    def score_obs(self, params, **kwds):
        '''
        Jacobian/Gradient of log-likelihood evaluated at params for each
        observation.

        Uses centered numerical differences of ``loglikeobs``; extra
        keywords are forwarded to ``approx_fprime``.
        '''
        #kwds.setdefault('epsilon', 1e-4)
        kwds.setdefault('centered', True)
        return approx_fprime(params, self.loglikeobs, **kwds)

    # deprecated alias kept for backwards compatibility; emits a
    # DeprecationWarning pointing callers at score_obs
    jac = np.deprecate(score_obs, 'jac', 'score_obs', "Use score_obs method."
                       " jac will be removed in 0.7.")
    def hessian(self, params):
        '''
        Hessian of log-likelihood evaluated at params

        Computed by numerical differentiation of ``loglike``; no step-size
        options are exposed yet.
        '''
        from statsmodels.tools.numdiff import approx_hess
        # need options for hess (epsilon)
        return approx_hess(params, self.loglike)
    def fit(self, start_params=None, method='nm', maxiter=500, full_output=1,
            disp=1, callback=None, retall=0, **kwargs):
        """
        Fit the model using maximum likelihood.

        The rest of the docstring is from
        statsmodels.LikelihoodModel.fit
        """
        if start_params is None:
            if hasattr(self, 'start_params'):
                start_params = self.start_params
            else:
                # fallback: small positive starting values for all params
                start_params = 0.1 * np.ones(self.nparams)

        fit_method = super(GenericLikelihoodModel, self).fit
        mlefit = fit_method(start_params=start_params,
                            method=method, maxiter=maxiter,
                            full_output=full_output,
                            disp=disp, callback=callback, **kwargs)
        genericmlefit = GenericLikelihoodModelResults(self, mlefit)

        #amend param names
        exog_names = [] if (self.exog_names is None) else self.exog_names
        k_miss = len(exog_names) - len(mlefit.params)
        if not k_miss == 0:
            if k_miss < 0:
                # fewer names than parameters: invent generic names
                self._set_extra_params_names(
                    ['par%d' % i for i in range(-k_miss)])
            else:
                # I don't want to raise after we have already fit()
                import warnings
                warnings.warn('more exog_names than parameters', UserWarning)

        return genericmlefit
    #fit.__doc__ += LikelihoodModel.fit.__doc__
class Results(object):
    """
    Generic container for model results.

    Parameters
    ----------
    model : class instance
        the previously specified model instance
    params : array
        parameter estimates from the fit model
    """

    def __init__(self, model, params, **kwd):
        # extra keywords become plain attributes before initialize runs
        self.__dict__.update(kwd)
        self.initialize(model, params, **kwd)
        self._data_attr = []

    def initialize(self, model, params, **kwd):
        """Attach the parameter estimates and a reference to the model."""
        self.params = params
        self.model = model
        if hasattr(model, 'k_constant'):
            self.k_constant = model.k_constant

    def predict(self, exog=None, transform=True, *args, **kwargs):
        """
        Call self.model.predict with self.params as the first argument.

        Parameters
        ----------
        exog : array-like, optional
            The values for which you want to predict.
        transform : bool, optional
            If the model was fit via a formula, do you want to pass
            exog through the formula. Default is True. E.g., if you fit
            a model y ~ log(x1) + log(x2), and transform is True, then
            you can pass a data structure that contains x1 and x2 in
            their original form. Otherwise, you'd need to log the data
            first.
        args, kwargs :
            Some models can take additional arguments or keywords, see the
            predict method of the model for the details.

        Returns
        -------
        prediction : ndarray or pandas.Series
            See self.model.predict
        """
        use_formula = transform and hasattr(self.model, 'formula')
        if use_formula and exog is not None:
            from patsy import dmatrix
            exog = dmatrix(self.model.data.design_info.builder,
                           exog)

        if exog is not None:
            exog = np.asarray(exog)
            model_exog = self.model.exog
            if exog.ndim == 1 and (model_exog.ndim == 1 or
                                   model_exog.shape[1] == 1):
                exog = exog[:, None]
            exog = np.atleast_2d(exog)  # needed in count model shape[1]

        return self.model.predict(self.params, exog, *args, **kwargs)
#TODO: public method?
class LikelihoodModelResults(Results):
"""
Class to contain results from likelihood models
Parameters
-----------
model : LikelihoodModel instance or subclass instance
LikelihoodModelResults holds a reference to the model that is fit.
params : 1d array_like
parameter estimates from estimated model
normalized_cov_params : 2d array
Normalized (before scaling) covariance of params. (dot(X.T,X))**-1
scale : float
For (some subset of models) scale will typically be the
mean square error from the estimated model (sigma^2)
Returns
-------
**Attributes**
mle_retvals : dict
Contains the values returned from the chosen optimization method if
full_output is True during the fit. Available only if the model
is fit by maximum likelihood. See notes below for the output from
the different methods.
mle_settings : dict
Contains the arguments passed to the chosen optimization method.
Available if the model is fit by maximum likelihood. See
LikelihoodModel.fit for more information.
model : model instance
LikelihoodResults contains a reference to the model that is fit.
params : ndarray
The parameters estimated for the model.
scale : float
The scaling factor of the model given during instantiation.
tvalues : array
The t-values of the standard errors.
Notes
-----
The covariance of params is given by scale times normalized_cov_params.
Return values by solver if full_output is True during fit:
'newton'
fopt : float
The value of the (negative) loglikelihood at its
minimum.
iterations : int
Number of iterations performed.
score : ndarray
The score vector at the optimum.
Hessian : ndarray
The Hessian at the optimum.
warnflag : int
1 if maxiter is exceeded. 0 if successful convergence.
converged : bool
True: converged. False: did not converge.
allvecs : list
List of solutions at each iteration.
'nm'
fopt : float
The value of the (negative) loglikelihood at its
minimum.
iterations : int
Number of iterations performed.
warnflag : int
1: Maximum number of function evaluations made.
2: Maximum number of iterations reached.
converged : bool
True: converged. False: did not converge.
allvecs : list
List of solutions at each iteration.
'bfgs'
fopt : float
Value of the (negative) loglikelihood at its minimum.
gopt : float
Value of gradient at minimum, which should be near 0.
Hinv : ndarray
value of the inverse Hessian matrix at minimum. Note
that this is just an approximation and will often be
different from the value of the analytic Hessian.
fcalls : int
Number of calls to loglike.
gcalls : int
Number of calls to gradient/score.
warnflag : int
1: Maximum number of iterations exceeded. 2: Gradient
and/or function calls are not changing.
converged : bool
True: converged. False: did not converge.
allvecs : list
Results at each iteration.
'lbfgs'
fopt : float
Value of the (negative) loglikelihood at its minimum.
gopt : float
Value of gradient at minimum, which should be near 0.
fcalls : int
Number of calls to loglike.
warnflag : int
Warning flag:
- 0 if converged
- 1 if too many function evaluations or too many iterations
- 2 if stopped for another reason
converged : bool
True: converged. False: did not converge.
'powell'
fopt : float
Value of the (negative) loglikelihood at its minimum.
direc : ndarray
Current direction set.
iterations : int
Number of iterations performed.
fcalls : int
Number of calls to loglike.
warnflag : int
1: Maximum number of function evaluations. 2: Maximum number
of iterations.
converged : bool
True : converged. False: did not converge.
allvecs : list
Results at each iteration.
'cg'
fopt : float
Value of the (negative) loglikelihood at its minimum.
fcalls : int
Number of calls to loglike.
gcalls : int
Number of calls to gradient/score.
warnflag : int
1: Maximum number of iterations exceeded. 2: Gradient and/
or function calls not changing.
converged : bool
True: converged. False: did not converge.
allvecs : list
Results at each iteration.
'ncg'
fopt : float
Value of the (negative) loglikelihood at its minimum.
fcalls : int
Number of calls to loglike.
gcalls : int
Number of calls to gradient/score.
hcalls : int
Number of calls to hessian.
warnflag : int
1: Maximum number of iterations exceeded.
converged : bool
True: converged. False: did not converge.
allvecs : list
Results at each iteration.
"""
# by default we use normal distribution
# can be overwritten by instances or subclasses
use_t = False
    def __init__(self, model, params, normalized_cov_params=None, scale=1.,
                 **kwargs):
        """Store estimates and optionally configure a robust covariance.

        ``use_t`` and ``cov_type``/``cov_kwds`` may be passed via ``kwargs``
        so that subclasses can opt into this generic robust-covariance
        handling from their ``fit``.
        """
        super(LikelihoodModelResults, self).__init__(model, params)
        self.normalized_cov_params = normalized_cov_params
        self.scale = scale

        # robust covariance
        # We put cov_type in kwargs so subclasses can decide in fit whether to
        # use this generic implementation
        if 'use_t' in kwargs:
            use_t = kwargs['use_t']
            # None leaves the class default untouched
            if use_t is not None:
                self.use_t = use_t
        if 'cov_type' in kwargs:
            cov_type = kwargs.get('cov_type', 'nonrobust')
            cov_kwds = kwargs.get('cov_kwds', {})

            if cov_type == 'nonrobust':
                self.cov_type = 'nonrobust'
                self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
                                 'covariance matrix of the errors is correctly ' +
                                 'specified.'}
            else:
                from statsmodels.base.covtype import get_robustcov_results
                if cov_kwds is None:
                    cov_kwds = {}
                use_t = self.use_t
                # TODO: we shouldn't need use_t in get_robustcov_results
                # get_robustcov_results mutates self in place (use_self=True)
                get_robustcov_results(self, cov_type=cov_type, use_self=True,
                                      use_t=use_t, **cov_kwds)
    def normalized_cov_params(self):
        """Placeholder; subclasses must provide the unscaled covariance.

        NOTE(review): ``__init__`` assigns an attribute of the same name,
        shadowing this method on instances -- confirm intent.
        """
        raise NotImplementedError
    def _get_robustcov_results(self, cov_type='nonrobust', use_self=True,
                               use_t=None, **cov_kwds):
        """Attach a robust covariance (or its description) to self.

        For 'nonrobust' only a description is stored; any other
        ``cov_type`` is delegated to ``get_robustcov_results``, which
        modifies ``self`` in place.
        """
        from statsmodels.base.covtype import get_robustcov_results
        # NOTE(review): cov_kwds is a **kwargs dict and can never be None
        # here, so this check looks like dead code
        if cov_kwds is None:
            cov_kwds = {}

        if cov_type == 'nonrobust':
            self.cov_type = 'nonrobust'
            self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
                             'covariance matrix of the errors is correctly ' +
                             'specified.'}
        else:
            # TODO: we shouldn't need use_t in get_robustcov_results
            get_robustcov_results(self, cov_type=cov_type, use_self=True,
                                  use_t=use_t, **cov_kwds)
    @cache_readonly
    def llf(self):
        """Log-likelihood of the model evaluated at the estimated params."""
        return self.model.loglike(self.params)
    @cache_readonly
    def bse(self):
        """Standard errors: sqrt of the diagonal of ``cov_params()``."""
        return np.sqrt(np.diag(self.cov_params()))
    @cache_readonly
    def tvalues(self):
        """
        Return the t-statistic for a given parameter estimate.
        """
        # params over standard errors; interpreted as z when use_t is False
        return self.params / self.bse
@cache_readonly
def pvalues(self):
if self.use_t:
df_resid = getattr(self, 'df_resid_inference', self.df_resid)
return stats.t.sf(np.abs(self.tvalues), df_resid)*2
else:
return stats.norm.sf(np.abs(self.tvalues))*2
    def cov_params(self, r_matrix=None, column=None, scale=None, cov_p=None,
                   other=None):
        """
        Returns the variance/covariance matrix.

        The variance/covariance matrix can be of a linear contrast
        of the estimates of params or all params multiplied by scale which
        will usually be an estimate of sigma^2.  Scale is assumed to be
        a scalar.

        Parameters
        ----------
        r_matrix : array-like
            Can be 1d, or 2d.  Can be used alone or with other.
        column :  array-like, optional
            Must be used on its own.  Can be 0d or 1d see below.
        scale : float, optional
            Can be specified or not.  Default is None, which means that
            the scale argument is taken from the model.
        cov_p : array-like, optional
            Precomputed parameter covariance; overrides
            ``normalized_cov_params * scale`` when given.
        other : array-like, optional
            Can be used when r_matrix is specified.

        Returns
        -------
        cov : ndarray
            covariance matrix of the parameter estimates or of linear
            combination of parameter estimates. See Notes.

        Notes
        -----
        (The below are assumed to be in matrix notation.)

        If no argument is specified returns the covariance matrix of a model
        ``(scale)*(X.T X)^(-1)``

        If contrast is specified it pre and post-multiplies as follows
        ``(scale) * r_matrix (X.T X)^(-1) r_matrix.T``

        If contrast and other are specified returns
        ``(scale) * r_matrix (X.T X)^(-1) other.T``

        If column is specified returns
        ``(scale) * (X.T X)^(-1)[column,column]`` if column is 0d

        OR

        ``(scale) * (X.T X)^(-1)[column][:,column]`` if column is 1d
        """
        # l1-regularized fits may contain nan params; nan_dot multiplies
        # with the convention nan * 0 = 0 so those entries stay nan
        if (hasattr(self, 'mle_settings') and
                self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):
            dot_fun = nan_dot
        else:
            dot_fun = np.dot

        if (cov_p is None and self.normalized_cov_params is None and
                not hasattr(self, 'cov_params_default')):
            raise ValueError('need covariance of parameters for computing '
                             '(unnormalized) covariances')
        if column is not None and (r_matrix is not None or other is not None):
            raise ValueError('Column should be specified without other '
                             'arguments.')
        if other is not None and r_matrix is None:
            raise ValueError('other can only be specified with r_matrix')

        if cov_p is None:
            # precedence: explicit cov_p > cov_params_default > scaled
            # normalized_cov_params
            if hasattr(self, 'cov_params_default'):
                cov_p = self.cov_params_default
            else:
                if scale is None:
                    scale = self.scale
                cov_p = self.normalized_cov_params * scale

        if column is not None:
            column = np.asarray(column)
            if column.shape == ():
                return cov_p[column, column]
            else:
                #return cov_p[column][:, column]
                return cov_p[column[:, None], column]
        elif r_matrix is not None:
            r_matrix = np.asarray(r_matrix)
            if r_matrix.shape == ():
                raise ValueError("r_matrix should be 1d or 2d")
            if other is None:
                other = r_matrix
            else:
                other = np.asarray(other)
            tmp = dot_fun(r_matrix, dot_fun(cov_p, np.transpose(other)))
            return tmp
        else:  # if r_matrix is None and column is None:
            return cov_p
    #TODO: make sure this works as needed for GLMs
    def t_test(self, r_matrix, cov_p=None, scale=None,
               use_t=None):
        """
        Compute a t-test for a each linear hypothesis of the form Rb = q

        Parameters
        ----------
        r_matrix : array-like, str, tuple
            - array : If an array is given, a p x k 2d array or length k 1d
              array specifying the linear restrictions. It is assumed
              that the linear combination is equal to zero.
            - str : The full hypotheses to test can be given as a string.
              See the examples.
            - tuple : A tuple of arrays in the form (R, q). If q is given,
              can be either a scalar or a length p row vector.
        cov_p : array-like, optional
            An alternative estimate for the parameter covariance matrix.
            If None is given, self.normalized_cov_params is used.
        scale : float, optional
            An optional `scale` to use.  Default is the scale specified
            by the model fit.
        use_t : bool, optional
            If use_t is None, then the default of the model is used.
            If use_t is True, then the p-values are based on the t
            distribution.
            If use_t is False, then the p-values are based on the normal
            distribution.

        Returns
        -------
        res : ContrastResults instance
            The results for the test are attributes of this results instance.
            The available results have the same elements as the parameter table
            in `summary()`.

        Examples
        --------
        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> data = sm.datasets.longley.load()
        >>> data.exog = sm.add_constant(data.exog)
        >>> results = sm.OLS(data.endog, data.exog).fit()
        >>> r = np.zeros_like(results.params)
        >>> r[5:] = [1,-1]
        >>> print(r)
        [ 0.  0.  0.  0.  0.  1. -1.]

        r tests that the coefficients on the 5th and 6th independent
        variable are the same.

        >>> T_test = results.t_test(r)
        >>> print(T_test)
        <T contrast: effect=-1829.2025687192481, sd=455.39079425193762,
        t=-4.0167754636411717, p=0.0015163772380899498, df_denom=9>
        >>> T_test.effect
        -1829.2025687192481
        >>> T_test.sd
        455.39079425193762
        >>> T_test.tvalue
        -4.0167754636411717
        >>> T_test.pvalue
        0.0015163772380899498

        Alternatively, you can specify the hypothesis tests using a string

        >>> from statsmodels.formula.api import ols
        >>> dta = sm.datasets.longley.load_pandas().data
        >>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
        >>> results = ols(formula, dta).fit()
        >>> hypotheses = 'GNPDEFL = GNP, UNEMP = 2, YEAR/1829 = 1'
        >>> t_test = results.t_test(hypotheses)
        >>> print(t_test)

        See Also
        ---------
        tvalues : individual t statistics
        f_test : for F tests
        patsy.DesignInfo.linear_constraint
        """
        # patsy parses string/tuple hypotheses into (R, q) form
        from patsy import DesignInfo
        names = self.model.data.param_names
        LC = DesignInfo(names).linear_constraint(r_matrix)
        r_matrix, q_matrix = LC.coefs, LC.constants
        num_ttests = r_matrix.shape[0]
        num_params = r_matrix.shape[1]

        if (cov_p is None and self.normalized_cov_params is None and
                not hasattr(self, 'cov_params_default')):
            raise ValueError('Need covariance of parameters for computing '
                             'T statistics')
        if num_params != self.params.shape[0]:
            raise ValueError('r_matrix and params are not aligned')
        if q_matrix is None:
            q_matrix = np.zeros(num_ttests)
        else:
            q_matrix = np.asarray(q_matrix)
            q_matrix = q_matrix.squeeze()
        if q_matrix.size > 1:
            if q_matrix.shape[0] != num_ttests:
                raise ValueError("r_matrix and q_matrix must have the same "
                                 "number of rows")

        if use_t is None:
            #switch to use_t false if undefined
            use_t = (hasattr(self, 'use_t') and self.use_t)

        _t = _sd = None

        _effect = np.dot(r_matrix, self.params)
        # nan_dot multiplies with the convention nan * 0 = 0

        # Perform the test
        if num_ttests > 1:
            _sd = np.sqrt(np.diag(self.cov_params(
                r_matrix=r_matrix, cov_p=cov_p)))
        else:
            _sd = np.sqrt(self.cov_params(r_matrix=r_matrix, cov_p=cov_p))
        # recipr presumably returns a safe elementwise reciprocal (0 where
        # _sd is 0) -- TODO confirm against statsmodels.tools
        _t = (_effect - q_matrix) * recipr(_sd)

        df_resid = getattr(self, 'df_resid_inference', self.df_resid)

        if use_t:
            return ContrastResults(effect=_effect, t=_t, sd=_sd,
                                   df_denom=df_resid)
        else:
            return ContrastResults(effect=_effect, statistic=_t, sd=_sd,
                                   df_denom=df_resid,
                                   distribution='norm')
    def f_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None):
        """
        Compute the F-test for a joint linear hypothesis.

        This is a special case of `wald_test` that always uses the F
        distribution.

        Parameters
        ----------
        r_matrix : array-like, str, or tuple
            - array : An r x k array where r is the number of restrictions to
              test and k is the number of regressors. It is assumed
              that the linear combination is equal to zero.
            - str : The full hypotheses to test can be given as a string.
              See the examples.
            - tuple : A tuple of arrays in the form (R, q), ``q`` can be
              either a scalar or a length k row vector.
        cov_p : array-like, optional
            An alternative estimate for the parameter covariance matrix.
            If None is given, self.normalized_cov_params is used.
        scale : float, optional
            Default is 1.0 for no scaling.
        invcov : array-like, optional
            A q x q array to specify an inverse covariance matrix based on a
            restrictions matrix.

        Returns
        -------
        res : ContrastResults instance
            The results for the test are attributes of this results instance.

        Examples
        --------
        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> data = sm.datasets.longley.load()
        >>> data.exog = sm.add_constant(data.exog)
        >>> results = sm.OLS(data.endog, data.exog).fit()
        >>> A = np.identity(len(results.params))
        >>> A = A[1:,:]

        This tests that each coefficient is jointly statistically
        significantly different from zero.

        >>> print(results.f_test(A))
        <F contrast: F=330.28533923463488, p=4.98403052872e-10,
         df_denom=9, df_num=6>

        Compare this to

        >>> results.fvalue
        330.2853392346658
        >>> results.f_pvalue
        4.98403096572e-10

        >>> B = np.array(([0,0,1,-1,0,0,0],[0,0,0,0,0,1,-1]))

        This tests that the coefficient on the 2nd and 3rd regressors are
        equal and jointly that the coefficient on the 5th and 6th regressors
        are equal.

        >>> print(results.f_test(B))
        <F contrast: F=9.740461873303655, p=0.00560528853174, df_denom=9,
         df_num=2>

        Alternatively, you can specify the hypothesis tests using a string

        >>> from statsmodels.datasets import longley
        >>> from statsmodels.formula.api import ols
        >>> dta = longley.load_pandas().data
        >>> formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
        >>> results = ols(formula, dta).fit()
        >>> hypotheses = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
        >>> f_test = results.f_test(hypotheses)
        >>> print(f_test)

        See Also
        --------
        statsmodels.stats.contrast.ContrastResults
        wald_test
        t_test
        patsy.DesignInfo.linear_constraint

        Notes
        -----
        The matrix `r_matrix` is assumed to be non-singular. More precisely,

        r_matrix (pX pX.T) r_matrix.T

        is assumed invertible. Here, pX is the generalized inverse of the
        design matrix of the model. There can be problems in non-OLS models
        where the rank of the covariance of the noise is not full.
        """
        # thin wrapper: delegate to wald_test with use_f forced to True
        res = self.wald_test(r_matrix, cov_p=cov_p, scale=scale,
                             invcov=invcov, use_f=True)
        return res
    #TODO: untested for GLMs?
    def wald_test(self, r_matrix, cov_p=None, scale=1.0, invcov=None,
                  use_f=None):
        """
        Compute a Wald-test for a joint linear hypothesis.

        Parameters
        ----------
        r_matrix : array-like, str, or tuple
            - array : An r x k array where r is the number of restrictions to
              test and k is the number of regressors. It is assumed that the
              linear combination is equal to zero.
            - str : The full hypotheses to test can be given as a string.
              See the examples.
            - tuple : A tuple of arrays in the form (R, q), ``q`` can be
              either a scalar or a length p row vector.
        cov_p : array-like, optional
            An alternative estimate for the parameter covariance matrix.
            If None is given, self.normalized_cov_params is used.
        scale : float, optional
            Default is 1.0 for no scaling.
        invcov : array-like, optional
            A q x q array to specify an inverse covariance matrix based on a
            restrictions matrix.
        use_f : bool
            If True, then the F-distribution is used. If False, then the
            asymptotic distribution, chisquare is used. If use_f is None, then
            the F distribution is used if the model specifies that use_t is True.
            The test statistic is proportionally adjusted for the distribution
            by the number of constraints in the hypothesis.

        Returns
        -------
        res : ContrastResults instance
            The results for the test are attributes of this results instance.

        See also
        --------
        statsmodels.stats.contrast.ContrastResults
        f_test
        t_test
        patsy.DesignInfo.linear_constraint

        Notes
        -----
        The matrix `r_matrix` is assumed to be non-singular. More precisely,

        r_matrix (pX pX.T) r_matrix.T

        is assumed invertible. Here, pX is the generalized inverse of the
        design matrix of the model. There can be problems in non-OLS models
        where the rank of the covariance of the noise is not full.
        """
        if use_f is None:
            #switch to use_t false if undefined
            use_f = (hasattr(self, 'use_t') and self.use_t)

        # patsy parses string/tuple hypotheses into (R, q) form
        from patsy import DesignInfo
        names = self.model.data.param_names
        LC = DesignInfo(names).linear_constraint(r_matrix)
        r_matrix, q_matrix = LC.coefs, LC.constants

        if (self.normalized_cov_params is None and cov_p is None and
                invcov is None and not hasattr(self, 'cov_params_default')):
            raise ValueError('need covariance of parameters for computing '
                             'F statistics')

        cparams = np.dot(r_matrix, self.params[:, None])
        J = float(r_matrix.shape[0])  # number of restrictions
        if q_matrix is None:
            q_matrix = np.zeros(J)
        else:
            q_matrix = np.asarray(q_matrix)
        if q_matrix.ndim == 1:
            q_matrix = q_matrix[:, None]
            if q_matrix.shape[0] != J:
                raise ValueError("r_matrix and q_matrix must have the same "
                                 "number of rows")
        # Rbq = R b - q, the deviation of the restrictions from zero
        Rbq = cparams - q_matrix
        if invcov is None:
            cov_p = self.cov_params(r_matrix=r_matrix, cov_p=cov_p)
            if np.isnan(cov_p).max():
                raise ValueError("r_matrix performs f_test for using "
                                 "dimensions that are asymptotically "
                                 "non-normal")
            invcov = np.linalg.inv(cov_p)

        # l1-regularized fits may have nan params; nan_dot keeps the
        # nan * 0 = 0 convention
        if (hasattr(self, 'mle_settings') and
                self.mle_settings['optimizer'] in ['l1', 'l1_cvxopt_cp']):
            F = nan_dot(nan_dot(Rbq.T, invcov), Rbq)
        else:
            F = np.dot(np.dot(Rbq.T, invcov), Rbq)

        df_resid = getattr(self, 'df_resid_inference', self.df_resid)
        if use_f:
            # F statistic: chi2 statistic divided by number of restrictions
            F /= J
            return ContrastResults(F=F, df_denom=df_resid,
                                   df_num=invcov.shape[0])
        else:
            return ContrastResults(chi2=F, df_denom=J, statistic=F,
                                   distribution='chi2', distargs=(J,))
    def wald_test_terms(self, skip_single=False, extra_constraints=None,
                        combine_terms=None):
        """
        Compute a sequence of Wald tests for terms over multiple columns

        This computes joined Wald tests for the hypothesis that all
        coefficients corresponding to a `term` are zero.
        `Terms` are defined by the underlying formula or by string matching.

        Parameters
        ----------
        skip_single : boolean
            If true, then terms that consist only of a single column and,
            therefore, refers only to a single parameter is skipped.
            If false, then all terms are included.
        extra_constraints : ndarray
            not tested yet
        combine_terms : None or list of strings
            Each string in this list is matched to the name of the terms or
            the name of the exogenous variables. All columns whose name
            includes that string are combined in one joint test.

        Returns
        -------
        test_result : result instance
            The result instance contains `table` which is a pandas DataFrame
            with the test results: test statistic, degrees of freedom and
            pvalues.

        Examples
        --------
        >>> res_ols = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
                          data).fit()
        >>> res_ols.wald_test_terms()
        <class 'statsmodels.stats.contrast.WaldTestResults'>
                                                  F                P>F  df constraint  df denom
        Intercept                        279.754525  2.37985521351e-22              1        51
        C(Duration, Sum)                   5.367071    0.0245738436636              1        51
        C(Weight, Sum)                    12.432445  3.99943118767e-05              2        51
        C(Duration, Sum):C(Weight, Sum)    0.176002      0.83912310946              2        51

        >>> res_poi = Poisson.from_formula("Days ~ C(Weight) * C(Duration)",
                                           data).fit(cov_type='HC0')
        >>> wt = res_poi.wald_test_terms(skip_single=False,
                                         combine_terms=['Duration', 'Weight'])
        >>> print(wt)
                                    chi2             P>chi2  df constraint
        Intercept              15.695625  7.43960374424e-05              1
        C(Weight)              16.132616  0.000313940174705              2
        C(Duration)             1.009147     0.315107378931              1
        C(Weight):C(Duration)   0.216694     0.897315972824              2
        Duration               11.187849     0.010752286833              3
        Weight                 30.263368  4.32586407145e-06              4
        """
        # lazy import
        from collections import defaultdict

        result = self
        if extra_constraints is None:
            extra_constraints = []
        if combine_terms is None:
            combine_terms = []
        design_info = getattr(result.model.data.orig_exog, 'design_info', None)

        # NOTE(review): extra_constraints was just defaulted to [] above,
        # so this second `is None` check can never trigger -- confirm
        if design_info is None and extra_constraints is None:
            raise ValueError('no constraints, nothing to do')

        identity = np.eye(len(result.params))
        constraints = []
        combined = defaultdict(list)
        if design_info is not None:
            # formula available: one joint constraint per formula term
            for term in design_info.terms:
                cols = design_info.slice(term)
                name = term.name()
                constraint_matrix = identity[cols]

                # check if in combined
                for cname in combine_terms:
                    if cname in name:
                        combined[cname].append(constraint_matrix)

                k_constraint = constraint_matrix.shape[0]
                if skip_single:
                    if k_constraint == 1:
                        continue

                constraints.append((name, constraint_matrix))

            combined_constraints = []
            for cname in combine_terms:
                combined_constraints.append((cname, np.vstack(combined[cname])))
        else:
            # check by exog/params names if there is no formula info
            for col, name in enumerate(result.model.exog_names):
                constraint_matrix = identity[col]

                # check if in combined
                for cname in combine_terms:
                    if cname in name:
                        combined[cname].append(constraint_matrix)

                if skip_single:
                    continue

                constraints.append((name, constraint_matrix))

            combined_constraints = []
            for cname in combine_terms:
                combined_constraints.append((cname, np.vstack(combined[cname])))

        use_t = result.use_t
        distribution = ['chi2', 'F'][use_t]

        res_wald = []
        index = []
        for name, constraint in constraints + combined_constraints + extra_constraints:
            wt = result.wald_test(constraint)
            row = [wt.statistic.item(), wt.pvalue, constraint.shape[0]]
            if use_t:
                row.append(wt.df_denom)
            res_wald.append(row)
            index.append(name)

        # distribution neutral names
        col_names = ['statistic', 'pvalue', 'df_constraint']
        if use_t:
            col_names.append('df_denom')
        # TODO: maybe move DataFrame creation to results class
        from pandas import DataFrame
        table = DataFrame(res_wald, index=index, columns=col_names)
        res = WaldTestResults(None, distribution, None, table=table)
        # TODO: remove temp again, added for testing
        res.temp = constraints + combined_constraints + extra_constraints
        return res
    def conf_int(self, alpha=.05, cols=None, method='default'):
        """
        Returns the confidence interval of the fitted parameters.

        Parameters
        ----------
        alpha : float, optional
            The significance level for the confidence interval.
            ie., The default `alpha` = .05 returns a 95% confidence interval.
        cols : array-like, optional
            `cols` specifies which confidence intervals to return
        method : string
            Not Implemented Yet
            Method to estimate the confidence_interval.
            "Default" : uses self.bse which is based on inverse Hessian for MLE
            "hjjh" :
            "jac" :
            "boot-bse"
            "boot_quant"
            "profile"

        Returns
        --------
        conf_int : array
            Each row contains [lower, upper] limits of the confidence interval
            for the corresponding parameter. The first column contains all
            lower, the second column contains all upper limits.

        Examples
        --------
        >>> import statsmodels.api as sm
        >>> data = sm.datasets.longley.load()
        >>> data.exog = sm.add_constant(data.exog)
        >>> results = sm.OLS(data.endog, data.exog).fit()
        >>> results.conf_int()
        array([[-5496529.48322745, -1467987.78596704],
               [    -177.02903529,      207.15277984],
               [      -0.1115811 ,        0.03994274],
               [      -3.12506664,       -0.91539297],
               [      -1.5179487 ,       -0.54850503],
               [      -0.56251721,        0.460309  ],
               [     798.7875153 ,     2859.51541392]])

        >>> results.conf_int(cols=(2,3))
        array([[-0.1115811 ,  0.03994274],
               [-3.12506664, -0.91539297]])

        Notes
        -----
        The confidence interval is based on Student's t-distribution when
        `use_t` is True, and on the standard normal distribution otherwise.
        Models that wish to use a different distribution should overwrite
        this method.
        """
        bse = self.bse

        if self.use_t:
            dist = stats.t
            df_resid = getattr(self, 'df_resid_inference', self.df_resid)
            q = dist.ppf(1 - alpha / 2, df_resid)
        else:
            dist = stats.norm
            q = dist.ppf(1 - alpha / 2)

        if cols is None:
            lower = self.params - q * bse
            upper = self.params + q * bse
        else:
            cols = np.asarray(cols)
            lower = self.params[cols] - q * bse[cols]
            upper = self.params[cols] + q * bse[cols]
        # lzip pairs each lower bound with its upper bound rowwise
        return np.asarray(lzip(lower, upper))
def save(self, fname, remove_data=False):
'''
save a pickle of this instance
Parameters
----------
fname : string or filehandle
fname can be a string to a file path or filename, or a filehandle.
remove_data : bool
If False (default), then the instance is pickled without changes.
If True, then all arrays with length nobs are set to None before
pickling. See the remove_data method.
In some cases not all arrays will be set to None.
Notes
-----
If remove_data is true and the model result does not implement a
remove_data method then this will raise an exception.
'''
from statsmodels.iolib.smpickle import save_pickle
if remove_data:
self.remove_data()
save_pickle(self, fname)
@classmethod
def load(cls, fname):
'''
load a pickle, (class method)
Parameters
----------
fname : string or filehandle
fname can be a string to a file path or filename, or a filehandle.
Returns
-------
unpickled instance
'''
from statsmodels.iolib.smpickle import load_pickle
return load_pickle(fname)
    def remove_data(self):
        '''remove data arrays, all nobs arrays from result and model
        This reduces the size of the instance, so it can be pickled with less
        memory. Currently tested for use with predict from an unpickled
        results and model instance.
        .. warning:: Since data and some intermediate results have been removed
        calculating new statistics that require them will raise exceptions.
        The exception will occur the first time an attribute is accessed
        that has been set to None.
        Not fully tested for time series models, tsa, and might delete too much
        for prediction or not all that would be possible.
        The list of arrays to delete is maintained as an attribute of the
        result and model instance, except for cached values. These lists could
        be changed before calling remove_data.
        '''
        def wipe(obj, att):
            # Set the attribute named by a dotted path (e.g. 'model.endog')
            # to None: walk to the parent object, then None-out the leaf.
            #get to last element in attribute path
            p = att.split('.')
            att_ = p.pop(-1)
            try:
                # assumes ``reduce`` is in scope at module level (functools
                # on Python 3) -- TODO confirm against the file's imports
                obj_ = reduce(getattr, [obj] + p)
                #print(repr(obj), repr(att))
                #print(hasattr(obj_, att_))
                if hasattr(obj_, att_):
                    #print('removing3', att_)
                    setattr(obj_, att_, None)
            except AttributeError:
                # Missing intermediate attribute: nothing to wipe.
                pass
        # Wipe both the result's own nobs arrays and the model's.
        model_attr = ['model.' + i for i in self.model._data_attr]
        for att in self._data_attr + model_attr:
            #print('removing', att)
            wipe(self, att)
        # Also clear cached nobs-length results (fitted values, residuals).
        data_in_cache = getattr(self, 'data_in_cache', [])
        data_in_cache += ['fittedvalues', 'resid', 'wresid']
        for key in data_in_cache:
            try:
                self._cache[key] = None
            except (AttributeError, KeyError):
                # No cache, or key not cached yet: nothing to do.
                pass
class LikelihoodResultsWrapper(wrap.ResultsWrapper):
    # Maps result attributes to the kind of data labels ('columns', 'rows',
    # 'cov') that should be re-attached when wrapping the raw arrays.
    _attrs = {
        'params': 'columns',
        'bse': 'columns',
        'pvalues': 'columns',
        'tvalues': 'columns',
        'resid': 'rows',
        'fittedvalues': 'rows',
        'normalized_cov_params': 'cov',
    }
    # Same mapping is used for attribute wrapping.
    _wrap_attrs = _attrs
    # Methods whose return values get wrapped with labels.
    _wrap_methods = {
        'cov_params': 'cov',
        'conf_int': 'columns'
    }
# Generate the pass-through wrapper methods for LikelihoodModelResults.
wrap.populate_wrapper(LikelihoodResultsWrapper,
                      LikelihoodModelResults)
class ResultMixin(object):
    """Mixin for likelihood-model results: information criteria, score- and
    Hessian-based covariance estimators, and a simple i.i.d. bootstrap."""
    @cache_readonly
    def df_modelwc(self):
        # collect different ways of defining the number of parameters, used for
        # aic, bic
        if hasattr(self, 'df_model'):
            if hasattr(self, 'hasconst'):
                hasconst = self.hasconst
            else:
                # default assumption
                hasconst = 1
            return self.df_model + hasconst
        else:
            # Fallback: count all estimated parameters.
            return self.params.size
    @cache_readonly
    def aic(self):
        # Akaike information criterion: -2*llf + 2*k.
        return -2 * self.llf + 2 * (self.df_modelwc)
    @cache_readonly
    def bic(self):
        # Bayesian information criterion: -2*llf + log(nobs)*k.
        return -2 * self.llf + np.log(self.nobs) * (self.df_modelwc)
    @cache_readonly
    def score_obsv(self):
        '''cached Jacobian of log-likelihood
        (per-observation score evaluated at the fitted parameters)
        '''
        return self.model.score_obs(self.params)
    # Deprecated alias; accessing ``jacv`` warns and points at score_obsv.
    jacv = np.deprecate(score_obsv, 'jacv', 'score_obsv',
                        "Use score_obsv attribute."
                        " jacv will be removed in 0.7.")
    @cache_readonly
    def hessv(self):
        '''cached Hessian of log-likelihood
        '''
        return self.model.hessian(self.params)
    @cache_readonly
    def covjac(self):
        '''
        covariance of parameters based on outer product of jacobian of
        log-likelihood
        (OPG estimator: inv(J'J))
        '''
        ## if not hasattr(self, '_results'):
        ##     raise ValueError('need to call fit first')
        ##     #self.fit()
        ## self.jacv = jacv = self.jac(self._results.params)
        jacv = self.score_obsv
        return np.linalg.inv(np.dot(jacv.T, jacv))
    @cache_readonly
    def covjhj(self):
        '''covariance of parameters based on HJJH
        dot product of Hessian, Jacobian, Jacobian, Hessian of likelihood
        name should be covhjh
        (sandwich estimator: H^{-1} (J'J) H^{-1})
        '''
        jacv = self.score_obsv
        hessv = self.hessv
        hessinv = np.linalg.inv(hessv)
        ## self.hessinv = hessin = self.cov_params()
        return np.dot(hessinv, np.dot(np.dot(jacv.T, jacv), hessinv))
    @cache_readonly
    def bsejhj(self):
        '''standard deviation of parameter estimates based on covHJH
        '''
        return np.sqrt(np.diag(self.covjhj))
    @cache_readonly
    def bsejac(self):
        '''standard deviation of parameter estimates based on covjac
        '''
        return np.sqrt(np.diag(self.covjac))
    def bootstrap(self, nrep=100, method='nm', disp=0, store=1):
        """simple bootstrap to get mean and variance of estimator
        see notes
        Parameters
        ----------
        nrep : int
            number of bootstrap replications
        method : str
            optimization method to use
        disp : bool
            If true, then optimization prints results
        store : bool
            If true, then parameter estimates for all bootstrap iterations
            are attached in self.bootstrap_results
        Returns
        -------
        mean : array
            mean of parameter estimates over bootstrap replications
        std : array
            standard deviation of parameter estimates over bootstrap
            replications
        Notes
        -----
        This was mainly written to compare estimators of the standard errors of
        the parameter estimates. It uses independent random sampling from the
        original endog and exog, and therefore is only correct if observations
        are independently distributed.
        This will be moved to apply only to models with independently
        distributed observations.
        """
        results = []
        # NOTE(review): debug print of the model class left in place.
        print(self.model.__class__)
        hascloneattr = True if hasattr(self, 'cloneattr') else False
        for i in range(nrep):
            # Resample observation indices with replacement.
            rvsind = np.random.randint(self.nobs, size=self.nobs)
            #this needs to set startparam and get other defining attributes
            #need a clone method on model
            fitmod = self.model.__class__(self.endog[rvsind],
                                          self.exog[rvsind, :])
            if hascloneattr:
                # Copy over extra defining attributes listed in cloneattr.
                for attr in self.model.cloneattr:
                    setattr(fitmod, attr, getattr(self.model, attr))
            fitres = fitmod.fit(method=method, disp=disp)
            results.append(fitres.params)
        results = np.array(results)
        if store:
            self.bootstrap_results = results
        return results.mean(0), results.std(0), results
    def get_nlfun(self, fun):
        #I think this is supposed to get the delta method that is currently
        #in miscmodels count (as part of Poisson example)
        pass
class GenericLikelihoodModelResults(LikelihoodModelResults, ResultMixin):
    """
    A results class for the discrete dependent variable models.
    ..Warning :
    The following description has not been updated to this version/class.
    Where are AIC, BIC, ....? docstring looks like copy from discretemod
    Parameters
    ----------
    model : A DiscreteModel instance
    mlefit : instance of LikelihoodResults
        This contains the numerical optimization results as returned by
        LikelihoodModel.fit(), in a superclass of GenericLikelihoodModels
    Returns
    -------
    *Attributes*
    Warning most of these are not available yet
    aic : float
        Akaike information criterion. -2*(`llf` - p) where p is the number
        of regressors including the intercept.
    bic : float
        Bayesian information criterion. -2*`llf` + ln(`nobs`)*p where p is the
        number of regressors including the intercept.
    bse : array
        The standard errors of the coefficients.
    df_resid : float
        See model definition.
    df_model : float
        See model definition.
    fitted_values : array
        Linear predictor XB.
    llf : float
        Value of the loglikelihood
    llnull : float
        Value of the constant-only loglikelihood
    llr : float
        Likelihood ratio chi-squared statistic; -2*(`llnull` - `llf`)
    llr_pvalue : float
        The chi-squared probability of getting a log-likelihood ratio
        statistic greater than llr. llr has a chi-squared distribution
        with degrees of freedom `df_model`.
    prsquared : float
        McFadden's pseudo-R-squared. 1 - (`llf`/`llnull`)
    """
    def __init__(self, model, mlefit):
        self.model = model
        self.endog = model.endog
        self.exog = model.exog
        self.nobs = model.endog.shape[0]
        # TODO: possibly move to model.fit()
        # and outsource together with patching names
        if hasattr(model, 'df_model'):
            self.df_model = model.df_model
        else:
            # Fall back to the number of estimated parameters.
            self.df_model = len(mlefit.params)
        # retrofitting the model, used in t_test TODO: check design
        self.model.df_model = self.df_model
        if hasattr(model, 'df_resid'):
            self.df_resid = model.df_resid
        else:
            self.df_resid = self.endog.shape[0] - self.df_model
        # retrofitting the model, used in t_test TODO: check design
        self.model.df_resid = self.df_resid
        self._cache = resettable_cache()
        # Adopt all attributes of the optimizer's result (params, llf, ...).
        self.__dict__.update(mlefit.__dict__)
    def summary(self, yname=None, xname=None, title=None, alpha=.05):
        """Summarize the Regression Results
        Parameters
        ----------
        yname : string, optional
            Default is `y`
        xname : list of strings, optional
            Default is `var_##` for ## in p the number of regressors
        title : string, optional
            Title for the top table. If not None, then this replaces the
            default title
        alpha : float
            significance level for the confidence intervals
        Returns
        -------
        smry : Summary instance
            this holds the summary tables and text, which can be printed or
            converted to various output formats.
        See Also
        --------
        statsmodels.iolib.summary.Summary : class to hold summary
            results
        """
        top_left = [('Dep. Variable:', None),
                    ('Model:', None),
                    ('Method:', ['Maximum Likelihood']),
                    ('Date:', None),
                    ('Time:', None),
                    ('No. Observations:', None),
                    ('Df Residuals:', None),  # [self.df_resid]),
                    ('Df Model:', None),  # [self.df_model])
                    ]
        top_right = [  # ('R-squared:', ["%#8.3f" % self.rsquared]),
                       # ('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
                       # ('F-statistic:', ["%#8.4g" % self.fvalue] ),
                       # ('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
                       ('Log-Likelihood:', None),  # ["%#6.4g" % self.llf]),
                       ('AIC:', ["%#8.4g" % self.aic]),
                       ('BIC:', ["%#8.4g" % self.bic])
                       ]
        if title is None:
            title = self.model.__class__.__name__ + ' ' + "Results"
        #create summary table instance
        from statsmodels.iolib.summary import Summary
        smry = Summary()
        smry.add_table_2cols(self, gleft=top_left, gright=top_right,
                             yname=yname, xname=xname, title=title)
        smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
                              use_t=False)
        return smry
| bsd-3-clause |
dymkowsk/mantid | Framework/PythonInterface/plugins/algorithms/StringToPng.py | 1 | 1866 | #pylint: disable=no-init,invalid-name
from __future__ import (absolute_import, division, print_function)
from six import u
import mantid
class StringToPng(mantid.api.PythonAlgorithm):
    """Mantid algorithm that renders a string (unicode escapes supported)
    into a PNG image file using matplotlib's headless Agg backend."""

    def category(self):
        """ Category
        """
        return "DataHandling\\Plots"

    def name(self):
        """ Algorithm name
        """
        return "StringToPng"

    def summary(self):
        """One-line description shown in the algorithm dialog."""
        return "Creates an image file containing a string."

    def checkGroups(self):
        # Do not expand workspace groups; this algorithm has no workspace input.
        return False

    def PyInit(self):
        # Declare input properties: the string to render and the output path.
        self.declareProperty("String", "", mantid.kernel.StringMandatoryValidator(), "String to plot")
        self.declareProperty(mantid.api.FileProperty('OutputFilename', '', action=mantid.api.FileAction.Save, extensions=["png"]),
                             doc='Name of the image file to savefile.')

    def PyExec(self):
        ok2run = ''
        try:
            import matplotlib
            # BUGFIX: the version check must live inside the try block.
            # Previously it ran unconditionally, so a failed import raised a
            # NameError on ``matplotlib.__version__`` instead of the intended
            # RuntimeError below.
            from distutils.version import LooseVersion
            if LooseVersion(matplotlib.__version__) < LooseVersion("1.2.0"):
                ok2run = 'Wrong version of matplotlib. Required >= 1.2.0'
        except ImportError:
            ok2run = 'Problem importing matplotlib'
        if ok2run != '':
            raise RuntimeError(ok2run)
        # Force a non-interactive backend: this runs headless.
        matplotlib.use("agg")
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(.1, .1))
        ax1 = plt.axes(frameon=False)
        # Decode literal escape sequences (e.g. "\\n", "\\u00b5") in the input.
        ax1.text(0., 1, bytearray(u(self.getProperty("String").valueAsStr), 'utf-8').decode('unicode_escape'), va="center", fontsize=16)
        ax1.axes.get_xaxis().set_visible(False)
        ax1.axes.get_yaxis().set_visible(False)
        # (plt.show() removed: it is a no-op under the Agg backend and can
        # emit a warning on newer matplotlib.)
        filename = self.getProperty("OutputFilename").value
        plt.savefig(filename, bbox_inches='tight')
        plt.close(fig)

mantid.api.AlgorithmFactory.subscribe(StringToPng)
| gpl-3.0 |
rpalovics/Alpenglow | python/test_alpenglow/evaluation/test_DcgScore.py | 2 | 4634 | import alpenglow as prs
import alpenglow.Getter as rs
import alpenglow.evaluation
import pandas as pd
import math
import unittest
class TestDcgScore(unittest.TestCase):
    """Checks DcgScore against a hand-computed DCG over recorded ranks."""
    def test_dcgScore(self):
        # Recorded per-event ranks; values > top_k (here 102) encode
        # "item not found in the top-100 recommendation list".
        ranks = [102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 65, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 100, 102, 102, 18, 102, 102, 100, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 100, 100, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 100, 100, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 102, 67, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 100, 102, 102, 102, 102, 100, 102, 100, 102, 102, 102, 102, 58, 100, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 100, 102, 102, 102, 102, 102, 100, 100, 102, 102, 100, 102, 102, 102, 100, 100, 100, 102, 102, 100, 102, 102, 102, 102, 100, 102, 102, 102, 47, 102, 102, 102, 100, 100, 102, 102, 102, 100, 102, 100, 102, 102, 102, 102, 102, 100, 100, 102, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 100, 100, 102, 54, 100, 102, 100, 100, 102, 102, 100, 100, 102, 100, 102, 102, 102, 102, 102, 
102, 102, 102, 102, 102, 100, 100, 102, 100, 102, 102, 102, 102, 100, 102, 102, 102, 100, 102, 100, 100, 47, 102, 100, 102, 102, 102, 100, 102, 102, 100, 100, 102, 102, 100, 100, 102, 102, 102, 102, 102, 102, 100, 102, 102, 100, 100, 102, 102, 102, 102, 102, 100, 102, 102, 100, 100, 100, 100, 100, 102, 100, 102, 102, 100, 102, 102, 100, 100, 37, 100, 102, 102, 102, 102, 102, 100, 102, 100, 102, 102, 102, 55, 102, 102, 100, 102, 102, 102, 102, 102, 102, 100, 100, 102, 102, 102, 102, 100, 102, 102, 102, 102, 102, 22, 102, 100, 100, 100, 102, 102, 102, 102, 102, 100, 102, 100, 102, 100, 102, 102, 102, 102, 102, 100, 102, 102, 102, 100, 102, 102, 100, 102, 100, 100, 102, 100, 102, 102, 100, 100, 100, 102, 102, 100, 100, 100, 102, 102, 100, 102, 102, 102, 102, 102, 102, 102, 100, 102, 102, 100, 100, 102, 102, 102, 102, 100, 102, 102, 102, 102, 100, 102, 102, 102, 21, 100, 102, 100, 102, 100, 102, 102, 102, 100, 100, 102, 102, 100, 102, 102, 102, 100, 102, 102, 100, 102, 102, 102, 102, 100, 102, 102, 102, 100, 100, 102, 102, 102, 100, 102, 102, 102, 100, 102, 102, 100, 102, 102, 102, 100, 102, 100, 102, 100, 100, 102, 100, 102, 100, 100, 100, 102, 102, 102, 102, 102, 100, 102, 100, 102, 102, 100, 102, 102, 102, 100, 102, 100, 102, 102, 102, 100, 102, 102, 102, 102, 102, 100, 102, 100, 100, 100, 102, 100, 102, 100, 102, 102, 100, 100, 100, 100, 100, 100, 102, 88, 102, 102, 102, 100, 102, 100, 100, 102, 100, 102, 102, 100, 102, 102, 102, 102, 102, 102, 100, 102, 102, 102, 100, 100, 100, 100, 102, 100, 100, 102, 102, 100, 102, 100, 102, 102, 102, 100, 102, 102, 100, 102, 14, 102, 100, 102, 102, 102, 102, 100, 102, 100, 100, 100, 102, 100, 102, 100, 102, 102, 102, 102, 102, 100, 100, 100, 100, 102, 102, 102, 102, 102, 102, 100, 102, 100, 100, 102, 100, 100, 102, 102, 100, 100, 100, 102, 100, 102, 102, 102, 102, 102, 102, 102, 100, 100, 102, 102, 100, 102, 102, 100, 100, 102, 102, 100, 102, 100, 102, 102, 100]
        # Build a ranking frame in the format DcgScore expects; events with
        # rank >= 100 are marked as misses (rank = None).
        facRankings = pd.DataFrame.from_records(
            [
                (i, i, 0, r + 1 if r < 100 else None)
                for i, r in enumerate(ranks)
            ],
            columns=["id", "time", "prediction", "rank"]
        ).set_index("id")
        facRankings.top_k = 100
        dcg = alpenglow.evaluation.DcgScore(facRankings).mean()
        # Reference DCG: log(2)/log(rank+2) per hit, 0 per miss.
        dcgs = [math.log(2) / math.log(r + 2) if r < 100 else 0 for r in ranks]
        self.assertAlmostEqual(dcg, sum(dcgs) / len(dcgs))
| apache-2.0 |
jmontgom10/PRISM_pyBDP | 02_buildCalibration.py | 2 | 14442 | #This scirpt will build the master calibration fields
#==========
#MasterBias
#MasterDark
#MasterFlat
#Import whatever modules will be used
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from astropy.table import Column
from astropy.io import fits, ascii
from scipy import stats
# Import AstroImage
import astroimage as ai
# Add the header handler to the BaseImage class
from PRISM_header_handler import PRISM_header_handler
# Install the PRISM-specific FITS header handler on all AstroImage classes.
ai.BaseImage.set_header_handler(PRISM_header_handler)
#Setup the path delimiter for this operating system
delim = os.path.sep
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# This is the location of the raw data for the observing run
rawDir = 'C:\\Users\\Jordan\\FITS Data\\PRISM_data\\raw_data\\201612\\'
# Define the path to the parent directory for all pyBDP products
# NOTE(review): 'FITS Data' (space) above vs 'FITS_data' (underscore) below --
# confirm both paths are intentional.
pyBDP_data = 'C:\\Users\\Jordan\\FITS_data\\PRISM_data\\pyBDP_data\\201612\\'
# Define the directory into which the average calibration images will be placed
calibrationDir = os.path.join(pyBDP_data, 'master_calibration_images')
if (not os.path.isdir(calibrationDir)):
    os.mkdir(calibrationDir, 0o755)
# Read the fileIndex back in as an astropy Table
print('\nReading file index from disk')
indexFile = os.path.join(pyBDP_data, 'rawFileIndex.csv')
fileIndex = Table.read(indexFile, format='csv')
# Locate where the bias, dark, flat, and science images are in the index
biasBool = (fileIndex['OBSTYPE'] == 'BIAS')
darkBool = (fileIndex['OBSTYPE'] == 'DARK')
flatBool = (fileIndex['OBSTYPE'] == 'FLAT')
sciBool = (fileIndex['OBSTYPE'] == 'OBJECT')
# Extract lists of the waveband, polaroid angle, binning, and lights-on/off
waveBand = fileIndex['FILTER']
polAng = fileIndex['POLPOS']
binning = fileIndex['BINNING']
#==============================================================================
# ***************************** BIAS *****************************************
# Setup the paths to the bias images and compute the bias map
#==============================================================================
# Find the number of unique binnings used in biases
uniqBins = np.unique(binning).astype(int)
masterBiasDict = {}
for thisBin in uniqBins:
    # Construct the filename for this bias.
    masterBiasFilename = 'MasterBias{0:g}.fits'.format(thisBin)
    masterBiasFilename = os.path.join(calibrationDir, masterBiasFilename)
    # Test if there is a MasterBias image for this binning level.
    if os.path.isfile(masterBiasFilename):
        # Read in the file if it exists
        print('\nLoading file into masterBias list')
        print(masterBiasFilename)
        masterBias = ai.MasterBias.read(masterBiasFilename)
        masterBias = masterBias.astype(np.float32)
        masterBiasDict.update({thisBin: masterBias})
        # A master bias for this binning already exists: skip to next binning.
        continue
    # Locate the raw bias images with this binning
    thisBiasBinBool = np.logical_and(biasBool, (binning == thisBin))
    thisBiasBinInds = np.where(thisBiasBinBool)
    biasImgFiles = fileIndex['Filename'][thisBiasBinInds]
    # Otherwise proceed to read in and process a masterBias image
    print('\nProcessing {0} biases with ({1:g}x{1:g}) binning'.format(
        biasImgFiles.size, thisBin))
    # Loop through each of the files and add them to the biasImgList list
    biasImgList = []
    for filename in biasImgFiles:
        # Read the raw bias image from the disk
        rawBias = ai.RawBias.read(filename)
        # Append the raw bias (overscan corrected) image to the list of biases
        biasImgList.append(rawBias)
    # Construct an ImageStack out of the bias image list
    biasStack = ai.ImageStack(biasImgList)
    # Compute the master bias
    masterBias = biasStack.combine_images()
    # Store the master bias in the dictionary of calibration data
    masterBiasDict.update({thisBin: masterBias})
    # Write masterBias object to disk
    masterBias.write(masterBiasFilename, dtype=np.float32, clobber=True)
    print('\nThe mean bias level is {0:g} counts\n'.
          format(masterBias.data.mean()))
    # Free the large intermediates before the next iteration.
    del biasStack
    del masterBias
#==============================================================================
# ***************************** DARKS *****************************************
# Setup the paths to the dark images and compute the dark current map
#==============================================================================
# NOTE: ``uniqBins`` is reused from the bias section above.
masterDarkDict = {}
for thisBin in uniqBins:
    # Construct the filename for this dark.
    masterDarkFilename = 'MasterDark{0:g}.fits'.format(thisBin)
    masterDarkFilename = os.path.join(calibrationDir, masterDarkFilename)
    # Test if there is a MasterDark image for this binning level.
    if os.path.isfile(masterDarkFilename):
        # Read in the file if it exists
        print('\nLoading file into masterDark list')
        print(masterDarkFilename)
        masterDark = ai.MasterDark.read(masterDarkFilename)
        masterDark = masterDark.astype(np.float32)
        masterDarkDict.update({thisBin: masterDark})
        # A master dark for this binning already exists: skip to next binning.
        continue
    # Locate the raw dark images with this binning
    thisDarkBinBool = np.logical_and(darkBool, (binning == thisBin))
    thisDarkBinInds = np.where(thisDarkBinBool)
    darkImgFiles = fileIndex['Filename'][thisDarkBinInds]
    # Otherwise continue to read in and process a masterDark image
    print('\nProcessing {0} darks with ({1:g}x{1:g}) binning'.format(
        darkImgFiles.size, thisBin))
    # Loop through each of the files and add them to the darkImgList list
    darkImgList = []
    for filename in darkImgFiles:
        # Read the raw dark image from disk
        rawDark = ai.RawDark.read(filename)
        # Apply the basic data processing (bias subtraction)
        reducedDark = rawDark.process_image(
            bias=masterBiasDict[thisBin]
        )
        # Divide the reduced dark by its own exposure time
        # (just to make sure that all darks have the same properties)
        reducedDark = reducedDark.divide_by_expTime()
        # Append the reduced dark to the list of dark images
        darkImgList.append(reducedDark)
    # Construct an ImageStack out of the dark image list
    darkStack = ai.ImageStack(darkImgList)
    # Compute the master dark
    masterDark = darkStack.combine_images()
    # Convert to a 32 bit float
    masterDark = masterDark.astype(np.float32)
    # Store the master dark in the dictionary of calibration data
    masterDarkDict.update({thisBin: masterDark})
    # Write masterDark object to disk
    masterDark.write(masterDarkFilename, dtype=np.float32, clobber=True)
    print('\nThe mean dark level is {0:g} counts\n'.
          format(masterDark.data.mean()))
    # Free the large intermediates before the next iteration.
    del darkStack
    del masterDark
#==============================================================================
# ***************************** FLATS *****************************************
# Setup the paths to the flat images and compute the flat map
#==============================================================================
# Find the number of unique wavebands used in flats
uniqBands = np.unique(waveBand)
# Create an empty dictionary to store the masterFlatDict,
# keyed to each band/polAng/binning combination
masterFlatDict = {}
# Loop through each waveband
for thisBand in uniqBands:
    # Compute the unique values for the polaroid rotation angle
    thisFlatWaveBool = np.logical_and(flatBool, (waveBand == thisBand))
    thisFlatWaveInds = np.where(thisFlatWaveBool)
    uniqPolAngs = np.unique(polAng[thisFlatWaveInds])
    for thisAng in uniqPolAngs:
        # Compute the unique values for the binning level
        thisFlatAngBool = np.logical_and(thisFlatWaveBool, (polAng == thisAng))
        thisFlatAngInds = np.where(thisFlatAngBool)
        uniqBins = np.unique(binning[thisFlatAngInds]).astype(int)
        for thisBin in uniqBins:
            # Construct the flatKey and filename for this image
            flatKey = (thisBand, thisAng, thisBin)
            flatKeyStr = '{0:s}_{1:g}_{2:g}'.format(*flatKey)
            masterFlatFilename = 'MasterFlat' + flatKeyStr + '.fits'
            masterFlatFilename = os.path.join(calibrationDir, masterFlatFilename)
            # Test if the file exists
            if os.path.isfile(masterFlatFilename):
                # Read in the file if it exists
                print('\nLoading file into masterFlat list')
                print(masterFlatFilename)
                masterFlat = ai.MasterFlat.read(masterFlatFilename)
                masterFlat = masterFlat.astype(np.float32)
                masterFlatDict.update({flatKey: masterFlat})
                # A master flat already exists: skip to the next combination.
                continue
            # Locate the raw flat images with this binning
            thisFlatBinBool = np.logical_and(thisFlatAngBool, (binning == thisBin))
            thisFlatBinInds = np.where(thisFlatBinBool)
            flatImgFiles = fileIndex['Filename'][thisFlatBinInds]
            # Otherwise continue to read in and process a masterFlat image
            print('\nProcessing {0} flats for'.format(flatImgFiles.size))
            print('band = {0:s}'.format(thisBand))
            print('polAng = {0:g}'.format(thisAng))
            print('binning = ({0:g}x{0:g})'.format(thisBin))
            # Loop through each of the files and add them to flatImgList
            flatImgList = []
            for filename in flatImgFiles:
                # Read the raw flat image from disk
                rawFlat = ai.RawFlat.read(filename)
                # Apply basic data processing (bias and dark correction)
                reducedFlat = rawFlat.process_image(
                    bias=masterBiasDict[thisBin],
                    dark=masterDarkDict[thisBin]
                )
                # Divide the flat image by its own mode
                reducedFlat = reducedFlat/reducedFlat.mode
                # Add the mode-normalized flat to the list of flat images
                flatImgList.append(reducedFlat)
            # Loop through all the flats and determine which ones have
            # lights-on vs. lights-off
            # Start by grabbing all the stats for each flat.
            flatStats = [img.sigma_clipped_stats() for img in flatImgList]
            flatStats = np.array(flatStats)
            # Now compute the SNR level in each flat (mean / stddev)
            flatSNRs = flatStats[:,1]/flatStats[:,2]
            # Estimate that the images with SNR > 1.5 are lights-on images
            thisFlatOnBool = (flatSNRs > 1.5)
            thisFlatOnInds = np.where(thisFlatOnBool)
            thisFlatOffBool = np.logical_not(thisFlatOnBool)
            thisFlatOffInds = np.where(thisFlatOffBool)
            # Convert the list of images into an array for indexing
            flatImgList = np.array(flatImgList)
            # Grab the lights-on flats and compute the average image
            if np.sum(thisFlatOnBool.astype(int)) > 0:
                # Grab the lights-on images
                flatOnImgList = flatImgList[thisFlatOnInds]
                # Construct an ImageStack out of the lights-on image list
                flatStack = ai.ImageStack(flatOnImgList)
                # Compute the master flat
                masterOnFlat = flatStack.combine_images()
                # Divide the master flat by its own mode (re-normalize)
                masterOnFlat = masterOnFlat/masterOnFlat.mode
                # Convert to a 32 bit float
                masterOnFlat = masterOnFlat.astype(np.float32)
            else:
                raise RuntimeError('There are no flat images. This is a major problem!')
            # Grab the lights-off flats and compute the average image
            if np.sum(thisFlatOffBool.astype(int)) > 0:
                # BUGFIX: grab the lights-OFF images. This previously indexed
                # with ``thisFlatOnInds`` (copy-paste error), so the
                # "lights-off" master was built from lights-ON frames.
                flatOffImgList = flatImgList[thisFlatOffInds]
                # Construct an ImageStack out of the lights-off image list
                flatStack = ai.ImageStack(flatOffImgList)
                # Compute the master flat
                masterOffFlat = flatStack.combine_images()
                # Divide the master flat by its own mode (re-normalize)
                masterOffFlat = masterOffFlat/masterOffFlat.mode
                # Convert to a 32 bit float
                masterOffFlat = masterOffFlat.astype(np.float32)
            else:
                # Not having any lights-off flats probably isn't a big deal
                flatOffImgList = []
                masterOffFlat = 0
            # Compute the difference between the lights-on and lights-off flats
            masterFlat = masterOnFlat - masterOffFlat
            # Make sure the header is fully up to date
            masterFlat._properties_to_header()
            # Catch any instances of where the flat is zero and set to one
            # (avoids divide-by-zero when the flat is later applied).
            zeroPix = (masterFlat.data == 0)
            if np.sum(zeroPix) > 0:
                # Copy the data array
                fixedArray = masterFlat.data.copy()
                # Find the pixels where there are zeros
                zeroInds = np.where(zeroPix)
                fixedArray[zeroInds] = 1
                # Replace the master flat data array
                masterFlat.data = fixedArray
            # Store the master flat in the dictionary of calibration data
            masterFlatDict.update({flatKey: masterFlat})
            # Write masterFlat object to disk
            masterFlat.write(masterFlatFilename, dtype=np.float32, clobber=True)
            # Free the large intermediates before the next iteration.
            del flatOnImgList
            del flatOffImgList
            del flatStack
# Let the user know everything completed
print('\n..........')
print('Finished producing master calibration fields')
jat255/hyperspy | hyperspy/drawing/_widgets/range.py | 4 | 22490 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from matplotlib.widgets import SpanSelector
import inspect
import logging
from hyperspy.drawing.widgets import ResizableDraggableWidgetBase
from hyperspy.events import Events, Event
_logger = logging.getLogger(__name__)
# Track if we have already warned when the widget is out of range
already_warn_out_of_range = False
def in_interval(number, interval):
    """Return True if `number` lies in the closed interval
    [interval[0], interval[1]], False otherwise."""
    # Return the comparison directly instead of an if/else on True/False.
    return interval[0] <= number <= interval[1]
class RangeWidget(ResizableDraggableWidgetBase):
"""RangeWidget is a span-patch based widget, which can be
dragged and resized by mouse/keys. Basically a wrapper for
ModifiablepanSelector so that it conforms to the common widget interface.
For optimized changes of geometry, the class implements two methods
'set_bounds' and 'set_ibounds', to set the geometry of the rectangle by
value and index space coordinates, respectivly.
Implements the internal method _validate_geometry to make sure the patch
will always stay within bounds.
"""
    def __init__(self, axes_manager, ax=None, alpha=0.5, **kwargs):
        # Split kwargs: anything accepted by matplotlib's SpanSelector is
        # stored for later construction of the span patch; everything else
        # is forwarded to the base widget class.
        self._SpanSelector_kwargs = {}
        for key in inspect.signature(SpanSelector).parameters.keys():
            if key in kwargs:
                self._SpanSelector_kwargs[key] = kwargs.pop(key)
        super(RangeWidget, self).__init__(axes_manager, alpha=alpha, **kwargs)
        # The ModifiableSpanSelector instance; created in _add_patch_to.
        self.span = None
    def set_on(self, value):
        # Turn the widget on/off: create/remove the span patch and
        # (dis)connect the matplotlib event handlers.
        if value is not self.is_on() and self.ax is not None:
            if value is True:
                self._add_patch_to(self.ax)
                self.connect(self.ax)
            elif value is False:
                self.disconnect()
            try:
                self.ax.figure.canvas.draw_idle()
            except BaseException:  # figure does not exist
                pass
            if value is False:
                self.ax = None
        # NOTE(review): the double underscore name-mangles this to
        # ``_RangeWidget__is_on``; confirm the base class's is_on() reads the
        # same attribute.
        self.__is_on = value
    def _add_patch_to(self, ax):
        # Create the span selector on the given axes and configure it to
        # report changes back to this widget.
        self.span = ModifiableSpanSelector(ax, **self._SpanSelector_kwargs)
        self.span.set_initial(self._get_range())
        self.span.bounds_check = True
        self.span.snap_position = self.snap_position
        self.span.snap_size = self.snap_size
        self.span.can_switch = True
        self.span.events.changed.connect(self._span_changed, {'obj': 'widget'})
        self.span.step_ax = self.axes[0]
        # Pick-radius (in pixels) for grabbing the span edges.
        self.span.tolerance = 5
        # Expose the selector's rectangle as this widget's patch.
        self.patch = [self.span.rect]
        self.patch[0].set_color(self.color)
        self.patch[0].set_alpha(self.alpha)
    def _span_changed(self, widget):
        # Callback fired by the span selector: convert its (left, right)
        # data-space range back to the widget's (position, size).
        r = self._get_range()
        pr = widget.range
        if r != pr:
            dx = self.axes[0].scale
            # Half-pixel shift: widget positions refer to pixel centres,
            # while the span edges sit on pixel boundaries.
            x = pr[0] + 0.5 * dx
            w = pr[1] + 0.5 * dx - x
            old_position, old_size = self.position, self.size
            self._pos = np.array([x])
            self._size = np.array([w])
            self._validate_geometry()
            if self._pos != np.array([x]) or self._size != np.array([w]):
                # Geometry was clipped during validation; sync patch back.
                self._update_patch_size()
            self._apply_changes(old_size=old_size, old_position=old_position)
def _get_range(self):
p = self._pos[0]
w = self._size[0]
offset = self.axes[0].scale
p -= 0.5 * offset
return (p, p + w)
def _parse_bounds_args(self, args, kwargs):
if len(args) == 1:
return args[0]
elif len(args) == 4:
return args
elif len(kwargs) == 1 and 'bounds' in kwargs:
return kwargs.values()[0]
else:
x = kwargs.pop('x', kwargs.pop('left', self._pos[0]))
if 'right' in kwargs:
w = kwargs.pop('right') - x
else:
w = kwargs.pop('w', kwargs.pop('width', self._size[0]))
return x, w
    def set_ibounds(self, *args, **kwargs):
        """
        Set bounds by indices. Bounds can either be specified in order left,
        bottom, width, height; or by keywords:
        * 'bounds': tuple (left, width)
        OR
        * 'x'/'left'
        * 'w'/'width', alternatively 'right'
        If specifying with keywords, any unspecified dimensions will be kept
        constant (note: width will be kept, not right).
        """
        ix, iw = self._parse_bounds_args(args, kwargs)
        # Convert index-space bounds to calibrated axis values.
        x = self.axes[0].index2value(ix)
        w = self._i2v(self.axes[0], ix + iw) - x
        old_position, old_size = self.position, self.size
        self._pos = np.array([x])
        self._size = np.array([w])
        # Fire position/size change events as appropriate.
        self._apply_changes(old_size=old_size, old_position=old_position)
    def set_bounds(self, *args, **kwargs):
        """
        Set bounds by values. Bounds can either be specified in order left,
        bottom, width, height; or by keywords:

        * 'bounds': tuple (left, width)

        OR

        * 'x'/'left'
        * 'w'/'width', alternatively 'right' (x+w)

        If specifying with keywords, any unspecified dimensions will be kept
        constant (note: width will be kept, not right).

        Out-of-range values are clamped to the axis limits.  The first time
        a clamp happens an info message is logged; the module-level
        ``already_warn_out_of_range`` flag suppresses repeated messages
        until the widget is fully back in range.
        """
        global already_warn_out_of_range

        def warn(obj, parameter, value):
            # Log once per out-of-range episode; `value` is the clamped
            # value actually applied (x/w were reassigned before the call).
            global already_warn_out_of_range
            if not already_warn_out_of_range:
                _logger.info('{}: {} is out of range. It is therefore set '
                             'to the value of {}'.format(obj, parameter, value))
                already_warn_out_of_range = True
        x, w = self._parse_bounds_args(args, kwargs)
        l0, h0 = self.axes[0].low_value, self.axes[0].high_value
        scale = self.axes[0].scale
        # `in_range` counts how many of the two checks (x, w) passed.
        in_range = 0
        if x < l0:
            x = l0
            warn(self, '`x` or `left`', x)
        elif h0 <= x:
            x = h0 - scale
            warn(self, '`x` or `left`', x)
        else:
            in_range += 1
        if w < scale:
            w = scale
            warn(self, '`width` or `right`', w)
        elif not (l0 + scale <= x + w <= h0 + scale):
            # Right edge out of bounds: clamp width on a resize, shift x on
            # a move (1-element array comparisons act element-wise).
            if self.size != np.array([w]):  # resize
                w = h0 + scale - self.position[0]
                warn(self, '`width` or `right`', w)
            if self.position != np.array([x]):  # moved
                x = h0 + scale - self.size[0]
                warn(self, '`x` or `left`', x)
        else:
            in_range += 1
        # if we are in range again, reset `already_warn_out_of_range` to False
        if in_range == 2 and already_warn_out_of_range:
            _logger.info('{} back in range.'.format(self.__class__.__name__))
            already_warn_out_of_range = False
        old_position, old_size = self.position, self.size
        self._pos = np.array([x])
        self._size = np.array([w])
        self._apply_changes(old_size=old_size, old_position=old_position)
    def _update_patch_position(self):
        # Position and size changes share one patch-update path.
        self._update_patch_geometry()
    def _update_patch_size(self):
        # Position and size changes share one patch-update path.
        self._update_patch_geometry()
def _update_patch_geometry(self):
if self.is_on() and self.span is not None:
self.span.range = self._get_range()
    def disconnect(self):
        """Disconnect the widget and tear down its span selector."""
        super(RangeWidget, self).disconnect()
        if self.span:
            self.span.turn_off()
            self.span = None
    def _set_snap_position(self, value):
        # Propagate the snap setting to the span selector and refresh.
        super(RangeWidget, self)._set_snap_position(value)
        self.span.snap_position = value
        self._update_patch_geometry()
    def _set_snap_size(self, value):
        # Propagate the snap setting to the span selector and refresh.
        super(RangeWidget, self)._set_snap_size(value)
        self.span.snap_size = value
        self._update_patch_size()
    def _validate_geometry(self, x1=None):
        """Make sure the entire patch always stays within bounds. First the
        position (either from position property or from x1 argument), is
        limited within the bounds. Then, if the right edge are out of
        bounds, the position is changed so that they will be at the limit.

        The modified geometry is stored, but no change checks are performed.
        Call _apply_changes after this in order to process any changes (the
        size might change if it is set larger than the bounds size).
        """
        xaxis = self.axes[0]

        # Make sure widget size is not larger than axes
        self._size[0] = min(self._size[0], xaxis.size * xaxis.scale)

        # Make sure x1 is within bounds
        if x1 is None:
            x1 = self._pos[0]  # Get it if not supplied
        if x1 < xaxis.low_value:
            x1 = xaxis.low_value
        elif x1 > xaxis.high_value:
            x1 = xaxis.high_value

        # Make sure x2 is with upper bound.
        # If not, keep dims, and change x1!
        x2 = x1 + self._size[0]
        if x2 > xaxis.high_value + xaxis.scale:
            x2 = xaxis.high_value + xaxis.scale
            x1 = x2 - self._size[0]

        self._pos = np.array([x1])
        # Apply snaps if appropriate
        if self.snap_position:
            self._do_snap_position()
        if self.snap_size:
            self._do_snap_size()
class ModifiableSpanSelector(SpanSelector):
    """A matplotlib SpanSelector whose span can be modified after creation.

    After the initial selection, `set_initial` switches the selector into
    "modify mode": dragging near either edge resizes the span, dragging its
    middle moves it.  Geometry changes are broadcast through the
    ``events.changed``, ``events.moved`` and ``events.resized`` events.
    """

    def __init__(self, ax, **kwargs):
        onselect = kwargs.pop('onselect', self.dummy)
        direction = kwargs.pop('direction', 'horizontal')
        useblit = kwargs.pop('useblit', ax.figure.canvas.supports_blit)
        SpanSelector.__init__(self, ax, onselect, direction=direction,
                              useblit=useblit, span_stays=False, **kwargs)
        # The tolerance in points to pick the rectangle sizes
        self.tolerance = 2
        self.on_move_cid = None
        self._range = None
        # Axis used for snapping and bounds checks (set by the owner).
        self.step_ax = None
        self.bounds_check = False
        self._button_down = False
        self.snap_size = False
        self.snap_position = False
        self.events = Events()
        self.events.changed = Event(doc="""
            Event that triggers when the widget was changed.
            Arguments:
            ----------
                obj:
                    The widget that changed
            """, arguments=['obj'])
        self.events.moved = Event(doc="""
            Event that triggers when the widget was moved.
            Arguments:
            ----------
                obj:
                    The widget that changed
            """, arguments=['obj'])
        self.events.resized = Event(doc="""
            Event that triggers when the widget was resized.
            Arguments:
            ----------
                obj:
                    The widget that changed
            """, arguments=['obj'])
        self.can_switch = False

    def dummy(self, *args, **kwargs):
        """Default no-op `onselect` callback."""
        pass

    def _get_range(self):
        self.update_range()
        return self._range

    def _set_range(self, value):
        """Set the span range, triggering moved/resized/changed events
        only for the aspects that actually changed."""
        self.update_range()
        if self._range != value:
            resized = ((self._range[1] - self._range[0]) !=
                       (value[1] - value[0]))
            moved = self._range[0] != value[0]
            self._range = value
            if moved:
                self._set_span_x(value[0])
                self.events.moved.trigger(self)
            if resized:
                self._set_span_width(value[1] - value[0])
                self.events.resized.trigger(self)
            if moved or resized:
                self.draw_patch()
                self.events.changed.trigger(self)

    range = property(_get_range, _set_range)

    def _set_span_x(self, value):
        # The rect's x maps to the data axis only for horizontal spans.
        if self.direction == 'horizontal':
            self.rect.set_x(value)
        else:
            self.rect.set_y(value)

    def _set_span_width(self, value):
        if self.direction == 'horizontal':
            self.rect.set_width(value)
        else:
            self.rect.set_height(value)

    def _get_span_x(self):
        if self.direction == 'horizontal':
            return self.rect.get_x()
        else:
            return self.rect.get_y()

    def _get_span_width(self):
        if self.direction == 'horizontal':
            return self.rect.get_width()
        else:
            return self.rect.get_height()

    def _get_mouse_position(self, event):
        """Return the event coordinate along the span's data axis."""
        if self.direction == 'horizontal':
            return event.xdata
        else:
            return event.ydata

    def set_initial(self, initial_range=None):
        """
        Remove selection events, set the spanner, and go to modify mode.
        """
        if initial_range is not None:
            self.range = initial_range

        self.disconnect_events()
        # And connect to the new ones
        self.connect_event('button_press_event', self.mm_on_press)
        self.connect_event('button_release_event', self.mm_on_release)
        self.connect_event('draw_event', self.update_background)

        self.rect.set_visible(True)
        self.rect.contains = self.contains

    def update(self, *args):
        # Override the SpanSelector `update` method to blit properly all
        # artists before we go to "modify mode" in `set_initial`.
        self.draw_patch()

    def draw_patch(self, *args):
        """Update the patch drawing.
        """
        try:
            if self.useblit and hasattr(self.ax, 'hspy_fig'):
                self.ax.hspy_fig._update_animated()
            elif self.ax.figure is not None:
                self.ax.figure.canvas.draw_idle()
        except AttributeError:
            pass  # When figure is None, typically when closing

    def contains(self, mouseevent):
        """Hit test used as the rect's `contains` method in modify mode."""
        x, y = self.rect.get_transform().inverted().transform_point(
            (mouseevent.x, mouseevent.y))
        v = x if self.direction == 'vertical' else y
        # Assert y is correct first
        if not (0.0 <= v <= 1.0):
            return False, {}
        x_pt = self._get_point_size_in_data_units()
        hit = self._range[0] - x_pt, self._range[1] + x_pt
        # Bug fix: the original compared the *bound method*
        # `self._get_mouse_position` against floats instead of calling it,
        # which raises TypeError on Python 3.
        if hit[0] < self._get_mouse_position(mouseevent) < hit[1]:
            return True, {}
        return False, {}

    def release(self, event):
        """When the button is released, the span stays in the screen and the
        interactivity machinery passes to modify mode"""
        if self.pressv is None or (self.ignore(
                event) and not self._button_down):
            return
        self._button_down = False
        self.update_range()
        self.set_initial()

    def _get_point_size_in_data_units(self):
        # Calculate the point size in data units
        invtrans = self.ax.transData.inverted()
        (x, y) = (1, 0) if self.direction == 'horizontal' else (0, 1)
        x_pt = self.tolerance * abs((invtrans.transform((x, y)) -
                                     invtrans.transform((0, 0)))[y])
        return x_pt

    def mm_on_press(self, event):
        """Modify-mode press handler: decide whether the drag resizes the
        left edge, the right edge, or moves the whole span."""
        if self.ignore(event) and not self._button_down:
            return
        self._button_down = True

        x_pt = self._get_point_size_in_data_units()

        # Determine the size of the regions for moving and stretching
        self.update_range()
        left_region = self._range[0] - x_pt, self._range[0] + x_pt
        right_region = self._range[1] - x_pt, self._range[1] + x_pt
        middle_region = self._range[0] + x_pt, self._range[1] - x_pt

        if in_interval(self._get_mouse_position(event), left_region) is True:
            self.on_move_cid = \
                self.canvas.mpl_connect('motion_notify_event',
                                        self.move_left)
        elif in_interval(self._get_mouse_position(event), right_region):
            self.on_move_cid = \
                self.canvas.mpl_connect('motion_notify_event',
                                        self.move_right)
        elif in_interval(self._get_mouse_position(event), middle_region):
            self.pressv = self._get_mouse_position(event)
            self.on_move_cid = \
                self.canvas.mpl_connect('motion_notify_event',
                                        self.move_rect)
        else:
            return

    def update_range(self):
        """Refresh the cached `_range` from the patch geometry."""
        self._range = (self._get_span_x(),
                       self._get_span_x() + self._get_span_width())

    def switch_left_right(self, x, left_to_right):
        """Swap which edge is being dragged when the pointer crosses the
        opposite edge (only possible when `can_switch` is set)."""
        if left_to_right:
            if self.step_ax is not None:
                if x > self.step_ax.high_value + self.step_ax.scale:
                    return
            w = self._range[1] - self._range[0]
            r0 = self._range[1]
            # NOTE(review): only this branch updates the patch x here; in
            # the other branch the subsequent move handler does it --
            # presumably intentional, confirm before changing.
            self._set_span_x(r0)
            r1 = r0 + w
            self.canvas.mpl_disconnect(self.on_move_cid)
            self.on_move_cid = \
                self.canvas.mpl_connect('motion_notify_event',
                                        self.move_right)
        else:
            if self.step_ax is not None:
                if x < self.step_ax.low_value - self.step_ax.scale:
                    return
            w = self._range[1] - self._range[0]
            r1 = self._range[0]
            r0 = r1 - w
            self.canvas.mpl_disconnect(self.on_move_cid)
            self.on_move_cid = \
                self.canvas.mpl_connect('motion_notify_event',
                                        self.move_left)
        self._range = (r0, r1)

    def move_left(self, event):
        """Drag handler for the left edge (resizes the span)."""
        if self._button_down is False or self.ignore(event):
            return
        x = self._get_mouse_position(event)
        if self.step_ax is not None:
            if (self.bounds_check and
                    x < self.step_ax.low_value - self.step_ax.scale):
                return
            if self.snap_position:
                snap_offset = self.step_ax.offset - 0.5 * self.step_ax.scale
            elif self.snap_size:
                snap_offset = self._range[1]
            if self.snap_position or self.snap_size:
                # Round to the nearest axis step.
                rem = (x - snap_offset) % self.step_ax.scale
                if rem / self.step_ax.scale < 0.5:
                    rem = -rem
                else:
                    rem = self.step_ax.scale - rem
                x += rem
        # Do not move the left edge beyond the right one.
        if x >= self._range[1]:
            if self.can_switch and x > self._range[1]:
                self.switch_left_right(x, True)
                self.move_right(event)
            return
        width_increment = self._range[0] - x
        if self._get_span_width() + width_increment <= 0:
            return
        self._set_span_x(x)
        self._set_span_width(self._get_span_width() + width_increment)
        self.update_range()
        self.events.moved.trigger(self)
        self.events.resized.trigger(self)
        self.events.changed.trigger(self)
        if self.onmove_callback is not None:
            self.onmove_callback(*self._range)
        self.draw_patch()

    def move_right(self, event):
        """Drag handler for the right edge (resizes the span)."""
        if self._button_down is False or self.ignore(event):
            return
        x = self._get_mouse_position(event)
        if self.step_ax is not None:
            if (self.bounds_check and
                    x > self.step_ax.high_value + self.step_ax.scale):
                return
            if self.snap_size:
                snap_offset = self._range[0]
                # Round to the nearest axis step.
                rem = (x - snap_offset) % self.step_ax.scale
                if rem / self.step_ax.scale < 0.5:
                    rem = -rem
                else:
                    rem = self.step_ax.scale - rem
                x += rem
        # Do not move the right edge beyond the left one.
        if x <= self._range[0]:
            if self.can_switch and x < self._range[0]:
                self.switch_left_right(x, False)
                self.move_left(event)
            return
        width_increment = x - self._range[1]
        if self._get_span_width() + width_increment <= 0:
            return
        self._set_span_width(self._get_span_width() + width_increment)
        self.update_range()
        self.events.resized.trigger(self)
        self.events.changed.trigger(self)
        if self.onmove_callback is not None:
            self.onmove_callback(*self._range)
        self.draw_patch()

    def move_rect(self, event):
        """Drag handler for the span middle (moves the span rigidly)."""
        if self._button_down is False or self.ignore(event):
            return
        x_increment = self._get_mouse_position(event) - self.pressv
        if self.step_ax is not None:
            if (self.bounds_check
                    and self._range[0] <= self.step_ax.low_value
                    and self._get_mouse_position(event) <= self.pressv):
                return
            if (self.bounds_check
                    and self._range[1] >= self.step_ax.high_value
                    and self._get_mouse_position(event) >= self.pressv):
                return
            if self.snap_position:
                # Round the increment to the nearest axis step.
                rem = x_increment % self.step_ax.scale
                if rem / self.step_ax.scale < 0.5:
                    rem = -rem
                else:
                    rem = self.step_ax.scale - rem
                x_increment += rem
        self._set_span_x(self._get_span_x() + x_increment)
        self.update_range()
        self.pressv += x_increment
        self.events.moved.trigger(self)
        self.events.changed.trigger(self)
        if self.onmove_callback is not None:
            self.onmove_callback(*self._range)
        self.draw_patch()

    def mm_on_release(self, event):
        """Modify-mode release handler: stop tracking mouse motion."""
        if self._button_down is False or self.ignore(event):
            return
        self._button_down = False
        self.canvas.mpl_disconnect(self.on_move_cid)
        self.on_move_cid = None

    def turn_off(self):
        """Disconnect all events and remove the patch from the axes."""
        self.disconnect_events()
        if self.on_move_cid is not None:
            self.canvas.mpl_disconnect(self.on_move_cid)
        self.ax.patches.remove(self.rect)
        self.ax.figure.canvas.draw_idle()
| gpl-3.0 |
juanshishido/project-eta | code/utils/linear_fit.py | 3 | 3780 | from __future__ import division
import numpy as np # the Python array package
import pandas as pd
import matplotlib.pyplot as plt # the Python plotting package
from scipy.stats import gamma
import nibabel as nib
import numpy.linalg as npl
from utils.load_data import *
from utils.stimuli import events2neural
def hrf(times):
    """Canonical hemodynamic response function sampled at `times`.

    The response is modeled as a gamma-pdf peak minus a scaled gamma-pdf
    undershoot, normalized so that the maximum sampled value is 0.6.

    Parameters
    ----------
    times : array-like of times in seconds

    Returns
    -------
    ndarray of the same length as `times` with the hemodynamic response
    value at each time point
    """
    peak = gamma.pdf(times, 6)
    undershoot = gamma.pdf(times, 12)
    response = peak - 0.35 * undershoot
    # Scale so the sampled maximum equals 0.6
    return 0.6 * response / np.max(response)
# build the task, gain and loss regressors of the design matrix
def build_design(data, behavdata):
    """Build a design matrix with convolved task/gain/loss regressors and
    drift terms.

    Parameters
    ----------
    data : fMRI data array for a single subject (last axis is time)
    behavdata : behavioral data for a single subject, with 'gain' and
        'loss' columns (one row per trial)

    Returns
    -------
    numpy array with 6 columns: intercept, the three demeaned HRF-convolved
    regressors (task, gain, loss), and linear and quadratic drifts
    (docstring fix: the original claimed 4 columns).

    Example
    -------
    >>> data = get_image(1,1).get_data()
    >>> behavdata = get_behav(1,1)
    >>> build_design(data,behavdata).shape
    (240, 6)
    """
    gains = behavdata['gain']
    losses = behavdata['loss']
    TR = 2  # repetition time in seconds
    n_vols = data.shape[-1]
    neural_prediction = time_course_behav(behavdata, TR, n_vols)
    tr_times = np.arange(n_vols) * TR
    # Spread the per-trial gain/loss amounts onto the TR time course;
    # non-zero entries of the neural prediction mark trial onsets.
    gain_loss = np.zeros((neural_prediction.shape[0], 2))
    j = 0
    for i in range(len(neural_prediction)):
        if neural_prediction[i] != 0:
            gain_loss[i, 0] = gains[j]
            gain_loss[i, 1] = losses[j]
            j = j + 1
    gains = gain_loss[:, 0]
    losses = gain_loss[:, 1]
    # Convolve each regressor with the HRF and drop the convolution tail so
    # each column has exactly one value per volume.
    hrf_at_trs = hrf(tr_times)
    convolved1 = np.convolve(neural_prediction, hrf_at_trs)
    convolved2 = np.convolve(gains, hrf_at_trs)
    convolved3 = np.convolve(losses, hrf_at_trs)
    convolved = np.column_stack((convolved1, convolved2, convolved3))
    n_to_remove = len(hrf_at_trs) - 1
    std_convolved = np.zeros((convolved.shape[0] - n_to_remove,
                              convolved.shape[1]))
    design = np.ones((convolved.shape[0] - n_to_remove, 6))
    # Demean (but do not rescale) the convolved regressors.  The original
    # computed an unused standard deviation here; it has been removed.
    for i in range(3):
        conv = convolved[:-n_to_remove, i]
        std_convolved[:, i] = conv - np.mean(conv)
    # Linear and quadratic drift terms; the quadratic one is demeaned so it
    # is orthogonal to the intercept column.
    linear_drift = np.linspace(-1, 1, n_vols)
    quadratic_drift = linear_drift ** 2
    quadratic_drift -= np.mean(quadratic_drift)
    # Assemble the final design
    design[:, 1:4] = std_convolved
    design[:, 4] = linear_drift
    design[:, 5] = quadratic_drift
    return design
def regression_fit(data, design):
    """Fit an ordinary least-squares model to every voxel time course.

    Parameters
    ----------
    data : fMRI data array for a single subject (last axis is time)
    design : design matrix, e.g. as returned by `build_design`

    Returns
    -------
    2-D numpy array of estimated betas: one row per design column, one
    column per voxel
    """
    n_trs = data.shape[-1]
    # Flatten the spatial dimensions: one row per voxel, one column per TR.
    voxels_by_time = np.reshape(data, (-1, n_trs))
    # Least-squares solution via the pseudo-inverse of the design.
    return npl.pinv(design).dot(voxels_by_time.T)
# Run the module doctests when executed directly (note: the examples
# require the project's data files, so they only pass in a full checkout).
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| bsd-3-clause |
mikegraham/dask | dask/dataframe/tests/test_dataframe.py | 1 | 61150 | from operator import getitem
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import pytest
import dask
from dask.async import get_sync
from dask.utils import raises, ignoring
import dask.dataframe as dd
from dask.dataframe.core import (repartition_divisions, _loc,
_coerce_loc_index, aca, reduction, _concat, _Frame)
from dask.dataframe.utils import eq
# Shared test fixture: a hand-built three-partition dask DataFrame with
# known divisions (0, 5, 9, 9) and its in-memory pandas equivalent `full`
# to compare results against.
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
                              index=[0, 1, 3]),
       ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
                              index=[5, 6, 8]),
       ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
                              index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 5, 9, 9])
full = d.compute()
def test_Dataframe():
    """Basic DataFrame ops (arithmetic, masking, column access, reductions)
    match pandas."""
    expected = pd.Series([2, 3, 4, 5, 6, 7, 8, 9, 10],
                         index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
                         name='a')
    assert eq(d['a'] + 1, expected)
    tm.assert_index_equal(d.columns, pd.Index(['a', 'b']))
    assert eq(d[d['b'] > 2], full[full['b'] > 2])
    assert eq(d[['a', 'b']], full[['a', 'b']])
    assert eq(d.a, full.a)
    assert d.b.mean().compute() == full.b.mean()
    assert np.allclose(d.b.var().compute(), full.b.var())
    assert np.allclose(d.b.std().compute(), full.b.std())
    assert d.index._name == d.index._name  # this is deterministic
    assert repr(d)
def test_head_tail():
    """head/tail read only the first/last partition and build deterministic
    task graphs."""
    assert eq(d.head(2), full.head(2))
    assert eq(d.head(3), full.head(3))
    assert eq(d.head(2), dsk[('x', 0)].head(2))
    assert eq(d['a'].head(2), full['a'].head(2))
    assert eq(d['a'].head(3), full['a'].head(3))
    assert eq(d['a'].head(2), dsk[('x', 0)]['a'].head(2))
    # Graph keys must be deterministic for equal calls, distinct otherwise.
    assert (sorted(d.head(2, compute=False).dask) ==
            sorted(d.head(2, compute=False).dask))
    assert (sorted(d.head(2, compute=False).dask) !=
            sorted(d.head(3, compute=False).dask))

    assert eq(d.tail(2), full.tail(2))
    assert eq(d.tail(3), full.tail(3))
    assert eq(d.tail(2), dsk[('x', 2)].tail(2))
    assert eq(d['a'].tail(2), full['a'].tail(2))
    assert eq(d['a'].tail(3), full['a'].tail(3))
    assert eq(d['a'].tail(2), dsk[('x', 2)]['a'].tail(2))
    assert (sorted(d.tail(2, compute=False).dask) ==
            sorted(d.tail(2, compute=False).dask))
    assert (sorted(d.tail(2, compute=False).dask) !=
            sorted(d.tail(3, compute=False).dask))
def test_index_head():
    """Index.head matches a slice of the pandas index."""
    assert eq(d.index.head(2), full.index[:2])
    assert eq(d.index.head(3), full.index[:3])
def test_Series():
    """Column access yields dd.Series and elementwise ops match pandas."""
    assert isinstance(d.a, dd.Series)
    assert isinstance(d.a + 1, dd.Series)
    assert eq((d + 1), full + 1)
    assert repr(d.a).startswith('dd.Series')
def test_repr():
    """repr includes type name, token and npartitions, and stays short."""
    df = pd.DataFrame({'x': list(range(100))})
    ddf = dd.from_pandas(df, 3)
    for x in [ddf, ddf.index, ddf.x]:
        assert type(x).__name__ in repr(x)
        assert x._name[:5] in repr(x)
        assert str(x.npartitions) in repr(x)
        assert len(repr(x)) < 80
def test_Index():
    """dd.Index round-trips object and datetime indexes; no nested .index."""
    for case in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
                 pd.DataFrame(np.random.randn(10, 5),
                              index=pd.date_range('2011-01-01', freq='D', periods=10))]:
        ddf = dd.from_pandas(case, 3)
        assert eq(ddf.index, case.index)
        assert repr(ddf.index).startswith('dd.Index')
        assert raises(AttributeError, lambda: ddf.index.index)
def test_attributes():
    """Column names appear in dir() only when they are valid identifiers."""
    assert 'a' in dir(d)
    assert 'foo' not in dir(d)
    assert raises(AttributeError, lambda: d.foo)

    df = dd.from_pandas(pd.DataFrame({'a b c': [1, 2, 3]}), npartitions=2)
    assert 'a b c' not in dir(df)
def test_column_names():
    """Column selection and arithmetic preserve (or drop) names like pandas."""
    tm.assert_index_equal(d.columns, pd.Index(['a', 'b']))
    tm.assert_index_equal(d[['b', 'a']].columns, pd.Index(['b', 'a']))
    assert d['a'].name == 'a'
    assert (d['a'] + 1).name == 'a'
    assert (d['a'] + d['b']).name is None
def test_index_names():
    """The index name survives from_pandas and compute."""
    assert d.index.name is None

    idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x')
    df = pd.DataFrame(np.random.randn(10, 5), idx)
    ddf = dd.from_pandas(df, 3)
    assert ddf.index.name == 'x'
    assert ddf.index.compute().name == 'x'
def test_set_index():
    """set_index by column name or Series matches pandas, with and without
    an explicit npartitions."""
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 2, 6]},
                                  index=[0, 1, 3]),
           ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 5, 8]},
                                  index=[5, 6, 8]),
           ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [9, 1, 8]},
                                  index=[9, 9, 9])}
    d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
    full = d.compute()

    d2 = d.set_index('b', npartitions=3)
    assert d2.npartitions == 3
    assert d2.index.name == 'b'
    assert eq(d2, full.set_index('b'))

    d3 = d.set_index(d.b, npartitions=3)
    assert d3.npartitions == 3
    assert d3.index.name == 'b'
    assert eq(d3, full.set_index(full.b))

    d4 = d.set_index('b')
    assert d4.index.name == 'b'
    assert eq(d4, full.set_index('b'))
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_drop(drop):
    """The `drop` flag of set_index behaves like pandas for string and
    numeric column labels."""
    pdf = pd.DataFrame({'A': list('ABAABBABAA'),
                        'B': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                        'C': [1, 2, 3, 2, 1, 3, 2, 4, 2, 3]})
    ddf = dd.from_pandas(pdf, 3)

    assert eq(ddf.set_index('A', drop=drop),
              pdf.set_index('A', drop=drop))
    assert eq(ddf.set_index('B', drop=drop),
              pdf.set_index('B', drop=drop))
    assert eq(ddf.set_index('C', drop=drop),
              pdf.set_index('C', drop=drop))
    assert eq(ddf.set_index(ddf.A, drop=drop),
              pdf.set_index(pdf.A, drop=drop))
    assert eq(ddf.set_index(ddf.B, drop=drop),
              pdf.set_index(pdf.B, drop=drop))
    assert eq(ddf.set_index(ddf.C, drop=drop),
              pdf.set_index(pdf.C, drop=drop))

    # numeric columns
    pdf = pd.DataFrame({0: list('ABAABBABAA'),
                        1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                        2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3]})
    ddf = dd.from_pandas(pdf, 3)
    assert eq(ddf.set_index(0, drop=drop),
              pdf.set_index(0, drop=drop))
    assert eq(ddf.set_index(2, drop=drop),
              pdf.set_index(2, drop=drop))
def test_set_index_raises_error_on_bad_input():
    """set_index with a list of columns raises NotImplementedError."""
    df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
                       'b': [7, 6, 5, 4, 3, 2, 1]})
    ddf = dd.from_pandas(df, 2)

    msg = r"Dask dataframe does not yet support multi-indexes"
    with tm.assertRaisesRegexp(NotImplementedError, msg):
        ddf.set_index(['a', 'b'])
def test_rename_columns():
    """Assigning `.columns` renames in place; length mismatch raises (GH 819)."""
    # GH 819
    df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
                       'b': [7, 6, 5, 4, 3, 2, 1]})
    ddf = dd.from_pandas(df, 2)

    ddf.columns = ['x', 'y']
    df.columns = ['x', 'y']
    tm.assert_index_equal(ddf.columns, pd.Index(['x', 'y']))
    tm.assert_index_equal(ddf._pd.columns, pd.Index(['x', 'y']))
    assert eq(ddf, df)

    msg = r"Length mismatch: Expected axis has 2 elements, new values have 4 elements"
    with tm.assertRaisesRegexp(ValueError, msg):
        ddf.columns = [1, 2, 3, 4]
def test_rename_series():
    """Assigning `.name` renames a dask Series like pandas (GH 819)."""
    # GH 819
    s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
    ds = dd.from_pandas(s, 2)

    s.name = 'renamed'
    ds.name = 'renamed'
    assert s.name == 'renamed'
    assert eq(ds, s)
def test_describe():
    """describe matches pandas when approximate quantiles equal the exact
    ones (data chosen so they coincide)."""
    # prepare test case which approx quantiles will be the same as actuals
    s = pd.Series(list(range(20)) * 4)
    df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20})

    ds = dd.from_pandas(s, 4)
    ddf = dd.from_pandas(df, 4)

    assert eq(s.describe(), ds.describe())
    assert eq(df.describe(), ddf.describe())

    # remove string columns
    df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20,
                       'c': list('abcd') * 20})
    ddf = dd.from_pandas(df, 4)
    assert eq(df.describe(), ddf.describe())
def test_cumulative():
    """cumsum/cumprod/cummin/cummax match pandas on both axes and on
    Series."""
    pdf = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
    ddf = dd.from_pandas(pdf, 5)

    assert eq(ddf.cumsum(), pdf.cumsum())
    assert eq(ddf.cumprod(), pdf.cumprod())
    assert eq(ddf.cummin(), pdf.cummin())
    assert eq(ddf.cummax(), pdf.cummax())

    assert eq(ddf.cumsum(axis=1), pdf.cumsum(axis=1))
    assert eq(ddf.cumprod(axis=1), pdf.cumprod(axis=1))
    assert eq(ddf.cummin(axis=1), pdf.cummin(axis=1))
    assert eq(ddf.cummax(axis=1), pdf.cummax(axis=1))

    assert eq(ddf.a.cumsum(), pdf.a.cumsum())
    assert eq(ddf.a.cumprod(), pdf.a.cumprod())
    assert eq(ddf.a.cummin(), pdf.a.cummin())
    assert eq(ddf.a.cummax(), pdf.a.cummax())
def test_dropna():
    """dropna matches pandas for Series, DataFrame, `how` and `subset`."""
    df = pd.DataFrame({'x': [np.nan, 2, 3, 4, np.nan, 6],
                       'y': [1, 2, np.nan, 4, np.nan, np.nan],
                       'z': [1, 2, 3, 4, np.nan, np.nan]},
                      index=[10, 20, 30, 40, 50, 60])
    ddf = dd.from_pandas(df, 3)

    assert eq(ddf.x.dropna(), df.x.dropna())
    assert eq(ddf.y.dropna(), df.y.dropna())
    assert eq(ddf.z.dropna(), df.z.dropna())

    assert eq(ddf.dropna(), df.dropna())
    assert eq(ddf.dropna(how='all'), df.dropna(how='all'))
    assert eq(ddf.dropna(subset=['x']), df.dropna(subset=['x']))
    assert eq(ddf.dropna(subset=['y', 'z']), df.dropna(subset=['y', 'z']))
    assert eq(ddf.dropna(subset=['y', 'z'], how='all'),
              df.dropna(subset=['y', 'z'], how='all'))
def test_where_mask():
    """where/mask match pandas across aligned, misaligned-index and
    misaligned-column cases, with dask and pandas conditions."""
    pdf1 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]})
    ddf1 = dd.from_pandas(pdf1, 2)
    pdf2 = pd.DataFrame({'a': [True, False, True] * 3,
                         'b': [False, False, True] * 3})
    ddf2 = dd.from_pandas(pdf2, 2)

    # different index
    pdf3 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]},
                        index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
    ddf3 = dd.from_pandas(pdf3, 2)
    pdf4 = pd.DataFrame({'a': [True, False, True] * 3,
                         'b': [False, False, True] * 3},
                        index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
    ddf4 = dd.from_pandas(pdf4, 2)

    # different columns
    pdf5 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                         'b': [9, 4, 2, 6, 2, 3, 1, 6, 2],
                         'c': [5, 6, 7, 8, 9, 10, 11, 12, 13]},
                        index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
    ddf5 = dd.from_pandas(pdf5, 2)
    pdf6 = pd.DataFrame({'a': [True, False, True] * 3,
                         'b': [False, False, True] * 3,
                         'd': [False] * 9,
                         'e': [True] * 9},
                        index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
    ddf6 = dd.from_pandas(pdf6, 2)

    cases = [(ddf1, ddf2, pdf1, pdf2),
             (ddf1.repartition([0, 3, 6, 8]), ddf2, pdf1, pdf2),
             (ddf1, ddf4, pdf3, pdf4),
             (ddf3.repartition([0, 4, 6, 8]), ddf4.repartition([5, 9, 10, 13]),
              pdf3, pdf4),
             (ddf5, ddf6, pdf5, pdf6),
             (ddf5.repartition([0, 4, 7, 8]), ddf6, pdf5, pdf6),

             # use pd.DataFrame as cond
             (ddf1, pdf2, pdf1, pdf2),
             (ddf1, pdf4, pdf3, pdf4),
             (ddf5, pdf6, pdf5, pdf6)]

    for ddf, ddcond, pdf, pdcond in cases:
        assert isinstance(ddf, dd.DataFrame)
        assert isinstance(ddcond, (dd.DataFrame, pd.DataFrame))
        assert isinstance(pdf, pd.DataFrame)
        assert isinstance(pdcond, pd.DataFrame)

        assert eq(ddf.where(ddcond), pdf.where(pdcond))
        assert eq(ddf.mask(ddcond), pdf.mask(pdcond))
        assert eq(ddf.where(ddcond, -ddf), pdf.where(pdcond, -pdf))
        assert eq(ddf.mask(ddcond, -ddf), pdf.mask(pdcond, -pdf))

        # ToDo: Should work on pandas 0.17
        # https://github.com/pydata/pandas/pull/10283
        # assert eq(ddf.where(ddcond.a, -ddf), pdf.where(pdcond.a, -pdf))
        # assert eq(ddf.mask(ddcond.a, -ddf), pdf.mask(pdcond.a, -pdf))

        assert eq(ddf.a.where(ddcond.a), pdf.a.where(pdcond.a))
        assert eq(ddf.a.mask(ddcond.a), pdf.a.mask(pdcond.a))
        assert eq(ddf.a.where(ddcond.a, -ddf.a), pdf.a.where(pdcond.a, -pdf.a))
        assert eq(ddf.a.mask(ddcond.a, -ddf.a), pdf.a.mask(pdcond.a, -pdf.a))
def test_map_partitions_multi_argument():
    """map_partitions accepts multiple frames and scalar extras."""
    assert eq(dd.map_partitions(lambda a, b: a + b, None, d.a, d.b),
              full.a + full.b)
    assert eq(dd.map_partitions(lambda a, b, c: a + b + c, None, d.a, d.b, 1),
              full.a + full.b + 1)
def test_map_partitions():
    """map_partitions round-trips identity and supports reductions."""
    assert eq(d.map_partitions(lambda df: df, columns=d.columns), full)
    assert eq(d.map_partitions(lambda df: df), full)
    result = d.map_partitions(lambda df: df.sum(axis=1), columns=None)
    assert eq(result, full.sum(axis=1))
def test_map_partitions_names():
    """Graph key names are deterministic for equal map_partitions calls."""
    func = lambda x: x
    assert sorted(dd.map_partitions(func, d.columns, d).dask) == \
           sorted(dd.map_partitions(func, d.columns, d).dask)
    assert sorted(dd.map_partitions(lambda x: x, d.columns, d, token=1).dask) == \
           sorted(dd.map_partitions(lambda x: x, d.columns, d, token=1).dask)

    func = lambda x, y: x
    assert sorted(dd.map_partitions(func, d.columns, d, d).dask) == \
           sorted(dd.map_partitions(func, d.columns, d, d).dask)
def test_map_partitions_column_info():
    """map_partitions propagates the supplied column / name metadata.

    (Cleanup: the original repeated the Series identity check twice
    verbatim; the duplicate has been removed.)
    """
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)

    b = dd.map_partitions(lambda x: x, a.columns, a)
    tm.assert_index_equal(b.columns, a.columns)
    assert eq(df, b)

    b = dd.map_partitions(lambda x: x, a.x.name, a.x)
    assert b.name == a.x.name
    assert eq(df.x, b)

    b = dd.map_partitions(lambda df: df.x + df.y, None, a)
    assert b.name is None
    assert isinstance(b, dd.Series)

    b = dd.map_partitions(lambda df: df.x + 1, 'x', a)
    assert isinstance(b, dd.Series)
    assert b.name == 'x'
def test_map_partitions_method_names():
    """The map_partitions *method* infers output type from `columns`."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)

    b = a.map_partitions(lambda x: x)
    assert isinstance(b, dd.DataFrame)
    tm.assert_index_equal(b.columns, a.columns)

    b = a.map_partitions(lambda df: df.x + 1, columns=None)
    assert isinstance(b, dd.Series)
    assert b.name is None

    b = a.map_partitions(lambda df: df.x + 1, columns='x')
    assert isinstance(b, dd.Series)
    assert b.name == 'x'
def test_map_partitions_keeps_kwargs_in_dict():
    """Keyword arguments are stored in the graph and affect the token."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)

    def f(s, x=1):
        return s + x

    b = a.x.map_partitions(f, x=5)
    assert "'x': 5" in str(b.dask)
    eq(df.x + 5, b)

    assert a.x.map_partitions(f, x=5)._name != a.x.map_partitions(f, x=6)._name
def test_drop_duplicates():
    """drop_duplicates matches pandas for Series, DataFrame and Index."""
    # can't detect duplicates only from cached data
    assert eq(d.a.drop_duplicates(), full.a.drop_duplicates())
    assert eq(d.drop_duplicates(), full.drop_duplicates())
    assert eq(d.index.drop_duplicates(), full.index.drop_duplicates())
def test_drop_duplicates_subset():
    """drop_duplicates honours `subset` and keep/take_last across pandas
    versions (keyword renamed in pandas 0.17)."""
    df = pd.DataFrame({'x': [1, 2, 3, 1, 2, 3],
                       'y': ['a', 'a', 'b', 'b', 'c', 'c']})
    ddf = dd.from_pandas(df, npartitions=2)

    if pd.__version__ < '0.17':
        kwargs = [{'take_last': False}, {'take_last': True}]
    else:
        kwargs = [{'keep': 'first'}, {'keep': 'last'}]

    for kwarg in kwargs:
        assert eq(df.x.drop_duplicates(**kwarg),
                  ddf.x.drop_duplicates(**kwarg))
        for ss in [['x'], 'y', ['x', 'y']]:
            assert eq(df.drop_duplicates(subset=ss, **kwarg),
                      ddf.drop_duplicates(subset=ss, **kwarg))
def test_set_partition():
    """set_partition with explicit divisions matches pandas set_index."""
    d2 = d.set_partition('b', [0, 2, 9])
    assert d2.divisions == (0, 2, 9)
    expected = full.set_index('b')
    assert eq(d2, expected)
def test_set_partition_compute():
    """compute=True gives the same result with a smaller task graph."""
    d2 = d.set_partition('b', [0, 2, 9])
    d3 = d.set_partition('b', [0, 2, 9], compute=True)

    assert eq(d2, d3)
    assert eq(d2, full.set_index('b'))
    assert eq(d3, full.set_index('b'))
    assert len(d2.dask) > len(d3.dask)

    d4 = d.set_partition(d.b, [0, 2, 9])
    d5 = d.set_partition(d.b, [0, 2, 9], compute=True)
    exp = full.copy()
    exp.index = exp.b
    assert eq(d4, d5)
    assert eq(d4, exp)
    assert eq(d5, exp)
    assert len(d4.dask) > len(d5.dask)
def test_get_division():
    """get_division returns the expected partition slices and rejects
    out-of-range indices."""
    pdf = pd.DataFrame(np.random.randn(10, 5), columns=list('abcde'))
    ddf = dd.from_pandas(pdf, 3)
    assert ddf.divisions == (0, 4, 8, 9)

    # DataFrame
    div1 = ddf.get_division(0)
    assert isinstance(div1, dd.DataFrame)
    assert eq(div1, pdf.loc[0:3])
    div2 = ddf.get_division(1)
    assert eq(div2, pdf.loc[4:7])
    div3 = ddf.get_division(2)
    assert eq(div3, pdf.loc[8:9])
    assert len(div1) + len(div2) + len(div3) == len(pdf)

    # Series
    div1 = ddf.a.get_division(0)
    assert isinstance(div1, dd.Series)
    assert eq(div1, pdf.a.loc[0:3])
    div2 = ddf.a.get_division(1)
    assert eq(div2, pdf.a.loc[4:7])
    div3 = ddf.a.get_division(2)
    assert eq(div3, pdf.a.loc[8:9])
    assert len(div1) + len(div2) + len(div3) == len(pdf.a)

    with tm.assertRaises(ValueError):
        ddf.get_division(-1)

    with tm.assertRaises(ValueError):
        ddf.get_division(3)
def test_ndim():
    """ndim is 2 for frames and 1 for series/indexes, like pandas."""
    assert (d.ndim == 2)
    assert (d.a.ndim == 1)
    assert (d.index.ndim == 1)
def test_dtype():
    """dtypes match the computed pandas frame."""
    assert (d.dtypes == full.dtypes).all()
def test_cache():
    """cache() rewrites every task to a getitem on cached data."""
    d2 = d.cache()
    assert all(task[0] == getitem for task in d2.dask.values())
    assert eq(d2.a, d.a)
def test_value_counts():
    """value_counts matches pandas (name check skipped: pandas < 0.17 bug)."""
    df = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4]})
    a = dd.from_pandas(df, npartitions=3)
    result = a.x.value_counts()
    expected = df.x.value_counts()
    # because of pandas bug, value_counts doesn't hold name (fixed in 0.17)
    # https://github.com/pydata/pandas/pull/10419
    assert eq(result, expected, check_names=False)
def test_unique():
    """Series.unique preserves values (incl. NaN) and the series name."""
    pdf = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4, 2, 3, 1],
                        'y': ['a', 'c', 'b', np.nan, 'c',
                              'b', 'a', 'd', np.nan, 'a']})
    ddf = dd.from_pandas(pdf, npartitions=3)
    assert eq(ddf.x.unique(), pd.Series(pdf.x.unique(), name='x'))
    assert eq(ddf.y.unique(), pd.Series(pdf.y.unique(), name='y'))
def test_isin():
    """Series.isin matches pandas for both list and Series arguments."""
    for values in ([0, 1, 2], pd.Series([0, 1, 2])):
        assert eq(d.a.isin(values), full.a.isin(values))
def test_len():
    """len() of a dask frame or series matches the pandas original."""
    for lazy, concrete in [(d, full), (d.a, full.a)]:
        assert len(lazy) == len(concrete)
def test_quantile():
    """quantile on Series/Index returns a lazy result with quantile divisions."""
    # series / multiple
    result = d.b.quantile([.3, .7])
    exp = full.b.quantile([.3, .7])  # result may different
    assert len(result) == 2
    # divisions of the result are the requested quantiles themselves
    assert result.divisions == (.3, .7)
    assert eq(result.index, exp.index)
    assert isinstance(result, dd.Series)
    result = result.compute()
    assert isinstance(result, pd.Series)
    # approximate quantiles: only ranges are asserted, not exact values
    assert result.iloc[0] == 0
    assert 5 < result.iloc[1] < 6
    # index
    s = pd.Series(np.arange(10), index=np.arange(10))
    ds = dd.from_pandas(s, 2)
    result = ds.index.quantile([.3, .7])
    exp = s.quantile([.3, .7])
    assert len(result) == 2
    assert result.divisions == (.3, .7)
    assert eq(result.index, exp.index)
    assert isinstance(result, dd.Series)
    result = result.compute()
    assert isinstance(result, pd.Series)
    assert 1 < result.iloc[0] < 2
    assert 7 < result.iloc[1] < 8
    # series / single
    result = d.b.quantile(.5)
    exp = full.b.quantile(.5)  # result may different
    # a single quantile yields a scalar, not a Series
    assert isinstance(result, dd.core.Scalar)
    result = result.compute()
    assert 4 < result < 6
def test_empty_quantile():
    """quantile([]) yields an empty result with unknown divisions but keeps name."""
    result = d.b.quantile([])
    exp = full.b.quantile([])
    assert result.divisions == (None, None)
    # because of a pandas bug, name is not preserved
    # https://github.com/pydata/pandas/pull/10881
    assert result.name == 'b'
    assert result.compute().name == 'b'
    assert eq(result, exp, check_names=False)
def test_dataframe_quantile():
    """DataFrame.quantile keeps column order, drops non-numeric columns."""
    # column X is for test column order and result division
    df = pd.DataFrame({'A': np.arange(20),
                       'X': np.arange(20, 40),
                       'B': np.arange(10, 30),
                       'C': ['a', 'b', 'c', 'd'] * 5},
                      columns=['A', 'X', 'B', 'C'])
    ddf = dd.from_pandas(df, 3)
    result = ddf.quantile()
    assert result.npartitions == 1
    assert result.divisions == ('A', 'X')
    result = result.compute()
    assert isinstance(result, pd.Series)
    # object column 'C' is excluded; original column order kept
    tm.assert_index_equal(result.index, pd.Index(['A', 'X', 'B']))
    # approximate quantiles: bracket expected values instead of exact match
    assert (result > pd.Series([16, 36, 26], index=['A', 'X', 'B'])).all()
    assert (result < pd.Series([17, 37, 27], index=['A', 'X', 'B'])).all()
    result = ddf.quantile([0.25, 0.75])
    assert result.npartitions == 1
    assert result.divisions == (0.25, 0.75)
    result = result.compute()
    assert isinstance(result, pd.DataFrame)
    tm.assert_index_equal(result.index, pd.Index([0.25, 0.75]))
    tm.assert_index_equal(result.columns, pd.Index(['A', 'X', 'B']))
    minexp = pd.DataFrame([[1, 21, 11], [17, 37, 27]],
                          index=[0.25, 0.75], columns=['A', 'X', 'B'])
    assert (result > minexp).all().all()
    maxexp = pd.DataFrame([[2, 22, 12], [18, 38, 28]],
                          index=[0.25, 0.75], columns=['A', 'X', 'B'])
    assert (result < maxexp).all().all()
    assert eq(ddf.quantile(axis=1), df.quantile(axis=1))
    # multiple quantiles along axis=1 are unsupported
    assert raises(ValueError, lambda: ddf.quantile([0.25, 0.75], axis=1))
def test_index():
    """The lazy index matches the pandas index."""
    assert eq(d.index, full.index)
def test_loc():
    """loc slicing/scalar lookup matches pandas and produces stable graph keys."""
    assert d.loc[3:8].divisions[0] == 3
    assert d.loc[3:8].divisions[-1] == 8
    # scalar label lookup collapses divisions to that single label
    assert d.loc[5].divisions == (5, 5)
    assert eq(d.loc[5], full.loc[5:5])
    assert eq(d.loc[3:8], full.loc[3:8])
    assert eq(d.loc[:8], full.loc[:8])
    assert eq(d.loc[3:], full.loc[3:])
    assert eq(d.a.loc[5], full.a.loc[5:5])
    assert eq(d.a.loc[3:8], full.a.loc[3:8])
    assert eq(d.a.loc[:8], full.a.loc[:8])
    assert eq(d.a.loc[3:], full.a.loc[3:])
    assert raises(KeyError, lambda: d.loc[1000])
    # slices outside the data are allowed (empty result), unlike scalar lookup
    assert eq(d.loc[1000:], full.loc[1000:])
    assert eq(d.loc[-2000:-1000], full.loc[-2000:-1000])
    # graph keys are deterministic per argument, distinct across arguments
    assert sorted(d.loc[5].dask) == sorted(d.loc[5].dask)
    assert sorted(d.loc[5].dask) != sorted(d.loc[6].dask)
def test_loc_non_informative_index():
    """loc works when divisions are unknown, and with duplicate index labels."""
    df = pd.DataFrame({'x': [1, 2, 3, 4]}, index=[10, 20, 30, 40])
    ddf = dd.from_pandas(df, npartitions=2, sort=True)
    # wipe the divisions to simulate an uninformative index
    ddf.divisions = (None,) * 3
    assert not ddf.known_divisions
    ddf.loc[20:30].compute(get=dask.get)
    assert eq(ddf.loc[20:30], df.loc[20:30])
    # duplicate label: scalar loc must return every matching row
    df = pd.DataFrame({'x': [1, 2, 3, 4]}, index=[10, 20, 20, 40])
    ddf = dd.from_pandas(df, npartitions=2, sort=True)
    assert eq(ddf.loc[20], df.loc[20:20])
def test_loc_with_text_dates():
    """loc accepts date strings against a datetime index built by hand."""
    A = tm.makeTimeSeries(10).iloc[:5]
    B = tm.makeTimeSeries(10).iloc[5:]
    # hand-built dd.Series: two partitions with explicit datetime divisions
    s = dd.Series({('df', 0): A, ('df', 1): B}, 'df', None,
                  [A.index.min(), B.index.min(), B.index.max()])
    # a slice spanning everything keeps the original divisions
    assert s.loc['2000': '2010'].divisions == s.divisions
    assert eq(s.loc['2000': '2010'], s)
    assert len(s.loc['2000-01-03': '2000-01-05'].compute()) == 3
def test_loc_with_series():
    """Boolean-series loc matches pandas; graph keys are deterministic."""
    assert eq(d.loc[d.a % 2 == 0], full.loc[full.a % 2 == 0])
    assert sorted(d.loc[d.a % 2].dask) == sorted(d.loc[d.a % 2].dask)
    assert sorted(d.loc[d.a % 2].dask) != sorted(d.loc[d.a % 3].dask)
def test_getitem():
    """Column selection and boolean masking match pandas, including errors."""
    df = pd.DataFrame({'A': [1, 2, 3, 4, 5, 6, 7, 8, 9],
                       'B': [9, 8, 7, 6, 5, 4, 3, 2, 1],
                       'C': [True, False, True] * 3},
                      columns=list('ABC'))
    ddf = dd.from_pandas(df, 2)
    assert eq(ddf['A'], df['A'])
    tm.assert_series_equal(ddf['A']._pd, ddf._pd['A'])  # check cache consistency
    assert eq(ddf[['A', 'B']], df[['A', 'B']])
    tm.assert_frame_equal(ddf[['A', 'B']]._pd, ddf._pd[['A', 'B']])
    assert eq(ddf[ddf.C], df[df.C])
    tm.assert_series_equal(ddf.C._pd, ddf._pd.C)
    assert eq(ddf[ddf.C.repartition([0, 2, 5, 8])], df[df.C])
    # BUG FIX: these assertions previously exercised the *pandas* frame
    # ``df``, so dask's error behaviour was never actually tested.
    assert raises(KeyError, lambda: ddf['X'])
    assert raises(KeyError, lambda: ddf[['A', 'X']])
    assert raises(AttributeError, lambda: ddf.X)
    # not str/unicode
    df = pd.DataFrame(np.random.randn(10, 5))
    ddf = dd.from_pandas(df, 2)
    assert eq(ddf[0], df[0])
    assert eq(ddf[[1, 2]], df[[1, 2]])
    assert raises(KeyError, lambda: ddf[8])
    assert raises(KeyError, lambda: ddf[[1, 8]])
def test_assign():
    """assign with scalar-arith and series-arith columns matches pandas."""
    assert eq(d.assign(c=d.a + 1, e=d.a + d.b),
              full.assign(c=full.a + 1, e=full.a + full.b))
def test_map():
    """Series.map works with callables, dicts and pandas Series; not dd.Series."""
    assert eq(d.a.map(lambda x: x + 1), full.a.map(lambda x: x + 1))
    lk = dict((v, v + 1) for v in full.a.values)
    assert eq(d.a.map(lk), full.a.map(lk))
    assert eq(d.b.map(lk), full.b.map(lk))
    lk = pd.Series(lk)
    assert eq(d.a.map(lk), full.a.map(lk))
    assert eq(d.b.map(lk), full.b.map(lk))
    # mapping with another dask Series is not supported
    assert raises(TypeError, lambda: d.a.map(d.b))
def test_concat():
    """_concat of empty frames preserves columns and stays empty."""
    x = _concat([pd.DataFrame(columns=['a', 'b']),
                 pd.DataFrame(columns=['a', 'b'])])
    assert list(x.columns) == ['a', 'b']
    assert len(x) == 0
def test_args():
    """Objects can be reconstructed from their _args tuple."""
    e = d.assign(c=d.a + 1)
    f = type(e)(*e._args)
    assert eq(e, f)
    assert eq(d.a, type(d.a)(*d.a._args))
    assert eq(d.a.sum(), type(d.a.sum())(*d.a.sum()._args))
def test_known_divisions():
    """known_divisions is False iff divisions contain None placeholders."""
    assert d.known_divisions
    df = dd.DataFrame({('x', 0): 'foo', ('x', 1): 'bar'}, 'x',
                      ['a', 'b'], divisions=[None, None, None])
    assert not df.known_divisions
    df = dd.DataFrame({('x', 0): 'foo'}, 'x',
                      ['a', 'b'], divisions=[0, 1])
    # BUG FIX: previously re-asserted the module-level ``d`` instead of the
    # frame just constructed with concrete divisions, leaving it untested.
    assert df.known_divisions
def test_unknown_divisions():
    """Reductions and elementwise ops work when all divisions are None."""
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
           ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
           ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
    d = dd.DataFrame(dsk, 'x', ['a', 'b'], [None, None, None, None])
    full = d.compute(get=dask.get)
    assert eq(d.a.sum(), full.a.sum())
    assert eq(d.a + d.b + 1, full.a + full.b + 1)
def test_concat2():
    """concat of unknown-division frames stacks partitions; axis=1 raises."""
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
           ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
           ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
    a = dd.DataFrame(dsk, 'x', ['a', 'b'], [None, None])
    dsk = {('y', 0): pd.DataFrame({'a': [10, 20, 30], 'b': [40, 50, 60]}),
           ('y', 1): pd.DataFrame({'a': [40, 50, 60], 'b': [30, 20, 10]}),
           ('y', 2): pd.DataFrame({'a': [70, 80, 90], 'b': [0, 0, 0]})}
    b = dd.DataFrame(dsk, 'y', ['a', 'b'], [None, None])
    dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60]}),
           ('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10]})}
    c = dd.DataFrame(dsk, 'y', ['b', 'c'], [None, None])
    dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60],
                                   'd': [70, 80, 90]}),
           ('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10],
                                   'd': [90, 80, 70]},
                                  index=[3, 4, 5])}
    d = dd.DataFrame(dsk, 'y', ['b', 'c', 'd'], [0, 3, 5])
    cases = [[a, b], [a, c], [a, d]]
    # concat of a single frame is a no-op returning the same object
    assert dd.concat([a]) is a
    for case in cases:
        pdcase = [c.compute() for c in case]
        result = dd.concat(case)
        assert result.npartitions == case[0].npartitions + case[1].npartitions
        # result divisions are unknown because inputs had unknown divisions
        assert result.divisions == (None, ) * (result.npartitions + 1)
        assert eq(pd.concat(pdcase), result)
        # graph is deterministic for the same inputs
        assert result.dask == dd.concat(case).dask
        result = dd.concat(case, join='inner')
        assert result.npartitions == case[0].npartitions + case[1].npartitions
        assert result.divisions == (None, ) * (result.npartitions + 1)
        assert eq(pd.concat(pdcase, join='inner'), result)
        assert result.dask == dd.concat(case, join='inner').dask
        msg = ('Unable to concatenate DataFrame with unknown division '
               'specifying axis=1')
        with tm.assertRaisesRegexp(ValueError, msg):
            dd.concat(case, axis=1)
def test_concat3():
    """concat of known-division, non-overlapping frames keeps divisions."""
    pdf1 = pd.DataFrame(np.random.randn(6, 5),
                        columns=list('ABCDE'), index=list('abcdef'))
    pdf2 = pd.DataFrame(np.random.randn(6, 5),
                        columns=list('ABCFG'), index=list('ghijkl'))
    pdf3 = pd.DataFrame(np.random.randn(6, 5),
                        columns=list('ABCHI'), index=list('mnopqr'))
    ddf1 = dd.from_pandas(pdf1, 2)
    ddf2 = dd.from_pandas(pdf2, 3)
    ddf3 = dd.from_pandas(pdf3, 2)
    result = dd.concat([ddf1, ddf2])
    # divisions concatenate, dropping the duplicated boundary
    assert result.divisions == ddf1.divisions[:-1] + ddf2.divisions
    assert result.npartitions == ddf1.npartitions + ddf2.npartitions
    assert eq(result, pd.concat([pdf1, pdf2]))
    assert eq(dd.concat([ddf1, ddf2], interleave_partitions=True),
              pd.concat([pdf1, pdf2]))
    result = dd.concat([ddf1, ddf2, ddf3])
    assert result.divisions == (ddf1.divisions[:-1] + ddf2.divisions[:-1] +
                                ddf3.divisions)
    assert result.npartitions == (ddf1.npartitions + ddf2.npartitions +
                                  ddf3.npartitions)
    assert eq(result, pd.concat([pdf1, pdf2, pdf3]))
    assert eq(dd.concat([ddf1, ddf2, ddf3], interleave_partitions=True),
              pd.concat([pdf1, pdf2, pdf3]))
def test_concat4_interleave_partitions():
    """Overlapping known divisions require interleave_partitions=True."""
    pdf1 = pd.DataFrame(np.random.randn(10, 5),
                        columns=list('ABCDE'), index=list('abcdefghij'))
    pdf2 = pd.DataFrame(np.random.randn(13, 5),
                        columns=list('ABCDE'), index=list('fghijklmnopqr'))
    pdf3 = pd.DataFrame(np.random.randn(13, 6),
                        columns=list('CDEXYZ'), index=list('fghijklmnopqr'))
    ddf1 = dd.from_pandas(pdf1, 2)
    ddf2 = dd.from_pandas(pdf2, 3)
    ddf3 = dd.from_pandas(pdf3, 2)
    msg = ('All inputs have known divisions which cannot be '
           'concatenated in order. Specify '
           'interleave_partitions=True to ignore order')
    # every pairing below has overlapping index ranges
    cases = [[ddf1, ddf1], [ddf1, ddf2], [ddf1, ddf3], [ddf2, ddf1],
             [ddf2, ddf3], [ddf3, ddf1], [ddf3, ddf2]]
    for case in cases:
        pdcase = [c.compute() for c in case]
        with tm.assertRaisesRegexp(ValueError, msg):
            dd.concat(case)
        assert eq(dd.concat(case, interleave_partitions=True),
                  pd.concat(pdcase))
        assert eq(dd.concat(case, join='inner', interleave_partitions=True),
                  pd.concat(pdcase, join='inner'))
    # invalid join keyword is rejected
    msg = "'join' must be 'inner' or 'outer'"
    with tm.assertRaisesRegexp(ValueError, msg):
        dd.concat([ddf1, ddf1], join='invalid', interleave_partitions=True)
def test_concat5():
    """concat over mixed frames/series, axis 0 and 1, dask and pandas inputs."""
    pdf1 = pd.DataFrame(np.random.randn(7, 5),
                        columns=list('ABCDE'), index=list('abcdefg'))
    pdf2 = pd.DataFrame(np.random.randn(7, 6),
                        columns=list('FGHIJK'), index=list('abcdefg'))
    pdf3 = pd.DataFrame(np.random.randn(7, 6),
                        columns=list('FGHIJK'), index=list('cdefghi'))
    pdf4 = pd.DataFrame(np.random.randn(7, 5),
                        columns=list('FGHAB'), index=list('cdefghi'))
    pdf5 = pd.DataFrame(np.random.randn(7, 5),
                        columns=list('FGHAB'), index=list('fklmnop'))
    ddf1 = dd.from_pandas(pdf1, 2)
    ddf2 = dd.from_pandas(pdf2, 3)
    ddf3 = dd.from_pandas(pdf3, 2)
    ddf4 = dd.from_pandas(pdf4, 2)
    ddf5 = dd.from_pandas(pdf5, 3)
    cases = [[ddf1, ddf2], [ddf1, ddf3], [ddf1, ddf4], [ddf1, ddf5],
             [ddf3, ddf4], [ddf3, ddf5], [ddf5, ddf1, ddf4], [ddf5, ddf3],
             [ddf1.A, ddf4.A], [ddf2.F, ddf3.F], [ddf4.A, ddf5.A],
             [ddf1.A, ddf4.F], [ddf2.F, ddf3.H], [ddf4.A, ddf5.B],
             [ddf1, ddf4.A], [ddf3.F, ddf2], [ddf5, ddf1.A, ddf2]]
    for case in cases:
        pdcase = [c.compute() for c in case]
        assert eq(dd.concat(case, interleave_partitions=True),
                  pd.concat(pdcase))
        assert eq(dd.concat(case, join='inner', interleave_partitions=True),
                  pd.concat(pdcase, join='inner'))
        assert eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))
        assert eq(dd.concat(case, axis=1, join='inner'),
                  pd.concat(pdcase, axis=1, join='inner'))
    # Dask + pandas
    cases = [[ddf1, pdf2], [ddf1, pdf3], [pdf1, ddf4],
             [pdf1.A, ddf4.A], [ddf2.F, pdf3.F],
             [ddf1, pdf4.A], [ddf3.F, pdf2], [ddf2, pdf1, ddf3.F]]
    for case in cases:
        # only dask objects need computing; pandas objects pass through
        pdcase = [c.compute() if isinstance(c, _Frame) else c for c in case]
        assert eq(dd.concat(case, interleave_partitions=True),
                  pd.concat(pdcase))
        assert eq(dd.concat(case, join='inner', interleave_partitions=True),
                  pd.concat(pdcase, join='inner'))
        assert eq(dd.concat(case, axis=1), pd.concat(pdcase, axis=1))
        assert eq(dd.concat(case, axis=1, join='inner'),
                  pd.concat(pdcase, axis=1, join='inner'))
def test_append():
    """append matches pandas; unordered divisions raise ValueError."""
    df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
                       'b': [1, 2, 3, 4, 5, 6]})
    df2 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
                        'b': [1, 2, 3, 4, 5, 6]},
                       index=[6, 7, 8, 9, 10, 11])
    df3 = pd.DataFrame({'b': [1, 2, 3, 4, 5, 6],
                        'c': [1, 2, 3, 4, 5, 6]},
                       index=[6, 7, 8, 9, 10, 11])
    ddf = dd.from_pandas(df, 2)
    ddf2 = dd.from_pandas(df2, 2)
    ddf3 = dd.from_pandas(df3, 2)
    # appending a single pandas row (Series)
    s = pd.Series([7, 8], name=6, index=['a', 'b'])
    assert eq(ddf.append(s), df.append(s))
    assert eq(ddf.append(ddf2), df.append(df2))
    assert eq(ddf.a.append(ddf2.a), df.a.append(df2.a))
    # different columns
    assert eq(ddf.append(ddf3), df.append(df3))
    assert eq(ddf.a.append(ddf3.b), df.a.append(df3.b))
    # dask + pandas
    assert eq(ddf.append(df2), df.append(df2))
    assert eq(ddf.a.append(df2.a), df.a.append(df2.a))
    assert eq(ddf.append(df3), df.append(df3))
    assert eq(ddf.a.append(df3.b), df.a.append(df3.b))
    # overlapping index ranges cannot be appended with known divisions
    df4 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
                        'b': [1, 2, 3, 4, 5, 6]},
                       index=[4, 5, 6, 7, 8, 9])
    ddf4 = dd.from_pandas(df4, 2)
    msg = ("Unable to append two dataframes to each other with known "
           "divisions if those divisions are not ordered. "
           "The divisions/index of the second dataframe must be "
           "greater than the divisions/index of the first dataframe.")
    with tm.assertRaisesRegexp(ValueError, msg):
        ddf.append(ddf4)
def test_append2():
    """append works when all divisions are unknown, in every type pairing."""
    dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
           ('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
           ('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
    ddf1 = dd.DataFrame(dsk, 'x', ['a', 'b'], [None, None])
    dsk = {('y', 0): pd.DataFrame({'a': [10, 20, 30], 'b': [40, 50, 60]}),
           ('y', 1): pd.DataFrame({'a': [40, 50, 60], 'b': [30, 20, 10]}),
           ('y', 2): pd.DataFrame({'a': [70, 80, 90], 'b': [0, 0, 0]})}
    ddf2 = dd.DataFrame(dsk, 'y', ['a', 'b'], [None, None])
    dsk = {('y', 0): pd.DataFrame({'b': [10, 20, 30], 'c': [40, 50, 60]}),
           ('y', 1): pd.DataFrame({'b': [40, 50, 60], 'c': [30, 20, 10]})}
    ddf3 = dd.DataFrame(dsk, 'y', ['b', 'c'], [None, None])
    assert eq(ddf1.append(ddf2), ddf1.compute().append(ddf2.compute()))
    assert eq(ddf2.append(ddf1), ddf2.compute().append(ddf1.compute()))
    # Series + DataFrame
    assert eq(ddf1.a.append(ddf2), ddf1.a.compute().append(ddf2.compute()))
    assert eq(ddf2.a.append(ddf1), ddf2.a.compute().append(ddf1.compute()))
    # different columns
    assert eq(ddf1.append(ddf3), ddf1.compute().append(ddf3.compute()))
    assert eq(ddf3.append(ddf1), ddf3.compute().append(ddf1.compute()))
    # Series + DataFrame
    assert eq(ddf1.a.append(ddf3), ddf1.a.compute().append(ddf3.compute()))
    assert eq(ddf3.b.append(ddf1), ddf3.b.compute().append(ddf1.compute()))
    # Dask + pandas
    assert eq(ddf1.append(ddf2.compute()), ddf1.compute().append(ddf2.compute()))
    assert eq(ddf2.append(ddf1.compute()), ddf2.compute().append(ddf1.compute()))
    # Series + DataFrame
    assert eq(ddf1.a.append(ddf2.compute()), ddf1.a.compute().append(ddf2.compute()))
    assert eq(ddf2.a.append(ddf1.compute()), ddf2.a.compute().append(ddf1.compute()))
    # different columns
    assert eq(ddf1.append(ddf3.compute()), ddf1.compute().append(ddf3.compute()))
    assert eq(ddf3.append(ddf1.compute()), ddf3.compute().append(ddf1.compute()))
    # Series + DataFrame
    assert eq(ddf1.a.append(ddf3.compute()), ddf1.a.compute().append(ddf3.compute()))
    assert eq(ddf3.b.append(ddf1.compute()), ddf3.b.compute().append(ddf1.compute()))
def test_dataframe_series_are_pickleable():
    """A groupby result survives a cloudpickle dump / pickle load round-trip."""
    import pickle
    # skip the test entirely when cloudpickle is absent
    cloudpickle = pytest.importorskip('cloudpickle')
    dumps = cloudpickle.dumps
    loads = pickle.loads
    e = d.groupby(d.a).b.sum()
    f = loads(dumps(e))
    assert eq(e, f)
def test_random_partitions():
    """random_split yields DataFrames whose rows partition the original."""
    a, b = d.random_split([0.5, 0.5])
    assert isinstance(a, dd.DataFrame)
    assert isinstance(b, dd.DataFrame)
    # no row is lost or duplicated by the split
    assert len(a.compute()) + len(b.compute()) == len(full)
def test_series_nunique():
    """Series.nunique matches pandas across partitions."""
    ps = pd.Series(list('aaabbccccdddeee'), name='a')
    s = dd.from_pandas(ps, npartitions=3)
    assert eq(s.nunique(), ps.nunique())
def test_set_partition_2():
    """set_partition on a column applies the given divisions and sorts rows."""
    df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')})
    ddf = dd.from_pandas(df, 2)
    result = ddf.set_partition('y', ['a', 'c', 'd'])
    assert result.divisions == ('a', 'c', 'd')
    # after partitioning by 'y', the largest labels end up last
    assert list(result.compute(get=get_sync).index[-2:]) == ['d', 'd']
def test_repartition():
    """repartition(divisions=...) preserves data for int and str indexes."""
    def _check_split_data(orig, d):
        """Check data is split properly"""
        keys = [k for k in d.dask if k[0].startswith('repartition-split')]
        keys = sorted(keys)
        # concatenating the raw split pieces must reproduce the input
        sp = pd.concat([d._get(d.dask, k) for k in keys])
        assert eq(orig, sp)
        assert eq(orig, d)
    df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
                      index=[10, 20, 30, 40, 50, 60])
    a = dd.from_pandas(df, 2)
    b = a.repartition(divisions=[10, 20, 50, 60])
    assert b.divisions == (10, 20, 50, 60)
    assert eq(a, b)
    assert eq(a._get(b.dask, (b._name, 0)), df.iloc[:1])
    # invalid division lists must be rejected
    for div in [[20, 60], [10, 50], [1],  # first / last element mismatch
                [0, 60], [10, 70],  # do not allow to expand divisions by default
                [10, 50, 20, 60],  # not sorted
                [10, 10, 20, 60]]:  # not unique (last element can be duplicated)
        assert raises(ValueError, lambda: a.repartition(divisions=div))
    pdf = pd.DataFrame(np.random.randn(7, 5), columns=list('abxyz'))
    for p in range(1, 7):
        ddf = dd.from_pandas(pdf, p)
        assert eq(ddf, pdf)
        for div in [[0, 6], [0, 6, 6], [0, 5, 6], [0, 4, 6, 6],
                    [0, 2, 6], [0, 2, 6, 6],
                    [0, 2, 3, 6, 6], [0, 1, 2, 3, 4, 5, 6, 6]]:
            rddf = ddf.repartition(divisions=div)
            _check_split_data(ddf, rddf)
            assert rddf.divisions == tuple(div)
            assert eq(pdf, rddf)
            rds = ddf.x.repartition(divisions=div)
            _check_split_data(ddf.x, rds)
            assert rds.divisions == tuple(div)
            assert eq(pdf.x, rds)
        # expand divisions (allowed only with force=True)
        for div in [[-5, 10], [-2, 3, 5, 6], [0, 4, 5, 9, 10]]:
            rddf = ddf.repartition(divisions=div, force=True)
            _check_split_data(ddf, rddf)
            assert rddf.divisions == tuple(div)
            assert eq(pdf, rddf)
            rds = ddf.x.repartition(divisions=div, force=True)
            _check_split_data(ddf.x, rds)
            assert rds.divisions == tuple(div)
            assert eq(pdf.x, rds)
    # same checks with a string index
    pdf = pd.DataFrame({'x': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                        'y': [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]},
                       index=list('abcdefghij'))
    for p in range(1, 7):
        ddf = dd.from_pandas(pdf, p)
        assert eq(ddf, pdf)
        for div in [list('aj'), list('ajj'), list('adj'),
                    list('abfj'), list('ahjj'), list('acdj'), list('adfij'),
                    list('abdefgij'), list('abcdefghij')]:
            rddf = ddf.repartition(divisions=div)
            _check_split_data(ddf, rddf)
            assert rddf.divisions == tuple(div)
            assert eq(pdf, rddf)
            rds = ddf.x.repartition(divisions=div)
            _check_split_data(ddf.x, rds)
            assert rds.divisions == tuple(div)
            assert eq(pdf.x, rds)
        # expand divisions
        for div in [list('Yadijm'), list('acmrxz'), list('Yajz')]:
            rddf = ddf.repartition(divisions=div, force=True)
            _check_split_data(ddf, rddf)
            assert rddf.divisions == tuple(div)
            assert eq(pdf, rddf)
            rds = ddf.x.repartition(divisions=div, force=True)
            _check_split_data(ddf.x, rds)
            assert rds.divisions == tuple(div)
            assert eq(pdf.x, rds)
def test_repartition_divisions():
    """repartition_divisions emits the expected _loc / concat task graph."""
    result = repartition_divisions([0, 6], [0, 6, 6], 'a', 'b', 'c')
    # final division is duplicated -> last piece is an inclusive _loc slice
    assert result == {('b', 0): (_loc, ('a', 0), 0, 6, False),
                      ('b', 1): (_loc, ('a', 0), 6, 6, True),
                      ('c', 0): ('b', 0),
                      ('c', 1): ('b', 1)}
    result = repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c')
    # pieces from different input partitions are concatenated
    assert result == {('b', 0): (_loc, ('a', 0), 1, 3, False),
                      ('b', 1): (_loc, ('a', 1), 3, 4, False),
                      ('b', 2): (_loc, ('a', 1), 4, 6, False),
                      ('b', 3): (_loc, ('a', 1), 6, 7, True),
                      ('c', 0): (pd.concat, (list, [('b', 0), ('b', 1)])),
                      ('c', 1): ('b', 2),
                      ('c', 2): ('b', 3)}
def test_repartition_on_pandas_dataframe():
    """dd.repartition accepts raw pandas objects and returns dask objects."""
    df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
                      index=[10, 20, 30, 40, 50, 60])
    ddf = dd.repartition(df, divisions=[10, 20, 50, 60])
    assert isinstance(ddf, dd.DataFrame)
    assert ddf.divisions == (10, 20, 50, 60)
    assert eq(ddf, df)
    # same for a pandas Series input
    ddf = dd.repartition(df.y, divisions=[10, 20, 50, 60])
    assert isinstance(ddf, dd.Series)
    assert ddf.divisions == (10, 20, 50, 60)
    assert eq(ddf, df.y)
def test_repartition_npartitions():
    """repartition(npartitions=k) reduces partitions; increasing raises."""
    df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
                      index=[10, 20, 30, 40, 50, 60])
    for n in [1, 2, 4, 5]:
        for k in [1, 2, 4, 5]:
            # only coalescing (k <= n) is supported
            if k > n:
                continue
            a = dd.from_pandas(df, npartitions=n)
            k = min(a.npartitions, k)
            b = a.repartition(npartitions=k)
            eq(a, b)
            assert b.npartitions == k
    # growing the partition count is not supported
    a = dd.from_pandas(df, npartitions=1)
    with pytest.raises(ValueError):
        a.repartition(npartitions=5)
def test_embarrassingly_parallel_operations():
    """Elementwise/partitionwise ops match pandas on a frame with NaNs."""
    df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
                      index=[10, 20, 30, 40, 50, 60])
    a = dd.from_pandas(df, 2)
    assert eq(a.x.astype('float32'), df.x.astype('float32'))
    assert a.x.astype('float32').compute().dtype == 'float32'
    assert eq(a.x.dropna(), df.x.dropna())
    assert eq(a.x.fillna(100), df.x.fillna(100))
    assert eq(a.fillna(100), df.fillna(100))
    assert eq(a.x.between(2, 4), df.x.between(2, 4))
    assert eq(a.x.clip(2, 4), df.x.clip(2, 4))
    assert eq(a.x.notnull(), df.x.notnull())
    assert eq(a.x.isnull(), df.x.isnull())
    assert eq(a.notnull(), df.notnull())
    assert eq(a.isnull(), df.isnull())
    # sampling half the rows strictly shrinks the frame
    assert len(a.sample(0.5).compute()) < len(df)
def test_sample():
    """sample is reproducible with a fixed random_state, fresh without one."""
    df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
                      index=[10, 20, 30, 40, 50, 60])
    a = dd.from_pandas(df, 2)
    b = a.sample(0.5)
    assert eq(b, b)
    c = a.sample(0.5, random_state=1234)
    d = a.sample(0.5, random_state=1234)
    # same seed -> identical samples
    assert eq(c, d)
    # no seed -> each call gets a fresh graph name
    assert a.sample(0.5)._name != a.sample(0.5)._name
def test_sample_without_replacement():
    """sample(replace=False) never returns a duplicated index entry."""
    df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
                      index=[10, 20, 30, 40, 50, 60])
    a = dd.from_pandas(df, 2)
    b = a.sample(0.7, replace=False)
    bb = b.index.compute()
    # without replacement all sampled index labels are unique
    assert len(bb) == len(set(bb))
def test_datetime_accessor():
    """The .dt accessor matches pandas and has deterministic graph keys."""
    df = pd.DataFrame({'x': [1, 2, 3, 4]})
    df['x'] = df.x.astype('M8[us]')
    a = dd.from_pandas(df, 2)
    assert 'date' in dir(a.x.dt)
    # pandas loses Series.name via datetime accessor
    # see https://github.com/pydata/pandas/issues/10712
    assert eq(a.x.dt.date, df.x.dt.date, check_names=False)
    assert (a.x.dt.to_pydatetime().compute() == df.x.dt.to_pydatetime()).all()
    # tokenization is deterministic for identical accessor expressions
    assert a.x.dt.date.dask == a.x.dt.date.dask
    assert a.x.dt.to_pydatetime().dask == a.x.dt.to_pydatetime().dask
def test_str_accessor():
    """The .str accessor matches pandas and has deterministic graph keys."""
    df = pd.DataFrame({'x': ['a', 'b', 'c', 'D']})
    a = dd.from_pandas(df, 2)
    assert 'upper' in dir(a.x.str)
    assert eq(a.x.str.upper(), df.x.str.upper())
    assert a.x.str.upper().dask == a.x.str.upper().dask
def test_empty_max():
    """max() ignores an entirely empty partition."""
    a = dd.DataFrame({('x', 0): pd.DataFrame({'x': [1]}),
                      ('x', 1): pd.DataFrame({'x': []})}, 'x',
                     ['x'], [None, None, None])
    assert eq(a.x.max(), 1)
def test_loc_on_numpy_datetimes():
    """String-label loc slicing works against np.datetime64 divisions."""
    df = pd.DataFrame({'x': [1, 2, 3]},
                      index=list(map(np.datetime64, ['2014', '2015', '2016'])))
    a = dd.from_pandas(df, 2)
    a.divisions = list(map(np.datetime64, a.divisions))
    # NOTE(review): both sides of eq are the same dask expression; this checks
    # the slice computes consistently, not against pandas — confirm intent.
    assert eq(a.loc['2014': '2015'], a.loc['2014': '2015'])
def test_loc_on_pandas_datetimes():
    """String-label loc slicing works against pd.Timestamp divisions."""
    df = pd.DataFrame({'x': [1, 2, 3]},
                      index=list(map(pd.Timestamp, ['2014', '2015', '2016'])))
    a = dd.from_pandas(df, 2)
    a.divisions = list(map(pd.Timestamp, a.divisions))
    # NOTE(review): compares the same dask expression to itself — see
    # test_loc_on_numpy_datetimes; confirm this shouldn't compare to df.loc.
    assert eq(a.loc['2014': '2015'], a.loc['2014': '2015'])
def test_coerce_loc_index():
    """_coerce_loc_index converts a string label to the divisions' type."""
    for t in [pd.Timestamp, np.datetime64]:
        assert isinstance(_coerce_loc_index([t('2014')], '2014'), t)
def test_nlargest_series():
    """Series.nlargest matches pandas across partitions."""
    s = pd.Series([1, 3, 5, 2, 4, 6])
    ss = dd.from_pandas(s, npartitions=2)
    assert eq(ss.nlargest(2), s.nlargest(2))
def test_query():
    """DataFrame.query matches pandas (skipped if numexpr is missing)."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)
    q = a.query('x**2 > y')
    # pandas' query backend may require numexpr; ignore if unavailable
    with ignoring(ImportError):
        assert eq(q, df.query('x**2 > y'))
@pytest.mark.skipif(LooseVersion(pd.__version__) <= '0.18.0',
                    reason="eval inplace not supported")
def test_eval():
    """DataFrame.eval matches pandas; inplace=True is not implemented."""
    p = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    d = dd.from_pandas(p, npartitions=2)
    with ignoring(ImportError):
        assert eq(p.eval('x + y'), d.eval('x + y'))
        assert eq(p.eval('z = x + y', inplace=False),
                  d.eval('z = x + y', inplace=False))
        with pytest.raises(NotImplementedError):
            d.eval('z = x + y', inplace=True)
        # inplace=None defaults to inplace assignment on some pandas versions
        if p.eval('z = x + y', inplace=None) is None:
            with pytest.raises(NotImplementedError):
                d.eval('z = x + y', inplace=None)
def test_deterministic_arithmetic_names():
    """Arithmetic expressions tokenize deterministically and distinctly."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)
    # same expression -> same keys; different expression -> different keys
    assert sorted((a.x + a.y ** 2).dask) == sorted((a.x + a.y ** 2).dask)
    assert sorted((a.x + a.y ** 2).dask) != sorted((a.x + a.y ** 3).dask)
    assert sorted((a.x + a.y ** 2).dask) != sorted((a.x - a.y ** 2).dask)
def test_deterministic_reduction_names():
    """Reductions tokenize deterministically, even without a token string."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)
    assert a.x.sum()._name == a.x.sum()._name
    assert a.x.mean()._name == a.x.mean()._name
    assert a.x.var()._name == a.x.var()._name
    assert a.x.min()._name == a.x.min()._name
    assert a.x.max()._name == a.x.max()._name
    assert a.x.count()._name == a.x.count()._name
    # Test reduction without token string
    assert (sorted(reduction(a.x, len, np.sum).dask) !=
            sorted(reduction(a.x, np.sum, np.sum).dask))
    assert (sorted(reduction(a.x, len, np.sum).dask) ==
            sorted(reduction(a.x, len, np.sum).dask))
def test_deterministic_apply_concat_apply_names():
    """apply-concat-apply operations tokenize deterministically."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    a = dd.from_pandas(df, npartitions=2)
    assert sorted(a.x.nlargest(2).dask) == sorted(a.x.nlargest(2).dask)
    assert sorted(a.x.nlargest(2).dask) != sorted(a.x.nlargest(3).dask)
    assert (sorted(a.x.drop_duplicates().dask) ==
            sorted(a.x.drop_duplicates().dask))
    assert (sorted(a.groupby('x').y.mean().dask) ==
            sorted(a.groupby('x').y.mean().dask))
    # Test aca without passing in token string
    f = lambda a: a.nlargest(5)
    f2 = lambda a: a.nlargest(3)
    assert (sorted(aca(a.x, f, f, a.x.name).dask) !=
            sorted(aca(a.x, f2, f2, a.x.name).dask))
    assert (sorted(aca(a.x, f, f, a.x.name).dask) ==
            sorted(aca(a.x, f, f, a.x.name).dask))
def test_gh_517():
    """Regression for GH#517: index.nunique counts distinct labels globally."""
    arr = np.random.randn(100, 2)
    df = pd.DataFrame(arr, columns=['a', 'b'])
    ddf = dd.from_pandas(df, 2)
    assert ddf.index.nunique().compute() == 100
    # duplicated frame still has only 100 distinct index labels
    ddf2 = dd.from_pandas(pd.concat([df, df]), 5)
    assert ddf2.index.nunique().compute() == 100
def test_drop_axis_1():
    """Dropping a column with drop(axis=1) matches pandas."""
    frame = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
    lazy = dd.from_pandas(frame, npartitions=2)
    assert eq(lazy.drop('y', axis=1), frame.drop('y', axis=1))
def test_gh580():
    """Regression for GH#580: numpy ufuncs apply to dask Series."""
    df = pd.DataFrame({'x': np.arange(10, dtype=float)})
    ddf = dd.from_pandas(df, 2)
    assert eq(np.cos(df['x']), np.cos(ddf['x']))
    # NOTE(review): this assertion duplicates the previous line exactly —
    # possibly meant to test a second ufunc (e.g. np.sin); confirm intent.
    assert eq(np.cos(df['x']), np.cos(ddf['x']))
def test_rename_dict():
    """Column rename via a mapping matches pandas."""
    mapping = {'a': 'A', 'b': 'B'}
    assert eq(d.rename(columns=mapping), full.rename(columns=mapping))
def test_rename_function():
    """Column rename via a callable matches pandas."""
    renamer = lambda x: x.upper()
    assert eq(d.rename(columns=renamer),
              full.rename(columns=renamer))
def test_rename_index():
    """Renaming the index is unsupported and raises ValueError."""
    renamer = {0: 1}
    assert raises(ValueError, lambda: d.rename(index=renamer))
def test_to_frame():
    """Series.to_frame matches pandas, with and without a column name."""
    s = pd.Series([1, 2, 3], name='foo')
    a = dd.from_pandas(s, npartitions=2)
    # the resulting frame knows its dtypes without computing
    assert a.to_frame()._known_dtype
    assert eq(s.to_frame(), a.to_frame())
    assert eq(s.to_frame('bar'), a.to_frame('bar'))
def test_apply():
    """apply matches pandas along axis=1; axis=0 is not implemented."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(df, npartitions=2)
    func = lambda row: row['x'] + row['y']
    assert eq(ddf.x.apply(lambda x: x + 1),
              df.x.apply(lambda x: x + 1))
    # specify columns
    assert eq(ddf.apply(lambda xy: xy[0] + xy[1], axis=1, columns=None),
              df.apply(lambda xy: xy[0] + xy[1], axis=1))
    assert eq(ddf.apply(lambda xy: xy[0] + xy[1], axis='columns', columns=None),
              df.apply(lambda xy: xy[0] + xy[1], axis='columns'))
    # inference
    assert eq(ddf.apply(lambda xy: xy[0] + xy[1], axis=1),
              df.apply(lambda xy: xy[0] + xy[1], axis=1))
    assert eq(ddf.apply(lambda xy: xy, axis=1),
              df.apply(lambda xy: xy, axis=1))
    # result will be dataframe
    func = lambda x: pd.Series([x, x])
    assert eq(ddf.x.apply(func, name=[0, 1]), df.x.apply(func))
    # inference
    assert eq(ddf.x.apply(func), df.x.apply(func))
    # axis=0
    with tm.assertRaises(NotImplementedError):
        ddf.apply(lambda xy: xy, axis=0)
    with tm.assertRaises(NotImplementedError):
        ddf.apply(lambda xy: xy, axis='index')
def test_cov():
    """cov matches pandas for frames and series pairs; names tokenize by args."""
    df = pd.util.testing.makeMissingDataframe(0.3, 42)
    ddf = dd.from_pandas(df, npartitions=3)
    assert eq(ddf.cov(), df.cov())
    assert eq(ddf.cov(10), df.cov(10))
    # deterministic names, distinct per min_periods argument
    assert ddf.cov()._name == ddf.cov()._name
    assert ddf.cov(10)._name != ddf.cov()._name
    a = df.A
    b = df.B
    da = dd.from_pandas(a, npartitions=3)
    db = dd.from_pandas(b, npartitions=4)
    assert eq(da.cov(db), a.cov(b))
    assert eq(da.cov(db, 10), a.cov(b, 10))
    assert da.cov(db)._name == da.cov(db)._name
    assert da.cov(db, 10)._name != da.cov(db)._name
def test_corr():
    """corr matches pandas; only Pearson is supported, dask arg must be Series."""
    df = pd.util.testing.makeMissingDataframe(0.3, 42)
    ddf = dd.from_pandas(df, npartitions=3)
    assert eq(ddf.corr(), df.corr())
    assert eq(ddf.corr(min_periods=10), df.corr(min_periods=10))
    assert ddf.corr()._name == ddf.corr()._name
    assert ddf.corr(min_periods=10)._name != ddf.corr()._name
    # non-Pearson methods are unsupported
    pytest.raises(NotImplementedError, lambda: ddf.corr(method='spearman'))
    a = df.A
    b = df.B
    da = dd.from_pandas(a, npartitions=3)
    db = dd.from_pandas(b, npartitions=4)
    assert eq(da.corr(db), a.corr(b))
    assert eq(da.corr(db, min_periods=10), a.corr(b, min_periods=10))
    assert da.corr(db)._name == da.corr(db)._name
    assert da.corr(db, min_periods=10)._name != da.corr(db)._name
    pytest.raises(NotImplementedError, lambda: da.corr(db, method='spearman'))
    # Series.corr against a DataFrame is a TypeError
    pytest.raises(TypeError, lambda: da.corr(ddf))
def test_cov_corr_stable():
    """cov/corr stay numerically stable on a very large frame (20M rows)."""
    df = pd.DataFrame(np.random.random((20000000, 2)) * 2 - 1, columns=['a', 'b'])
    ddf = dd.from_pandas(df, npartitions=50)
    assert eq(ddf.cov(), df.cov())
    assert eq(ddf.corr(), df.corr())
def test_apply_infer_columns():
    """apply infers output columns/names from a sample when not provided."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    ddf = dd.from_pandas(df, npartitions=2)
    def return_df(x):
        # will create new DataFrame which columns is ['sum', 'mean']
        return pd.Series([x.sum(), x.mean()], index=['sum', 'mean'])
    # DataFrame to completely different DataFrame
    result = ddf.apply(return_df, axis=1)
    assert isinstance(result, dd.DataFrame)
    tm.assert_index_equal(result.columns, pd.Index(['sum', 'mean']))
    assert eq(result, df.apply(return_df, axis=1))
    # DataFrame to Series
    result = ddf.apply(lambda x: 1, axis=1)
    assert isinstance(result, dd.Series)
    assert result.name is None
    assert eq(result, df.apply(lambda x: 1, axis=1))
    def return_df2(x):
        return pd.Series([x * 2, x * 3], index=['x2', 'x3'])
    # Series to completely different DataFrame
    result = ddf.x.apply(return_df2)
    assert isinstance(result, dd.DataFrame)
    tm.assert_index_equal(result.columns, pd.Index(['x2', 'x3']))
    assert eq(result, df.x.apply(return_df2))
    # Series to Series
    result = ddf.x.apply(lambda x: 1)
    assert isinstance(result, dd.Series)
    # Series->Series apply keeps the original name
    assert result.name == 'x'
    assert eq(result, df.x.apply(lambda x: 1))
def test_index_time_properties():
    """Datetime index accessors (.day, .month) match pandas."""
    i = tm.makeTimeSeries()
    a = dd.from_pandas(i, npartitions=3)
    assert (i.index.day == a.index.day.compute()).all()
    assert (i.index.month == a.index.month.compute()).all()
@pytest.mark.skipif(LooseVersion(pd.__version__) <= '0.16.2',
                    reason="nlargest not in pandas pre 0.16.2")
def test_nlargest():
    """DataFrame.nlargest on a single column matches pandas."""
    from string import ascii_lowercase
    df = pd.DataFrame({'a': np.random.permutation(10),
                       'b': list(ascii_lowercase[:10])})
    ddf = dd.from_pandas(df, npartitions=2)
    res = ddf.nlargest(5, 'a')
    exp = df.nlargest(5, 'a')
    # eq performs the assertions internally
    eq(res, exp)
@pytest.mark.skipif(LooseVersion(pd.__version__) <= '0.16.2',
                    reason="nlargest not in pandas pre 0.16.2")
def test_nlargest_multiple_columns():
    """DataFrame.nlargest over several columns matches pandas."""
    from string import ascii_lowercase
    df = pd.DataFrame({'a': np.random.permutation(10),
                       'b': list(ascii_lowercase[:10]),
                       'c': np.random.permutation(10).astype('float64')})
    ddf = dd.from_pandas(df, npartitions=2)
    result = ddf.nlargest(5, ['a', 'b'])
    expected = df.nlargest(5, ['a', 'b'])
    # eq performs the assertions internally
    eq(result, expected)
def test_reset_index():
    """``reset_index`` should produce the same columns and values as pandas."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    dask_df = dd.from_pandas(pdf, npartitions=2)

    result = dask_df.reset_index()
    expected = pdf.reset_index()

    assert len(result.index.compute()) == len(expected.index)
    tm.assert_index_equal(result.columns, expected.columns)
    tm.assert_numpy_array_equal(result.compute().values, expected.values)
def test_dataframe_compute_forward_kwargs():
    # ``compute`` should forward keyword arguments it does not recognise on
    # to the scheduler without raising, even for unknown keywords.
    x = dd.from_pandas(pd.DataFrame({'a': range(10)}), npartitions=2).a.sum()
    x.compute(bogus_keyword=10)
def test_series_iteritems():
    """Iterating a dask Series should yield the same items as pandas."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4]})
    dask_df = dd.from_pandas(pdf, npartitions=2)
    for expected, actual in zip(pdf['x'].iteritems(), dask_df['x'].iteritems()):
        assert expected == actual
def test_dataframe_iterrows():
    """``iterrows`` on a dask DataFrame should match pandas row by row."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    dask_df = dd.from_pandas(pdf, npartitions=2)
    for expected, actual in zip(pdf.iterrows(), dask_df.iterrows()):
        # Each item is an (index, row-Series) pair; compare the Series.
        tm.assert_series_equal(expected[1], actual[1])
def test_dataframe_itertuples():
    """``itertuples`` on a dask DataFrame should match pandas tuple by tuple."""
    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    dask_df = dd.from_pandas(pdf, npartitions=2)
    for expected, actual in zip(pdf.itertuples(), dask_df.itertuples()):
        assert expected == actual
def test_from_delayed():
    # ``from_delayed`` should build dask collections from delayed objects,
    # with or without explicit metadata, and infer dtypes when possible.
    from dask import delayed
    # Four delayed DataFrames of growing length (1..4 rows).
    dfs = [delayed(tm.makeTimeDataFrame)(i) for i in range(1, 5)]

    # DataFrame with explicit column metadata.
    df = dd.from_delayed(dfs, metadata=['A', 'B', 'C', 'D'])
    assert (df.compute().columns == df.columns).all()
    assert list(df.map_partitions(len).compute()) == [1, 2, 3, 4]

    # Series with explicit name metadata.
    ss = [df.A for df in dfs]
    s = dd.from_delayed(ss, metadata='A')
    assert s.compute().name == s.name
    assert list(s.map_partitions(len).compute()) == [1, 2, 3, 4]

    # Series with no metadata: dtype and name must be inferred.
    s = dd.from_delayed(ss)
    assert s._known_dtype
    assert s.compute().name == s.name

    # DataFrame with a sample frame supplied as metadata.
    df = dd.from_delayed(dfs, tm.makeTimeDataFrame(1))
    assert df._known_dtype
    assert list(df.columns) == ['A', 'B', 'C', 'D']

    # DataFrame with no metadata: columns and dtypes must be inferred.
    df = dd.from_delayed(dfs)
    assert df._known_dtype
    assert list(df.columns) == ['A', 'B', 'C', 'D']
def test_to_delayed():
    """``to_delayed`` should yield one Delayed object per partition."""
    from dask.delayed import Delayed
    pdf = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
    dask_df = dd.from_pandas(pdf, npartitions=2)

    first, second = dask_df.to_delayed()
    assert isinstance(first, Delayed)
    assert isinstance(second, Delayed)
    # The first delayed partition holds the first half of the rows.
    assert eq(first.compute(), pdf.iloc[:2])
def test_astype():
    """``astype`` should match pandas for frames and columns alike."""
    pdf = pd.DataFrame({'x': [1, 2, 3, None], 'y': [10, 20, 30, 40]},
                       index=[10, 20, 30, 40])
    dask_df = dd.from_pandas(pdf, 2)
    assert eq(dask_df.astype(float), pdf.astype(float))
    assert eq(dask_df.x.astype(float), pdf.x.astype(float))
def test_groupby_callable():
    """Grouping by a callable applied to the index should match pandas."""
    pdf = pd.DataFrame({'x': [1, 2, 3, None], 'y': [10, 20, 30, 40]},
                       index=[1, 2, 3, 4])
    dask_df = dd.from_pandas(pdf, 2)

    def iseven(x):
        return x % 2 == 0

    assert eq(pdf.groupby(iseven).y.sum(),
              dask_df.groupby(iseven).y.sum())
    assert eq(pdf.y.groupby(iseven).sum(),
              dask_df.y.groupby(iseven).sum())
def test_set_index_sorted_true():
    """``set_index(sorted=True)`` should reuse the existing order cheaply."""
    df = pd.DataFrame({'x': [1, 2, 3, 4],
                       'y': [10, 20, 30, 40],
                       'z': [4, 3, 2, 1]})
    a = dd.from_pandas(df, 2, sort=False)
    assert not a.known_divisions

    b = a.set_index('x', sorted=True)
    assert b.known_divisions
    # The sorted path should only add tasks, never rewrite the graph.
    assert set(a.dask).issubset(set(b.dask))

    for drop in [True, False]:
        # Assert the comparison results, for consistency with the sibling
        # tests in this module that use ``assert eq(...)`` — the originals
        # discarded ``eq``'s return value.
        assert eq(a.set_index('x', drop=drop),
                  df.set_index('x', drop=drop))
        assert eq(a.set_index(a.x, sorted=True, drop=drop),
                  df.set_index(df.x, drop=drop))
        assert eq(a.set_index(a.x + 1, sorted=True, drop=drop),
                  df.set_index(df.x + 1, drop=drop))

    # ``z`` is not actually sorted, so claiming sorted=True must fail.
    with pytest.raises(ValueError):
        a.set_index(a.z, sorted=True)
def test_methods_tokenize_differently():
    """Different callables passed to ``map_partitions`` must tokenize
    to different task names, so their results are not confused."""
    frame = dd.from_pandas(pd.DataFrame({'x': [1, 2, 3, 4]}), npartitions=1)
    min_name = frame.x.map_partitions(pd.Series.min)._name
    max_name = frame.x.map_partitions(pd.Series.max)._name
    assert min_name != max_name
def test_sorted_index_single_partition():
    """``set_index(sorted=True)`` must work with a single partition."""
    df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]})
    ddf = dd.from_pandas(df, npartitions=1)
    # Assert the comparison result, for consistency with the sibling tests
    # in this module that use ``assert eq(...)`` — the original discarded
    # ``eq``'s return value.
    assert eq(ddf.set_index('x', sorted=True),
              df.set_index('x'))
def test_info(capsys):
    # ``info()`` should print the dask class name followed by one aligned
    # line per column with its dtype, mirroring ``pandas.DataFrame.info``.
    df = pd.DataFrame({'long_column_name': [1, 2, 3, 4], 'short_name': [1, 0, 1, 0]})
    ddf = dd.from_pandas(df, npartitions=1)
    ddf.info()
    # Capture stdout and compare against the exact expected report.
    out, err = capsys.readouterr()
    assert out == ("<class 'dask.dataframe.core.DataFrame'>\n"
                   "Data columns (total 2 columns):\n"
                   "long_column_name int64\n"
                   "short_name int64\n")
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/neighbors/tests/test_lof.py | 34 | 4142 | # Authors: Nicolas Goix <nicolas.goix@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
from math import sqrt
import numpy as np
from sklearn import neighbors
from numpy.testing import assert_array_equal
from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.datasets import load_iris
# Load the iris dataset and randomly permute its samples so the class
# labels are no longer grouped together.  The fixed seed (0) keeps the
# shuffle reproducible across test runs.
rng = check_random_state(0)
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_lof():
    """Basic fit/score/predict behaviour on a hand-built toy set."""
    # Toy sample; the final two points are clear outliers.
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [5, 3], [-4, 2]]

    clf = neighbors.LocalOutlierFactor(n_neighbors=5)
    score = clf.fit(X).negative_outlier_factor_
    assert_array_equal(clf._fit_X, X)

    # Every inlier must score strictly above every outlier.
    inlier_scores, outlier_scores = score[:-2], score[-2:]
    assert_greater(np.min(inlier_scores), np.max(outlier_scores))

    # With contamination=0.25 the two outliers should be labelled -1.
    clf = neighbors.LocalOutlierFactor(contamination=0.25,
                                       n_neighbors=5).fit(X)
    expected_labels = 6 * [1] + 2 * [-1]
    assert_array_equal(clf._predict(), expected_labels)
def test_lof_performance():
    """LOF scores should separate held-out inliers from outliers (high AUC)."""
    # Generate train/test data.
    rng = check_random_state(2)
    X = 0.3 * rng.randn(120, 2)
    # Train on the first 100 samples.  (The original test also computed
    # ``np.r_[X + 2, X - 2]`` here but immediately overwrote it; that dead
    # assignment has been removed — it consumed no random numbers, so the
    # data below is unchanged.)
    X_train = X[:100]

    # Generate some abnormal novel observations.
    X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
    X_test = np.r_[X[100:], X_outliers]
    y_test = np.array([0] * 20 + [1] * 20)

    # Fit the model.
    clf = neighbors.LocalOutlierFactor().fit(X_train)

    # Predict scores (the lower, the more normal).
    y_pred = -clf._decision_function(X_test)

    # Check that roc_auc is good.
    assert_greater(roc_auc_score(y_test, y_pred), .99)
def test_lof_values():
    # Check LOF scores against analytically-derived values on a 3-point
    # toy set with n_neighbors=2.
    # toy samples:
    X_train = [[1, 1], [1, 2], [2, 1]]
    clf = neighbors.LocalOutlierFactor(n_neighbors=2).fit(X_train)
    # Hand-computed LOF values for this configuration: s_0 for the point
    # equidistant from both neighbours, s_1 for the two symmetric points.
    s_0 = 2. * sqrt(2.) / (1. + sqrt(2.))
    s_1 = (1. + sqrt(2)) * (1. / (4. * sqrt(2.)) + 1. / (2. + 2. * sqrt(2)))
    # check predict()
    assert_array_almost_equal(-clf.negative_outlier_factor_, [s_0, s_1, s_1])
    # check predict(one sample not in train)
    assert_array_almost_equal(-clf._decision_function([[2., 2.]]), [s_0])
    # check predict(one sample already in train)
    assert_array_almost_equal(-clf._decision_function([[1., 1.]]), [s_1])
def test_lof_precomputed(random_state=42):
    """LOF must predict identically from raw features and from a
    precomputed distance matrix."""
    # Note: smaller samples may result in spurious test success.
    rng = np.random.RandomState(random_state)
    X = rng.random_sample((10, 4))
    Y = rng.random_sample((3, 4))
    dist_train = metrics.pairwise_distances(X, metric='euclidean')
    dist_test = metrics.pairwise_distances(Y, X, metric='euclidean')

    # Fit on the raw feature matrix (n_samples by n_features).
    feature_lof = neighbors.LocalOutlierFactor(n_neighbors=3)
    feature_lof.fit(X)
    pred_feat_train = feature_lof._predict()
    pred_feat_test = feature_lof._predict(Y)

    # Fit on the dense distance matrix (n_samples by n_samples).
    dist_lof = neighbors.LocalOutlierFactor(n_neighbors=3, algorithm='brute',
                                            metric='precomputed')
    dist_lof.fit(dist_train)
    pred_dist_train = dist_lof._predict()
    pred_dist_test = dist_lof._predict(dist_test)

    assert_array_almost_equal(pred_feat_train, pred_dist_train)
    assert_array_almost_equal(pred_feat_test, pred_dist_test)
def test_n_neighbors_attribute():
    """Oversized ``n_neighbors`` is clipped to n_samples - 1 with a warning."""
    X = iris.data
    max_neighbors = X.shape[0] - 1

    clf = neighbors.LocalOutlierFactor(n_neighbors=500).fit(X)
    assert_equal(clf.n_neighbors_, max_neighbors)

    # The clipping at fit time must also emit a UserWarning.
    clf = neighbors.LocalOutlierFactor(n_neighbors=500)
    assert_warns_message(UserWarning,
                         "n_neighbors will be set to (n_samples - 1)",
                         clf.fit, X)
    assert_equal(clf.n_neighbors_, max_neighbors)
| bsd-3-clause |
alexandrebarachant/mne-python | tutorials/plot_introduction.py | 1 | 15178 | # -*- coding: utf-8 -*-
"""
.. _intro_tutorial:
Basic MEG and EEG data processing
=================================
MNE-Python reimplements most of MNE-C's (the original MNE command line utils)
functionality and offers transparent scripting.
On top of that it extends MNE-C's functionality considerably
(customize events, compute contrasts, group statistics, time-frequency
analysis, EEG-sensor space analyses, etc.) It uses the same files as standard
MNE unix commands: no need to convert your files to a new system or database.
What you can do with MNE Python
-------------------------------
- **Raw data visualization** to visualize recordings, can also use
*mne_browse_raw* for extended functionality (see :ref:`ch_browse`)
- **Epoching**: Define epochs, baseline correction, handle conditions etc.
- **Averaging** to get Evoked data
- **Compute SSP projectors** to remove ECG and EOG artifacts
- **Compute ICA** to remove artifacts or select latent sources.
- **Maxwell filtering** to remove environmental noise.
- **Boundary Element Modeling**: single and three-layer BEM model
creation and solution computation.
- **Forward modeling**: BEM computation and mesh creation
(see :ref:`ch_forward`)
- **Linear inverse solvers** (dSPM, sLORETA, MNE, LCMV, DICS)
- **Sparse inverse solvers** (L1/L2 mixed norm MxNE, Gamma Map,
Time-Frequency MxNE)
- **Connectivity estimation** in sensor and source space
- **Visualization of sensor and source space data**
- **Time-frequency** analysis with Morlet wavelets (induced power,
intertrial coherence, phase lock value) also in the source space
- **Spectrum estimation** using multi-taper method
- **Mixed Source Models** combining cortical and subcortical structures
- **Dipole Fitting**
- **Decoding** multivariate pattern analyis of M/EEG topographies
- **Compute contrasts** between conditions, between sensors, across
subjects etc.
- **Non-parametric statistics** in time, space and frequency
(including cluster-level)
- **Scripting** (batch and parallel computing)
What you're not supposed to do with MNE Python
----------------------------------------------
- **Brain and head surface segmentation** for use with BEM
models -- use Freesurfer.
- **Raw movement compensation** -- use Elekta Maxfilter™
.. note:: This package is based on the FIF file format from Neuromag. It
can read and convert CTF, BTI/4D, KIT and various EEG formats to
FIF.
Installation of the required materials
---------------------------------------
See :ref:`getting_started` with Python.
.. note:: The expected location for the MNE-sample data is
my-path-to/mne-python/examples. If you downloaded data and an example asks
you whether to download it again, make sure
the data reside in the examples directory and you run the script from its
current directory.
From IPython e.g. say::
cd examples/preprocessing
%run plot_find_ecg_artifacts.py
From raw data to evoked data
----------------------------
.. _ipython: http://ipython.scipy.org/
Now, launch `ipython`_ (Advanced Python shell) using the QT backend which best
supported across systems::
$ ipython --matplotlib=qt
First, load the mne package:
"""
import mne
##############################################################################
# If you'd like to turn information status messages off:
mne.set_log_level('WARNING')
##############################################################################
# But it's generally a good idea to leave them on:
mne.set_log_level('INFO')
##############################################################################
# You can set the default level by setting the environment variable
# "MNE_LOGGING_LEVEL", or by having mne-python write preferences to a file:
mne.set_config('MNE_LOGGING_LEVEL', 'WARNING', set_env=True)
##############################################################################
# Note that the location of the mne-python preferences file (for easier manual
# editing) can be found using:
mne.get_config_path()
##############################################################################
# By default logging messages print to the console, but look at
# mne.set_log_file() to save output to a file.
#
# Access raw data
# ^^^^^^^^^^^^^^^
from mne.datasets import sample # noqa
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
print(raw_fname)
##############################################################################
# .. note:: The MNE sample dataset should be downloaded automatically but be
# patient (approx. 2GB)
#
# Read data from file:
raw = mne.io.read_raw_fif(raw_fname)
print(raw)
print(raw.info)
##############################################################################
# Look at the channels in raw:
print(raw.ch_names)
##############################################################################
# Read and plot a segment of raw data
start, stop = raw.time_as_index([100, 115]) # 100 s to 115 s data segment
data, times = raw[:, start:stop]
print(data.shape)
print(times.shape)
data, times = raw[2:20:3, start:stop] # access underlying data
raw.plot()
##############################################################################
# Save a segment of 150s of raw data (MEG only):
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True,
exclude='bads')
raw.save('sample_audvis_meg_raw.fif', tmin=0, tmax=150, picks=picks,
overwrite=True)
##############################################################################
# Define and read epochs
# ^^^^^^^^^^^^^^^^^^^^^^
#
# First extract events:
events = mne.find_events(raw, stim_channel='STI 014')
print(events[:5])
##############################################################################
# Note that, by default, we use stim_channel='STI 014'. If you have a different
# system (e.g., a newer system that uses channel 'STI101' by default), you can
# use the following to set the default stim channel to use for finding events:
mne.set_config('MNE_STIM_CHANNEL', 'STI101', set_env=True)
##############################################################################
# Events are stored as 2D numpy array where the first column is the time
# instant and the last one is the event number. It is therefore easy to
# manipulate.
#
# Define epochs parameters:
event_id = dict(aud_l=1, aud_r=2) # event trigger and conditions
tmin = -0.2 # start of each epoch (200ms before the trigger)
tmax = 0.5 # end of each epoch (500ms after the trigger)
##############################################################################
# Exclude some channels (original bads + 2 more):
raw.info['bads'] += ['MEG 2443', 'EEG 053']
##############################################################################
# The variable raw.info['bads'] is just a python list.
#
# Pick the good channels, excluding raw.info['bads']:
picks = mne.pick_types(raw.info, meg=True, eeg=True, eog=True, stim=False,
exclude='bads')
##############################################################################
# Alternatively one can restrict to magnetometers or gradiometers with:
mag_picks = mne.pick_types(raw.info, meg='mag', eog=True, exclude='bads')
grad_picks = mne.pick_types(raw.info, meg='grad', eog=True, exclude='bads')
##############################################################################
# Define the baseline period:
baseline = (None, 0) # means from the first instant to t = 0
##############################################################################
# Define peak-to-peak rejection parameters for gradiometers, magnetometers
# and EOG:
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
##############################################################################
# Read epochs:
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=baseline, preload=False, reject=reject)
print(epochs)
##############################################################################
# Get single epochs for one condition:
epochs_data = epochs['aud_l'].get_data()
print(epochs_data.shape)
##############################################################################
# epochs_data is a 3D array of dimension (55 epochs, 365 channels, 106 time
# instants).
#
# Scipy supports read and write of matlab files. You can save your single
# trials with:
from scipy import io # noqa
io.savemat('epochs_data.mat', dict(epochs_data=epochs_data), oned_as='row')
##############################################################################
# or if you want to keep all the information about the data you can save your
# epochs in a fif file:
epochs.save('sample-epo.fif')
##############################################################################
# and read them later with:
saved_epochs = mne.read_epochs('sample-epo.fif')
##############################################################################
# Compute evoked responses for auditory responses by averaging and plot it:
evoked = epochs['aud_l'].average()
print(evoked)
evoked.plot()
##############################################################################
# .. topic:: Exercise
#
# 1. Extract the max value of each epoch
max_in_each_epoch = [e.max() for e in epochs['aud_l']] # doctest:+ELLIPSIS
print(max_in_each_epoch[:4]) # doctest:+ELLIPSIS
##############################################################################
# It is also possible to read evoked data stored in a fif file:
evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
evoked1 = mne.read_evokeds(
evoked_fname, condition='Left Auditory', baseline=(None, 0), proj=True)
##############################################################################
# Or another one stored in the same file:
evoked2 = mne.read_evokeds(
evoked_fname, condition='Right Auditory', baseline=(None, 0), proj=True)
##############################################################################
# Two evoked objects can be contrasted using :func:`mne.combine_evoked`.
# This function can use ``weights='equal'``, which provides a simple
# element-by-element subtraction (and sets the
# :attr:`mne.Evoked.nave` attribute properly based on the underlying number
# of trials) using either equivalent call:
contrast = mne.combine_evoked([evoked1, evoked2], weights=[0.5, -0.5])
contrast = mne.combine_evoked([evoked1, -evoked2], weights='equal')
print(contrast)
##############################################################################
# To do a weighted sum based on the number of averages, which will give
# you what you would have gotten from pooling all trials together in
# :class:`mne.Epochs` before creating the :class:`mne.Evoked` instance,
# you can use ``weights='nave'``:
average = mne.combine_evoked([evoked1, evoked2], weights='nave')
print(contrast)
##############################################################################
# Instead of dealing with mismatches in the number of averages, we can use
# trial-count equalization before computing a contrast, which can have some
# benefits in inverse imaging (note that here ``weights='nave'`` will
# give the same result as ``weights='equal'``):
epochs_eq = epochs.copy().equalize_event_counts(['aud_l', 'aud_r'])[0]
evoked1, evoked2 = epochs_eq['aud_l'].average(), epochs_eq['aud_r'].average()
print(evoked1)
print(evoked2)
contrast = mne.combine_evoked([evoked1, -evoked2], weights='equal')
print(contrast)
##############################################################################
# Time-Frequency: Induced power and inter trial coherence
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Define parameters:
import numpy as np # noqa
n_cycles = 2 # number of cycles in Morlet wavelet
freqs = np.arange(7, 30, 3) # frequencies of interest
##############################################################################
# Compute induced power and phase-locking values and plot gradiometers:
from mne.time_frequency import tfr_morlet # noqa
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
return_itc=True, decim=3, n_jobs=1)
# power.plot()
##############################################################################
# Inverse modeling: MNE and dSPM on evoked and raw data
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Import the required functions:
from mne.minimum_norm import apply_inverse, read_inverse_operator # noqa
##############################################################################
# Read the inverse operator:
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
inverse_operator = read_inverse_operator(fname_inv)
##############################################################################
# Define the inverse parameters:
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM"
##############################################################################
# Compute the inverse solution:
stc = apply_inverse(evoked, inverse_operator, lambda2, method)
##############################################################################
# Save the source time courses to disk:
stc.save('mne_dSPM_inverse')
##############################################################################
# Now, let's compute dSPM on a raw file within a label:
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
label = mne.read_label(fname_label)
##############################################################################
# Compute inverse solution during the first 15s:
from mne.minimum_norm import apply_inverse_raw # noqa
start, stop = raw.time_as_index([0, 15]) # read the first 15s of data
stc = apply_inverse_raw(raw, inverse_operator, lambda2, method, label,
start, stop)
##############################################################################
# Save result in stc files:
stc.save('mne_dSPM_raw_inverse_Aud')
##############################################################################
# What else can you do?
# ^^^^^^^^^^^^^^^^^^^^^
#
# - detect heart beat QRS component
# - detect eye blinks and EOG artifacts
# - compute SSP projections to remove ECG or EOG artifacts
# - compute Independent Component Analysis (ICA) to remove artifacts or
# select latent sources
# - estimate noise covariance matrix from Raw and Epochs
# - visualize cross-trial response dynamics using epochs images
# - compute forward solutions
# - estimate power in the source space
# - estimate connectivity in sensor and source space
# - morph stc from one brain to another for group studies
# - compute mass univariate statistics base on custom contrasts
# - visualize source estimates
# - export raw, epochs, and evoked data to other python data analysis
# libraries e.g. pandas
# - and many more things ...
#
# Want to know more ?
# ^^^^^^^^^^^^^^^^^^^
#
# Browse `the examples gallery <auto_examples/index.html>`_.
print("Done!")
| bsd-3-clause |
Merinorus/adaisawesome | Homework/03 - Interactive Viz/Mapping onto Switzerland.py | 1 | 1723 |
# coding: utf-8

# Notebook-exported script: map SNSF grant funding per Swiss canton with a
# Folium choropleth over a canton-level TopoJSON outline.

import folium
import pandas as pd

# Test seeing Switzerland
ch_map = folium.Map(location=[47.3769, 8.5417], tiles='Stamen Toner',
                    zoom_start=13)
ch_map.save('stamen_toner.html')
ch_map

# Now do the TopoJSON overlay

# Import the Switzerland map (from the folium pylib notebook)
topo_path = r'ch-cantons.topojson.json'
# Import our csv file with all of the values for the amounts of the grants
data = 'P3_GrantExport.csv'

# Insert coordinates that are for Switzerland (i.e. 9.594226,47.525058)
ch_map = folium.Map(location=[46.8769, 8.6017], tiles='Mapbox Bright',
                    zoom_start=7)
ch_map.choropleth(geo_path=topo_path, topojson='objects.ch-cantons')
ch_map.save('ch_map.html')
ch_map

# Need to use colors wisely - because the total-grant scale is continuous and
# not discrete, we will be using different shades of green.

# Choropleth shaded by the total grants received per canton.
# Fixes from review: the original read an undefined ``state_unemployment``
# variable (should be the grants CSV defined above), passed an undefined
# ``state_geo`` as geo_path (should be ``topo_path``), and shadowed the
# builtin ``map``.
grants_data = pd.read_csv(data)

# Let Folium determine the scale
grant_map = folium.Map(location=[47.3769, 8.5417], zoom_start=13)
grant_map.choropleth(geo_path=topo_path, data=grants_data,
                     columns=['Canton Shortname', 'Total Sum'],
                     key_on='feature.id',
                     fill_color='YlGn', fill_opacity=0.7, line_opacity=0.2,
                     legend_name='Total Grants Received (CHF)')
grant_map.save('swiss_grants.html')
| gpl-3.0 |
wmvanvliet/mne-python | examples/decoding/plot_decoding_spoc_CMC.py | 9 | 3007 | """
====================================
Continuous Target Decoding with SPoC
====================================
Source Power Comodulation (SPoC) :footcite:`DahneEtAl2014` allows to identify
the composition of
orthogonal spatial filters that maximally correlate with a continuous target.
SPoC can be seen as an extension of the CSP for continuous variables.
Here, SPoC is applied to decode the (continuous) fluctuation of an
electromyogram from MEG beta activity using data from
`Cortico-Muscular Coherence example of FieldTrip
<http://www.fieldtriptoolbox.org/tutorial/coherence>`_
"""
# Author: Alexandre Barachant <alexandre.barachant@gmail.com>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import Epochs
from mne.decoding import SPoC
from mne.datasets.fieldtrip_cmc import data_path
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold, cross_val_predict
# Define parameters
fname = data_path() + '/SubjectCMC.ds'
raw = mne.io.read_raw_ctf(fname)
raw.crop(50., 250.) # crop for memory purposes
# Filter muscular activity to only keep high frequencies
emg = raw.copy().pick_channels(['EMGlft']).load_data()
emg.filter(20., None, fir_design='firwin')
# Filter MEG data to focus on beta band
raw.pick_types(meg=True, ref_meg=True, eeg=False, eog=False).load_data()
raw.filter(15., 30., fir_design='firwin')
# Build epochs as sliding windows over the continuous raw file
events = mne.make_fixed_length_events(raw, id=1, duration=.250)
# Epoch length is 1.5 second
meg_epochs = Epochs(raw, events, tmin=0., tmax=1.500, baseline=None,
detrend=1, decim=8)
emg_epochs = Epochs(emg, events, tmin=0., tmax=1.500, baseline=None)
# Prepare classification
X = meg_epochs.get_data()
y = emg_epochs.get_data().var(axis=2)[:, 0] # target is EMG power
# Classification pipeline with SPoC spatial filtering and Ridge Regression
spoc = SPoC(n_components=2, log=True, reg='oas', rank='full')
clf = make_pipeline(spoc, Ridge())
# Define a two fold cross-validation
cv = KFold(n_splits=2, shuffle=False)
# Run cross validaton
y_preds = cross_val_predict(clf, X, y, cv=cv)
# Plot the True EMG power and the EMG power predicted from MEG data
fig, ax = plt.subplots(1, 1, figsize=[10, 4])
times = raw.times[meg_epochs.events[:, 0] - raw.first_samp]
ax.plot(times, y_preds, color='b', label='Predicted EMG')
ax.plot(times, y, color='r', label='True EMG')
ax.set_xlabel('Time (s)')
ax.set_ylabel('EMG Power')
ax.set_title('SPoC MEG Predictions')
plt.legend()
mne.viz.tight_layout()
plt.show()
##############################################################################
# Plot the contributions to the detected components (i.e., the forward model)
spoc.fit(X, y)
spoc.plot_patterns(meg_epochs.info)
##############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
ADM91/PowerSystem-RL | visualize/visualize_state.py | 1 | 10292 | from matplotlib import pyplot as plt
from matplotlib import animation
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import numpy as np
from oct2py import octave
def visualize_state(ideal_case, ideal_state, state_list, fig_num=1, frames=20, save=False):
color_map = {0: 'black',
1: 'green'}
color_map_2 = {0: 'green',
1: 'red'}
# Initialize figure
fig = plt.figure(fig_num, figsize=(12, 12))
plt.ion()
ax1 = plt.subplot2grid((3, 2), (0, 0))
ax2 = plt.subplot2grid((3, 2), (0, 1))
ax3 = plt.subplot2grid((3, 2), (1, 0), colspan=2)
ax4 = plt.subplot2grid((3, 2), (2, 0), colspan=2)
# Generator state plot
gen_max = ideal_case['gen'][(octave.isload(ideal_case['gen']) == 0).reshape((-1)), 8].reshape((-1,))
gen_ideal = ideal_state['real gen'][:, 1].reshape((-1,))
gen_bus = ideal_state['real gen'][:, 0].reshape((-1,))
cap_order = np.argsort(gen_max, axis=0, kind='quicksort')
gen_width = 0.25
gen_x = np.arange(len(gen_max))
ax1.set_xticks(gen_x)
ax1.set_xticklabels(['bus %d' % i for i in gen_bus[cap_order]])
ax1.set_title('Generator schedule')
ax1.set_ylabel('Power (MW)')
# Load state plot
d_load_ideal = -ideal_state['dispatch load'][:, 1].reshape((-1,))
d_load_bus = ideal_state['dispatch load'][:, 0].reshape((-1,))
d_load_order = np.argsort(d_load_ideal, axis=0, kind='quicksort')
f_load_ideal = ideal_state['fixed load'][:, 1].reshape((-1,))
f_load_bus = ideal_state['fixed load'][:, 0].reshape((-1,))
f_load_order = np.argsort(f_load_ideal, axis=0, kind='quicksort')
load_width = 0.5
load_x1 = np.arange(len(d_load_ideal))
load_x2 = np.arange(len(load_x1) + 1, len(load_x1) + 1 + len(f_load_ideal))
ticks = np.concatenate(
(['b %d' % i for i in d_load_bus[d_load_order]], ['b %d' % i for i in f_load_bus[f_load_order]]))
ax2.set_xticklabels(ticks)
ax2.set_xticks(np.concatenate((load_x1, load_x2)))
ax2.set_title('Load Profile')
ax2.set_ylabel('Power (MW)')
# Line state plot
mva_rating = ideal_case['branch'][:, 5].reshape((-1,))
real_inj_ideal = np.abs(ideal_state['real inj'][:, 2].reshape((-1,)))
real_inj_buses = np.abs(ideal_state['real inj'][:, 0:2].reshape((-1, 2)))
line_order = np.argsort(mva_rating, axis=0, kind='quicksort')
line_width = 0.25
line_x = np.arange(len(mva_rating))
ax3.set_xticks(line_x)
ticks = ['%d - %d' % (i[0], i[1]) for i in real_inj_buses[line_order]]
ax3.set_xticklabels(ticks)
ax3.set_title('Line loadings')
ax3.set_ylabel('Power (MW)')
# ax3.set_xlim([-1, len(line_order)])
# Line SPA plot
ax4.set_xticks(line_x)
ticks = ['%d - %d' % (i[0], i[1]) for i in real_inj_buses[line_order]]
ax4.set_xticklabels(ticks)
ax4.set_title('Line SPA differences')
ax4.set_ylabel('Degrees')
# ax4.set_xlim([-1, len(line_order)])
ax4.set_ylim([0, 40])
# Init dynamic plot objects
# line_spa_ref = ax4.plot(line_x, np.ones(len(line_x))*10, color='red', markersize=5)
line_spa_ref = ax4.axhline(y=10, color='black', linewidth=2)
gen_ideal = ax1.bar(gen_x - gen_width / 2, gen_ideal[cap_order], gen_width, align='center', alpha=0.9, color='blue')
gen_cap = ax1.bar(gen_x, gen_max[cap_order], gen_width*2, align='center', alpha=0.3)
gen_curr = ax1.bar(gen_x+gen_width/2, np.zeros(len(gen_x)), gen_width, align='center', alpha=0.9, color='red')
d_ideal = ax2.bar(load_x1, d_load_ideal[d_load_order], load_width, align='center', alpha=0.2)
d_curr = ax2.bar(load_x1, np.zeros(len(load_x1)), load_width, align='center', alpha=0.9, color='green')
f_ideal = ax2.bar(load_x2, f_load_ideal[f_load_order], load_width, align='center', alpha=0.2)
f_curr = ax2.bar(load_x2, np.zeros(len(load_x2)), load_width, align='center', alpha=0.9, color='green')
line_ideal = ax3.bar(line_x - line_width / 2, real_inj_ideal[line_order], line_width, align='center', alpha=0.9, color='blue')
line_rating = ax3.bar(line_x, mva_rating[line_order], line_width*2, align='center', alpha=0.3)
line_curr = ax3.bar(line_x+line_width/2, np.zeros(len(line_x)), line_width, align='center', alpha=0.9, color='red')
line_spa = ax4.bar(line_x, np.zeros(len(line_x)), line_width*2, align='center', alpha=0.9, color='green')
gen_limit_patch = mpatches.Patch(color='green', alpha=0.2, label='Limit')
gen_offline_patch = mpatches.Patch(color='black', alpha=0.2, label='Offline')
gen_ideal_patch = mpatches.Patch(color='blue', alpha=0.9, label='Ideal state')
gen_current_patch = mpatches.Patch(color='red', alpha=0.9, label='Current state')
load_ideal_patch = mpatches.Patch(color='blue', alpha=0.2, label='Ideal state')
load_offline_patch = mpatches.Patch(color='black', alpha=0.2, label='Blackout')
load_current_patch = mpatches.Patch(color='green', alpha=0.9, label='Current state')
line_limit_patch = mpatches.Patch(color='green', alpha=0.2, label='Limit')
line_offline_patch = mpatches.Patch(color='black', alpha=0.2, label='Offline')
line_ideal_patch = mpatches.Patch(color='blue', alpha=0.9, label='Ideal state')
line_current_patch = mpatches.Patch(color='red', alpha=0.9, label='Current state')
spa_green = mpatches.Patch(color='green', alpha=0.9, label='Line SPA diff')
spa_red = mpatches.Patch(color='red', alpha=0.9, label='Line in question')
spa_limit = mlines.Line2D([], [], color='black', label='SPA diff limit')
ax1.legend(handles=[gen_limit_patch, gen_offline_patch, gen_ideal_patch, gen_current_patch], loc='upper left')
ax2.legend(handles=[load_ideal_patch, load_offline_patch, load_current_patch], loc='upper right')
ax3.legend(handles=[line_limit_patch, line_offline_patch, line_ideal_patch, line_current_patch], loc='upper left')
ax4.legend(handles=[spa_green, spa_red, spa_limit], loc='upper left')
def update(frame):
    """Per-frame animation callback for FuncAnimation.

    Linearly interpolates bar heights between two consecutive entries of
    ``state_list`` and recolors bars by island membership, so each state
    transition is rendered over ``frames`` animation frames.

    Args:
      frame: global frame counter supplied by FuncAnimation.

    Returns:
      The tuple of animated bar containers (useful if blitting is enabled).

    Relies on closure variables built during figure setup (``state_list``,
    ``frames``, the bar containers, ``color_map``/``color_map_2``, the
    ``*_order`` index arrays) -- defined outside this function.
    """
    # Split the global frame counter into a state index and a sub-frame
    # position; `frac` is the interpolation weight between state
    # `list_ind` and state `list_ind + 1`.
    list_ind = int(np.floor(frame / frames))
    between_frame = frame % frames
    frac = between_frame / frames

    # --- Generators: recolor by island flag (last column), interpolate
    # real-power output (column 1).
    gen_island = state_list[list_ind]['real gen'][cap_order, -1]
    for rect, color in zip(gen_cap, [color_map[ind] for ind in gen_island]):
        rect.set_color(color)
    gen_now = state_list[list_ind]['real gen'][cap_order, 1].reshape((-1,))
    gen_next = state_list[list_ind + 1]['real gen'][cap_order, 1].reshape((-1,))
    for rect, height in zip(gen_curr, gen_now + (gen_next - gen_now) * frac):
        rect.set_height(height)

    # --- Loads: recolor dispatchable and fixed loads; interpolate the
    # dispatchable load (negated so consumption plots positive).  Fixed
    # load is taken from the current state only (no interpolation).
    d_load_island = state_list[list_ind]['dispatch load'][d_load_order, -1]
    f_load_island = state_list[list_ind]['fixed load'][f_load_order, -1]
    for rect, color in zip(d_ideal, [color_map[ind] for ind in d_load_island]):
        rect.set_color(color)
    for rect, color in zip(f_ideal, [color_map[ind] for ind in f_load_island]):
        rect.set_color(color)
    d_now = -state_list[list_ind]['dispatch load'][d_load_order, 1].reshape((-1,))
    d_next = -state_list[list_ind + 1]['dispatch load'][d_load_order, 1].reshape((-1,))
    for rect, height in zip(d_curr, d_now + (d_next - d_now) * frac):
        rect.set_height(height)
    f_load_current = state_list[list_ind]['fixed load'][f_load_order, 1].reshape((-1,))
    for rect, height in zip(f_curr, f_load_current):
        rect.set_height(height)

    # --- Lines: recolor the rating bars by island, interpolate the
    # magnitude of real power injection (column 2).
    line_island = state_list[list_ind]['real inj'][line_order, -1]
    for rect, color in zip(line_rating, [color_map[ind] for ind in line_island]):
        rect.set_color(color)
    inj_now = np.abs(state_list[list_ind]['real inj'][line_order, 2].reshape((-1,)))
    inj_next = np.abs(state_list[list_ind + 1]['real inj'][line_order, 2].reshape((-1,)))
    for rect, height in zip(line_curr, inj_now + (inj_next - inj_now) * frac):
        rect.set_height(height)

    # --- Line standing phase angle (SPA) differences: per line, look up the
    # voltage angle of each end bus and take |angle1 - angle2|; interpolate
    # between the two states.  (Replaces the original quadruple list
    # comprehension with a small helper for readability.)
    def _spa_diff(ind):
        # |voltage-angle difference| per line for state_list[ind].
        angles = state_list[ind]['bus voltage angle']
        inj = state_list[ind]['real inj']
        bus1 = [angles[angles[:, 0] == i, 1] for i in inj[line_order, 0]]
        bus2 = [angles[angles[:, 0] == i, 1] for i in inj[line_order, 1]]
        return np.abs(np.array(bus1) - np.array(bus2))

    spa_now = _spa_diff(list_ind)
    spa_next = _spa_diff(list_ind + 1)
    for rect, height in zip(line_spa, spa_now + (spa_next - spa_now) * frac):
        rect.set_height(height)
    # Detect if a line's island/state flag changes between the two states;
    # color_map_2 maps the (nonzero) change to red so the line stands out.
    line_state_change = (state_list[list_ind + 1]['real inj'][line_order, -1]
                         - state_list[list_ind]['real inj'][line_order, -1])
    for rect, color in zip(line_spa, [color_map_2[i] for i in line_state_change]):
        rect.set_color(color)
    # (Removed a stray no-op expression statement `list_ind` that was here.)

    # Figure title follows the *next* state's label, if it has one.
    if state_list[list_ind + 1]['Title']:
        plt.suptitle(state_list[list_ind + 1]['Title'], fontsize=18)
    else:
        plt.suptitle('')
    return gen_cap, gen_curr, d_ideal, f_ideal, d_curr, f_curr, line_ideal, line_rating, line_curr, line_spa
# Drive `update` across every state transition: each consecutive pair of
# states gets `frames` interpolation frames (minus one overall so the
# final `list_ind + 1` lookup stays in range).
animate = animation.FuncAnimation(fig, update, frames=(len(state_list)-1)*frames-1, interval=20)
plt.show()
if save:
# NOTE(review): saving runs only after the interactive window is closed,
# and the 'imagemagick' writer requires ImageMagick to be installed.
animate.save('Animate.gif', writer='imagemagick', dpi=40)
# Returning the animation keeps a live reference so it isn't garbage
# collected by the caller.
return animate
| gpl-3.0 |
gibiansky/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 2 | 51684 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
def _assert_metrics_in_range(keys, metrics):
  """Assert every named metric lies within [0, 1], with float tolerance."""
  # Small slack so metrics landing exactly on 0.0 or 1.0 don't fail due
  # to floating point rounding.
  tolerance = 0.00001
  lower, upper = 0.0 - tolerance, 1.0 + tolerance
  for metric_key in keys:
    estimator_test_utils.assert_in_range(lower, upper, metric_key, metrics)
class EmbeddingMultiplierTest(tf.test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
# embedding_lr_multipliers keyed on a one-hot (non-embedding) column
# must be rejected by the model_fn.
one_hot_language = tf.contrib.layers.one_hot_column(
tf.contrib.layers.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib._multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
tf.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = tf.constant([[0], [0], [0]], dtype=tf.int32)
with self.assertRaisesRegexp(
ValueError, 'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, tf.contrib.learn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
# With an lr multiplier of 0.0 the 'language' embedding must stay at its
# 0.1 initializer after training steps, while the unscaled 'wire'
# embedding must move.
embedding_language = tf.contrib.layers.embedding_column(
tf.contrib.layers.sparse_column_with_hash_bucket('language', 10),
dimension=1, initializer=tf.constant_initializer(0.1))
embedding_wire = tf.contrib.layers.embedding_column(
tf.contrib.layers.sparse_column_with_hash_bucket('wire', 10),
dimension=1, initializer=tf.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib._multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
tf.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
tf.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = tf.constant([[0], [0], [0]], dtype=tf.int32)
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, tf.contrib.learn.ModeKeys.TRAIN, params)
with tf.train.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class DNNLinearCombinedClassifierTest(tf.test.TestCase):
"""End-to-end tests for tf.contrib.learn.DNNLinearCombinedClassifier."""
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.DNNLinearCombinedClassifier)
def testNoFeatureColumns(self):
# At least one of linear/dnn feature columns must be supplied.
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testEmbeddingMultiplier(self):
# The multiplier dict must be forwarded verbatim into estimator params.
embedding_language = tf.contrib.layers.embedding_column(
tf.contrib.layers.sparse_column_with_hash_bucket('language', 10),
dimension=1, initializer=tf.constant_initializer(0.1))
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual(
{embedding_language: 0.8},
classifier._estimator.params['embedding_lr_multipliers'])
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
bucketized_feature = [tf.contrib.layers.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i): tf.reshape(tf.constant(iris.data[:, i], dtype=tf.float32),
[-1, 1])})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = tf.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = tf.reshape(tf.constant(iris.target, dtype=tf.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [tf.contrib.layers.real_valued_column(str(i))
for i in range(4)]
linear_features = [
tf.contrib.layers.bucketized_column(
cont_features[i], test_data.get_quantile_based_buckets(
iris.data[:, i], 10)) for i in range(4)
]
linear_features.append(tf.contrib.layers.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = tf.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=2e7)
]
embedding_features = [
tf.contrib.layers.embedding_column(sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
tf.contrib.learn.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with tf.test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = tf.train.ClusterSpec({})
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = tf.contrib.learn.datasets.load_iris()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
bucketized_features = [
tf.contrib.layers.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
labels = tf.constant([[1], [0], [0], [0]])
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
labels = tf.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
labels = tf.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = tf.constant([[1], [1], [1], [1]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
bucketized_features = [
tf.contrib.layers.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=tf.train.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=tf.train.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
bucketized_features = [
tf.contrib.layers.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)
]
bucketized_features = [
tf.contrib.layers.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = tf.contrib.framework.get_global_step()
learning_rate = tf.train.exponential_decay(learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return tf.train.AdagradOptimizer(learning_rate=learning_rate)
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1], [0], [0], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32)}
return features, labels
def _input_fn_predict():
y = tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=1)
features = {'x': y}
return features
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = tf.to_float(labels)
predictions = tf.slice(predictions, [0, 1], [-1, 1])
return tf.reduce_sum(tf.mul(predictions, labels))
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='classes'),
'my_precision': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key='classes'),
'my_metric': MetricSpec(
metric_fn=_my_metric_op,
prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric'
]).issubset(set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(classifier.predict(input_fn=predict_input_fn)))
self.assertEqual(_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): tf.contrib.metrics.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
tf.contrib.metrics.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_auc,
prediction_key='bad_type')})
def testVariableQuery(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = tf.constant([[1], [1], [1], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32),}
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
dense_shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[
tf.contrib.layers.real_valued_column('age'),
language,
],
dnn_feature_columns=[
tf.contrib.layers.embedding_column(language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = tf.placeholder(tf.string)
return features, targets
classifier.export(export_dir, serving_input_fn, input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = tf.constant([[1], [1], [1], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32),}
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
# logodds(0.75) = 1.09861228867
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value('centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = tf.constant([[1], [1], [1], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32),}
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
dense_shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertNotIn('dnn/logits/biases', classifier.get_variable_names())
self.assertNotIn('dnn/logits/weights', classifier.get_variable_names())
self.assertEquals(1, len(classifier.linear_bias_))
self.assertEquals(2, len(classifier.linear_weights_))
self.assertEquals(1, len(classifier.linear_weights_['linear/age/weight']))
self.assertEquals(
100, len(classifier.linear_weights_['linear/language/weights']))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
dense_shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 99)
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertNotIn('dnn/logits/biases', classifier.get_variable_names())
self.assertNotIn('dnn/logits/weights', classifier.get_variable_names())
self.assertEquals(1, len(classifier.linear_bias_))
self.assertEquals(99, len(classifier.linear_weights_))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertEquals(3, len(classifier.dnn_bias_))
self.assertEquals(3, len(classifier.dnn_weights_))
self.assertNotIn('linear/bias_weight', classifier.get_variable_names())
self.assertNotIn('linear/feature_BUCKETIZED_weights',
classifier.get_variable_names())
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = tf.constant([[1], [1], [1], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32),}
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
# hiddenlayer_0/weights,hiddenlayer_1/weights and dnn_logits/weights.
self.assertEquals(3, len(classifier.dnn_weights_))
# hiddenlayer_0/biases, hiddenlayer_1/biases, dnn_logits/biases.
self.assertEquals(3, len(classifier.dnn_bias_))
class DNNLinearCombinedRegressorTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = tf.constant([[100.], [3.], [2.], [2.]])
features = {'x': tf.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, labels
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
  """Tests training with given weight column."""

  def _train_input_fn():
    # One (y = x) row and three (y = Not(x)) rows; the first row's much
    # larger weight should pull the fit towards (y = x).
    features = {
        'x': tf.ones(shape=[4, 1], dtype=tf.float32),
        'w': tf.constant([[100.], [3.], [2.], [2.]])
    }
    return features, tf.constant([[1.], [0.], [0.], [0.]])

  def _eval_input_fn():
    # Four equally weighted (y = x) rows.
    features = {
        'x': tf.ones(shape=[4, 1], dtype=tf.float32),
        'w': tf.constant([[1.], [1.], [1.], [1.]])
    }
    return features, tf.constant([[1.], [1.], [1.], [1.]])

  x_column = tf.contrib.layers.real_valued_column('x')
  regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
      weight_column_name='w',
      linear_feature_columns=[x_column],
      dnn_feature_columns=[x_column],
      dnn_hidden_units=[3, 3],
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_train_input_fn, steps=100)
  scores = regressor.evaluate(input_fn=_eval_input_fn, steps=1)
  # With the weighting the model should learn (y = x), so loss ~ 0.
  self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
  """Tests predict method with as_iterable=False."""
  labels = [1., 0., 0.2]

  def _input_fn(num_epochs=None):
    age = tf.train.limit_epochs(
        tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs)
    language = tf.SparseTensor(values=['en', 'fr', 'zh'],
                               indices=[[0, 0], [0, 1], [2, 0]],
                               dense_shape=[3, 2])
    return ({'age': age, 'language': language},
            tf.constant(labels, dtype=tf.float32))

  language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
      'language', hash_bucket_size=20)
  age_column = tf.contrib.layers.real_valued_column('age')
  regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
      linear_feature_columns=[language_column, age_column],
      dnn_feature_columns=[
          tf.contrib.layers.embedding_column(language_column, dimension=1),
          age_column,
      ],
      dnn_hidden_units=[3, 3],
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=10)
  scores = regressor.evaluate(input_fn=_input_fn, steps=1)
  self.assertIn('loss', scores.keys())
  # Smoke-check that batch (non-iterable) prediction runs.
  regressor.predict(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
  """Tests predict method with as_iterable=True."""
  labels = [1., 0., 0.2]

  def _input_fn(num_epochs=None):
    age = tf.train.limit_epochs(
        tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs)
    language = tf.SparseTensor(values=['en', 'fr', 'zh'],
                               indices=[[0, 0], [0, 1], [2, 0]],
                               dense_shape=[3, 2])
    return ({'age': age, 'language': language},
            tf.constant(labels, dtype=tf.float32))

  language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
      'language', hash_bucket_size=20)
  age_column = tf.contrib.layers.real_valued_column('age')
  regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
      linear_feature_columns=[language_column, age_column],
      dnn_feature_columns=[
          tf.contrib.layers.embedding_column(language_column, dimension=1),
          age_column,
      ],
      dnn_hidden_units=[3, 3],
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=10)
  scores = regressor.evaluate(input_fn=_input_fn, steps=1)
  self.assertIn('loss', scores.keys())
  # Bound the epochs so the prediction iterator terminates.
  predict_input_fn = functools.partial(_input_fn, num_epochs=1)
  regressor.predict(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
  """Tests custom evaluation metrics."""

  def _input_fn(num_epochs=None):
    # One (y = x) row and three (y = Not(x)) rows.
    features = {'x': tf.train.limit_epochs(
        tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
    return features, tf.constant([[1.], [0.], [0.], [0.]])

  def _my_metric_op(predictions, labels):
    return tf.reduce_sum(tf.mul(predictions, labels))

  x_column = tf.contrib.layers.real_valued_column('x')
  regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
      linear_feature_columns=[x_column],
      dnn_feature_columns=[x_column],
      dnn_hidden_units=[3, 3],
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=10)
  scores = regressor.evaluate(
      input_fn=_input_fn,
      steps=1,
      metrics={
          'my_error': tf.contrib.metrics.streaming_mean_squared_error,
          ('my_metric', 'scores'): _my_metric_op
      })
  for key in ('loss', 'my_error', 'my_metric'):
    self.assertIn(key, set(scores.keys()))
  predict_input_fn = functools.partial(_input_fn, num_epochs=1)
  predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
  self.assertAlmostEqual(
      _sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
      scores['my_error'])
  # A tuple key whose 2nd element is not "scores" raises KeyError.
  with self.assertRaises(KeyError):
    regressor.evaluate(
        input_fn=_input_fn,
        steps=1,
        metrics={('my_error', 'predictions'):
                 tf.contrib.metrics.streaming_mean_squared_error})
  # A tuple key must have exactly two elements.
  with self.assertRaises(ValueError):
    regressor.evaluate(
        input_fn=_input_fn,
        steps=1,
        metrics={
            ('bad_length_name', 'scores', 'bad_length'):
                tf.contrib.metrics.streaming_mean_squared_error
        })
def testCustomMetricsWithMetricSpec(self):
  """Tests custom evaluation metrics."""

  def _input_fn(num_epochs=None):
    # One (y = x) row and three (y = Not(x)) rows.
    features = {'x': tf.train.limit_epochs(
        tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
    return features, tf.constant([[1.], [0.], [0.], [0.]])

  def _my_metric_op(predictions, labels):
    return tf.reduce_sum(tf.mul(predictions, labels))

  x_column = tf.contrib.layers.real_valued_column('x')
  regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
      linear_feature_columns=[x_column],
      dnn_feature_columns=[x_column],
      dnn_hidden_units=[3, 3],
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=5)
  scores = regressor.evaluate(
      input_fn=_input_fn,
      steps=1,
      metrics={
          'my_error': MetricSpec(
              metric_fn=tf.contrib.metrics.streaming_mean_squared_error,
              prediction_key='scores'),
          'my_metric': MetricSpec(
              metric_fn=_my_metric_op,
              prediction_key='scores')
      })
  for key in ('loss', 'my_error', 'my_metric'):
    self.assertIn(key, set(scores.keys()))
  predict_input_fn = functools.partial(_input_fn, num_epochs=1)
  predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
  self.assertAlmostEqual(
      _sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
      scores['my_error'])
  # A MetricSpec keyed on an unknown prediction tensor raises KeyError.
  with self.assertRaisesRegexp(KeyError, 'bad_type'):
    regressor.evaluate(
        input_fn=_input_fn,
        steps=1,
        metrics={
            'bad_name': MetricSpec(
                metric_fn=tf.contrib.metrics.streaming_auc,
                prediction_key='bad_type')})
def testExport(self):
  """Tests export model for servo."""
  labels = [1., 0., 0.2]

  def _input_fn(num_epochs=None):
    age = tf.train.limit_epochs(
        tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs)
    language = tf.SparseTensor(values=['en', 'fr', 'zh'],
                               indices=[[0, 0], [0, 1], [2, 0]],
                               dense_shape=[3, 2])
    return ({'age': age, 'language': language},
            tf.constant(labels, dtype=tf.float32))

  language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
      'language', hash_bucket_size=20)
  regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
      linear_feature_columns=[
          language_column,
          tf.contrib.layers.real_valued_column('age')
      ],
      dnn_feature_columns=[
          tf.contrib.layers.embedding_column(language_column, dimension=1),
      ],
      dnn_hidden_units=[3, 3],
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=10)

  export_dir = tempfile.mkdtemp()
  input_feature_key = 'examples'

  def serving_input_fn():
    # Serving additionally needs a string placeholder for serialized input.
    features, targets = _input_fn()
    features[input_feature_key] = tf.placeholder(tf.string)
    return features, targets

  regressor.export(export_dir, serving_input_fn, input_feature_key,
                   use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
  """Tests regression with restarting training / evaluate."""

  def _input_fn(num_epochs=None):
    # Four rows of (y = x).
    labels = tf.constant([[100.], [3.], [2.], [2.]])
    features = {'x': tf.train.limit_epochs(
        tf.constant([[100.], [3.], [2.], [2.]]), num_epochs=num_epochs)}
    return features, labels

  model_dir = tempfile.mkdtemp()

  def new_estimator():
    # Estimators constructed here share the same model_dir, so a new one
    # restores whatever checkpoints the previous one wrote.
    return tf.contrib.learn.DNNLinearCombinedRegressor(
        linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
        dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        model_dir=model_dir,
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))

  predict_input_fn = functools.partial(_input_fn, num_epochs=1)
  classifier = new_estimator()
  classifier.fit(input_fn=_input_fn, steps=10)
  predictions = list(classifier.predict(input_fn=predict_input_fn))
  del classifier

  # A fresh estimator restored from the same model_dir must agree.
  classifier = new_estimator()
  predictions2 = list(classifier.predict(input_fn=predict_input_fn))
  self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
  """Tests training with partitioned variables."""

  def _input_fn(num_epochs=None):
    age = tf.train.limit_epochs(
        tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs)
    language = tf.SparseTensor(values=['en', 'fr', 'zh'],
                               indices=[[0, 0], [0, 1], [2, 0]],
                               dense_shape=[3, 2])
    return ({'age': age, 'language': language},
            tf.constant([1., 0., 0.2], dtype=tf.float32))

  # The given hash_bucket_size results in variables larger than the
  # default min_slice_size attribute, so the variables are partitioned.
  language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
      'language', hash_bucket_size=2e7)

  tf_config = {
      'cluster': {
          tf.contrib.learn.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
      }
  }
  with tf.test.mock.patch.dict('os.environ',
                               {'TF_CONFIG': json.dumps(tf_config)}):
    config = tf.contrib.learn.RunConfig(tf_random_seed=1)
    # Because we did not start a distributed cluster, we need to pass an
    # empty ClusterSpec, otherwise the device_setter will look for
    # distributed jobs, such as "/job:ps" which are not present.
    config._cluster_spec = tf.train.ClusterSpec({})

  regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
      linear_feature_columns=[
          language_column,
          tf.contrib.layers.real_valued_column('age')
      ],
      dnn_feature_columns=[
          tf.contrib.layers.embedding_column(language_column, dimension=1),
          tf.contrib.layers.real_valued_column('age')
      ],
      dnn_hidden_units=[3, 3],
      config=config)
  regressor.fit(input_fn=_input_fn, steps=100)
  scores = regressor.evaluate(input_fn=_input_fn, steps=1)
  self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
  """Tests that we can disable centered bias."""

  def _input_fn(num_epochs=None):
    age = tf.train.limit_epochs(
        tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs)
    language = tf.SparseTensor(values=['en', 'fr', 'zh'],
                               indices=[[0, 0], [0, 1], [2, 0]],
                               dense_shape=[3, 2])
    return ({'age': age, 'language': language},
            tf.constant([1., 0., 0.2], dtype=tf.float32))

  language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
      'language', hash_bucket_size=20)
  age_column = tf.contrib.layers.real_valued_column('age')
  regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
      linear_feature_columns=[language_column, age_column],
      dnn_feature_columns=[
          tf.contrib.layers.embedding_column(language_column, dimension=1),
          age_column,
      ],
      dnn_hidden_units=[3, 3],
      enable_centered_bias=False,
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=100)
  scores = regressor.evaluate(input_fn=_input_fn, steps=1)
  self.assertIn('loss', scores.keys())
def testLinearOnly(self):
  """Tests linear-only instantiation and training."""

  def _input_fn(num_epochs=None):
    age = tf.train.limit_epochs(
        tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs)
    language = tf.SparseTensor(values=['en', 'fr', 'zh'],
                               indices=[[0, 0], [0, 1], [2, 0]],
                               dense_shape=[3, 2])
    return ({'age': age, 'language': language},
            tf.constant([1., 0., 0.2], dtype=tf.float32))

  language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
      'language', hash_bucket_size=20)
  # No dnn_feature_columns / dnn_hidden_units: only the linear part runs.
  regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
      linear_feature_columns=[
          language_column,
          tf.contrib.layers.real_valued_column('age')
      ],
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=100)
  scores = regressor.evaluate(input_fn=_input_fn, steps=1)
  self.assertIn('loss', scores.keys())
def testDNNOnly(self):
  """Tests DNN-only instantiation and training."""

  def _input_fn(num_epochs=None):
    age = tf.train.limit_epochs(
        tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs)
    language = tf.SparseTensor(values=['en', 'fr', 'zh'],
                               indices=[[0, 0], [0, 1], [2, 0]],
                               dense_shape=[3, 2])
    return ({'age': age, 'language': language},
            tf.constant([1., 0., 0.2], dtype=tf.float32))

  language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
      'language', hash_bucket_size=20)
  # No linear_feature_columns: only the DNN part runs.
  regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
      dnn_feature_columns=[
          tf.contrib.layers.embedding_column(language_column, dimension=1),
          tf.contrib.layers.real_valued_column('age')
      ],
      dnn_hidden_units=[3, 3],
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=100)
  scores = regressor.evaluate(input_fn=_input_fn, steps=1)
  self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(tf.test.TestCase):
  """Tests feature_engineering_fn."""

  def testNoneFeatureEngineeringFn(self):

    def input_fn():
      # Four rows of (y = x).
      labels = tf.constant([[100.], [3.], [2.], [2.]])
      features = {'x': tf.constant([[100.], [3.], [2.], [2.]])}
      return features, labels

    def feature_engineering_fn(features, labels):
      # Discard the incoming batch and substitute one scaled 10x.
      _, _ = features, labels
      labels = tf.constant([[1000.], [30.], [20.], [20.]])
      features = {'x': tf.constant([[1000.], [30.], [20.], [20.]])}
      return features, labels

    def make_regressor(fe_fn=None):
      # Build identical regressors, optionally with a feature_engineering_fn.
      kwargs = dict(
          linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
          dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
          dnn_hidden_units=[3, 3],
          config=tf.contrib.learn.RunConfig(tf_random_seed=1))
      if fe_fn is not None:
        kwargs['feature_engineering_fn'] = fe_fn
      return tf.contrib.learn.DNNLinearCombinedRegressor(**kwargs)

    estimator_with_fe_fn = make_regressor(feature_engineering_fn)
    estimator_with_fe_fn.fit(input_fn=input_fn, steps=100)
    estimator_without_fe_fn = make_regressor()
    estimator_without_fe_fn.fit(input_fn=input_fn, steps=100)

    # The feature-engineered estimator learned on the 10x-scaled data...
    prediction_with_fe_fn = next(
        estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))
    self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
    # ...while the plain estimator saw the original scale.
    prediction_without_fe_fn = next(
        estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))
    self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
# Run all test cases in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
fcooper8472/useful_scripts | system_monitor.py | 1 | 2019 | import psutil
import time
import matplotlib
matplotlib.use('svg')
from matplotlib import pyplot as plt
# Sample free physical memory and used swap (both as percentages) once per
# second, then plot the two series to an SVG file.
times = []
mem_free_pct = []
swap_used_pct = []

start = time.time()
TIMEOUT_SECONDS = 7200  # 2 hours

try:
    while True:
        times.append(time.time() - start)
        # Current memory snapshot.
        mem = psutil.virtual_memory()
        swap = psutil.swap_memory()
        mem_free_pct.append(100.0 * float(mem.available) / float(mem.total))
        swap_used_pct.append(100.0 * float(swap.used) / float(swap.total))
        if times[-1] > TIMEOUT_SECONDS:
            break
        time.sleep(1.0)
except KeyboardInterrupt:
    # Ctrl-C ends sampling early and falls through to plotting.
    pass

# A KeyboardInterrupt may land between appends, leaving the lists with
# unequal lengths; truncate them all to the shortest.
if not (len(mem_free_pct) == len(times) == len(swap_used_pct)):
    shortest = min(len(mem_free_pct), len(swap_used_pct), len(times))
    times = times[0:shortest]
    mem_free_pct = mem_free_pct[0:shortest]
    swap_used_pct = swap_used_pct[0:shortest]

# Global plot styling.
plt.rc('font', family='monospace')
plt.rc('figure', figsize=[8, 4.5])  # inches
plt.rc('axes', linewidth=0.5, edgecolor='0.4', axisbelow=True)
plt.rc('xtick', labelsize=10)
plt.rc('ytick', labelsize=10)
plt.rc('xtick.major', size=0, pad=4)
plt.rc('ytick.major', size=0, pad=4)

plt.plot(times, mem_free_pct, label=r'Mem free (%)', color='#F39200')
plt.plot(times, swap_used_pct, label=r'Swap used (%)', color='#0072bd')
plt.xlim([times[0], times[-1]])
plt.ylim([0, 100])
plt.title('Memory usage', fontsize=14, y=1.05)
plt.xlabel(r'Time (sec)', fontsize=12, labelpad=20)
plt.ylabel(r'Memory (%)', fontsize=12, labelpad=20)
plt.gca().grid(b=True, which='major', color='0.4', linestyle='dotted',
               dash_capstyle='round')
plt.legend(fontsize=10)
plt.savefig('mem_free.svg', bbox_inches='tight', facecolor='0.9')
| bsd-3-clause |
pranavtbhat/EE219 | project3/part5.py | 1 | 3598 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_validation import KFold
import part1
def load_dataset():
    """Load the MovieLens 100k ratings file.

    Returns:
        A tuple (data, R, W) where ``data`` is the raw
        (user_id, item_id, rating, timestamp) records as an ndarray,
        ``R`` is the user x item rating matrix (0 where unrated), and
        ``W`` is the binary observation mask derived from R.
    """
    df = pd.read_csv(
        'ml-100k/u.data',
        delimiter='\t',
        names=['user_id', 'item_id', 'rating', 'timestamp'],
        # BUG FIX: u.data has no header row; header=0 treated the first
        # rating record as a header and silently discarded it. With
        # explicit names, header must be None to keep all 100k ratings.
        header=None
    )
    R = df.pivot_table(
        index=['user_id'],
        columns=['item_id'],
        values='rating',
        fill_value=0
    ).values
    # W is 1 where a rating was observed, 0 elsewhere.
    W = R.copy()
    W[W > 0] = 1
    return df.as_matrix(), R, W
def squared_error(R, W, U, V):
    """Sum of squared reconstruction errors over the masked entries.

    R is the rating matrix, W a (typically binary) weight mask, and U, V
    the factor matrices whose product approximates R.
    """
    residual = W * (R - U.dot(V))
    return np.sum(residual ** 2)
if __name__ == "__main__":
data, R_mat, W_mat = load_dataset()
L = 5
n_folds = 10
test_length = len(data) / n_folds
top_movies_order = []
kf = KFold(n=len(data), n_folds=10, shuffle=True)
hcv = []
mcv = []
tcv = []
pcv = []
for train_index, test_index in kf:
print "Fold: ", 10 - n_folds + 1
test_data = data[test_index]
R_train = W_mat
W_train = R_mat
for j in range(test_length):
W_train[test_data[j][0] - 1, test_data[j][1] - 1] = 0
U,V = part1.matrix_factorize(R_train, W_train, 100, reg_param=0.01)
R_predicted = 5 * np.dot(U,V)
R_predicted[R_train == 0] = -1
for i in range(max(data[:,0])):
user_ratings = R_predicted[i]
top_movies = user_ratings.argsort()[-max(data[:,1]):][::-1]
top_movies_order.append(top_movies)
threshold = 3
hv=[]
mv=[]
tv=[]
pv=[]
for l in range(1,(L+1)):
hit = 0
miss = 0
total = 0
precision = 0
for i in range(max(data[:,0])):
rec_indices = R_predicted[i,0:l]
for j in range(len(rec_indices)):
rating = R_predicted[i][rec_indices[j]]
if (rating < 0):
continue
if (rating > threshold):
hit = hit + 1
total = total + 1
precision += 1
else:
miss = miss + 1
total = total + 1
pv.append(precision/float(total))
hv.append(hit)
tv.append(total)
mv.append(miss)
hcv.append(hv)
mcv.append(mv)
tcv.append(tv)
pcv.append(pv)
n_folds -= 1
precision = np.sum(pcv,axis=0)
hits = np.sum(hcv,axis=0)
miss = np.sum(mcv,axis=0)
total = np.sum(tcv,axis=0)
hits = hits / (total.astype(float))
miss = miss / (total.astype(float))
precision = precision / 10.0
print "Hits ", hits
print "Misses", miss
print "Precision : ", precision
plt.figure(1)
plt.ylabel('Hit rate')
plt.xlabel('L')
plt.title('Hit rate vs L')
plt.scatter(range(1,(L+1)), hits, s=60, marker='o')
plt.plot(range(1,(L+1)),hits)
plt.savefig("plots/Hit vs L.png",format='png')
plt.clf()
plt.figure(1)
plt.ylabel('False alarm')
plt.xlabel('L')
plt.title('False Alarm vs L')
plt.scatter(range(1,(L+1)), miss, s=60, marker='o')
plt.plot(range(1,(L+1)),miss)
plt.savefig("plots/False Alarm vs L.png",format='png')
plt.clf()
plt.figure(1)
plt.ylabel('Hit rate')
plt.xlabel('False Alarm')
plt.title('Hit rate vs False Alarm')
plt.scatter(miss, hits, s=60, marker='o')
plt.plot(miss,hits)
plt.savefig("plots/Hit rate vs False Alarm.png",format='png')
plt.clf()
| unlicense |
jjinking/datsci | datsci/recsys.py | 1 | 4947 | """Recommender systems
"""
# Author : Jin Kim jjinking(at)gmail(dot)com
# Creation date : 2014.03.25
# Last Modified : 2014.03.27
#
# License : MIT
import numpy as np
import pandas as pd
import scipy as sp
import scipy.spatial
from collections import defaultdict
class RecommenderFrame(pd.DataFrame):
    '''
    A recommender system class, extends pandas.DataFrame
    '''

    def __init__(self, *args, **kwargs):
        '''
        Initializer, makes call to pandas DataFrame initializer
        Ensures that the data contains the following columns: user, item, rating
        '''
        super(RecommenderFrame, self).__init__(*args, **kwargs)
        # Require all three core rating columns to be present.
        if len({'user', 'item', 'rating'}.intersection(set(self.columns))) != 3:
            raise ValueError, "Column names must contain the following: user, item, rating"

    def create_matrix(self):
        '''
        Create matrix where the rows are the users and the columns are the items,
        and each element is a rating
        '''
        # Copy into a plain DataFrame so the subclass constructor (and its
        # column check) is not re-invoked on the intermediate frame.
        m = pd.DataFrame(self.values, columns=self.columns)
        m.set_index(['user','item'], inplace=True)
        # Pivot items into columns, then flatten the column MultiIndex to
        # bare item labels.
        m = m.unstack('item')
        m.columns = m.columns.get_level_values(1)
        return m
class CollabFilterFrame(RecommenderFrame):
    '''
    Class that implements collaborative filtering, extends Recommender
    Based on the collaborative filtering algorithm presented in
    Programming Collective Intelligence by Toby Segaran
    '''

    def similarity(self, u1, u2, method='pearson'):
        '''
        Return similarity(correlation) between two users using various distance functions
        method must be one of {pearson, kendall, spearman, manhattan, euclidean}
        For manhattan and euclidean, must return 1 / (1 + distance)
        since distance 0 indicates high correlation, and vice versa
        '''
        # Inner-join the two users' ratings on items both have rated.
        u1_data = self[self['user'] == u1]
        u2_data = self[self['user'] == u2]
        pair = pd.merge(u1_data, u2_data, how='inner', on='item')[['rating_x','rating_y']]
        # If there are no overlapping items between two users, return 0
        if pair.shape[0] == 0:
            return 0.0
        if method in {'pearson', 'kendall', 'spearman'}:
            # Correlation-based similarity straight from pandas.
            # NOTE(review): .ix is long-deprecated; .loc is the modern
            # equivalent — relevant if this is ever run on newer pandas.
            return pair.corr(method=method).ix['rating_x', 'rating_y']
        elif method == 'manhattan':
            d = scipy.spatial.distance.cityblock(pair['rating_x'], pair['rating_y'])
        else:
            # By default, use Euclidean distance
            #d = np.linalg.norm(pair['rating_x'] - pair['rating_y'])
            d = scipy.spatial.distance.euclidean(pair['rating_x'], pair['rating_y'])
        # Map a distance to a (0, 1] similarity score.
        return 1.0 / (1 + d)

    def get_user_matches(self, user, n=None, method='pearson'):
        '''
        Return n top matching users with given user
        If n is not set, return all users sorted by similarity scores from highest to lowest
        '''
        # Score every other user against `user`, then sort by similarity.
        scores = []
        for u in self['user'].drop_duplicates():
            if u != user:
                s = self.similarity(user, u, method=method)
                scores.append((s, u))
        return sorted(scores, reverse=True)[:n]

    def get_recommendations(self, user, n=None, method='pearson'):
        '''
        Return n top recommended items for given user
        If n is not set, return all items sorted by recommendation scores from highest to lowest
        '''
        # Create dataframe of users and scores
        rec_table = pd.DataFrame([(u,s) for s,u in self.get_user_matches(user, method=method)],
                                 columns=['user', 'score'])
        rec_table.set_index('user', inplace=True)
        # Remove users with similarity score <= 0
        rec_table = rec_table[rec_table['score'] > 0]
        # Merge with user ratings table
        rec_table = rec_table.join(self.create_matrix())
        # Create weighted scores of unseen items
        items_seen = set(self[self['user'] == user]['item'].dropna().drop_duplicates())
        items = list(set(self['item'].drop_duplicates()) - items_seen)
        sim_sum = defaultdict(float)
        for item in items:
            # Weight each rating by the rater's similarity to `user`, and
            # track the total similarity mass of users who rated the item.
            rec_table[item] = rec_table.apply(lambda row: row[item] * row['score'], axis=1)
            sim_sum[item] += rec_table[rec_table[item].notnull()]['score'].sum()
        # Create dataframe of rec_scores
        rec_scores = pd.DataFrame(rec_table[items].sum(), columns=['rec_score'])
        # Correct each recommendation score by the sum of the similarity scores of all users who rated the item
        rec_scores['simsum'] = rec_scores.apply(lambda row: sim_sum[row.name], axis=1)
        rec_scores['rec_score_corrected'] = rec_scores.apply(lambda row: row['rec_score'] / row['simsum'], axis=1)
        item_scores = rec_scores['rec_score_corrected'].to_dict().items()
        # Items nobody rated yield NaN scores; surface them as None.
        return sorted([(None if np.isnan(rec_score) else rec_score, item) for item,rec_score in item_scores],
                      reverse=True)[:n]
lukeiwanski/tensorflow-opencl | tensorflow/examples/learn/iris.py | 19 | 1651 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
  """Train and evaluate a DNN classifier on the Iris dataset."""
  # Load dataset and hold out 20% for evaluation.
  iris = tf.contrib.learn.datasets.load_dataset('iris')
  train_x, test_x, train_y, test_y = cross_validation.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)

  # Build 3 layer DNN with 10, 20, 10 units respectively.
  feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
      train_x)
  classifier = tf.contrib.learn.DNNClassifier(
      feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)

  # Fit, predict on the held-out split, and report accuracy.
  classifier.fit(train_x, train_y, steps=200)
  predictions = list(classifier.predict(test_x, as_iterable=True))
  score = metrics.accuracy_score(test_y, predictions)
  print('Accuracy: {0:f}'.format(score))


if __name__ == '__main__':
  tf.app.run()
| apache-2.0 |
INCF/pybids | bids/layout/layout.py | 1 | 55524 | """BIDSLayout class."""
import os
import json
import re
from collections import defaultdict
from io import open
from functools import partial, lru_cache
from itertools import chain
import copy
import warnings
import enum
import difflib
import sqlalchemy as sa
from bids_validator import BIDSValidator
from ..utils import listify, natural_sort
from ..external import inflect
from ..exceptions import (
BIDSDerivativesValidationError,
BIDSEntityError,
BIDSValidationError,
NoMatchError,
TargetError,
)
from .validation import (validate_root, validate_derivative_paths,
absolute_path_deprecation_warning,
indexer_arg_deprecation_warning)
from .writing import build_path, write_to_file
from .models import (Base, Config, BIDSFile, Entity, Tag)
from .index import BIDSLayoutIndexer
from .db import ConnectionManager
from .utils import (BIDSMetadata, parse_file_entities)
try:
    from os.path import commonpath
except ImportError:
    # Python 2 fallback: approximate os.path.commonpath by taking the
    # common string prefix and trimming it back to a directory boundary.
    def commonpath(paths):
        prefix = os.path.commonprefix(paths)
        return prefix if os.path.isdir(prefix) else os.path.dirname(prefix)
__all__ = ['BIDSLayout']
class BIDSLayout(object):
"""Layout class representing an entire BIDS dataset.
Parameters
----------
root : str
The root directory of the BIDS dataset.
validate : bool, optional
If True, all files are checked for BIDS compliance when first indexed,
and non-compliant files are ignored. This provides a convenient way to
restrict file indexing to only those files defined in the "core" BIDS
spec, as setting validate=True will lead files in supplementary folders
like derivatives/, code/, etc. to be ignored.
absolute_paths : bool, optional
If True, queries always return absolute paths.
If False, queries return relative paths (for files and
directories).
derivatives : bool or str or list, optional
Specifies whether and/or which
derivatives to to index. If True, all pipelines found in the
derivatives/ subdirectory will be indexed. If a str or list, gives
the paths to one or more derivatives directories to index. If False
or None, the derivatives/ directory is ignored during indexing, and
derivatives will have to be added manually via add_derivatives().
Note: derivatives datasets MUST contain a dataset_description.json
file in order to be indexed.
config : str or list or None, optional
Optional name(s) of configuration file(s) to use.
By default (None), uses 'bids'.
sources : :obj:`bids.layout.BIDSLayout` or list or None, optional
Optional BIDSLayout(s) from which the current BIDSLayout is derived.
config_filename : str
Optional name of filename within directories
that contains configuration information.
regex_search : bool
Whether to require exact matching (True) or regex
search (False, default) when comparing the query string to each
entity in .get() calls. This sets a default for the instance, but
can be overridden in individual .get() requests.
database_path : str
Optional path to directory containing SQLite database file index
for this BIDS dataset. If a value is passed and the folder
already exists, indexing is skipped. By default (i.e., if None),
an in-memory SQLite database is used, and the index will not
persist unless .save() is explicitly called.
reset_database : bool
If True, any existing directory specified in the
database_path argument is deleted, and the BIDS dataset provided
in the root argument is reindexed. If False, indexing will be
skipped and the existing database file will be used. Ignored if
database_path is not provided.
indexer: BIDSLayoutIndexer or callable
An optional BIDSLayoutIndexer instance to use for indexing, or any
callable that takes a BIDSLayout instance as its only argument. If
None, a new indexer with default parameters will be implicitly created.
indexer_kwargs: dict
Optional keyword arguments to pass onto the newly created
BIDSLayoutIndexer. Valid keywords are 'ignore', 'force_index',
'index_metadata', and 'config_filename'. Ignored if indexer is not
None.
"""
def __init__(self, root=None, validate=True, absolute_paths=True,
             derivatives=False, config=None, sources=None,
             regex_search=False, database_path=None, reset_database=False,
             indexer=None, **indexer_kwargs):
    """Initialize the layout: load an existing index database if one is
    available, otherwise validate the root and index the dataset, then
    optionally attach derivative sub-layouts. See the class docstring
    for parameter semantics."""
    if not absolute_paths:
        absolute_path_deprecation_warning()

    # These keyword args now belong to BIDSLayoutIndexer; warn if passed.
    ind_args = {'force_index', 'ignore', 'index_metadata', 'config_filename'}
    if ind_args & set(indexer_kwargs.keys()):
        indexer_arg_deprecation_warning()

    # Load from existing database file
    load_db = (database_path is not None and reset_database is False and
               ConnectionManager.exists(database_path))

    if load_db:
        self.connection_manager = ConnectionManager(database_path)
        info = self.connection_manager.layout_info
        # Overwrite init args with values in DB
        root = info.root
        absolute_paths = info.absolute_paths
        derivatives = info.derivatives
        config = info.config

    # Validate that a valid BIDS project exists at root
    root, description = validate_root(root, validate)

    self.root = root
    self.description = description
    self.absolute_paths = absolute_paths
    self.derivatives = {}
    self.sources = sources
    self.regex_search = regex_search

    # Initialize a completely new layout and index the dataset
    if not load_db:
        init_args = dict(root=root, absolute_paths=absolute_paths,
                         derivatives=derivatives, config=config)
        self.connection_manager = ConnectionManager(
            database_path, reset_database, config, init_args)
        # Fall back to a default indexer when none was supplied.
        if indexer is None:
            indexer = BIDSLayoutIndexer(validate=validate, **indexer_kwargs)
        indexer(self)

    # Add derivatives if any are found
    if derivatives:
        # derivatives=True means "index the conventional derivatives/ dir".
        if derivatives is True:
            derivatives = os.path.join(root, 'derivatives')
        self.add_derivatives(
            derivatives, parent_database_path=database_path,
            validate=validate, absolute_paths=absolute_paths,
            derivatives=None, sources=self, config=None,
            regex_search=regex_search, reset_database=reset_database,
            indexer=indexer, **indexer_kwargs)
def __getattr__(self, key):
    """Dynamically inspect missing methods for get_<entity>() calls
    and return a partial function of get() if a match is found."""
    if not key.startswith('get_'):
        # Not a dynamic getter; fail with the standard message.
        raise AttributeError("%s object has no attribute named %r" %
                             (self.__class__.__name__, key))
    ent_name = key.replace('get_', '')
    known_entities = self.get_entities()
    if ent_name not in known_entities:
        # The attribute may use a plural form; try the singular.
        singular = inflect.engine().singular_noun(ent_name)
        if singular not in known_entities:
            raise BIDSEntityError(
                "'get_{}' can't be called because '{}' isn't a "
                "recognized entity name.".format(ent_name, ent_name))
        ent_name = singular
    return partial(self.get, return_type='id', target=ent_name)
def __repr__(self):
    """Summarize the layout: root tail plus subject/session/run counts."""
    # Unique subject labels (group_by collapses duplicate values).
    subject_tags = self.session.query(Tag).filter_by(
        entity_name='subject').group_by(Tag._value)
    n_subjects = len({t.value for t in subject_tags})

    def _count_pairs(entity_name, keep=lambda tag: True):
        # Count distinct (value, subject) pairs for the given entity,
        # ignoring tags whose file has no subject.
        tags = self.session.query(Tag).filter_by(entity_name=entity_name)
        pairs = {
            (tag.value, tag.file.entities.get('subject'))
            for tag in tags
            if keep(tag) and tag.file.entities.get('subject')
        }
        return len(pairs)

    n_sessions = _count_pairs('session')
    # Only integer run values are counted.
    n_runs = _count_pairs('run', keep=lambda tag: isinstance(tag.value, int))
    root_tail = self.root[-30:]
    return ("BIDS Layout: ...{} | Subjects: {} | Sessions: {} | "
            "Runs: {}".format(root_tail, n_subjects, n_sessions, n_runs))
def _in_scope(self, scope):
    """Determine whether current BIDSLayout is in the passed scope.

    Parameters
    ----------
    scope : str or list
        The intended scope(s). Each value must be one of 'all', 'raw',
        'derivatives', or a pipeline name.
    """
    scope = listify(scope)
    if 'all' in scope:
        return True
    # We assume something is a BIDS-derivatives dataset if it either has a
    # defined pipeline name, or is applying the 'derivatives' rules.
    pipeline = self.description.get("PipelineDescription", {}).get("Name")
    if 'derivatives' in self.config:
        return 'derivatives' in scope or pipeline in scope
    return 'raw' in scope
def _get_layouts_in_scope(self, scope):
    """Return all layouts in the passed scope."""
    if scope == 'self':
        return [self]

    def _walk(layout):
        # Depth-first traversal of this layout and its derivatives.
        found = [layout]
        for child in layout.derivatives.values():
            found.extend(_walk(child))
        return found

    in_scope = [lay for lay in _walk(self) if lay._in_scope(scope)]
    # Deduplicate; ordering is not guaranteed (matches original behavior).
    return list(set(in_scope))
def _sanitize_query_dtypes(self, entities):
    """Automatically convert entity query values to correct dtypes.

    Parameters
    ----------
    entities : dict
        Mapping of entity name -> query value (scalar, list, or tuple).

    Returns
    -------
    dict
        A copy of ``entities`` with each value coerced via the matching
        Entity's ``_astype``; values are left untouched when coercion
        fails or when the value is an enum (e.g., Query.ANY/NONE).
    """
    entities = entities.copy()
    names = list(entities.keys())
    ents = {e.name: e for e in
            self.session.query(Entity)
            .filter(Entity.name.in_(names)).all()}
    # Fail silently because the DB may still know how to reconcile
    # type differences.
    for name, val in entities.items():
        if isinstance(val, enum.Enum):
            continue
        try:
            if isinstance(val, (list, tuple)):
                entities[name] = [ents[name]._astype(v) for v in val]
            else:
                entities[name] = ents[name]._astype(val)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed; expected failures
            # (unknown entity -> KeyError, bad cast -> ValueError)
            # still fall through silently as intended.
            pass
    return entities
@property
def session(self):
    """The SQLAlchemy session owned by the connection manager."""
    manager = self.connection_manager
    return manager.session
@property
@lru_cache()
def config(self):
    # Map of config name -> Config record, loaded once from the DB and
    # memoized for the life of the process.
    # NOTE(review): lru_cache on an instance method keys the cache on
    # `self` and keeps every instance alive for the cache's lifetime
    # (ruff B019); functools.cached_property may be a better fit —
    # confirm cache-invalidation expectations before changing.
    return {c.name: c for c in self.session.query(Config).all()}
@property
def entities(self):
    """Get the entities.

    Convenience alias for :meth:`get_entities` with default arguments
    (all scopes, filename and metadata entities alike).
    """
    return self.get_entities()
@property
def files(self):
    """Get the files.

    Convenience alias for :meth:`get_files` with default arguments
    (all scopes); returns a dict mapping file path -> BIDSFile.
    """
    return self.get_files()
@classmethod
def load(cls, database_path):
    """Construct a BIDSLayout from a previously saved index.

    All initialization parameters are read from the JSON sidecar
    stored alongside the database, so only the path is required.

    Parameters
    ----------
    database_path : str, Path
        The path to the desired database folder. If a relative path is
        passed, it is assumed to be relative to the BIDSLayout root
        directory.

    Returns
    -------
    BIDSLayout
        A layout backed by the existing on-disk index.
    """
    return cls(database_path=database_path)
def save(self, database_path, replace_connection=True):
    """Persist the current index as a SQLite3 DB at the given location.

    Only needed when no database_path was given at initialization; an
    index created with a database_path is already written through.

    Parameters
    ----------
    database_path : str
        The path to the desired database folder. If a relative path is
        passed, it is assumed to be relative to the BIDSLayout root
        directory.
    replace_connection : bool, optional
        If True, all subsequent connections use the newly created
        database, so later index changes are reflected in the file.
        If False, the previous database remains active and further
        changes are only persisted by calling save() again.
    """
    self.connection_manager = self.connection_manager.save_database(
        database_path, replace_connection)
    # Each derivative pipeline is saved into a subfolder named after it.
    for pipeline_name, deriv_layout in self.derivatives.items():
        deriv_layout.save(os.path.join(database_path, pipeline_name))
def get_entities(self, scope='all', metadata=None):
    """Get entities for all layouts in the specified scope.

    Parameters
    ----------
    scope : str
        Which BIDSLayouts' entities to extract. See
        :obj:`bids.layout.BIDSLayout.get` docstring for valid values.
    metadata : bool or None
        If None (default), all available entities are returned. If
        True, only entities found in metadata files are returned; if
        False, only filename-defined entities are returned.

    Returns
    -------
    dict
        Mapping of entity name -> Entity instance.
    """
    # TODO: memoize results
    found = {}
    for layout in self._get_layouts_in_scope(scope):
        q = layout.session.query(Entity)
        if metadata is not None:
            q = q.filter_by(is_metadata=metadata)
        # Later layouts overwrite earlier ones on name collisions.
        for ent in q.all():
            found[ent.name] = ent
    return found
def get_files(self, scope='all'):
    """Get BIDSFiles for all layouts in the specified scope.

    Parameters
    ----------
    scope : str
        Which BIDSLayouts' files to extract. See
        :obj:`bids.layout.BIDSLayout.get` docstring for valid values.

    Returns
    -------
    dict
        Mapping of file path -> :obj:`bids.layout.BIDSFile` instance.
    """
    # TODO: memoize results
    found = {}
    for layout in self._get_layouts_in_scope(scope):
        # Later layouts overwrite earlier ones on path collisions.
        for bids_file in layout.session.query(BIDSFile).all():
            found[bids_file.path] = bids_file
    return found
def clone(self):
    """Return a deep copy of the current BIDSLayout.

    The copy is fully independent: mutating it (e.g. adding
    derivatives) does not affect the original layout.
    """
    return copy.deepcopy(self)
def parse_file_entities(self, filename, scope='all', entities=None,
                        config=None, include_unmatched=False):
    """Parse the passed filename for entity/value pairs.

    Parameters
    ----------
    filename : str
        The filename to parse for entity values.
    scope : str or list, optional
        Which BIDSLayouts' entities to use for extraction. See
        :obj:`bids.layout.BIDSLayout.get` docstring for valid values.
    entities : list or None, optional
        Explicit Entity instances to use. If passed, both scope and
        config are ignored.
    config : str or :obj:`bids.layout.models.Config` or list or None, optional
        Config objects (or paths to JSON config files) supplying the
        Entity definitions. If passed, scope is ignored.
    include_unmatched : bool, optional
        If True, unmatched entities appear in the result with value
        None; if False (default) they are omitted.

    Returns
    -------
    dict
        Mapping of Entity name -> value extracted from the filename.
    """
    # Only derive configs from scope when neither override is given.
    if entities is None and config is None:
        unique_configs = set()
        for layout in self._get_layouts_in_scope(scope):
            unique_configs.update(layout.config.values())
        config = list(unique_configs)
    return parse_file_entities(filename, entities, config,
                               include_unmatched)
def add_derivatives(self, path, parent_database_path=None, **kwargs):
    """Add BIDS-Derivatives datasets to tracking.

    Parameters
    ----------
    path : str or list
        One or more paths to BIDS-Derivatives datasets. Each path may
        point to a derivatives/ directory containing pipeline
        directories, or to a single pipeline directory.
    parent_database_path : str
        If not None, each derivative's index is written to a folder
        named after its pipeline, nested inside this parent database
        folder.
    kwargs : dict
        Extra keyword arguments forwarded to BIDSLayout() for each
        derivative dataset.

    Notes
    -----
    Every derivatives directory intended for indexing MUST contain a
    valid dataset_description.json file. See the BIDS-Derivatives
    specification for details.
    """
    deriv_paths = validate_derivative_paths(listify(path), self, **kwargs)
    # Fill in defaults for config and sources unless the caller set them.
    kwargs['config'] = kwargs.get('config') or ['bids', 'derivatives']
    kwargs['sources'] = kwargs.get('sources') or self
    for pipeline_name, deriv_root in deriv_paths.items():
        if parent_database_path:
            kwargs['database_path'] = os.path.join(
                parent_database_path, pipeline_name)
        self.derivatives[pipeline_name] = BIDSLayout(deriv_root, **kwargs)
def to_df(self, metadata=False, **filters):
    """Return information for BIDSFiles tracked in Layout as pd.DataFrame.

    Parameters
    ----------
    metadata : bool, optional
        If True, includes columns for all metadata fields.
        If False, only filename-based entities are included as columns.
    filters : dict, optional
        Optional keyword arguments passed on to get(). This allows
        one to easily select only a subset of files for export.

    Returns
    -------
    :obj:`pandas.DataFrame`
        A pandas DataFrame, where each row is a file, and each column is
        a tracked entity. NaNs are injected whenever a file has no
        value for a given attribute.

    Raises
    ------
    ImportError
        If pandas is not installed.
    """
    try:
        import pandas as pd
    except ImportError:
        raise ImportError('Missing dependency: "pandas"')
    # TODO: efficiency could probably be improved further by joining the
    # BIDSFile and Tag tables and running a single query. But this would
    # require refactoring the below to use _build_file_query, which will
    # in turn likely require generalizing the latter.
    files = self.get(**filters)
    file_paths = [f.path for f in files]
    query = self.session.query(Tag).filter(Tag.file_path.in_(file_paths))
    if not metadata:
        query = query.join(Entity).filter(Entity.is_metadata == False)
    tags = query.all()
    tags = [[t.file_path, t.entity_name, t.value] for t in tags]
    data = pd.DataFrame(tags, columns=['path', 'entity', 'value'])
    # Keyword arguments are required here: positional arguments to
    # DataFrame.pivot were deprecated in pandas 1.1 and removed in 2.0.
    data = data.pivot(index='path', columns='entity', values='value')
    # Add in orphaned files with no Tags. Maybe make this an argument?
    orphans = list(set(file_paths) - set(data.index))
    for o in orphans:
        # Explicit dtype avoids the pandas warning/error for empty
        # Series construction without a dtype.
        data.loc[o] = pd.Series(dtype=object)
    return data.reset_index()
def get(self, return_type='object', target=None, scope='all',
        regex_search=False, absolute_paths=None, invalid_filters='error',
        **filters):
    """Retrieve files and/or metadata from the current Layout.

    Parameters
    ----------
    return_type : str, optional
        Type of result to return. Valid values:
        'object' (default): return a list of matching BIDSFile objects.
        'file' or 'filename': return a list of matching filenames.
        'dir': return a list of directories.
        'id': return a list of unique IDs. Must be used together
        with a valid target.
    target : str, optional
        Optional name of the target entity to get results for
        (only used if return_type is 'dir' or 'id').
    scope : str or list, optional
        Scope of the search space. If passed, only
        nodes/directories that match the specified scope will be
        searched. Possible values include:
        'all' (default): search all available directories.
        'derivatives': search all derivatives directories.
        'raw': search only BIDS-Raw directories.
        'self': search only the directly called BIDSLayout.
        <PipelineName>: the name of a BIDS-Derivatives pipeline.
    regex_search : bool or None, optional
        Whether to require exact matching
        (False) or regex search (True) when comparing the query string
        to each entity.
    absolute_paths : bool, optional
        Optionally override the instance-wide option
        to report either absolute or relative (to the top of the
        dataset) paths. If None, will fall back on the value specified
        at BIDSLayout initialization.
    invalid_filters (str): Controls behavior when named filters are
        encountered that don't exist in the database (e.g., in the case of
        a typo like subbject='0.1'). Valid values:
        'error' (default): Raise an explicit error.
        'drop': Silently drop invalid filters (equivalent to not having
        passed them as arguments in the first place).
        'allow': Include the invalid filters in the query, resulting
        in no results being returned.
    filters : dict
        Any optional key/values to filter the entities on.
        Keys are entity names, values are regexes to filter on. For
        example, passing filters={'subject': 'sub-[12]'} would return
        only files that match the first two subjects. In addition to
        ordinary data types, the following enums are defined (in the
        Query class):
        * Query.NONE: The named entity must not be defined.
        * Query.ANY: the named entity must be defined, but can have any
        value.

    Returns
    -------
    list of :obj:`bids.layout.BIDSFile` or str
        A list of BIDSFiles (default) or strings (see return_type).
    """
    if absolute_paths is False:
        absolute_path_deprecation_warning()
    layouts = self._get_layouts_in_scope(scope)
    entities = self.get_entities()
    # error check on users accidentally passing in filters
    if isinstance(filters.get('filters'), dict):
        raise RuntimeError('You passed in filters as a dictionary named '
                           'filters; please pass the keys in as named '
                           'keywords to the `get()` call. For example: '
                           '`layout.get(**filters)`.')
    # Strip leading periods if extensions were passed
    if 'extension' in filters and 'bids' in self.config:
        # XXX 0.14: Disable drop_dot option
        # Normalize extension values to match how the active config
        # stores them (with or without the leading dot), so equality
        # filters hit the DB values.
        drop_dot = (self.config['bids'].entities['extension'].pattern ==
                    '[._]*[a-zA-Z0-9]*?\\.([^/\\\\]+)$')
        exts = listify(filters['extension'])
        if drop_dot:
            filters['extension'] = [x.lstrip('.') if isinstance(x, str) else x
                                    for x in exts]
        else:
            filters['extension'] = ['.' + x.lstrip('.') if isinstance(x, str) else x
                                    for x in exts]
    # Validate filter names against known entities, honoring the
    # requested invalid_filters policy ('allow' skips validation).
    if invalid_filters != 'allow':
        bad_filters = set(filters.keys()) - set(entities.keys())
        if bad_filters:
            if invalid_filters == 'drop':
                for bad_filt in bad_filters:
                    filters.pop(bad_filt)
            elif invalid_filters == 'error':
                first_bad = list(bad_filters)[0]
                msg = "'{}' is not a recognized entity. ".format(first_bad)
                ents = list(entities.keys())
                suggestions = difflib.get_close_matches(first_bad, ents)
                if suggestions:
                    msg += "Did you mean {}? ".format(suggestions)
                raise ValueError(msg + "If you're sure you want to impose "
                                 "this constraint, set "
                                 "invalid_filters='allow'.")
    # Provide some suggestions if target is specified and invalid.
    if target is not None and target not in entities:
        potential = list(entities.keys())
        suggestions = difflib.get_close_matches(target, potential)
        if suggestions:
            message = "Did you mean one of: {}?".format(suggestions)
        else:
            message = "Valid targets are: {}".format(potential)
        raise TargetError(("Unknown target '{}'. " + message)
                          .format(target))
    # Run the (already-validated) filters against every layout in scope
    # and pool the matches.
    results = []
    for l in layouts:
        query = l._build_file_query(filters=filters,
                                    regex_search=regex_search)
        # NOTE: The following line, when uncommented, eager loads
        # associations. This was introduced in order to prevent sessions
        # from randomly detaching. It should be fixed by setting
        # expire_on_commit at session creation, but let's leave this here
        # for another release or two to make sure we don't have any further
        # problems.
        # query = query.options(joinedload(BIDSFile.tags)
        #                       .joinedload(Tag.entity))
        results.extend(query.all())
    # Convert to relative paths if needed
    if absolute_paths is None:  # can be overloaded as option to .get
        absolute_paths = self.absolute_paths
    if not absolute_paths:
        # Copy each result before rewriting .path so the DB-backed
        # objects are not mutated in place.
        for i, fi in enumerate(results):
            fi = copy.copy(fi)
            fi.path = os.path.relpath(fi.path, self.root)
            results[i] = fi
    if return_type.startswith('file'):
        # Covers both 'file' and 'filename'.
        results = natural_sort([f.path for f in results])
    elif return_type in ['id', 'dir']:
        if target is None:
            raise TargetError('If return_type is "id" or "dir", a valid '
                              'target entity must also be specified.')
        results = [x for x in results if target in x.entities]
        if return_type == 'id':
            results = list(set([x.entities[target] for x in results]))
            results = natural_sort(results)
        elif return_type == 'dir':
            template = entities[target].directory
            if template is None:
                raise ValueError('Return type set to directory, but no '
                                 'directory template is defined for the '
                                 'target entity (\"%s\").' % target)
            # Construct regex search pattern from target directory template
            template = self.root + template
            to_rep = re.findall(r'{(.*?)\}', template)
            for ent in to_rep:
                patt = entities[ent].pattern
                template = template.replace('{%s}' % ent, patt)
            # Anchor the pattern so only the innermost directory level
            # matches (no path separators after the template).
            template += r'[^\%s]*$' % os.path.sep
            matches = [
                f.dirname if absolute_paths else os.path.relpath(f.dirname, self.root)  # noqa: E501
                for f in results
                if re.search(template, f.dirname)
            ]
            results = natural_sort(list(set(matches)))
        else:
            # NOTE(review): this message omits 'object' and says 'tuple';
            # presumably a stale message — confirm before changing.
            raise ValueError("Invalid return_type specified (must be one "
                             "of 'tuple', 'filename', 'id', or 'dir'.")
    else:
        results = natural_sort(results, 'path')
    return results
def get_file(self, filename, scope='all'):
    """Return the BIDSFile object with the specified path.

    Parameters
    ----------
    filename : str
        The path of the file to retrieve. Either an absolute path, or
        relative to the root of this BIDSLayout.
    scope : str or list, optional
        Which BIDSLayouts to search. See :obj:`BIDSLayout.get`
        docstring for valid values. Default is 'all'.

    Returns
    -------
    :obj:`bids.layout.BIDSFile` or None
        The first matching file across layouts, or None if no layout
        contains it.
    """
    # Relative filenames are resolved against the layout root.
    target = os.path.abspath(os.path.join(self.root, filename))
    for layout in self._get_layouts_in_scope(scope):
        match = (layout.session.query(BIDSFile)
                 .filter_by(path=target).first())
        if match:
            return match
    return None
def _build_file_query(self, **kwargs):
    """Build a SQLAlchemy query over BIDSFile rows matching the passed
    entity filters.

    Keyword Arguments
    -----------------
    filters : dict
        Mapping of entity name -> value(s) to filter on. Values may be
        scalars, lists/tuples, None, or Query enums.
    regex_search : bool
        If True, string values are matched with the REGEXP operator
        instead of equality/IN.
    """
    # Only real files (not directories) are candidates.
    query = self.session.query(BIDSFile).filter_by(is_dir=False)
    filters = kwargs.get('filters')
    # Entity filtering
    if filters:
        query = query.join(BIDSFile.tags)
        regex = kwargs.get('regex_search', False)
        filters = self._sanitize_query_dtypes(filters)
        for name, val in filters.items():
            # A single-element list behaves like its scalar element.
            if isinstance(val, (list, tuple)) and len(val) == 1:
                val = val[0]
            if val is None or isinstance(val, enum.Enum):
                # Presence/absence filters: Query.ANY keeps files that
                # have the entity at all; None (and other enum values)
                # keeps those that lack it, via set difference.
                name_clause = query.filter(BIDSFile.tags.any(entity_name=name))
                if val == Query.ANY:
                    query = name_clause
                else:
                    query = query.except_(name_clause)
                continue
            if regex:
                if isinstance(val, (list, tuple)):
                    # Any one of the patterns may match (OR semantics).
                    val_clause = sa.or_(*[Tag._value.op('REGEXP')(str(v))
                                          for v in val])
                else:
                    val_clause = Tag._value.op('REGEXP')(str(val))
            else:
                if isinstance(val, (list, tuple)):
                    val_clause = Tag._value.in_(val)
                else:
                    val_clause = Tag._value == val
            # The file must have a tag with this entity name AND a
            # matching value; each filter narrows the query further.
            subq = sa.and_(Tag.entity_name == name, val_clause)
            query = query.filter(BIDSFile.tags.any(subq))
    return query
def get_collections(self, level, types=None, variables=None, merge=False,
                    sampling_rate=None, skip_empty=False, **kwargs):
    """Return one or more variable Collections in the BIDS project.

    Parameters
    ----------
    level : {'run', 'session', 'subject', 'dataset'}
        The level of analysis to return variables for.
    types : str or list
        Types of variables to retrieve, per the BIDS filename
        conventions ('events', 'physio', 'stim', 'scans',
        'participants', 'sessions', 'regressors'). Default is None.
    variables : list
        Optional variable names to return. If None, all available
        variables are returned.
    merge : bool
        If True, variables across all observations of the current
        level are merged into one collection; if False, a list of
        per-observation collections is returned.
    sampling_rate : int or str
        If level='run', the sampling rate to pass onto the returned
        :obj:`bids.variables.collections.BIDSRunVariableCollection`.
    skip_empty : bool
        Whether to skip Variables with no rows/records after any
        filtering operations (e.g. dropping NaNs).
    kwargs
        Additional arguments forwarded to
        :obj:`bids.variables.io.load_variables`.

    Returns
    -------
    list of :obj:`bids.variables.collections.BIDSVariableCollection`
    or :obj:`bids.variables.collections.BIDSVariableCollection`
        A list if merge=False; a single collection if merge=True.
    """
    from bids.variables import load_variables
    var_index = load_variables(self, types=types, levels=level,
                               skip_empty=skip_empty, **kwargs)
    return var_index.get_collections(level, variables, merge,
                                     sampling_rate=sampling_rate)
def get_metadata(self, path, include_entities=False, scope='all'):
    """Return metadata found in JSON sidecars for the specified file.

    Parameters
    ----------
    path : str
        Path to the file to get metadata for.
    include_entities : bool, optional
        If True, filename-derived entities are included alongside the
        JSON-sidecar metadata in the returned dictionary.
    scope : str or list, optional
        The scope of the search space ('all', 'raw', 'self',
        'derivatives', or a BIDS-Derivatives pipeline name).

    Returns
    -------
    dict
        Key/value pairs extracted from all of the target file's
        associated JSON sidecars.

    Notes
    -----
    Where the same key appears in multiple sidecars, values from files
    closer to the input filename take precedence, per the BIDS
    inheritance rules.
    """
    md = BIDSMetadata(str(path))
    for layout in self._get_layouts_in_scope(scope):
        q = (layout.session.query(Tag)
             .join(BIDSFile)
             .filter(BIDSFile.path == path))
        if not include_entities:
            q = q.join(Entity).filter(Entity.is_metadata == True)
        tags = q.all()
        if not tags:
            continue
        # First layout with any matching tags wins.
        md.update({t.entity_name: t.value for t in tags})
        return md
    return md
def get_dataset_description(self, scope='self', all_=False):
    """Return contents of dataset_description.json.

    Parameters
    ----------
    scope : str
        Only descriptions of BIDSLayouts matching this scope are
        returned. See :obj:`bids.layout.BIDSLayout.get` docstring for
        valid values. Defaults to 'self' -- the directly-called layout.
    all_ : bool
        If True, returns descriptions for all matching layouts as a
        list; if False (default), returns only the first layout's.

    Returns
    -------
    dict or list of dict
        A dictionary or list of dictionaries (depending on all_).
    """
    layouts = self._get_layouts_in_scope(scope)
    if all_:
        return [layout.get_file('dataset_description.json').get_dict()
                for layout in layouts]
    # Only touch the first layout's description when all_ is False.
    first = layouts[0]
    return first.get_file('dataset_description.json').get_dict()
def get_nearest(self, path, return_type='filename', strict=True,
                all_=False, ignore_strict_entities='extension',
                full_search=False, **filters):
    """Walk up file tree from specified path and return nearest matching file(s).

    Parameters
    ----------
    path (str): The file to search from.
    return_type (str): What to return; must be one of 'filename'
        (default) or 'tuple'.
    strict (bool): When True, all entities present in both the input
        path and the target file(s) must match perfectly. When False,
        files will be ordered by the number of matching entities, and
        partial matches will be allowed.
    all_ (bool): When True, returns all matching files. When False
        (default), only returns the first match.
    ignore_strict_entities (str, list): Optional entity/entities to
        exclude from strict matching when strict is True. This allows
        one to search, e.g., for files of a different type while
        matching all other entities perfectly by passing
        ignore_strict_entities=['type']. Ignores extension by default.
    full_search (bool): If True, searches all indexed files, even if
        they don't share a common root with the provided path. If
        False, only files that share a common root will be scanned.
    filters : dict
        Optional keywords to pass on to :obj:`bids.layout.BIDSLayout.get`.
    """
    path = os.path.abspath(path)
    # Make sure we have a valid suffix
    if not filters.get('suffix'):
        f = self.get_file(path)
        if 'suffix' not in f.entities:
            raise BIDSValidationError(
                "File '%s' does not have a valid suffix, most "
                "likely because it is not a valid BIDS file." % path
            )
        filters['suffix'] = f.entities['suffix']
    # Collect matches for all entities
    # Entity values are extracted directly from the path string via
    # each entity's regex, then coerced to the entity's dtype.
    entities = {}
    for ent in self.get_entities(metadata=False).values():
        m = ent.regex.search(path)
        if m:
            entities[ent.name] = ent._astype(m.group(1))
    # Remove any entities we want to ignore when strict matching is on
    if strict and ignore_strict_entities is not None:
        for k in listify(ignore_strict_entities):
            entities.pop(k, None)
    # Get candidate files
    results = self.get(**filters)
    # Make a dictionary of directories --> contained files
    folders = defaultdict(list)
    for f in results:
        folders[f.dirname].append(f)
    # Build list of candidate directories to check
    # Walk from the input path up to the filesystem root, collecting
    # any ancestor directory that holds candidate files; nearer
    # directories come first, which drives the "nearest" semantics.
    search_paths = []
    while True:
        if path in folders and folders[path]:
            search_paths.append(path)
        parent = os.path.dirname(path)
        if parent == path:
            break
        path = parent
    if full_search:
        # Append all remaining non-empty folders after the ancestors.
        unchecked = set(folders.keys()) - set(search_paths)
        search_paths.extend(path for path in unchecked if folders[path])
    def count_matches(f):
        # Count the number of entities shared with the passed file
        f_ents = f.entities
        keys = set(entities.keys()) & set(f_ents.keys())
        shared = len(keys)
        # Returns [shared-entity count, matching-value count].
        return [shared, sum([entities[k] == f_ents[k] for k in keys])]
    matches = []
    for path in search_paths:
        # Sort by number of matching entities. Also store number of
        # common entities, for filtering when strict=True.
        num_ents = [[f] + count_matches(f) for f in folders[path]]
        # Filter out imperfect matches (i.e., where number of common
        # entities does not equal number of matching entities).
        if strict:
            num_ents = [f for f in num_ents if f[1] == f[2]]
        num_ents.sort(key=lambda x: x[2], reverse=True)
        if num_ents:
            for f_match in num_ents:
                matches.append(f_match[0])
            # Stop at the first directory with any match unless the
            # caller asked for all matches.
            if not all_:
                break
    matches = [match.path if return_type.startswith('file')
               else match for match in matches]
    return matches if all_ else matches[0] if matches else None
def get_bvec(self, path, **kwargs):
    """Return the path of the bvec file nearest to the passed path."""
    matches = self.get_nearest(path, extension='.bvec', suffix='dwi',
                               all_=True, **kwargs)
    # listify guards against a scalar result; keep only the nearest.
    return listify(matches)[0]
def get_bval(self, path, **kwargs):
    """Return the path of the bval file nearest to the passed path."""
    matches = self.get_nearest(path, suffix='dwi', extension='.bval',
                               all_=True, **kwargs)
    # listify guards against a scalar result; keep only the nearest.
    return listify(matches)[0]
def get_fieldmap(self, path, return_list=False):
    """Get fieldmap(s) for specified path.

    With return_list=False (default), returns the single matching
    fieldmap dict, None when there is none, and raises when several
    are found.
    """
    fieldmaps = self._get_fieldmaps(path)
    if return_list:
        return fieldmaps
    if not fieldmaps:
        return None
    if len(fieldmaps) > 1:
        raise ValueError("More than one fieldmap found, but the "
                         "'return_list' argument was set to False. "
                         "Either ensure that there is only one "
                         "fieldmap for this image, or set the "
                         "'return_list' argument to True and handle "
                         "the result as a list.")
    return fieldmaps[0]
def _get_fieldmaps(self, path):
    """Collect fieldmap file groupings for the same subject whose JSON
    'IntendedFor' metadata references the target path.

    Returns a list of dicts; each dict maps component roles (e.g.
    'phasediff', 'magnitude1') to file paths and includes a 'suffix'
    key identifying the fieldmap type.
    """
    sub = self.parse_file_entities(path)['subject']
    fieldmap_set = []
    # Any of these suffixes marks the anchor image of a fieldmap set.
    suffix = '(phase1|phasediff|epi|fieldmap)'
    files = self.get(subject=sub, suffix=suffix, regex_search=True,
                     extension=['.nii.gz', '.nii'])
    for file in files:
        metadata = self.get_metadata(file.path)
        if metadata and "IntendedFor" in metadata.keys():
            intended_for = listify(metadata["IntendedFor"])
            # IntendedFor entries are relative paths, so match by
            # path suffix rather than equality.
            if any([path.endswith(_suff) for _suff in intended_for]):
                cur_fieldmap = {}
                if file.entities['suffix'] == "phasediff":
                    # Companion images share the anchor's filename,
                    # differing only in the suffix substring.
                    cur_fieldmap = {"phasediff": file.path,
                                    "magnitude1": file.path.replace(
                                        "phasediff", "magnitude1"),
                                    "suffix": "phasediff"}
                    magnitude2 = file.path.replace(
                        "phasediff", "magnitude2")
                    # magnitude2 is optional; include it only if present.
                    if os.path.isfile(magnitude2):
                        cur_fieldmap['magnitude2'] = magnitude2
                elif file.entities['suffix'] == "phase1":
                    cur_fieldmap["phase1"] = file.path
                    cur_fieldmap["magnitude1"] = \
                        file.path.replace("phase1", "magnitude1")
                    cur_fieldmap["phase2"] = \
                        file.path.replace("phase1", "phase2")
                    cur_fieldmap["magnitude2"] = \
                        file.path.replace("phase1", "magnitude2")
                    cur_fieldmap["suffix"] = "phase"
                elif file.entities['suffix'] == "epi":
                    cur_fieldmap["epi"] = file.path
                    cur_fieldmap["suffix"] = "epi"
                elif file.entities['suffix'] == "fieldmap":
                    cur_fieldmap["fieldmap"] = file.path
                    cur_fieldmap["magnitude"] = \
                        file.path.replace("fieldmap", "magnitude")
                    cur_fieldmap["suffix"] = "fieldmap"
                fieldmap_set.append(cur_fieldmap)
    return fieldmap_set
def get_tr(self, derivatives=False, **filters):
    """Return the scanning repetition time (TR) for one or more runs.

    Parameters
    ----------
    derivatives : bool
        If True, also checks derivatives images.
    filters : dict
        Optional keywords constraining the selected runs; any
        arguments valid for a .get call (BIDS entities or JSON
        sidecar keys).

    Returns
    -------
    float
        The single TR shared by all matching runs.

    Notes
    -----
    Raises an exception if no image matches, or if more than one
    unique TR is found.
    """
    # Constrain search to functional images
    filters.update(suffix='bold', datatype='func')
    scope = 'all' if derivatives else 'raw'
    images = self.get(extension=['.nii', '.nii.gz'], scope=scope,
                      **filters)
    if not images:
        raise NoMatchError("No functional images that match criteria found.")
    # Round to 5 decimals so float noise doesn't split identical TRs.
    all_trs = {round(float(self.get_metadata(img.path)['RepetitionTime']), 5)
               for img in images}
    if len(all_trs) > 1:
        raise NoMatchError("Unique TR cannot be found given filters {!r}"
                           .format(filters))
    return all_trs.pop()
def build_path(self, source, path_patterns=None, strict=False,
               scope='all', validate=True, absolute_paths=None):
    """Construct a target filename for a file or dictionary of entities.

    Parameters
    ----------
    source : str or :obj:`bids.layout.BIDSFile` or dict
        The source data to use to construct the new file path.
        Must be one of:
        - A BIDSFile object
        - A string giving the path of a BIDSFile contained within the
          current Layout.
        - A dict of entities, with entity names in keys and values in
          values
    path_patterns : list
        Optional path patterns to use to construct
        the new file path. If None, the Layout-defined patterns will
        be used. Entities should be represented by the name
        surrounded by curly braces. Optional portions of the patterns
        should be denoted by square brackets. Entities that require a
        specific value for the pattern to match can pass them inside
        angle brackets. Default values can be assigned by specifying a string
        after the pipe operator. E.g., (e.g., {type<image>|bold} would
        only match the pattern if the entity 'type' was passed and its
        value is "image", otherwise the default value "bold" will be
        used).
        Example: 'sub-{subject}/[var-{name}/]{id}.csv'
        Result: 'sub-01/var-SES/1045.csv'
    strict : bool, optional
        If True, all entities must be matched inside a
        pattern in order to be a valid match. If False, extra entities
        will be ignored so long as all mandatory entities are found.
    scope : str or list, optional
        The scope of the search space. Indicates which
        BIDSLayouts' path patterns to use. See BIDSLayout docstring
        for valid values. By default, uses all available layouts. If
        two or more values are provided, the order determines the
        precedence of path patterns (i.e., earlier layouts will have
        higher precedence).
    validate : bool, optional
        If True, built path must pass BIDS validator. If
        False, no validation is attempted, and an invalid path may be
        returned (e.g., if an entity value contains a hyphen).
    absolute_paths : bool, optional
        Optionally override the instance-wide option
        to report either absolute or relative (to the top of the
        dataset) paths. If None, will fall back on the value specified
        at BIDSLayout initialization.

    Raises
    ------
    ValueError
        If no pattern can be built from the source entities.
    BIDSValidationError
        If validate=True and the built path fails BIDS validation.
    """
    # 'is_file' is a crude check for Path objects
    if isinstance(source, str) or hasattr(source, 'is_file'):
        source = str(source)
        # Resolve relative paths against the layout root before lookup.
        if source not in self.files:
            source = os.path.join(self.root, source)
        source = self.get_file(source)
    if isinstance(source, BIDSFile):
        source = source.entities
    if path_patterns is None:
        layouts = self._get_layouts_in_scope(scope)
        path_patterns = []
        seen_configs = set()
        # Collect patterns in layout order; earlier layouts take
        # precedence, and each config contributes only once.
        for l in layouts:
            for c in l.config.values():
                if c in seen_configs:
                    continue
                if c.default_path_patterns is not None:
                    path_patterns.extend(c.default_path_patterns)
                seen_configs.add(c)
    built = build_path(source, path_patterns, strict)
    if built is None:
        raise ValueError(
            "Unable to construct build path with source {}".format(source))
    # The BIDS validator expects a path rooted at the dataset top level.
    to_check = os.path.join(os.path.sep, built)
    if validate and not BIDSValidator().is_bids(to_check):
        raise BIDSValidationError(
            "Built path {} is not a valid BIDS filename. "
            "Please make sure all provided entity values are "
            "spec-compliant.".format(built))
    # Convert to absolute paths if needed
    if absolute_paths is None:
        absolute_paths = self.absolute_paths
    if absolute_paths:
        built = os.path.join(self.root, built)
    return built
def copy_files(self, files=None, path_patterns=None, symbolic_links=True,
               root=None, conflicts='fail', **kwargs):
    """Copy BIDSFile(s) to new locations.

    The new locations are defined by each BIDSFile's entities and the
    specified `path_patterns`.

    Parameters
    ----------
    files : list
        Optional list of BIDSFile objects to write out. If
        none provided, use files from running a get() query using
        remaining **kwargs.
    path_patterns : str or list
        Write patterns to pass to each file's write_file method.
    symbolic_links : bool
        Whether to copy each file as a symbolic link or a deep copy.
    root : str
        Optional root directory that all patterns are relative
        to. Defaults to dataset root.
    conflicts : str
        Defines the desired action when the output path already exists.
        Must be one of:
            'fail': raises an exception
            'skip' does nothing
            'overwrite': overwrites the existing file
            'append': adds a suffix to each file copy, starting with 1
    kwargs : dict
        Optional key word arguments to pass into a get() query.
    """
    root = self.root if root is None else root
    _files = self.get(**kwargs)
    if files:
        # Restrict the query result to the explicitly listed files.
        # NOTE(review): set() drops duplicates and does not preserve
        # ordering, so the copy order is nondeterministic -- confirm no
        # caller relies on it.
        _files = list(set(files).intersection(_files))
    for f in _files:
        f.copy(path_patterns, symbolic_link=symbolic_links,
               root=root, conflicts=conflicts)
def write_to_file(self, entities, path_patterns=None,
                  contents=None, link_to=None, copy_from=None,
                  content_mode='text', conflicts='fail',
                  strict=False, validate=True):
    """Write data to a file defined by the passed entities and patterns.

    Parameters
    ----------
    entities : dict
        A dictionary of entities, with Entity names in
        keys and values for the desired file in values.
    path_patterns : list
        Optional path patterns to use when building
        the filename. If None, the Layout-defined patterns will be
        used.
    contents : object
        Contents to write to the generate file path.
        Can be any object serializable as text or binary data (as
        defined in the content_mode argument).
    link_to : str
        Optional path with which to create a symbolic link
        to. Used as an alternative to and takes priority over the
        contents argument.
    copy_from : str
        Optional path of an existing file to copy to the new location.
        Forwarded to the module-level write_to_file helper.
    content_mode : str
        Either 'text' or 'binary'; controls how ``contents`` is
        serialized when written out.
    conflicts : str
        Defines the desired action when the output path already exists.
        Must be one of:
            'fail': raises an exception
            'skip' does nothing
            'overwrite': overwrites the existing file
            'append': adds a suffix to each file copy, starting with 1
    strict : bool
        If True, all entities must be matched inside a
        pattern in order to be a valid match. If False, extra entities
        will be ignored so long as all mandatory entities are found.
    validate : bool
        If True, built path must pass BIDS validator. If
        False, no validation is attempted, and an invalid path may be
        returned (e.g., if an entity value contains a hyphen).
    """
    path = self.build_path(entities, path_patterns, strict,
                           validate=validate)
    if path is None:
        raise ValueError("Cannot construct any valid filename for "
                         "the passed entities given available path "
                         "patterns.")
    # Delegate the actual write/link/copy to the module-level helper.
    write_to_file(path, contents=contents, link_to=link_to,
                  copy_from=copy_from, content_mode=content_mode,
                  conflicts=conflicts, root=self.root)
class Query(enum.Enum):
    """Enums for use with BIDSLayout.get().

    Pass these as entity filter values to express presence constraints
    rather than concrete values.
    """

    NONE = 1  # Entity must not be present
    ANY = 2  # Entity must be defined, but with an arbitrary value
| mit |
iABC2XYZ/abc | Epics/rnn/DataRnnBPM1.5.py | 1 | 4527 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import matplotlib.pyplot as plt
plt.close('all')
# Training hyper-parameters.
numEpoch=2000000  # total number of optimization steps
batchSize= 50  # samples per training batch
stepRec = 200  # record losses / refresh plots every this many steps
learningRate=0.01  # Adam learning rate
rnnSize=16  # LSTM hidden-state width
rnnDepth=1  # number of stacked LSTM layers
numInput=10  # BPM readings per sample (network input)
numOutput=14  # corrector (CHV) settings per sample (network target)
nameFolder='/home/e/ABC/abc/Epics/rnn/'  # directory with recTrain/recTest data
def GenWeight(shape):
    """Create a trainable weight tensor, initialized from a truncated normal."""
    return tf.Variable(tf.truncated_normal(shape, stddev=1.))
def GenBias(shape):
    """Create a trainable bias vector of the given length, initialized to zero.

    Bug fix: the original wrapped a ``tf.Variable`` inside another
    ``tf.Variable``, creating a redundant second variable in the graph.
    Initialize from the plain zeros tensor instead, mirroring GenWeight.
    """
    initial = tf.zeros([shape])
    return tf.Variable(initial)
def getDataRowAll(exData):
    """Split a record array into (BPM readings, corrector settings).

    Columns 0:14 hold the corrector (CHV) settings, columns 14:24 the
    BPM readings.
    """
    return exData[:, 14:24], exData[:, 0:14]
def getDataRowBatch(exData, batchSize):
    """Sample one contiguous batch of (BPM, CHV) rows from the data set.

    Parameters
    ----------
    exData : numpy.ndarray
        Record array; columns 0:14 are corrector settings, 14:24 BPM readings.
    batchSize : int
        Number of consecutive rows to return. Must be <= len(exData).

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        ``(batchSize, 10)`` BPM readings and ``(batchSize, 14)`` corrector
        settings, with corrector channel 11 forced to zero.
    """
    numEx = np.shape(exData)[0]
    # Bug fix: the original upper bound was numEx - floor(numEx/batchSize)*batchSize,
    # i.e. numEx % batchSize, which raises ValueError when numEx is a multiple
    # of batchSize and otherwise restricts sampling to the first few rows.
    # Sample uniformly over every valid batch start instead.
    idChoose = np.random.randint(0, high=numEx - batchSize + 1)
    xBPM1 = np.reshape(exData[idChoose:idChoose + batchSize, 14:24], (batchSize, 10))
    yCHV1 = np.reshape(exData[idChoose:idChoose + batchSize, 0:14], (batchSize, 14))
    # Bug fix: the original ``yCHV1[11] = 0.`` zeroed sample #11 of the batch
    # (and would raise IndexError for batchSize <= 11). Zero corrector
    # channel 11 for every sample instead.
    yCHV1[:, 11] = 0.
    return xBPM1, yCHV1
# Load pre-recorded training and test sets (rows: samples; columns 0:14 are
# corrector settings, 14:24 BPM readings).
dataTrain=np.loadtxt(nameFolder+'recTrain.dat')
dataTest=np.loadtxt(nameFolder+'recTest.dat')
# NOTE(review): dataTest is loaded but never used below -- confirm.
# Graph inputs: BPM readings are the network input, CHV settings the target.
bpm=tf.placeholder(tf.float32,shape=(None,numInput))
cHV=tf.placeholder(tf.float32,shape=(None,numOutput))
xInput=bpm
yInput=cHV
##
def RNN(numInput,rnnSize,rnnDepth):
    # Build an LSTM stack mapping one BPM vector to an rnnSize feature vector.
    # NOTE(review): this reads the module-level ``xInput`` placeholder
    # directly (it is not a parameter), so the function is tied to the
    # graph built above.
    wRNNpre=GenWeight((numInput,rnnSize))
    bRNNpre=GenBias((rnnSize))
    # Single-step "sequence": static_rnn expects a list of time steps.
    rnnInput=[tf.nn.xw_plus_b(xInput,wRNNpre,bRNNpre)]
    wRNN=GenWeight((rnnSize,rnnSize))
    bRNN=GenBias((rnnSize))
    cellLSTM = tf.nn.rnn_cell.LSTMCell(rnnSize, state_is_tuple=True)
    # NOTE(review): output_keep_prob=0.5 applies dropout unconditionally,
    # i.e. also during evaluation -- confirm this is intended.
    cellRNN=tf.nn.rnn_cell.DropoutWrapper(tf.contrib.rnn.MultiRNNCell([cellLSTM] * rnnDepth),output_keep_prob=0.5)
    outRNN, stateRNN = tf.contrib.rnn.static_rnn(cellRNN, rnnInput, dtype=tf.float32)
    # Affine projection of the last RNN output, then ReLU.
    rnnOutput=tf.nn.xw_plus_b(outRNN[-1],wRNN,bRNN)
    xRnnOutput=tf.nn.relu(rnnOutput)
    return xRnnOutput
xRnnOutput=RNN(numInput,rnnSize,rnnDepth)
##
# Final affine layer mapping RNN features to the 14 corrector settings.
wFinal = GenWeight((rnnSize, numOutput))
bFinal = GenBias((numOutput))
xFinal=tf.nn.xw_plus_b(xRnnOutput,wFinal,bFinal)
##
xOutput=tf.reshape(xFinal,(-1,numOutput))
yOutput=tf.reshape(yInput,(-1,numOutput))
##---------------------------------------------------------
# RMS error between predicted and recorded corrector settings.
lossFn = tf.sqrt(tf.losses.mean_squared_error(xOutput , yOutput))
trainBPM = tf.train.AdamOptimizer(learningRate)
optBPM = trainBPM.minimize(lossFn)
iniBPM = tf.global_variables_initializer()
# Close a leftover session from a previous interactive run, if any.
# NOTE(review): dict.has_key exists only on Python 2, and the bare
# except silently swallows any error here.
try:
    if vars().has_key('se'):
        se.close()
except:
    pass
se = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
se.run(iniBPM)
# Rolling loss histories for live plotting.
# NOTE(review): nLossRec is computed but never used below.
nLossRec = np.int32(numEpoch/ stepRec + 1)
lossRec = []
lossTestRec = []
lossRecMean = []
lossTestRecMean = []
rCHV=[]
# Main training loop: one optimizer step per iteration; every ``stepRec``
# steps record losses, refresh diagnostic plots, and probe the network's
# response to a synthetic BPM pattern.
for i in range(np.int32(numEpoch)):
    xBPM, yCHV = getDataRowBatch(dataTrain,batchSize)
    se.run(optBPM, feed_dict={bpm: xBPM, cHV: yCHV})
    if i % stepRec==0:
        # NOTE(review): the "test" loss is sampled from dataTrain, not
        # dataTest -- confirm whether that is intended.
        xTestBPM, yTestCHV = getDataRowBatch(dataTrain, batchSize)
        lossTestBPM=se.run(lossFn, feed_dict={bpm: xTestBPM, cHV: yTestCHV})
        lossBPM=se.run(lossFn, feed_dict={bpm: xBPM, cHV: yCHV})
        lossRec.append(lossBPM)
        lossTestRec.append(lossTestBPM)
        lossRecMean.append(np.mean(lossRec))
        lossTestRecMean.append(np.mean(lossTestRec))
        # Keep the histories bounded: drop the oldest entry once full.
        if len(lossRec)>=stepRec:
            if i % (stepRec*2)==0:
                lossRec.pop(0)
                lossTestRec.pop(0)
                lossRecMean.pop(0)
                lossTestRecMean.pop(0)
        print(lossBPM,lossTestBPM)
        # Live plot of train/test loss and their running means.
        plt.figure('Loss')
        plt.clf()
        plt.subplot(121)
        plt.plot(lossRec,'b.')
        plt.plot(lossRecMean, 'r.')
        plt.grid('on')
        plt.title(str(i)+' lossTrain lossTest')
        plt.subplot(122)
        plt.plot(lossTestRec, 'b.')
        plt.plot(lossTestRecMean,'r.')
        plt.grid('on')
        plt.pause(0.001)
        #####--------------------------------------------
        # Probe: a BPM vector with only channels 0 and 5 set to their batch
        # means; record the mean squared corrector response over time.
        xExBPM=np.array([np.mean(xBPM[:,0]),0.,0,0,0,np.mean(xBPM[:,5]),0,0,0,0])[:,np.newaxis].T
        yExCHV=se.run(xFinal, feed_dict={bpm: xExBPM})
        rCHV.append(np.mean(np.square(yExCHV)))
        if len(lossRec)>=stepRec:
            if i % (stepRec*2)==0:
                rCHV.pop(0)
        plt.figure('RCHV')
        plt.clf()
        plt.plot(rCHV,'.')
        print('----- xExBPM --------')
        print(np.round(xExBPM*100)/100.)
        print('----- yExCHV --------')
        print(np.round(yExCHV*100)/100)
print('END') | gpl-3.0 |
kevin-intel/scikit-learn | sklearn/conftest.py | 2 | 7197 | import os
from os import environ
from functools import wraps
import platform
import sys
import pytest
from threadpoolctl import threadpool_limits
from _pytest.doctest import DoctestItem
from sklearn.utils import _IS_32BIT
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
from sklearn.externals import _pilutil
from sklearn._min_dependencies import PYTEST_MIN_VERSION
from sklearn.utils.fixes import np_version, parse_version
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import fetch_california_housing
from sklearn.datasets import fetch_covtype
from sklearn.datasets import fetch_kddcup99
from sklearn.datasets import fetch_olivetti_faces
from sklearn.datasets import fetch_rcv1
# Fail fast with a clear message if the installed pytest is too old.
if parse_version(pytest.__version__) < parse_version(PYTEST_MIN_VERSION):
    raise ImportError('Your version of pytest is too old, you should have '
                      'at least pytest >= {} installed.'
                      .format(PYTEST_MIN_VERSION))

# Maps fixture names (as they appear in test signatures) to the dataset
# fetcher they wrap; used during collection to pre-download data and to
# skip network-dependent tests.
dataset_fetchers = {
    'fetch_20newsgroups_fxt': fetch_20newsgroups,
    'fetch_20newsgroups_vectorized_fxt': fetch_20newsgroups_vectorized,
    'fetch_california_housing_fxt': fetch_california_housing,
    'fetch_covtype_fxt': fetch_covtype,
    'fetch_kddcup99_fxt': fetch_kddcup99,
    'fetch_olivetti_faces_fxt': fetch_olivetti_faces,
    'fetch_rcv1_fxt': fetch_rcv1,
}
def _fetch_fixture(f):
    """Fetch dataset (download if missing and requested by environment)."""
    # Downloads are opt-in: only when SKLEARN_SKIP_NETWORK_TESTS=0.
    download_if_missing = environ.get('SKLEARN_SKIP_NETWORK_TESTS', '1') == '0'

    @wraps(f)
    def wrapped(*args, **kwargs):
        kwargs['download_if_missing'] = download_if_missing
        try:
            return f(*args, **kwargs)
        except IOError as e:
            # Re-raise genuine I/O failures; only "data missing and
            # downloads disabled" turns into a pytest skip.
            if str(e) != "Data not found and `download_if_missing` is False":
                raise
            pytest.skip("test is enabled when "
                        "SKLEARN_SKIP_NETWORK_TESTS=0")
    # The fixture *value* is the wrapped callable itself: tests receive a
    # function that they call with the fetcher's arguments.
    return pytest.fixture(lambda: wrapped)
# Adds fixtures for fetching data: one module-level fixture per downloadable
# dataset, each yielding a callable wrapper around the corresponding fetcher.
fetch_20newsgroups_fxt = _fetch_fixture(fetch_20newsgroups)
fetch_20newsgroups_vectorized_fxt = \
    _fetch_fixture(fetch_20newsgroups_vectorized)
fetch_california_housing_fxt = _fetch_fixture(fetch_california_housing)
fetch_covtype_fxt = _fetch_fixture(fetch_covtype)
fetch_kddcup99_fxt = _fetch_fixture(fetch_kddcup99)
fetch_olivetti_faces_fxt = _fetch_fixture(fetch_olivetti_faces)
fetch_rcv1_fxt = _fetch_fixture(fetch_rcv1)
def pytest_collection_modifyitems(config, items):
    """Called after collect is completed.

    Handles: pre-downloading network datasets (or skipping their tests),
    platform-specific skips/xfails, and disabling doctests where numpy's
    repr formatting differs.

    Parameters
    ----------
    config : pytest config
    items : list of collected items
    """
    run_network_tests = environ.get('SKLEARN_SKIP_NETWORK_TESTS', '1') == '0'
    skip_network = pytest.mark.skip(
        reason="test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0")

    # download datasets during collection to avoid thread unsafe behavior
    # when running pytest in parallel with pytest-xdist
    dataset_features_set = set(dataset_fetchers)
    datasets_to_download = set()

    for item in items:
        if not hasattr(item, "fixturenames"):
            continue
        item_fixtures = set(item.fixturenames)
        dataset_to_fetch = item_fixtures & dataset_features_set
        if not dataset_to_fetch:
            continue

        if run_network_tests:
            datasets_to_download |= dataset_to_fetch
        else:
            # network tests are skipped
            item.add_marker(skip_network)

    # Only download datasets on the first worker spawned by pytest-xdist
    # to avoid thread unsafe behavior. If pytest-xdist is not used, we still
    # download before tests run.
    worker_id = environ.get("PYTEST_XDIST_WORKER", "gw0")
    if worker_id == "gw0" and run_network_tests:
        for name in datasets_to_download:
            dataset_fetchers[name]()

    for item in items:
        # FeatureHasher is not compatible with PyPy
        if (item.name.endswith(('_hash.FeatureHasher',
                                'text.HashingVectorizer'))
                and platform.python_implementation() == 'PyPy'):
            marker = pytest.mark.skip(
                reason='FeatureHasher is not compatible with PyPy')
            item.add_marker(marker)
        # Known failure on with GradientBoostingClassifier on ARM64
        elif (item.name.endswith('GradientBoostingClassifier')
                and platform.machine() == 'aarch64'):
            marker = pytest.mark.xfail(
                reason=(
                    'know failure. See '
                    'https://github.com/scikit-learn/scikit-learn/issues/17797'  # noqa
                )
            )
            item.add_marker(marker)

    # numpy changed the str/repr formatting of numpy arrays in 1.14. We want to
    # run doctests only for numpy >= 1.14.
    skip_doctests = False
    try:
        if np_version < parse_version('1.14'):
            reason = 'doctests are only run for numpy >= 1.14'
            skip_doctests = True
        elif _IS_32BIT:
            reason = ('doctest are only run when the default numpy int is '
                      '64 bits.')
            skip_doctests = True
        elif sys.platform.startswith("win32"):
            reason = ("doctests are not run for Windows because numpy arrays "
                      "repr is inconsistent across platforms.")
            skip_doctests = True
    except ImportError:
        pass

    if skip_doctests:
        skip_marker = pytest.mark.skip(reason=reason)
        for item in items:
            if isinstance(item, DoctestItem):
                item.add_marker(skip_marker)
    elif not _pilutil.pillow_installed:
        # Image-handling doctests need pillow; skip just those.
        skip_marker = pytest.mark.skip(reason="pillow (or PIL) not installed!")
        for item in items:
            if item.name in [
                    "sklearn.feature_extraction.image.PatchExtractor",
                    "sklearn.feature_extraction.image.extract_patches_2d"]:
                item.add_marker(skip_marker)
@pytest.fixture(scope='function')
def pyplot():
    """Setup and teardown fixture for matplotlib.

    This fixture checks if we can import matplotlib. If not, the tests will be
    skipped. Otherwise, we setup matplotlib backend and close the figures
    after running the functions.

    Returns
    -------
    pyplot : module
        The ``matplotlib.pyplot`` module.
    """
    matplotlib = pytest.importorskip('matplotlib')
    # Non-interactive backend so tests never try to open a window.
    matplotlib.use('agg')
    pyplot = pytest.importorskip('matplotlib.pyplot')
    yield pyplot
    # Teardown: release figure memory after each test.
    pyplot.close('all')
def pytest_runtest_setup(item):
    """Set the number of openmp threads based on the number of workers
    xdist is using to prevent oversubscription.

    Parameters
    ----------
    item : pytest item
        item to be processed
    """
    try:
        xdist_worker_count = int(os.environ['PYTEST_XDIST_WORKER_COUNT'])
    except KeyError:
        # raises when pytest-xdist is not installed
        return
    openmp_threads = _openmp_effective_n_threads()
    # Give each xdist worker an equal share of the threads, at least one.
    threads_per_worker = max(openmp_threads // xdist_worker_count, 1)
    threadpool_limits(threads_per_worker, user_api='openmp')
| bsd-3-clause |
Chandra-MARX/marxs | marxs/visualization/tests/test_utils.py | 2 | 6107 | # Licensed under GPL version 3 - see LICENSE.rst
import numpy as np
import pytest
from ..utils import (plane_with_hole, combine_disjoint_triangulations,
get_color, color_tuple_to_hex,
MARXSVisualizationWarning,
DisplayDict)
from ..mayavi import plot_object
def test_hole_round():
    '''Ensure that plane around the inner hole closes properly at the last point.'''
    # Square outer boundary in the z=0 plane (4 vertices).
    outer = np.array([[-1, -1, 1, 1], [-1, 1, 1, -1], [0,0,0,0]]).T
    # Try out a rectangle and a circle - the two cases we are most likely to need.
    for n in [3, 4, 90, 360]:
        # Inner hole: n points on the unit circle.
        inner = np.zeros((n, 3))
        phi = np.linspace(0, 2 * np.pi, n, endpoint=False)
        inner[:, 0] = np.sin(phi)
        inner[:, 1] = np.cos(phi)
        xyz, triangles = plane_with_hole(outer, inner)
        # Triangulation must close: the last triangle reuses a vertex of the first.
        assert triangles[0][1] == triangles[-1][2]
        tri = np.array(triangles)
        # Technically, the order of the points in each triangle
        # is irrelevant, but the current implementation does it this way
        # and that's an easy way to check.
        # Check first point is always on outer rim
        assert set(tri[:, 0]) == set([0,1,2,3])
        # Check all points turn up in the middle
        assert set(tri[:, 1]) == set(np.arange(4 + n))
        # Check last point is always on inner rim
        assert set(tri[:, 2]) == set(np.arange(4, 4 + n))
def test_stack_triangulations():
    '''Combining disjoint triangulations stacks vertices and offsets indices.'''
    square = np.array([[-1. , -1. , 0. ],
                       [-1. , 1. , 0. ],
                       [ 1. , 1. , 0. ],
                       [ 1. , -1. , 0. ]])
    faces = np.array([[0, 1, 2], [0, 2, 3]])
    shifted = square + 5.3
    verts, tris = combine_disjoint_triangulations([square, shifted],
                                                  [faces, faces])
    # First four vertices are the original square, the rest the shifted copy.
    assert np.all(verts[:4, :] == square)
    assert np.allclose(verts[4:, :], shifted)
    # The second set of faces is renumbered by the vertex count of the first.
    assert np.all(tris[:2, :] == faces)
    assert np.all(tris[2:, :] == faces + 4)
def test_color_roundtrip():
    '''Test that the different color converters are consistent.'''
    white = get_color({'color': 'white'})
    assert white == (1.0, 1.0, 1.0)
    assert color_tuple_to_hex(white) == '0xffffff'
    assert get_color({'color': '#ffffff'}) == (1.0, 1.0, 1.0)
    # Repeat with a color that has distinct values for r, g, and b.
    redish = get_color({'color': '#ff013a'})
    # Note that matplotlib expects hex strings with '#' while python uses '0x'
    assert color_tuple_to_hex(redish) == '0xff013a'
    # Integer tuples are accepted as well.
    assert color_tuple_to_hex((255, 255, 255)) == '0xffffff'
def test_color_hex_pad():
    '''Regression test: hex color strings must keep their leading zeros.'''
    green_blue = (0., 1., .5)
    assert color_tuple_to_hex(green_blue) == '0x00ff7f'
def test_color_to_hex_bad_input():
    '''Invalid color tuples raise ValueError with a descriptive message.'''
    bad_inputs = [
        ('white', 'Input tuple must be all'),
        ((-1, 0, 234), 'Int values in color tuple'),
        ((1., 3., 0.3), 'Float values in color tuple'),
    ]
    for value, fragment in bad_inputs:
        with pytest.raises(ValueError) as excinfo:
            color_tuple_to_hex(value)
        assert fragment in str(excinfo.value)
def test_no_display_warnings():
    '''Check that warnings are emitted for classes, functions, and other
    objects that carry no display dictionary.'''
    class NoDisplay(object):
        pass

    with pytest.warns(MARXSVisualizationWarning) as record:
        plot_object(NoDisplay())
    # check that only one warning was raised
    assert len(record) == 1
    # check that the message matches
    assert 'No display dictionary found.' in record[0].message.args[0]

    def f():
        pass

    with pytest.warns(MARXSVisualizationWarning) as record:
        plot_object(f)
    # check that only one warning was raised
    assert len(record) == 1
    # check that the message matches
    assert 'No display dictionary found.' in record[0].message.args[0]

    # Builtins (here: ``range``) lack a display dict, too.
    with pytest.warns(MARXSVisualizationWarning) as record:
        plot_object(range)
    assert len(record) == 1
    assert 'No display dictionary found' in record[0].message.args[0]
def test_warning_unknownshape():
    '''Plotting an object with an unrecognized shape warns instead of crashing.'''
    class BadShape():
        display = {'shape': 'cilinder'}

    with pytest.warns(MARXSVisualizationWarning) as record:
        plot_object(BadShape())
    # Exactly one warning, and it names the offending shape.
    assert len(record) == 1
    assert 'No function to plot cilinder.' in record[0].message.args[0]
def test_warning_noshapeset():
    '''A display dict without a "shape" key warns instead of crashing.'''
    class MissingShape():
        display = {'form': 'cilinder'}

    with pytest.warns(MARXSVisualizationWarning) as record:
        plot_object(MissingShape())
    # Exactly one warning, and it explains what is missing.
    assert len(record) == 1
    assert '"shape" not set in display dict.' in record[0].message.args[0]
def test_DiplayDict():
    '''Simple mock-up using a DisplayDict variable.

    NOTE(review): the function name misspells "Display" ("Diplay");
    renaming would change the collected test id, so it is only flagged here.
    '''
    class A(object):
        pass

    a = A()
    b = A()
    # b additionally carries a geometry object with its own attributes,
    # which DisplayDict falls back to on missing keys.
    b.geometry = A()
    b.geometry.value = 5
    a.display = DisplayDict(a, r=7)
    b.display = DisplayDict(b, value_1='w')
    assert a.display['r'] == 7
    assert b.display['value_1'] == 'w'
    assert b.display['value'] == 5
    # value is set in display and in geometry; display takes precedence
    b.display['value'] = 6
    assert b.display['value'] == 6
    assert b.geometry.value == 5
    # values added to geometry later
    b.geometry.value2 = 'q'
    assert b.display['value2'] == 'q'
    # Make sure there is the right error only when used on an object without geomtry
    with pytest.raises(KeyError) as e:
        temp = a.display['q']
    # Make sure there is the right error if something is not found
    with pytest.raises(KeyError) as e:
        temp = b.display['1']
    # get works, too
    assert b.display.get('value') == 6
    assert a.display.get('qwer') is None
    assert a.display.get('asdf', 5) == 5
| gpl-3.0 |
mne-tools/mne-python | mne/viz/backends/tests/test_utils.py | 14 | 1696 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
import pytest
from mne.viz.backends._utils import _get_colormap_from_array, _check_color
def test_get_colormap_from_array():
    """Test setting a colormap."""
    from matplotlib.colors import ListedColormap, LinearSegmentedColormap
    # No arguments: fall back to the default linear segmented map.
    assert isinstance(_get_colormap_from_array(), LinearSegmentedColormap)
    # Named colormap lookup.
    assert isinstance(_get_colormap_from_array(colormap='viridis'),
                      ListedColormap)
    # A single normalized RGB triple.
    assert isinstance(
        _get_colormap_from_array(colormap=[1, 1, 1],
                                 normalized_colormap=True),
        ListedColormap)
    # A single 8-bit RGB triple.
    assert isinstance(
        _get_colormap_from_array(colormap=[255, 255, 255],
                                 normalized_colormap=False),
        ListedColormap)
def test_check_color():
    """Test color format."""
    # Valid inputs pass through (names resolve to normalized RGB).
    assert _check_color('red') == (1., 0., 0.)
    assert _check_color((0., 1., 0., 1.)) == (0., 1., 0., 1.)
    assert _check_color((0, 0, 255, 255)) == (0, 0, 255, 255)
    # Invalid inputs raise with a diagnostic message.
    failure_cases = [
        ([255, 0], ValueError, 'RGB or RGBA'),
        ([256, 0, 0], ValueError, 'out of range'),
        ([-1.0, 0.0, 0.0], ValueError, 'out of range'),
        (['foo', 'bar', 'foo'], TypeError, 'Expected data type'),
        (None, TypeError, 'Expected type'),
    ]
    for value, exc, pattern in failure_cases:
        with pytest.raises(exc, match=pattern):
            _check_color(value)
| bsd-3-clause |
CORE-GATECH-GROUP/serpent-tools | serpentTools/objects/xsdata.py | 1 | 13731 | """Holds cross section data pertaining to Serpent xsplot output."""
from collections.abc import Mapping
import numpy as np
from matplotlib import pyplot
from serpentTools.messages import error
from serpentTools.objects.base import NamedObject
from serpentTools.utils.plot import magicPlotDocDecorator, formatPlot
__all__ = [
'XSData',
]
class XSData(NamedObject):
"""Base class for storing cross section data an xsplot file
Parameters
----------
name : str
Name of this material
metadata : dict
Dictionary with file metadata. Expects ``egrid`` as a key
at least
isIso : bool, optional
Flag indicating if this data section is for a single
isotope or for a material
Attributes
----------
isIso : bool
Whether this describes individual isotope XS, or whole-material XS
MT : list
Macroscopic cross section integers
MTdescip : list
Descriptions of reactions in ``MT``
xsdata : numpy.ndarray
Array of xs data. Rows correspond to items in :attr:`MT`
hasNuData : bool
True if nu data is present
energies : numpy.ndarray
Energy grid [MeV]
metadata : dict
File-wide metadata from the reader. Alias for accessing
:attr:`energies`. Will be removed in the future
"""
MTdescriptions = {
-1: "Macro total",
-2: "Macro total capture",
-3: "Macro total elastic scatter",
-4: "Macro total heating",
-5: "Macro total photon production",
-6: "Macro total fission",
-7: "Macro total fission neutron production",
-8: "Total fission energy production",
-9: "Majorant macro",
-10: "Macro scattering recoil heating",
-11: "Source rate",
-15: "neutron density",
-16: "Macro total scattering neutron production",
-53: "Macro proton production",
-54: "Macro deutron production",
-55: "Macro triton production",
-56: "Macro He-3 production",
-57: "Macro He-4 production",
-100: "User response function",
}
def __init__(self, name, metadata, isIso=False):
    super().__init__(name)
    self.isIso = isIso
    # metadata reference; ``egrid`` is required and aliased as ``energies``
    self.energies = metadata["egrid"]
    self.metadata = metadata
    # Possible reactions on this material / nuclide.
    # MT holds integer reaction numbers, MTdescrip parallel descriptions.
    self.MT = []
    self.MTdescrip = []
    # Holds XS numeric data: rows follow the energy grid, columns follow MT
    self.xsdata = None
    # whether nu data present for fissionables
    self.hasNuData = False
def __len__(self):
    """Number of reactions stored (length of :attr:`MT`)."""
    return len(self.MT)
def __getitem__(self, mt):
    """Return the cross section column for a given MT.

    Parameters
    ----------
    mt : int
        Integer MT reaction number

    Returns
    -------
    numpy.ndarray
        Cross section data for this mt

    Raises
    ------
    AttributeError
        If :attr:`xsdata` is empty
    KeyError
        If ``mt`` not found in :attr:`MT`
    """
    if self.xsdata is None:
        raise AttributeError("xsdata not populated")
    try:
        column = self.MT.index(mt)
    except ValueError as exc:
        raise KeyError(mt) from exc
    return self.xsdata[:, column]
def get(self, mt, default=None):
    """Return data for ``mt``, or ``default`` if the reaction is absent.

    Parameters
    ----------
    mt : int
        Integer MT reaction number
    default : object
        Object returned when ``mt`` is not found

    Returns
    -------
    object
        :class:`numpy.ndarray` when ``mt`` is found, otherwise ``default``

    Raises
    ------
    AttributeError
        If :attr:`xsdata` is empty
    """
    try:
        column = self[mt]
    except KeyError:
        return default
    return column
@staticmethod
def negativeMTDescription(mt):
    """Descriptions for Serpent negative MT numbers

    These correspond to macroscopic properties, like
    fission energy production, and for neutrons only.
    From Serpent Wiki

    Parameters
    ----------
    mt : int
        Macroscopic reaction MT. Must be negative

    Returns
    -------
    str
        Description
    """
    # Non-positive values are looked up directly; positive MTs are invalid here.
    if mt <= 0:
        return XSData.MTdescriptions[mt]
    raise ValueError("{} is not negative".format(mt))
def describe(self, mt):
    """Return the description for any reaction MT

    Parameters
    ----------
    mt : int
        Integer reaction number, e.g. 102 or -8. Assumes
        neutrons only

    Returns
    -------
    str
        Description for this reaction
    """
    # Negative MTs use the class-wide macroscopic table; positive ones
    # use the descriptions parsed for this material / nuclide.
    if mt >= 0:
        return self.MTdescrip[self.MT.index(mt)]
    return XSData.MTdescriptions[mt]
def setMTs(self, chunk):
    """Parse chunk to MT numbers and descriptions."""
    if self.isIso:
        # Microscopic data: each header is "<mt> % <description>".
        self.MT = [col.split('%')[0] for col in chunk[1:]]
        self.MT = [int(c) for c in self.MT]
        self.MTdescrip = [col.split('%')[1].strip() for col in chunk[1:]]
    else:
        # Macroscopic data: headers are bare (negative) MT numbers, whose
        # descriptions come from the class-wide table.
        self.MT = [int(col) for col in chunk[1:]]
        for mt in self.MT:
            self.MTdescrip.append(self.negativeMTDescription(mt))
def setData(self, chunk):
    """Parse cross section values from chunk into :attr:`xsdata`."""
    nEnergies = len(self.metadata['egrid'])
    # One row per energy point, one column per MT; rows not present in
    # the chunk remain zero.
    self.xsdata = np.zeros([nEnergies, len(self.MT)])
    for row, line in enumerate(chunk[1:]):
        self.xsdata[row] = np.array(line.split(), dtype=np.float64)
def setNuData(self, chunk):
    """Add fission neutrons per fission data."""
    self.hasNuData = True
    nEnergies = len(self.metadata['egrid'])
    # Two columns of nu data per energy point.
    self.nuData = np.zeros([nEnergies, 2], dtype=np.float64)
    for row, line in enumerate(chunk[1:]):
        self.nuData[row] = np.array(line.split(), dtype=np.float64)
def hasExpectedData(self):
    """Check that the expected data (MT numbers, an energy grid, etc)
    were collected."""
    # Both the numeric array and the MT list must be populated.
    return isinstance(self.xsdata, np.ndarray) and bool(self.MT)
def tabulate(self, mts='all', colnames=None):
    """ Returns a pandas table, for pretty tabulation in Jupyter
    notebooks.

    Parameters
    ----------
    mts: int, string, or list of ints
        If it's a string, it should be 'all', which is default.
        A single int indicates one MT reaction number.
        A list should be a list of MT numbers to plot.
    colnames: any type with string representation
        Column names for each MT number, if you'd like to change them.

    Returns
    -------
    pandas.DataFrame or None
        Tabulated representation of the cross section data, or None
        if the user declines to build a very large table

    Raises
    ------
    ImportError
        If ``pandas`` is not installed
    TypeError
        if MT numbers that don't make sense come up
    """
    import pandas as pd
    if len(self.metadata['egrid']) > 99:
        y = input('This is about to be a big table. Still want it? (y/n)')
        # Bug fix: the original logic was inverted -- answering "y"
        # returned None while answering "n" built the table. Proceed on
        # yes, bail out on anything else.
        if y not in ('y', 'Y'):
            return None
    if mts == 'all':
        mts = self.MT
    elif isinstance(mts, int):
        # convert to list if it's just one MT
        mts = [mts]
    elif isinstance(mts, list) and all(
            [isinstance(ii, int) for ii in mts]):
        pass
    else:
        msg = ("mts argument must be a string saying 'all',"
               "a list of integer MTs, or a single interger"
               "instead, {} of type {} was passed."
               .format(mts, type(mts)))
        raise TypeError(msg)
    for mt in mts:
        if mt not in self.MT:
            # Best-effort: log the problem and silently skip the unknown
            # MT below (matches the historical behavior).
            error("{} not in collected MT numbers, {}".format(mt, self.MT))
    # Map each requested MT to its column index in xsdata.
    cols2use = []
    mtnums = []
    for mt in mts:
        for i, MT in enumerate(self.MT):
            if mt == MT:
                cols2use.append(i)
                mtnums.append(mt)
    frame = pd.DataFrame(self.xsdata[:, cols2use])
    # Units: barns for isotopes, macroscopic 1/cm otherwise.
    unit = ' b' if self.isIso else ' cm$^{-1}$'
    frame.columns = colnames or ['MT ' + str(mt) + unit for mt in mtnums]
    frame.insert(0, 'Energy (MeV)', self.metadata['egrid'])
    return frame
@magicPlotDocDecorator
def plot(self, mts='all', ax=None, loglog=False, xlabel=None, ylabel=None,
         logx=True, logy=False, title=None, legend=None, ncol=1,
         labels=None, **kwargs):
    """
    Plot XS corresponding to their MTs.

    Parameters
    ----------
    mts : int, string, or list of ints
        If it's a string, it should be 'all'.
        A single int indicates one MT reaction number.
        A list should be a list of MT numbers to plot.
    {ax}
    {loglog}
    {logx}
    {logy}
    {xlabel}
    {ylabel}
    {title}
    {legend}
    {ncol}
    labels : str or list of str or dict {int: str}
        Labels to apply to the plot. Defaults to labeling by MT
        description. If a string, then ``mts`` must be a single
        integer. If a list of strings, each label will be applied
        to each entry in ``mts``. If a dictionary, keys must be
        mts and their labels as values. The number of keys do
        not have to align with the number of MTs
    {kwargs} :func:`matplotlib.pyplot.plot`

    Returns
    -------
    {rax}

    Raises
    ------
    TypeError
        If MT numbers that don't make sense come up
    """
    mts = self._processPlotMts(mts)
    userlabel = kwargs.pop("label", None)
    if userlabel is not None:
        # Allow label to be passed for single MT plots
        # Little easier to remember and it makes more sense.
        # Don't allow mixed label / labels arguments
        if labels is not None:
            raise ValueError(
                "Passing label and labels is not allowed. Prefer labels")
        if len(mts) == 1:
            labels = [userlabel]
        else:
            raise ValueError("Use labels when plotting multiple MTs")
    else:
        labels = self._processPlotLabels(mts, labels)
    ax = ax or pyplot.gca()
    # Step plot reflects the group-wise (histogram) nature of the data.
    kwargs.setdefault("drawstyle", "steps")
    for mt, label in zip(mts, labels):
        y = self[mt]
        ax.plot(self.energies, y, label=label, **kwargs)
    title = title or '{} cross section{}'.format(
        self.name, 's' if len(mts) > 1 else '')
    xlabel = xlabel or "Energy [MeV]"
    # Units: barns for isotopes, macroscopic 1/cm otherwise.
    ylabel = ylabel or ('Cross Section ({})'.format('b' if self.isIso
                                                    else 'cm$^{-1}$'))
    ax = formatPlot(
        ax, loglog=loglog, logx=logx, logy=logy, legendcols=ncol,
        legend=legend, title=title, xlabel=xlabel, ylabel=ylabel)
    return ax
def _processPlotMts(self, mts):
    """Validate and normalize the ``mts`` argument to a list of known MTs."""
    if mts == 'all':
        requested = self.MT
    elif isinstance(mts, int):
        # A single MT becomes a one-element list.
        requested = [mts]
    elif isinstance(mts, list) and all(
            isinstance(ii, int) for ii in mts):
        requested = mts
    else:
        raise TypeError(
            "mts argument must be a string saying 'all',"
            "a list of integer MTs, or a single interger"
            "instead, {} of type {} was passed."
            .format(mts, type(mts)))
    # Every requested reaction must have been collected for this material.
    for mt in requested:
        if mt not in self.MT:
            raise ValueError(
                "{} not in collected MT numbers, {}".format(mt, self.MT))
    return requested
def _processPlotLabels(self, mts, labels):
    """Normalize user-supplied labels into one label per requested MT.

    Parameters
    ----------
    mts : list of int
        Reaction numbers to be plotted, already validated
    labels : None, str, Mapping, or list of str
        User-provided labels; missing entries default to the MT description

    Returns
    -------
    list of str
        One label per entry of ``mts``

    Raises
    ------
    ValueError
        If the number of labels does not match the number of MTs
    """
    if isinstance(labels, str):
        if len(mts) != 1:
            raise ValueError(
                "Labels and mts do not align: {} mts, 1 label".format(
                    len(mts)))
        return [labels]
    if labels is None:
        return [self.describe(mt) for mt in mts]
    if isinstance(labels, Mapping):
        # Bug fix: the original defaulted to self.MTdescrip[i], indexing by
        # position within ``mts``; that is only correct when mts == self.MT.
        # Look up the description of the actual MT instead.
        return [labels.get(mt, self.describe(mt)) for mt in mts]
    if len(mts) != len(labels):
        raise ValueError(
            "Labels and mts do not align: {} mts, {} labels".format(
                len(mts), len(labels)))
    return labels
def showMT(self, retstring=False):
    """Create a pretty-print style string of the MT values avaialable

    Parameters
    ----------
    retstring : bool
        Return a string if true. Otherwise, print it
    """
    lines = ["MT numbers available for {}:".format(self.name),
             "--------------------------" + len(self.name) * '-']
    for position, mt in enumerate(self.MT):
        if self.isIso:
            descr = self.MTdescrip[position]
        else:
            descr = XSData.negativeMTDescription(mt)
        # Pad the MT number to a four-character column.
        lines.append(str(mt) + (4 - len(str(mt))) * ' ' + descr)
    outstr = '\n'.join(lines) + '\n'
    if retstring:
        return outstr
    print(outstr)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.