repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
pypyrus/pypyrus | jupyter/config/jupyter_notebook_config.py | 1 | 19505 | #--- nbextensions configuration ---
from jupyter_core.paths import jupyter_config_dir, jupyter_data_dir
import os
import sys
# nbextensions #
# Use a project-local data dir instead of the per-user Jupyter data dir.
#data_dir = jupyter_data_dir()
data_dir = os.path.join(os.getcwd(), 'jupyter', 'data')
# Make bundled extension modules importable by the server process.
sys.path.append(os.path.join(data_dir, 'extensions'))
# get_config() is injected by Jupyter when this config file is executed.
c = get_config()
# Load the nbextensions server extension and serve its templates.
c.NotebookApp.server_extensions = ['nbextensions']
c.NotebookApp.extra_template_paths = [os.path.join(data_dir,'templates') ]
#--- nbextensions configuration ---
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp configuration
#------------------------------------------------------------------------------
# Base class for Jupyter applications
# Generate default config file.
# c.JupyterApp.generate_config = False
# Specify a config file to load.
# c.JupyterApp.config_file_name = ''
# Full path of a config file.
# c.JupyterApp.config_file = ''
# Answer yes to any prompts.
# c.JupyterApp.answer_yes = False
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = traitlets.Undefined
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = ''
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = traitlets.Undefined
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.tornado_settings = traitlets.Undefined
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = b''
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = <class 'notebook.services.kernels.kernelmanager.MappingKernelManager'>
#
# c.NotebookApp.file_to_run = ''
# Extra variables to supply to jinja templates when rendering.
# c.NotebookApp.jinja_template_vars = traitlets.Undefined
# The login handler class to use.
# c.NotebookApp.login_handler_class = <class 'notebook.auth.login.LoginHandler'>
# The config manager class to use
# c.NotebookApp.config_manager_class = <class 'notebook.services.config.manager.ConfigManager'>
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = 'localhost'
# The session manager class to use.
# c.NotebookApp.session_manager_class = <class 'notebook.services.sessions.sessionmanager.SessionManager'>
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
# c.NotebookApp.open_browser = True
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = ''
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = ''
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = traitlets.Undefined
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = ''
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = <class 'notebook.auth.logout.LogoutHandler'>
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = traitlets.Undefined
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = traitlets.Undefined
# The directory to use for notebooks and kernels.
# c.NotebookApp.notebook_dir = ''
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = <class 'notebook.services.contents.filemanager.FileContentsManager'>
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# c.NotebookApp.default_url = '/notebooks/index.ipynb'
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = ''
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# Python modules to load as notebook server extensions. This is an experimental
# API, and may change in future releases.
# c.NotebookApp.server_extensions = traitlets.Undefined
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
# The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of IPython and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = <class 'jupyter_client.kernelspec.KernelSpecManager'>
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = traitlets.Undefined
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# ConnectionFileMixin configuration
#------------------------------------------------------------------------------
# Mixin for configurable classes that work with connection files
# set the stdin (ROUTER) port [default: random]
# c.ConnectionFileMixin.stdin_port = 0
#
# c.ConnectionFileMixin.transport = 'tcp'
# set the shell (ROUTER) port [default: random]
# c.ConnectionFileMixin.shell_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ConnectionFileMixin.ip = ''
# set the control (ROUTER) port [default: random]
# c.ConnectionFileMixin.control_port = 0
# set the heartbeat port [default: random]
# c.ConnectionFileMixin.hb_port = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ConnectionFileMixin.connection_file = ''
# set the iopub (PUB) port [default: random]
# c.ConnectionFileMixin.iopub_port = 0
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
# c.KernelManager.kernel_cmd = traitlets.Undefined
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = traitlets.Undefined
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Debug output in the Session
# c.Session.debug = False
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# path to file containing execution key.
# c.Session.keyfile = ''
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# execution key, for signing messages.
# c.Session.key = b''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# Username for the Session. Default is your system username.
# c.Session.username = 'root'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The UUID identifying this session.
# c.Session.session = ''
#------------------------------------------------------------------------------
# MultiKernelManager configuration
#------------------------------------------------------------------------------
# A class for managing multiple kernels.
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
# The name of the default kernel to start
# c.MultiKernelManager.default_kernel_name = 'python3'
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
#
# c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
#
# c.ContentsManager.checkpoints = traitlets.Undefined
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = traitlets.Undefined
#
# c.ContentsManager.checkpoints_class = <class 'notebook.services.contents.checkpoints.Checkpoints'>
#
# c.ContentsManager.checkpoints_kwargs = traitlets.Undefined
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
# DEPRECATED, use post_save_hook
# c.FileContentsManager.save_script = False
#
# c.FileContentsManager.root_dir = ''
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter runtime directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
# c.NotebookNotary.db_file = ''
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = ''
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = b''
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = traitlets.Undefined
| gpl-2.0 |
etsrepo/currentcostgui | currentcostgraphs.py | 9 | 7933 | # -*- coding: utf-8 -*-
#
# CurrentCost GUI
#
# Copyright (C) 2008 Dale Lane
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The author of this code can be contacted at Dale.Lane@gmail.com
# Any contact about this application is warmly welcomed.
#
import wx
import wx.aui
import matplotlib as mpl
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as Canvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx, _load_bitmap
from matplotlib.dates import DayLocator, HourLocator, MonthLocator, YearLocator, WeekdayLocator, DateFormatter, drange
from matplotlib.patches import Rectangle, Patch
from matplotlib.text import Text
#
# Implements the tabs we use in the GUI - either to draw a graph, or a TextPage
# for the 'trends' page.
#
# Also includes a custom toolbar for use with Matplotlib graphs
#
# Dale Lane (http://dalelane.co.uk/blog)
class Plot(wx.Panel):
    # A notebook tab hosting one matplotlib figure plus its navigation
    # toolbar, stacked vertically (canvas on top, toolbar underneath).
    def __init__(self, parent, id = -1, dpi = None, **kwargs):
        wx.Panel.__init__(self, parent, id=id, **kwargs)
        # small initial figure size; the sizer expands the canvas to fill
        self.figure = mpl.figure.Figure(dpi=dpi, figsize=(2,2))
        self.canvas = Canvas(self, -1, self.figure)
        # custom Toolbar class (defined in this module) without subplots btn
        self.toolbar = Toolbar(self.canvas)
        self.toolbar.Realize()
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.canvas,1,wx.EXPAND)
        sizer.Add(self.toolbar, 0 , wx.LEFT | wx.EXPAND)
        self.SetSizer(sizer)
class PlotNotebook(wx.Panel):
    # Panel wrapping an AUI notebook whose tabs hold either graph pages
    # (Plot) or text pages (TextPage). Tabs are addressed by their label.
    def __init__(self, parent, id = -1):
        # parent is a frame --> MyFrame (wx.Frame)
        wx.Panel.__init__(self, parent, id=id)
        self.nb = wx.aui.AuiNotebook(self, style=wx.aui.AUI_NB_TAB_MOVE)
        sizer = wx.BoxSizer()
        sizer.Add(self.nb, 1, wx.EXPAND)
        self.SetSizer(sizer)
    def _pageindex(self, pagename):
        # return the index of the first tab whose label matches pagename,
        # or None if no such tab exists (shared by delete/select below)
        for i in range(0, self.nb.GetPageCount()):
            if self.nb.GetPageText(i) == pagename:
                return i
        return None
    def add(self, name="plot"):
        # append a new graph tab and return its figure for plotting into
        page = Plot(self.nb)
        self.nb.AddPage(page, name)
        return page.figure
    def deletepage(self, pagename):
        # remove the tab with the given label; silently no-op if absent
        i = self._pageindex(pagename)
        if i is not None:
            self.nb.DeletePage(i)
    def selectpage(self, pagename):
        # bring the tab with the given label to the front; no-op if absent
        i = self._pageindex(pagename)
        if i is not None:
            self.nb.SetSelection(i)
    def addtextpage(self, name):
        # append a text-only tab (used for the 'trends' page)
        page = TextPage(self.nb)
        self.nb.AddPage(page, name)
        return page
#
# we override the matplotlib toolbar class to remove the subplots function,
# which we do not use
#
class Toolbar(NavigationToolbar2Wx):
    # Customised matplotlib toolbar: removes the 'configure subplots'
    # button and adds two pan buttons that shift the x-axis one screen
    # width left or right.
    ON_CUSTOM_LEFT = wx.NewId()   # toolbar event id for the pan-left button
    ON_CUSTOM_RIGHT = wx.NewId()  # toolbar event id for the pan-right button
    # rather than copy and edit the whole (rather large) init function, we run
    # the super-classes init function as usual, then go back and delete the
    # button we don't want
    def __init__(self, plotCanvas):
        # position of the 'configure subplots' button in the stock toolbar
        CONFIGURE_SUBPLOTS_TOOLBAR_BTN_POSITION = 6
        NavigationToolbar2Wx.__init__(self, plotCanvas)
        # delete the toolbar button we don't want
        self.DeleteToolByPos(CONFIGURE_SUBPLOTS_TOOLBAR_BTN_POSITION)
        # add the new toolbar buttons that we do want
        self.AddSimpleTool(self.ON_CUSTOM_LEFT, _load_bitmap('stock_left.xpm'),
                           'Pan to the left', 'Pan graph to the left')
        wx.EVT_TOOL(self, self.ON_CUSTOM_LEFT, self._on_custom_pan_left)
        self.AddSimpleTool(self.ON_CUSTOM_RIGHT, _load_bitmap('stock_right.xpm'),
                           'Pan to the right', 'Pan graph to the right')
        wx.EVT_TOOL(self, self.ON_CUSTOM_RIGHT, self._on_custom_pan_right)
    # in theory this should never get called, because we delete the toolbar
    # button that calls it. but in case it does get called (e.g. if there
    # is a keyboard shortcut I don't know about) then we override the method
    # that gets called - to protect against the exceptions that it throws
    def configure_subplot(self, evt):
        print 'ERROR: This application does not support subplots'
    # pan the graph to the left
    def _on_custom_pan_left(self, evt):
        ONE_SCREEN = 7 # we default to 1 week
        axes = self.canvas.figure.axes[0]
        x1,x2 = axes.get_xlim()
        # actual pan distance is the currently-visible span, not the default
        ONE_SCREEN = x2 - x1
        axes.set_xlim(x1 - ONE_SCREEN, x2 - ONE_SCREEN)
        self.canvas.draw()
    # pan the graph to the right
    def _on_custom_pan_right(self, evt):
        ONE_SCREEN = 7 # we default to 1 week
        axes = self.canvas.figure.axes[0]
        x1,x2 = axes.get_xlim()
        # actual pan distance is the currently-visible span, not the default
        ONE_SCREEN = x2 - x1
        axes.set_xlim(x1 + ONE_SCREEN, x2 + ONE_SCREEN)
        self.canvas.draw()
#
# a GUI tab that we can write text to
#
# used to implement the 'trends' page in the GUI
#
# includes a helper function to update the text displayed on this page
#
class TextPage(wx.Panel):
    # A text-only notebook tab used for the 'trends' page: a bold heading
    # followed by six trend lines exposed as attributes trend1..trend6.
    #
    # UpdateTrendText replaces the text of one of the six trend labels.
    def __init__(self, parent, id = -1, dpi = None, **kwargs):
        wx.Panel.__init__(self, parent, id=id, **kwargs)
        # page heading
        self.text = wx.StaticText(self, -1, "Your CurrentCost data", wx.Point(30, 20))
        self.text.SetFont(wx.Font(13, wx.DEFAULT, wx.NORMAL, wx.BOLD))
        self.text.SetSize(self.text.GetBestSize())
        # six trend labels, spaced 40px apart starting at y=80; the first
        # carries an italic placeholder until real data arrives, the rest
        # start out blank
        for i in range(1, 7):
            if i == 1:
                label = "will be described here after data is received..."
                style = wx.ITALIC
            else:
                label = " "
                style = wx.NORMAL
            trend = wx.StaticText(self, -1, label, wx.Point(35, 40 + 40 * i))
            trend.SetFont(wx.Font(11, wx.DEFAULT, style, wx.NORMAL))
            trend.SetSize(trend.GetBestSize())
            # keep the trend1..trend6 attribute names used by callers
            setattr(self, 'trend%d' % i, trend)
        self.figure = mpl.figure.Figure(dpi=dpi, figsize=(2,2))
    def UpdateTrendText(self, trendnum, trendtext):
        # update one of the six trend labels; any trendnum outside 1..6
        # is ignored (matching the original if/elif chain's behaviour)
        if not 1 <= trendnum <= 6:
            return
        trend = getattr(self, 'trend%d' % trendnum)
        trend.SetLabel(trendtext)
        if trendnum == 1:
            # trend1 starts italic; switch to normal once real text arrives
            trend.SetFont(wx.Font(11, wx.DEFAULT, wx.NORMAL, wx.NORMAL))
            trend.SetSize(trend.GetBestSize())
| gpl-3.0 |
alvations/Sensible-SemEval | xgboost_ensemble.py | 2 | 2022 | import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
import xgboost as xgb
import operator
types = {'m1': np.dtype(float), 'm2': np.dtype(float), 'm3': np.dtype(float), 'm4': np.dtype(float),
'm5': np.dtype(float), 'target': np.dtype(float)}
train_valid = pd.read_csv("ensemble.train",dtype=types, delimiter=' ')
#etas = [0.01, 0.03, 0.05, 0.07, 0.10, 0.13, 0.15, 0.17, 0.16, 0.2]
params = {"objective": "binary:logistic",
"booster" : "gbtree",
"eta": 0.13,
"max_depth": 10,
"subsample": 0.9,
"colsample_bytree": 0.6,
"silent": 1,
"seed": 0,
'eval_metric': 'error'
}
num_boost_round = 200
features = ['m{}'.format(i) for i in range(1,6)]
X_train, X_valid = train_test_split(train_valid, test_size=0.20, random_state=10)
y_train = np.log1p(X_train.target)
y_valid = np.log1p(X_valid.target)
dtrain = xgb.DMatrix(X_train[features], y_train)
dvalid = xgb.DMatrix(X_valid[features], y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
gbm = xgb.train(params, dtrain, num_boost_round, evals=watchlist, \
early_stopping_rounds=100, verbose_eval=True)
#print("Validating")
yhat = gbm.predict(xgb.DMatrix(X_valid[features]))
testtypes = {'f1': np.dtype(float), 'f2': np.dtype(float), 'f3': np.dtype(float), 'f4': np.dtype(float),
'f5': np.dtype(float), 'f6': np.dtype(float), 'f7': np.dtype(float), 'f8': np.dtype(float),
'f9': np.dtype(float), 'f10': np.dtype(float), 'f11': np.dtype(float), 'f12': np.dtype(float),
'f13': np.dtype(float), 'f14': np.dtype(float), 'c1': np.dtype(str), 't_id':np.dtype(str)}
testtypes = {'m1': np.dtype(float), 'm2': np.dtype(float), 'm3': np.dtype(float), 'm4': np.dtype(float),
'm5': np.dtype(float)}
test = pd.read_csv("ensemble.test",dtype=testtypes, delimiter=' ')
dtest = xgb.DMatrix(test[features])
results = gbm.predict(dtest)
# Make Submission
for r in results:
print (int(r>=0.5))
| mit |
kdmurray91/scikit-bio | skbio/stats/ordination/tests/test_ordination_results.py | 1 | 12214 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as npt
import pandas as pd
from IPython.core.display import Image, SVG
from skbio import OrdinationResults
class TestOrdinationResults(unittest.TestCase):
    def setUp(self):
        """Build two OrdinationResults fixtures plus a metadata DataFrame."""
        # Define in-memory CA results to serialize and deserialize.
        eigvals = pd.Series([0.0961330159181, 0.0409418140138], ['CA1', 'CA2'])
        features = np.array([[0.408869425742, 0.0695518116298],
                             [-0.1153860437, -0.299767683538],
                             [-0.309967102571, 0.187391917117]])
        samples = np.array([[-0.848956053187, 0.882764759014],
                            [-0.220458650578, -1.34482000302],
                            [1.66697179591, 0.470324389808]])
        features_ids = ['Species1', 'Species2', 'Species3']
        sample_ids = ['Site1', 'Site2', 'Site3']
        samples_df = pd.DataFrame(samples, index=sample_ids,
                                  columns=['CA1', 'CA2'])
        features_df = pd.DataFrame(features, index=features_ids,
                                   columns=['CA1', 'CA2'])
        self.ordination_results = OrdinationResults(
            'CA', 'Correspondance Analysis', eigvals=eigvals,
            samples=samples_df, features=features_df)
        # DataFrame for testing plot method. Has a categorical column with a
        # mix of numbers and strings. Has a numeric column with a mix of ints,
        # floats, and strings that can be converted to floats. Has a numeric
        # column with missing data (np.nan).
        self.df = pd.DataFrame([['foo', '42', 10],
                                [22, 0, 8],
                                [22, -4.2, np.nan],
                                ['foo', '42.19', 11]],
                               index=['A', 'B', 'C', 'D'],
                               columns=['categorical', 'numeric', 'nancolumn'])
        # Minimal ordination results for easier testing of plotting method.
        # Paired with df above.
        eigvals = np.array([0.50, 0.25, 0.25])
        samples = np.array([[0.1, 0.2, 0.3],
                            [0.2, 0.3, 0.4],
                            [0.3, 0.4, 0.5],
                            [0.4, 0.5, 0.6]])
        samples_df = pd.DataFrame(samples, ['A', 'B', 'C', 'D'],
                                  ['PC1', 'PC2', 'PC3'])
        self.min_ord_results = OrdinationResults(
            'PCoA', 'Principal Coordinate Analysis', eigvals, samples_df)
    def test_str(self):
        """str() output: full CA fixture, then a minimal PCoA result."""
        exp = ("Ordination results:\n"
               "\tMethod: Correspondance Analysis (CA)\n"
               "\tEigvals: 2\n"
               "\tProportion explained: N/A\n"
               "\tFeatures: 3x2\n"
               "\tSamples: 3x2\n"
               "\tBiplot Scores: N/A\n"
               "\tSample constraints: N/A\n"
               "\tFeature IDs: 'Species1', 'Species2', 'Species3'\n"
               "\tSample IDs: 'Site1', 'Site2', 'Site3'")
        obs = str(self.ordination_results)
        self.assertEqual(obs, exp)
        # all optional attributes missing
        exp = ("Ordination results:\n"
               "\tMethod: Principal Coordinate Analysis (PCoA)\n"
               "\tEigvals: 1\n"
               "\tProportion explained: N/A\n"
               "\tFeatures: N/A\n"
               "\tSamples: 2x1\n"
               "\tBiplot Scores: N/A\n"
               "\tSample constraints: N/A\n"
               "\tFeature IDs: N/A\n"
               "\tSample IDs: 0, 1")
        samples_df = pd.DataFrame(np.array([[1], [2]]))
        obs = str(OrdinationResults('PCoA', 'Principal Coordinate Analysis',
                                    pd.Series(np.array([4.2])), samples_df))
        # compare line-by-line for a more readable diff on failure
        self.assertEqual(obs.split('\n'), exp.split('\n'))
    def check_basic_figure_sanity(self, fig, exp_num_subplots, exp_title,
                                  exp_legend_exists, exp_xlabel, exp_ylabel,
                                  exp_zlabel):
        """Shared assertions for figures produced by OrdinationResults.plot.

        Checks figure type, subplot count, title, absence of tick labels,
        legend presence, and the three axis labels of the first subplot.
        """
        # check type
        self.assertIsInstance(fig, mpl.figure.Figure)
        # check number of subplots
        axes = fig.get_axes()
        npt.assert_equal(len(axes), exp_num_subplots)
        # check title
        ax = axes[0]
        npt.assert_equal(ax.get_title(), exp_title)
        # shouldn't have tick labels
        for tick_label in (ax.get_xticklabels() + ax.get_yticklabels() +
                           ax.get_zticklabels()):
            npt.assert_equal(tick_label.get_text(), '')
        # check if legend is present
        legend = ax.get_legend()
        if exp_legend_exists:
            self.assertTrue(legend is not None)
        else:
            self.assertTrue(legend is None)
        # check axis labels
        npt.assert_equal(ax.get_xlabel(), exp_xlabel)
        npt.assert_equal(ax.get_ylabel(), exp_ylabel)
        npt.assert_equal(ax.get_zlabel(), exp_zlabel)
def test_plot_no_metadata(self):
    """Plotting without metadata yields a single bare subplot."""
    figure = self.min_ord_results.plot()
    self.check_basic_figure_sanity(figure, 1, '', False, '0', '1', '2')
def test_plot_with_numeric_metadata_and_plot_options(self):
    """Numeric metadata adds a colorbar (second subplot) and custom labels."""
    figure = self.min_ord_results.plot(
        self.df, 'numeric', axes=(1, 0, 2),
        axis_labels=['PC 2', 'PC 1', 'PC 3'], title='a title', cmap='Reds')
    self.check_basic_figure_sanity(figure, 2, 'a title', False,
                                   'PC 2', 'PC 1', 'PC 3')
def test_plot_with_categorical_metadata_and_plot_options(self):
    """Categorical metadata adds a legend instead of a colorbar."""
    figure = self.min_ord_results.plot(
        self.df, 'categorical', axes=[2, 0, 1], title='a title',
        cmap='Accent')
    self.check_basic_figure_sanity(figure, 1, 'a title', True,
                                   '2', '0', '1')
def test_plot_with_invalid_axis_labels(self):
    """A wrong number of axis labels raises an informative ValueError."""
    with self.assertRaisesRegex(ValueError, 'axis_labels.*4'):
        self.min_ord_results.plot(
            axes=[2, 0, 1], axis_labels=('a', 'b', 'c', 'd'))
def test_validate_plot_axes_valid_input(self):
    """Valid axes pass validation without raising.

    The validator returns nothing, so there is no result to inspect.
    """
    coord_matrix = self.min_ord_results.samples.values.T
    self.min_ord_results._validate_plot_axes(coord_matrix, (1, 2, 0))
def test_validate_plot_axes_invalid_input(self):
    """Each class of invalid axes input raises a descriptive ValueError.

    Regex patterns are raw strings: the previous plain literals contained
    ``\(`` / ``\[``, which are invalid escape sequences in Python string
    literals (a DeprecationWarning since Python 3.6, an error in newer
    versions).
    """
    # not enough dimensions in the coordinate matrix
    with self.assertRaisesRegex(ValueError, r'2 dimension\(s\)'):
        self.min_ord_results._validate_plot_axes(
            np.asarray([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]), (0, 1, 2))

    coord_matrix = self.min_ord_results.samples.values.T

    # wrong number of axes
    with self.assertRaisesRegex(ValueError, 'exactly three.*found 0'):
        self.min_ord_results._validate_plot_axes(coord_matrix, [])
    with self.assertRaisesRegex(ValueError, 'exactly three.*found 4'):
        self.min_ord_results._validate_plot_axes(coord_matrix,
                                                 (0, 1, 2, 3))

    # duplicate axes
    with self.assertRaisesRegex(ValueError, 'must be unique'):
        self.min_ord_results._validate_plot_axes(coord_matrix, (0, 1, 0))

    # out-of-range axes
    with self.assertRaisesRegex(ValueError, r'axes\[1\].*3'):
        self.min_ord_results._validate_plot_axes(coord_matrix, (0, -1, 2))
    with self.assertRaisesRegex(ValueError, r'axes\[2\].*3'):
        self.min_ord_results._validate_plot_axes(coord_matrix, (0, 2, 3))
def test_get_plot_point_colors_invalid_input(self):
    """Invalid df/column combinations raise ValueError.

    Uses ``self.assertRaises`` throughout for consistency with the rest of
    the test class (the first two checks previously used the equivalent
    ``npt.assert_raises``).
    """
    # column provided without df
    with self.assertRaises(ValueError):
        self.min_ord_results._get_plot_point_colors(None, 'numeric',
                                                    ['B', 'C'], 'jet')

    # df provided without column
    with self.assertRaises(ValueError):
        self.min_ord_results._get_plot_point_colors(self.df, None,
                                                    ['B', 'C'], 'jet')

    # column not in df
    with self.assertRaisesRegex(ValueError, 'missingcol'):
        self.min_ord_results._get_plot_point_colors(self.df, 'missingcol',
                                                    ['B', 'C'], 'jet')

    # id not in df
    with self.assertRaisesRegex(ValueError, 'numeric'):
        self.min_ord_results._get_plot_point_colors(
            self.df, 'numeric', ['B', 'C', 'missingid', 'A'], 'jet')

    # missing (NaN) data in df
    with self.assertRaisesRegex(ValueError, 'nancolumn'):
        self.min_ord_results._get_plot_point_colors(self.df, 'nancolumn',
                                                    ['B', 'C', 'A'], 'jet')
def test_get_plot_point_colors_no_df_or_column(self):
    """With neither df nor column, no colors are computed."""
    result = self.min_ord_results._get_plot_point_colors(None, None,
                                                         ['B', 'C'], 'jet')
    npt.assert_equal(result, (None, None))
def test_get_plot_point_colors_numeric_column(self):
    """Numeric metadata yields raw values and no category->color dict."""
    # subset of the ids in df
    values, color_dict = self.min_ord_results._get_plot_point_colors(
        self.df, 'numeric', ['B', 'C', 'A'], 'jet')
    npt.assert_almost_equal(values, [0.0, -4.2, 42.0])
    self.assertIsNone(color_dict)

    # all ids in df
    values, color_dict = self.min_ord_results._get_plot_point_colors(
        self.df, 'numeric', ['B', 'A', 'D', 'C'], 'jet')
    npt.assert_almost_equal(values, [0.0, 42.0, 42.19, -4.2])
    self.assertIsNone(color_dict)
def test_get_plot_point_colors_categorical_column(self):
    """Categorical metadata yields RGBA colors plus a category->color dict."""
    expected_color_dict = {
        'foo': [0.5, 0., 0., 1.],
        22: [0., 0., 0.5, 1.]
    }

    # subset of the ids in df
    colors, color_dict = self.min_ord_results._get_plot_point_colors(
        self.df, 'categorical', ['B', 'C', 'A'], 'jet')
    npt.assert_almost_equal(
        colors,
        [[0., 0., 0.5, 1.], [0., 0., 0.5, 1.], [0.5, 0., 0., 1.]])
    npt.assert_equal(color_dict, expected_color_dict)

    # all ids in df
    colors, color_dict = self.min_ord_results._get_plot_point_colors(
        self.df, 'categorical', ['B', 'A', 'D', 'C'], 'jet')
    npt.assert_almost_equal(
        colors,
        [[0., 0., 0.5, 1.], [0.5, 0., 0., 1.], [0.5, 0., 0., 1.],
         [0., 0., 0.5, 1.]])
    # should get the same color dict as before
    npt.assert_equal(color_dict, expected_color_dict)
def test_plot_categorical_legend(self):
    """_plot_categorical_legend attaches a legend with our labels/colors."""
    ax = plt.figure().add_subplot(111, projection='3d')

    # there should be no legend before the call
    self.assertIsNone(ax.get_legend())

    self.min_ord_results._plot_categorical_legend(
        ax, {'foo': 'red', 'bar': 'green'})

    # ... and one afterwards
    legend = ax.get_legend()
    self.assertIsNotNone(legend)

    # light sanity checking that our input labels and colors are present;
    # sorted comparison because legend entry order is unspecified
    labels = sorted(t.get_text() for t in legend.get_texts())
    npt.assert_equal(labels, ['bar', 'foo'])
    colors = sorted(line.get_color() for line in legend.get_lines())
    npt.assert_equal(colors, ['green', 'red'])
def test_repr_png(self):
    """_repr_png_ returns non-empty PNG bytes."""
    png_bytes = self.min_ord_results._repr_png_()
    self.assertIsInstance(png_bytes, bytes)
    self.assertGreater(len(png_bytes), 0)
def test_repr_svg(self):
    """_repr_svg_ returns a non-empty SVG string."""
    svg_text = self.min_ord_results._repr_svg_()
    self.assertIsInstance(svg_text, str)
    self.assertGreater(len(svg_text), 0)
def test_png(self):
    """The png property wraps the rendered figure in an Image object."""
    self.assertIsInstance(self.min_ord_results.png, Image)
def test_svg(self):
    """The svg property wraps the rendered figure in an SVG object."""
    self.assertIsInstance(self.min_ord_results.svg, SVG)
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| bsd-3-clause |
synergetics/nest | examples/nest/plot_tsodyks_depr_fac.py | 13 | 1130 | # -*- coding: utf-8 -*-
#
# plot_tsodyks_depr_fac.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from scipy import *
from matplotlib.pylab import *
from matplotlib.mlab import *
def plot_spikes():
    """Plot the voltmeter trace from 'voltmeter-4-0.dat' and save it as PNG.

    The file's first column is interpreted as time (ms) and the second as
    the membrane potential (mV), per the axis labels below.
    """
    # NOTE(review): ``load`` comes from the wildcard matplotlib.mlab import
    # above; it was removed from modern matplotlib -- ``numpy.loadtxt`` is
    # the drop-in replacement.  Confirm before upgrading matplotlib.
    vm = load('voltmeter-4-0.dat')

    # The previously-defined locals dt, nbins and N were never used and
    # have been removed.
    figure(1)
    clf()
    plot(vm[:, 0], vm[:, 1], 'r')
    xlabel('time / ms')
    ylabel('$V_m [mV]$')
    savefig('test_tsodyks_depressing.png')
# Script entry: render the figure (also written to PNG) and open an
# interactive window.
plot_spikes()
show()
| gpl-2.0 |
mdboom/astropy-helpers | astropy_helpers/sphinx/conf.py | 1 | 10879 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy shared Sphinx settings. These settings are shared between
# astropy itself and affiliated packages.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import re
import warnings
from os import path
from distutils.version import LooseVersion
from ..compat import subprocess
# -- General configuration ----------------------------------------------------
# Some of the docs require the autodoc special-members option, in 1.1.
# If using graphviz 2.30 or later, Sphinx < 1.2b2 will not work with
# it. Unfortunately, there are other problems with Sphinx 1.2b2, so
# we need to use "dev" until a release is made post 1.2b2. If
# affiliated packages don't want this automatic determination, they
# may simply override needs_sphinx in their local conf.py.
def get_graphviz_version():
    """Return the installed graphviz ``dot`` version as a string.

    Returns '0' when ``dot`` is unavailable or its version cannot be
    parsed, so callers can always feed the result to ``LooseVersion``.
    """
    try:
        # Pass the command as a single string: with ``shell=True`` a list
        # argument hands only its first element to the shell on POSIX,
        # silently dropping '-V'.
        output = subprocess.check_output(
            'dot -V', stdin=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=True)
    except (OSError, subprocess.CalledProcessError):
        # OSError covers a missing shell/executable as well.
        return '0'
    tokens = output.split()
    for token in tokens:
        if re.match(b'[0-9.]+', token):
            return token.decode('ascii')
    return '0'
# Decide the minimum Sphinx version based on the installed graphviz
# (see the compatibility note above).
graphviz_found = LooseVersion(get_graphviz_version())
# NOTE(review): the '0.30' threshold looks inconsistent with the
# "graphviz 2.30" cited in the comment above -- as written, virtually any
# installed graphviz triggers the Sphinx 1.2 requirement.  Confirm the
# intended threshold.
graphviz_broken = LooseVersion('0.30')
if graphviz_found >= graphviz_broken:
    needs_sphinx = '1.2'
else:
    needs_sphinx = '1.1'
# Configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.sourceforge.net/', None),
'astropy': ('http://docs.astropy.org/en/stable/', None),
'h5py': ('http://docs.h5py.org/en/latest/', None)
}
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The reST default role (used for this markup: `text`) to use for all
# documents. Set to the "smart" one.
default_role = 'obj'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog = """
.. _Astropy: http://astropy.org
"""
# -- Project information ------------------------------------------------------
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Settings for extensions and extension options ----------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.inheritance_diagram',
'astropy_helpers.sphinx.ext.numpydoc',
'astropy_helpers.sphinx.ext.astropyautosummary',
'astropy_helpers.sphinx.ext.automodsumm',
'astropy_helpers.sphinx.ext.automodapi',
'astropy_helpers.sphinx.ext.tocdepthfix',
'astropy_helpers.sphinx.ext.doctest',
'astropy_helpers.sphinx.ext.changelog_links',
'astropy_helpers.sphinx.ext.viewcode', # Use patched version of viewcode
'astropy_helpers.sphinx.ext.smart_resolver'
]
# Above, we use a patched version of viewcode rather than 'sphinx.ext.viewcode'
# This can be changed to the sphinx version once the following issue is fixed
# in sphinx:
# https://bitbucket.org/birkenfeld/sphinx/issue/623/
# extension-viewcode-fails-with-function
# Enable matplotlib's inline plot directive when it can be imported.
try:
    import matplotlib.sphinxext.plot_directive
    extensions += [matplotlib.sphinxext.plot_directive.__name__]
# AttributeError is checked here in case matplotlib is installed but
# Sphinx isn't. Note that this module is imported by the config file
# generator, even if we're not building the docs.
except (ImportError, AttributeError):
    warnings.warn(
        "matplotlib's plot_directive could not be imported. " +
        "Inline plots will not be included in the output")
# Don't show summaries of the members in each class along with the
# class' docstring
numpydoc_show_class_members = False
autosummary_generate = True
automodapi_toctreedirnm = 'api'
# Class documentation should contain *both* the class docstring and
# the __init__ docstring
autoclass_content = "both"
# -- Options for HTML output -------------------------------------------------
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [path.abspath(path.join(path.dirname(__file__), 'themes'))]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap-astropy'
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html'],
'search': [],
'genindex': [],
'py-modindex': [],
}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# included in the bootstrap-astropy theme
html_favicon = path.join(html_theme_path[0], html_theme, 'static',
'astropy_logo.ico')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d %b %Y'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
% Use a more modern-looking monospace font
\usepackage{inconsolata}
% The enumitem package provides unlimited nesting of lists and enums.
% Sphinx may use this in the future, in which case this can be removed.
% See https://bitbucket.org/birkenfeld/sphinx/issue/777/latex-output-too-deeply-nested
\usepackage{enumitem}
\setlistdepth{15}
% In the parameters section, place a newline after the Parameters
% header. (This is stolen directly from Numpy's conf.py, since it
% affects Numpy-style docstrings).
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Support the superscript Unicode numbers used by the "unicode" units
% formatter
\DeclareUnicodeCharacter{2070}{\ensuremath{^0}}
\DeclareUnicodeCharacter{00B9}{\ensuremath{^1}}
\DeclareUnicodeCharacter{00B2}{\ensuremath{^2}}
\DeclareUnicodeCharacter{00B3}{\ensuremath{^3}}
\DeclareUnicodeCharacter{2074}{\ensuremath{^4}}
\DeclareUnicodeCharacter{2075}{\ensuremath{^5}}
\DeclareUnicodeCharacter{2076}{\ensuremath{^6}}
\DeclareUnicodeCharacter{2077}{\ensuremath{^7}}
\DeclareUnicodeCharacter{2078}{\ensuremath{^8}}
\DeclareUnicodeCharacter{2079}{\ensuremath{^9}}
\DeclareUnicodeCharacter{207B}{\ensuremath{^-}}
\DeclareUnicodeCharacter{00B0}{\ensuremath{^{\circ}}}
\DeclareUnicodeCharacter{2032}{\ensuremath{^{\prime}}}
\DeclareUnicodeCharacter{2033}{\ensuremath{^{\prime\prime}}}
% Make the "warning" and "notes" sections use a sans-serif font to
% make them stand out more.
\renewenvironment{notice}[2]{
\def\py@noticetype{#1}
\csname py@noticestart@#1\endcsname
\textsf{\textbf{#2}}
}{\csname py@noticeend@\py@noticetype\endcsname}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# -- Options for the linkcheck builder ----------------------------------------
# A timeout value, in seconds, for the linkcheck builder
linkcheck_timeout = 60
| bsd-3-clause |
effa/flocs | analysis/taskInstance/flow_on_time.py | 3 | 1278 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# TODO: Create infrastructure for analysis and describe it on our wiki.
def show_flow_on_time_plot(name, show, store):
    """Load attempt data for ``name`` and plot reported flow vs. time spent.

    Parameters
    ----------
    name : str
        Base name of the CSV file under ``data/`` (without extension);
        also used for the output PDF name.
    show : bool
        If True, display the figure interactively.
    store : bool
        If True, save the figure as ``plots/<name>_flow_on_time.pdf``.
    """
    data = pd.read_csv('data/{name}.csv'.format(name=name))
    plot_practice_session(data)
    if store:
        plt.savefig('plots/{name}_flow_on_time.pdf'.format(name=name))
    if show:
        plt.show()
def plot_practice_session(data):
    """
    Plot a practice session (just plot, don't show or store it).

    Args:
        data: dataframe of attempts; must contain the columns
            'reported_flow' (0 = no report, 1-3 = flow level) and
            'time_spent'.
    """
    # drop attempts without flow report
    data = data[data.reported_flow != 0]
    # project only two interesting columns
    data = data.loc[:, ['reported_flow', 'time_spent']]
    data.plot(kind='scatter', x='time_spent', y='reported_flow')
    # plot style
    # NOTE(review): style.use() after plotting may not restyle artists that
    # are already drawn -- consider calling it before data.plot().
    plt.style.use('ggplot')
    plt.ylabel('Report of the flow')
    plt.xlabel('Time spent while solving the task')
    plt.yticks([1,2,3], ('difficult', 'right', 'easy'))
    plt.ylim(0.5,3.5)
    plt.xlim(0,700)
    # NOTE(review): 'normal' is not a valid matplotlib font *family* and
    # triggers a font-lookup warning; 'sans-serif' is probably intended.
    font = {'family' : 'normal',
            'weight' : 'bold',
            'size' : 10}
    plt.rc('font', **font)
if __name__ == '__main__':
    # Example invocation: store the plot for this dataset (store=True)
    # without displaying it (show=False).
    show_flow_on_time_plot('TaskInstanceModel-2015-12-15', False, True)
| gpl-2.0 |
Jimmy-Morzaria/scikit-learn | examples/svm/plot_custom_kernel.py | 115 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
Y = iris.target  # class labels for the 3 iris species
def my_kernel(x, y):
    """Custom linear kernel weighting the first feature twice as much.

    Computes::

                   (2 0)
        k(x, y) = x(   )y.T
                   (0 1)
    """
    weights = np.array([[2, 0], [0, 1.0]])
    return x.dot(weights).dot(y.T)
h = .02  # step size in the mesh

# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
          ' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
GuillaumeArruda/INF4705 | TP2/Python/Plot/Plot/Plot.py | 1 | 1476 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import functools
import csv
import scipy.optimize
import numpy
def main():
    """Fit a plane to (x, y, f) timing data from 'd.csv' and plot it in 3D.

    Reads comma-separated triples, least-squares fits the plane
    f = a*x + b*y + c via scipy.optimize.minimize on the squared error,
    then shows the data points together with the fitted surface.
    """
    fxys = []
    xs = []
    ys = []
    # d.csv rows: x, y, f(x, y).  The axis labels below say "Log ...",
    # so the values are presumably already log-scaled -- TODO confirm.
    with open('d.csv', newline='') as file:
        reader = csv.reader(file, delimiter=',')
        for x, y, fxy in reader:
            fxys.append(float(fxy))
            xs.append(float(x))
            ys.append(float(y))
    points = []
    for x, y, f in zip(xs, ys, fxys):
        points.append((x, y, f))
    params0 = [1, 1, 1]  # initial guess for (a, b, c)
    fun = functools.partial(error, points=points)
    res = scipy.optimize.minimize(fun, params0)
    # evaluate the fitted plane on a coarse grid for the surface plot
    xx, yy = numpy.meshgrid(range(0, 5), range(0, 6))
    z_plane = []
    for x, y in zip(xx, yy):
        z_plane.append(plane(x, y, res.x))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlabel("Log du nombre de locations")
    ax.set_ylabel("Log de la prodcution maximale de poulet")
    ax.set_zlabel("Log du temps de calcul (s)")
    ax.scatter(xs, ys, zs=fxys)
    ax.plot_surface(xx, yy, z_plane, color='g', alpha = 0.2)
    print(res)
    plt.title("Test de puissance de l'algorithme dynamique")
    plt.show()
def plane(x, y, params):
    """Evaluate the plane z = a*x + b*y + c at (x, y) for params = (a, b, c)."""
    slope_x, slope_y, intercept = params
    return slope_x * x + slope_y * y + intercept
def error(params, points):
    """Return the sum of squared residuals of the plane fit.

    Parameters
    ----------
    params : sequence of 3 floats
        Plane coefficients (a, b, c) passed through to ``plane``.
    points : iterable of (x, y, z) triples
        Observed data points.
    """
    # The previous version took abs() of the residual before squaring,
    # which is redundant: squaring already discards the sign.
    return sum((plane(x, y, params) - z) ** 2 for x, y, z in points)
if __name__ == "__main__":
main() | bsd-3-clause |
robin-lai/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
# Explicit public API of this package; ``from sklearn.decomposition
# import *`` exposes exactly these names.
__all__ = ['DictionaryLearning',
           'FastICA',
           'IncrementalPCA',
           'KernelPCA',
           'MiniBatchDictionaryLearning',
           'MiniBatchSparsePCA',
           'NMF',
           'PCA',
           'ProjectedGradientNMF',
           'RandomizedPCA',
           'SparseCoder',
           'SparsePCA',
           'dict_learning',
           'dict_learning_online',
           'fastica',
           'randomized_svd',
           'sparse_encode',
           'FactorAnalysis',
           'TruncatedSVD',
           'LatentDirichletAllocation']
| bsd-3-clause |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/dask/dataframe/io/json.py | 5 | 6650 | from __future__ import absolute_import
import io
import pandas as pd
from dask.bytes import open_files, read_bytes
import dask
def to_json(df, url_path, orient='records', lines=None, storage_options=None,
            compute=True, encoding='utf-8', errors='strict',
            compression=None, **kwargs):
    """Write dataframe into JSON text files

    This utilises ``pandas.DataFrame.to_json()``, and most parameters are
    passed through - see its docstring.

    Differences: orient is 'records' by default, with lines=True; this
    produces the kind of JSON output that is most common in big-data
    applications, and which can be chunked when reading (see ``read_json()``).

    Parameters
    ----------
    df: dask.DataFrame
        Data to save
    url_path: str, list of str
        Location to write to. If a string, and there are more than one
        partitions in df, should include a glob character to expand into a
        set of file names, or provide a ``name_function=`` parameter.
        Supports protocol specifications such as ``"s3://"``.
    encoding, errors:
        The text encoding to implement, e.g., "utf-8" and how to respond
        to errors in the conversion (see ``str.encode()``).
    orient, lines, kwargs
        passed to pandas; if not specified, lines=True when orient='records',
        False otherwise.
    storage_options: dict
        Passed to backend file-system implementation
    compute: bool
        If true, immediately executes. If False, returns a set of delayed
        objects, which can be computed at a later time.
    compression : string or None
        String like 'gzip' or 'xz'.
    """
    if lines is None:
        lines = orient == 'records'
    if orient != 'records' and lines:
        # BUG FIX: the two fragments previously concatenated without a
        # space, producing "...available withorient...".
        raise ValueError('Line-delimited JSON is only available with '
                         'orient="records".')
    kwargs['orient'] = orient
    kwargs['lines'] = lines and orient == 'records'
    outfiles = open_files(
        url_path, 'wt', encoding=encoding,
        errors=errors,
        name_function=kwargs.pop('name_function', None),
        num=df.npartitions,
        compression=compression,
        **(storage_options or {})
    )
    # one delayed write task per (file, partition) pair
    parts = [dask.delayed(write_json_partition)(d, outfile, kwargs)
             for outfile, d in zip(outfiles, df.to_delayed())]
    if compute:
        dask.compute(parts)
        return [f.path for f in outfiles]
    else:
        return parts
def write_json_partition(df, openfile, kwargs):
    """Serialize one dataframe partition into an open-file context manager."""
    with openfile as destination:
        df.to_json(destination, **kwargs)
def read_json(url_path, orient='records', lines=None, storage_options=None,
              blocksize=None, sample=2**20, encoding='utf-8', errors='strict',
              compression='infer', **kwargs):
    """Create a dataframe from a set of JSON files

    This utilises ``pandas.read_json()``, and most parameters are
    passed through - see its docstring.

    Differences: orient is 'records' by default, with lines=True; this
    is appropriate for line-delimited "JSON-lines" data, the kind of JSON output
    that is most common in big-data scenarios, and which can be chunked when
    reading (see ``read_json()``). All other options require blocksize=None,
    i.e., one partition per input file.

    Parameters
    ----------
    url_path: str, list of str
        Location to read from. If a string, can include a glob character to
        find a set of file names.
        Supports protocol specifications such as ``"s3://"``.
    encoding, errors:
        The text encoding to implement, e.g., "utf-8" and how to respond
        to errors in the conversion (see ``str.encode()``).
    orient, lines, kwargs
        passed to pandas; if not specified, lines=True when orient='records',
        False otherwise.
    storage_options: dict
        Passed to backend file-system implementation
    blocksize: None or int
        If None, files are not blocked, and you get one partition per input
        file. If int, which can only be used for line-delimited JSON files,
        each partition will be approximately this size in bytes, to the nearest
        newline character.
    sample: int
        Number of bytes to pre-load, to provide an empty dataframe structure
        to any blocks without data. Only relevant if using blocksize.
    compression : string or None
        String like 'gzip' or 'xz'.

    Returns
    -------
    dask.DataFrame

    Examples
    --------
    Load single file

    >>> dd.read_json('myfile.1.json')  # doctest: +SKIP

    Load multiple files

    >>> dd.read_json('myfile.*.json')  # doctest: +SKIP

    >>> dd.read_json(['myfile.1.json', 'myfile.2.json'])  # doctest: +SKIP

    Load large line-delimited JSON files using partitions of approx
    256MB size

    >> dd.read_json('data/file*.csv', blocksize=2**28)
    """
    import dask.dataframe as dd
    if lines is None:
        lines = orient == 'records'
    if orient != 'records' and lines:
        # BUG FIX: missing space between the concatenated fragments
        # ("...available withorient...").
        raise ValueError('Line-delimited JSON is only available with '
                         'orient="records".')
    if blocksize and (orient != 'records' or not lines):
        # BUG FIX: missing space ("...JSON-linesinput...").
        raise ValueError("JSON file chunking only allowed for JSON-lines "
                         "input (orient='records', lines=True).")
    storage_options = storage_options or {}
    if blocksize:
        first, chunks = read_bytes(url_path, b'\n', blocksize=blocksize,
                                   sample=sample, compression=compression,
                                   **storage_options)
        chunks = list(dask.core.flatten(chunks))
        # parse the sample eagerly so its empty slice can serve as meta
        # (column structure) for blocks that turn out to contain no rows
        first = read_json_chunk(first, encoding, errors, kwargs)
        parts = [dask.delayed(read_json_chunk)(
            chunk, encoding, errors, kwargs, meta=first[:0]
        ) for chunk in chunks]
    else:
        files = open_files(url_path, 'rt', encoding=encoding, errors=errors,
                           compression=compression, **storage_options)
        parts = [dask.delayed(read_json_file)(f, orient, lines, kwargs)
                 for f in files]
    return dd.from_delayed(parts)
def read_json_chunk(chunk, encoding, errors, kwargs, meta=None):
    """Parse one newline-delimited JSON byte chunk into a DataFrame.

    If the chunk parses to an empty frame and ``meta`` is given, ``meta``
    is returned instead so the column structure is preserved.
    """
    text = io.StringIO(chunk.decode(encoding, errors))
    text.seek(0)
    frame = pd.read_json(text, orient='records', lines=True, **kwargs)
    if meta is not None and frame.empty:
        return meta
    return frame
def read_json_file(f, orient, lines, kwargs):
    """Read one whole JSON file (an open-file context manager) into a DataFrame."""
    with f as handle:
        return pd.read_json(handle, orient=orient, lines=lines, **kwargs)
| gpl-3.0 |
kemerelab/NeuroHMM | helpers/hc3.py | 1 | 7529 | # hc3.py
# helper functions to load data from CRCNS hc-3 repository
import os.path
import pandas as pd
import numpy as np
import re
from mymap import Map
def get_num_electrodes(sessiondir):
    """Count electrode (shank) files in a session directory.

    Each electrode produces one ``*.clu.N`` file in the hc-3 layout, so
    the number of such files is the number of electrodes.

    Parameters
    ----------
    sessiondir : str
        Path to the session directory.

    Returns
    -------
    int
        Number of ``.clu.N`` files found (> 0).

    Raises
    ------
    ValueError
        If no ``.clu.N`` file is present.
    """
    # Compile once (raw string: '\.' is an invalid escape in a plain
    # literal) instead of re-compiling per file, and test the match
    # directly rather than catching AttributeError from .group() with a
    # bare except.
    clu_pattern = re.compile(r'\.clu\.[0-9]+$')
    numelec = sum(
        1 for f in os.listdir(sessiondir)
        if os.path.isfile(os.path.join(sessiondir, f)) and clu_pattern.search(f))
    if numelec > 0:
        return numelec
    raise ValueError('number of electrodes (shanks) could not be established...')
#datatype = ['spikes', 'eeg', 'pos', '?']
def load_data(fileroot, animal='gor01', year=2006, month=6, day=7, session='11-26-53', datatype='spikes', channels='all', fs=32552,starttime=0, verbose=False, includeUnsortedSpikes=False):
    """Load one recording session from a CRCNS hc-3 style directory tree.

    Parameters
    ----------
    fileroot : str
        Root directory containing ``<animal>-<month>-<day>`` directories.
    animal, year, month, day, session :
        Identify the session; directory and file names are built from them.
    datatype : str
        One of 'spikes' (``.clu``/``.res`` cluster files), 'eeg'
        (``.eeg`` binary file) or 'pos' (``.whl`` position file).
    channels : 'all' or list of int
        EEG channels to return; only used for datatype='eeg'.
    fs : int
        Sampling rate stored in the returned structure.
        # NOTE(review): default 32552 -- presumably the hc-3 wideband
        # rate; confirm against the dataset documentation.
    starttime : number
        Stored in the returned EEG structure; not otherwise used.
    verbose : bool
        Print progress information while loading.
    includeUnsortedSpikes : bool
        If True, cluster-1 (unsortable but not noise) spikes are kept.

    Returns
    -------
    Map for 'spikes'/'eeg', pandas.DataFrame for 'pos'.

    Raises
    ------
    ValueError
        If ``datatype`` is not one of the handled values.
    """
    fileroot = os.path.normpath(fileroot)
    anim_prefix = "{}-{}-{}".format(animal,month,day)
    session_prefix = "{}-{}-{}_{}".format(year,month,day,session)
    sessiondir = "{}/{}/{}".format(fileroot, anim_prefix, session_prefix)
    if (datatype=='spikes'):
        # NOTE: st_array[0] always corresponds to unsortable spikes (not mechanical noise). However, when includeUnsortedSpikes==True, then it gets populated
        # with spike times; else, it just remains an empty list []
        #filename = "{}/{}/{}/{}.clu.1".format(fileroot, anim_prefix, session_prefix, session_prefix)
        filename = "{}/{}/{}/{}".format(fileroot, anim_prefix, session_prefix, session_prefix)
        #print(filename)
        if verbose:
            print("Loading data for session in directory '{}'...".format(sessiondir))
        num_elec = get_num_electrodes(sessiondir)
        if verbose:
            print('Number of electrode (.clu) files found:', num_elec)
        st_array = []
        # note: using pandas.read_table is orders of magnitude faster here than using numpy.loadtxt
        for ele in np.arange(num_elec):
            #%time dt1a = np.loadtxt( base_filename1 + '.clu.' + str(ele + 1), skiprows=1,dtype=int)
            eudf = pd.read_table( filename + '.clu.' + str(ele + 1), header=None, names='u' ) # read unit numbers within electrode
            tsdf = pd.read_table( filename + '.res.' + str(ele + 1), header=None, names='t' ) # read sample numbers for spikes
            # first entry of the .clu file is the number of clusters on this electrode
            max_units = eudf.u.values[0]
            eu = eudf.u.values[1:]
            ts = tsdf.t.values
            # discard units labeled as '0' or '1', as these correspond to mechanical noise and unsortable units
            ts = ts[eu!=0]
            eu = eu[eu!=0]
            if not includeUnsortedSpikes:
                ts = ts[eu!=1]
                eu = eu[eu!=1]
            # clusters 2..max_units-1 are the sorted units
            for uu in np.arange(max_units-2):
                st_array.append(ts[eu==uu+2])
            if includeUnsortedSpikes:
                # NOTE(review): st_array[0] is the first *sorted* unit
                # appended above, so cluster-1 spikes from every electrode
                # are merged into it rather than into a dedicated unit-0
                # slot -- this contradicts the NOTE at the top of this
                # branch; verify the intended behavior.
                st_array[0] = np.append(st_array[0], ts[eu==1]) # unit 0 now corresponds to unsortable spikes
        if verbose:
            print('Spike times (in sample numbers) for a total of {} units were read successfully...'.format(len(st_array)))
        # make sure that spike times are sorted! (this is not true for unit 0 of the hc-3 dataset, for example):
        for unit, spikes in enumerate(st_array):
            st_array[unit] = np.sort(spikes)
        spikes = Map()
        spikes['data'] = st_array
        spikes['num_electrodes'] = num_elec
        spikes['num_units'] = len(st_array)
        spikes['samprate'] = fs
        spikes['session'] = session_prefix
        return spikes
        ## continue from here... we want to keep cells that are inactive in some, but not all environments...
        # hence when extracting info, we must take all sessions in a recording day into account, and not just a specific recording session
    elif (datatype=='eeg'):
        filename = "{}/{}/{}/{}.eeg".format(fileroot, anim_prefix, session_prefix, session_prefix)
        if verbose:
            print("Loading EEG data from file '{}'".format(filename))
        num_elec = get_num_electrodes(sessiondir)
        # 8 channels per electrode (shank)
        num_channels = num_elec*8
        if channels=='all':
            channels = list(range(0,num_channels))
        if verbose:
            print('Number of electrode (.clu) files found: {}, with a total of {} channels'.format(num_elec, num_channels))
        # interleaved int16 samples, one field per channel
        dtype = np.dtype([(('ch' + str(ii)), 'i2') for ii in range(num_channels) ])
        # read eeg data:
        try:
            eegdata = np.fromfile(filename, dtype=dtype, count=-1)
        except:
            # NOTE(review): ``sys`` is never imported in this module, so
            # this handler would itself raise NameError; add ``import sys``
            # or drop the bare except.
            print( "Unexpected error:", sys.exc_info()[0] )
            raise
        num_records = len(eegdata)
        if verbose:
            print("Successfully read {} samples for each of the {} channel(s).".format(num_records, len(channels)))
        data_arr = eegdata.astype(dtype).view('i2')
        data_arr = data_arr.reshape(num_records,num_channels)
        eeg = Map()
        eeg['data'] = data_arr[:,channels]
        eeg['channels'] = channels
        eeg['samprate'] = fs
        eeg['starttime'] = starttime
        eeg['session'] = session_prefix
        return eeg
    elif (datatype=='pos'):
        filename = "{}/{}/{}/{}.whl".format(fileroot, anim_prefix, session_prefix, session_prefix)
        print("reading position data from '{}'".format(filename))
        # two tracked LEDs: (x1, y1) and (x2, y2) per row
        dfwhl = pd.read_table(filename,sep='\t', skiprows=0, names=['x1', 'y1', 'x2', 'y2'] )
        return dfwhl
    else:
        raise ValueError('datatype is not handled')
def get_recording_days_for_animal(fileroot, animal):
    """Return the names of recording-day directories for one animal.

    A recording day is any subdirectory of `fileroot` whose name starts
    with the animal prefix (e.g. 'gor01-6-7' for animal 'gor01').
    """
    # `and` (instead of the original bitwise `&`) short-circuits, and
    # str.startswith replaces the manual slice comparison name[0:len(animal)].
    return [name for name in os.listdir(fileroot)
            if os.path.isdir(os.path.join(fileroot, name))
            and name.startswith(animal)]
def get_sessions_for_recording_day(fileroot, day):
    """List the session subdirectories of one recording-day directory."""
    daydir = os.path.join(fileroot, day)
    sessions = []
    for entry in os.listdir(daydir):
        if os.path.isdir(os.path.join(daydir, entry)):
            sessions.append(entry)
    return sessions
def get_sessions(fileroot, animal='gor01', verbose=True):
    """Collect all recording sessions for an animal into a DataFrame.

    Walks ``fileroot/<animal>-mm-dd/<session>/`` directories, reads each
    session's ``.info`` file for a ``task=<description>`` line, and returns
    a DataFrame with columns ('animal', 'day', 'session', 'task').

    Parameters
    ----------
    fileroot : str
        Root directory containing the per-day recording directories.
    animal : str, optional
        Animal-name prefix of the day directories (default 'gor01').
    verbose : bool, optional
        Print progress, warnings, and the final DataFrame.

    Returns
    -------
    pandas.DataFrame
        One row per session found; empty (but with the columns above) if
        no sessions are found.
    """
    fileroot = os.path.normpath(fileroot)
    if verbose:
        print("reading recording sessions for animal '{}' in directory '{}'...\n".format(animal, fileroot))
    # Accumulate plain-dict rows and build the DataFrame once at the end:
    # DataFrame.append in a loop is deprecated (removed in pandas 2.0) and
    # quadratic in the number of rows.
    rows = []
    for day in get_recording_days_for_animal(fileroot, animal):
        mm, dd = day.split('-')[1:]
        anim_prefix = "{}-{}-{}".format(animal, mm, dd)
        shortday = '-'.join([mm, dd])
        for session in get_sessions_for_recording_day(fileroot, day):
            infofile = "{}/{}/{}/{}.info".format(fileroot, anim_prefix, session, session)
            descr = ''
            try:
                with open(infofile, 'r') as f:
                    line = f.read()
                # Only a leading "task = <descr>" assignment is recognized.
                if line.split('=')[0].strip() == 'task':
                    descr = line.split('=')[-1].strip()
                if descr == '' and verbose:
                    print('Warning! Session type could not be established...')
            except IOError as e:
                # Missing/unreadable .info file: keep the session, empty task.
                print("I/O error({0}): {1}".format(e.errno, e.strerror))
            except ValueError:
                print("Could not convert data to an integer.")
            except Exception:
                # Anything else is unexpected; report and re-raise.
                print("Unexpected error:", sys.exc_info()[0])
                raise
            # Session directories are named like '2006-6-7_11-26-53';
            # keep only the hh-mm-ss part.
            session_hhmmss = session.split('_')[-1]
            rows.append({'animal': animal, 'day': shortday,
                         'session': session_hhmmss, 'task': descr})
    sessiondf = pd.DataFrame(rows, columns=('animal', 'day', 'session', 'task'))
    if verbose:
        print(sessiondf)
    return sessiondf
| mit |
thientu/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses the only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)


# Code source: Jaques Grobler
# License: BSD 3 clause


import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model

# Load the diabetes dataset and keep a single feature (column 2) so the
# fit can be visualized as a line in the plane.
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]

# Hold out the last 20 samples as a test set.
diabetes_X_train, diabetes_X_test = diabetes_X[:-20], diabetes_X[-20:]
diabetes_y_train, diabetes_y_test = diabetes.target[:-20], diabetes.target[-20:]

# Fit ordinary least squares on the training split.
regr = linear_model.LinearRegression()
regr.fit(diabetes_X_train, diabetes_y_train)

# Report the learned coefficients and goodness-of-fit figures.
print('Coefficients: \n', regr.coef_)
print("Residual sum of squares: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))

# Plot the held-out points together with the fitted regression line.
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
         linewidth=3)

plt.xticks(())
plt.yticks(())

plt.show()
| bsd-3-clause |
Lawrence-Liu/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets

n_neighbors = 15

# Load the iris data, keeping only the first two features so the decision
# surface can be drawn in the plane.
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target

h = .02  # step size in the mesh

# One light colormap for the decision regions, one bold one for the points.
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])

for weights in ['uniform', 'distance']:
    # Fit a k-NN classifier with the chosen weighting scheme.
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
    clf.fit(X, y)

    # Evaluate the classifier on a dense grid covering the data (plus a
    # one-unit margin) to obtain the decision boundary.
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

    # Color the regions, then overlay the training points.
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title("3-Class classification (k = %i, weights = '%s')"
              % (n_neighbors, weights))

plt.show()
| bsd-3-clause |
mattjj/pyhawkes | test/test_sbm_mf.py | 2 | 3644 | import copy
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import adjusted_mutual_info_score, adjusted_rand_score
from pyhawkes.models import DiscreteTimeNetworkHawkesModelSpikeAndSlab, \
DiscreteTimeNetworkHawkesModelGammaMixture
from pyhawkes.plotting.plotting import plot_network
def test_sbm_mf(seed=None):
"""
Create a discrete time Hawkes model and generate from it.
:return:
"""
if seed is None:
seed = np.random.randint(2**32)
print "Setting seed to ", seed
np.random.seed(seed)
C = 5
K = 100
T = 1000
dt = 1.0
B = 3
p = 0.4 * np.eye(C) + (0.05) * (1-np.eye(C))
# Generate from a true model
network_hypers = {'C': C, 'beta': 1.0/K, 'p': p}
true_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(K, dt=dt, B=B,
network_hypers=network_hypers)
c = true_model.network.c
perm = np.argsort(c)
#
# Plot the true network
plt.ion()
plot_network(true_model.weight_model.A[np.ix_(perm, perm)],
true_model.weight_model.W[np.ix_(perm, perm)])
plt.pause(0.001)
# Make a new model for inference
test_network_hypers = {'C': C, 'beta': 1.0/K, 'tau0': 0.5, 'tau1': 0.5}
test_model = DiscreteTimeNetworkHawkesModelGammaMixture(K=K, dt=dt, B=B,
network_hypers=test_network_hypers)
test_model.weight_model.initialize_from_gibbs(true_model.weight_model.A,
true_model.weight_model.W)
# Plot the block probabilities
plt.figure()
im = plt.imshow(test_model.network.mf_m[perm,:],
interpolation="none", cmap="Greys",
aspect=float(C)/K)
plt.xlabel('C')
plt.ylabel('K')
plt.show()
plt.pause(0.001)
# Run mean field updates for the SBM given a fixed network
N_iters = 20
c_samples = []
vlbs = []
for itr in xrange(N_iters):
if itr % 5 == 0:
print "Iteration: ", itr
# Update the plot
im.set_data(test_model.network.mf_m[perm,:])
plt.pause(0.001)
# Resample from meanfield distribution
test_model.network.resample_from_mf()
c_samples.append(copy.deepcopy(test_model.network.c))
vlbs.append(test_model.network.get_vlb() + test_model.weight_model.get_vlb())
if itr > 0:
if vlbs[-1] - vlbs[-2] < -1e-3:
print "VLBS are not increasing"
print np.array(vlbs)
# import pdb; pdb.set_trace()
raise Exception("VLBS are not increasing!")
# Take a mean field step
test_model.network.meanfieldupdate(test_model.weight_model)
plt.ioff()
# Compute sample statistics for second half of samples
c_samples = np.array(c_samples)
vlbs = np.array(vlbs)
print "True c: ", true_model.network.c
print "Test c: ", c_samples[-10:, :]
# Compute the adjusted mutual info score of the clusterings
amis = []
arss = []
for c in c_samples:
amis.append(adjusted_mutual_info_score(true_model.network.c, c))
arss.append(adjusted_rand_score(true_model.network.c, c))
plt.figure()
plt.plot(np.arange(N_iters), amis, '-r')
plt.plot(np.arange(N_iters), arss, '-b')
plt.xlabel("Iteration")
plt.ylabel("Clustering score")
plt.figure()
plt.plot(np.arange(N_iters), vlbs)
plt.xlabel("Iteration")
plt.ylabel("VLB")
plt.show()
#
# plt.close('all')
test_sbm_mf() | mit |
jonathanunderwood/numpy | numpy/fft/fftpack.py | 4 | 45580 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
             work_function=fftpack.cfftf, fft_cache=_fft_cache):
    """Internal dispatcher shared by all the 1-d transforms.

    Crops or zero-pads `a` to length `n` along `axis`, moves that axis to
    the end, runs the FFTPACK `work_function` with a cached twiddle-factor
    buffer (wsave), and moves the axis back.

    Parameters
    ----------
    a : array_like
        Input data.
    n : int, optional
        Transform length; defaults to ``a.shape[axis]``.
    axis : int, optional
        Axis along which to transform.
    init_function, work_function : callable
        FFTPACK setup/transform pair (complex or real variants).
    fft_cache : dict
        Maps transform length -> list of reusable wsave buffers.
    """
    a = asarray(a)

    if n is None:
        n = a.shape[axis]

    if n < 1:
        raise ValueError("Invalid number of FFT data points (%d) specified."
                         % n)

    try:
        # Thread-safety note: We rely on list.pop() here to atomically
        # retrieve-and-remove a wsave from the cache. This ensures that no
        # other thread can get the same wsave while we're using it.
        wsave = fft_cache.setdefault(n, []).pop()
    except IndexError:
        wsave = init_function(n)

    if a.shape[axis] != n:
        s = list(a.shape)
        if s[axis] > n:
            # Crop: keep only the first n samples along the axis.
            index = [slice(None)]*len(s)
            index[axis] = slice(0, n)
            # Index with a tuple: multidimensional indexing with a list is
            # deprecated (and an error in modern NumPy).
            a = a[tuple(index)]
        else:
            # Zero-pad: copy the data into a larger zero-filled array.
            index = [slice(None)]*len(s)
            index[axis] = slice(0, s[axis])
            s[axis] = n
            z = zeros(s, a.dtype.char)
            z[tuple(index)] = a
            a = z

    if axis != -1:
        a = swapaxes(a, axis, -1)
    r = work_function(a, wsave)
    if axis != -1:
        r = swapaxes(r, axis, -1)

    # As soon as we put wsave back into the cache, another thread could pick it
    # up and start using it, so we must not do this until after we're
    # completely done using it ourselves.
    fft_cache[n].append(wsave)

    return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
    """
    Compute the one-dimensional discrete Fourier Transform.

    Computes the *n*-point DFT with the Fast Fourier Transform algorithm.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.  Shorter input is
        zero-padded, longer input is cropped; defaults to the input
        length along `axis`.
    axis : int, optional
        Axis over which to compute the FFT (default: the last axis).
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axis`.

    Raises
    ------
    IndexError
        if `axes` is larger than the last axis of `a`.

    See Also
    --------
    numpy.fft : for definition of the DFT and conventions used.
    ifft : The inverse of `fft`.
    fft2 : The two-dimensional FFT.
    fftn : The *n*-dimensional FFT.
    rfftn : The *n*-dimensional FFT of real input.
    fftfreq : Frequency bins for given FFT parameters.
    """
    a = asarray(a).astype(complex)
    if n is None:
        n = a.shape[axis]
    result = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
    # "ortho" normalization scales the forward transform by 1/sqrt(n).
    if _unitary(norm):
        result *= 1 / sqrt(n)
    return result
def ifft(a, n=None, axis=-1, norm=None):
    """
    Compute the one-dimensional inverse discrete Fourier Transform.

    Inverse of `fft`: ``ifft(fft(a)) == a`` to within numerical accuracy.
    The input should be ordered the way `fft` returns it: the
    zero-frequency term first, then the positive-frequency terms, then the
    negative-frequency terms in increasing order starting from the most
    negative frequency.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.  Shorter input is
        zero-padded at the end, longer input is cropped; defaults to the
        input length along `axis`.
    axis : int, optional
        Axis over which to compute the inverse DFT (default: last axis).
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axis`.

    Raises
    ------
    IndexError
        If `axes` is larger than the last axis of `a`.

    See Also
    --------
    numpy.fft : An introduction, with definitions and general explanations.
    fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
    ifft2 : The two-dimensional inverse FFT.
    ifftn : The n-dimensional inverse FFT.
    """
    # The copy may be required for multithreading.
    a = array(a, copy=True, dtype=complex)
    if n is None:
        n = a.shape[axis]
    unitary = _unitary(norm)
    result = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
    # The inverse transform carries the 1/n factor (1/sqrt(n) for "ortho").
    scale = sqrt(n) if unitary else n
    return result * (1 / scale)
def rfft(a, n=None, axis=-1, norm=None):
    """
    Compute the one-dimensional discrete Fourier Transform for real input.

    For purely real input the spectrum is Hermitian-symmetric, so the
    redundant negative-frequency terms are not computed; the transformed
    axis of the output has length ``n//2 + 1``.

    Parameters
    ----------
    a : array_like
        Input array.  Any imaginary part is silently discarded.
    n : int, optional
        Number of points along the transformation axis to use.  Shorter
        input is zero-padded, longer input is cropped; defaults to the
        input length along `axis`.
    axis : int, optional
        Axis over which to compute the FFT (default: last axis).
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The transformed input; the transformed axis has length
        ``(n/2)+1`` for even `n` and ``(n+1)/2`` for odd `n`.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See Also
    --------
    numpy.fft : For definition of the DFT and conventions used.
    irfft : The inverse of `rfft`.
    fft : The one-dimensional FFT of general (complex) input.
    fftn : The *n*-dimensional FFT.
    rfftn : The *n*-dimensional FFT of real input.
    """
    # The copy may be required for multithreading.
    a = array(a, copy=True, dtype=float)
    result = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
                      _real_fft_cache)
    if _unitary(norm):
        # NOTE: "ortho" scaling uses the *input* length along `axis`.
        result *= 1 / sqrt(a.shape[axis])
    return result
def irfft(a, n=None, axis=-1, norm=None):
    """
    Compute the inverse of the n-point DFT for real input.

    Inverse of `rfft`: ``irfft(rfft(a), len(a)) == a`` to within numerical
    accuracy.  The input is expected in `rfft` order (the real
    zero-frequency term followed by the complex positive-frequency terms);
    the negative-frequency terms are taken to be the complex conjugates of
    the corresponding positive-frequency terms.

    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output; ``n//2 + 1`` input
        points are used (cropping or zero-padding as needed).  Defaults to
        ``2*(m-1)`` with ``m`` the input length along `axis`, so an odd
        output length must be requested explicitly.
    axis : int, optional
        Axis over which to compute the inverse FFT (default: last axis).
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : ndarray
        The real-valued inverse transform along `axis`, of length `n`.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See Also
    --------
    numpy.fft : For definition of the DFT and conventions used.
    rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
    fft : The one-dimensional FFT.
    irfft2 : The inverse of the two-dimensional FFT of real input.
    irfftn : The inverse of the *n*-dimensional FFT of real input.
    """
    # The copy may be required for multithreading.
    a = array(a, copy=True, dtype=complex)
    if n is None:
        # Default output length assumes an even-length original signal.
        n = (a.shape[axis] - 1) * 2
    unitary = _unitary(norm)
    result = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
                      _real_fft_cache)
    scale = sqrt(n) if unitary else n
    return result * (1 / scale)
def hfft(a, n=None, axis=-1, norm=None):
    """
    Compute the FFT of a signal which has Hermitian symmetry (real spectrum).

    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
    opposite case: the signal has Hermitian symmetry in the time domain
    and is real in the frequency domain.  ``ihfft(hfft(a), len(a)) == a``
    within numerical accuracy, so `n` must be supplied for an odd-length
    result.

    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output; ``n//2 + 1`` input
        points are used (cropping or zero-padding as needed).  Defaults to
        ``2*(m-1)`` with ``m`` the input length along `axis`.
    axis : int, optional
        Axis over which to compute the FFT (default: last axis).
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : ndarray
        The real-valued transform along `axis`, of length `n`.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See also
    --------
    rfft : Compute the one-dimensional FFT for real input.
    ihfft : The inverse of `hfft`.
    """
    # The copy may be required for multithreading.
    a = array(a, copy=True, dtype=complex)
    if n is None:
        n = (a.shape[axis] - 1) * 2
    unitary = _unitary(norm)
    # Implemented as the inverse real FFT of the conjugate, with irfft's
    # 1/n normalization undone by multiplying the result back by n.
    factor = sqrt(n) if unitary else n
    return irfft(conjugate(a), n, axis) * factor
def ihfft(a, n=None, axis=-1, norm=None):
    """
    Compute the inverse FFT of a signal which has Hermitian symmetry.

    Companion of `hfft`; see there for the relationship to
    `rfft`/`irfft`.

    Parameters
    ----------
    a : array_like
        Input array.
    n : int, optional
        Length of the inverse FFT: the number of points along the
        transformation axis of the input to use.  Shorter input is
        zero-padded, longer input is cropped; defaults to the input
        length along `axis`.
    axis : int, optional
        Axis over which to compute the inverse FFT (default: last axis).
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The transformed input; the transformed axis has length
        ``(n/2)+1`` for even `n` and ``(n+1)/2`` for odd `n`.

    See also
    --------
    hfft, irfft
    """
    # The copy may be required for multithreading.
    a = array(a, copy=True, dtype=float)
    if n is None:
        n = a.shape[axis]
    unitary = _unitary(norm)
    # ihfft is the conjugate of the real FFT, scaled like an inverse.
    result = conjugate(rfft(a, n, axis))
    return result * (1 / (sqrt(n) if unitary else n))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
    """Apply a 1-d transform along each requested axis, last axis first."""
    a = asarray(a)
    s, axes = _cook_nd_args(a, s, axes)
    for ii in reversed(range(len(axes))):
        a = function(a, n=s[ii], axis=axes[ii], norm=norm)
    return a
def fftn(a, s=None, axes=None, norm=None):
    """
    Compute the N-dimensional discrete Fourier Transform.

    Applies the one-dimensional FFT successively over any number of axes
    of an *M*-dimensional array.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output; the input
        is cropped or zero-padded along each axis accordingly.  Defaults
        to the input shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  Defaults to the last
        ``len(s)`` axes, or all axes when `s` is not given either.
        Repeated indices mean the transform over that axis is performed
        multiple times.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than than the number of axes of `a`.

    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with definitions
        and conventions used.
    ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
    fft : The one-dimensional FFT, with definitions and conventions used.
    rfftn : The *n*-dimensional FFT of real input.
    fft2 : The two-dimensional FFT.
    fftshift : Shifts zero-frequency terms to centre of array
    """
    return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
    """
    Compute the N-dimensional inverse discrete Fourier Transform.

    This function computes the inverse of the N-dimensional discrete
    Fourier Transform over any number of axes in an M-dimensional array by
    means of the Fast Fourier Transform (FFT).  In other words,
    ``ifftn(fftn(a)) == a`` to within numerical accuracy.
    For a description of the definitions and conventions used, see `numpy.fft`.

    The input, analogously to `ifft`, should be ordered in the same way as is
    returned by `fftn`, i.e. it should have the term for zero frequency
    in all axes in the low-order corner, the positive frequency terms in the
    first half of all axes, the term for the Nyquist frequency in the middle
    of all axes and the negative frequency terms in the second half of all
    axes, in order of decreasingly negative frequency.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        This corresponds to ``n`` for ``ifft(x, n)``.
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped.  If it is larger, the input is padded with
        zeros.  If `s` is not given, the shape of the input along the axes
        specified by `axes` is used.  See notes for issue on `ifft` zero
        padding.
    axes : sequence of ints, optional
        Axes over which to compute the IFFT.  If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` means that the inverse transform over that
        axis is performed multiple times.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
        as explained in the parameters section above.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with definitions
        and conventions used.
    fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
    ifft : The one-dimensional inverse FFT.
    ifft2 : The two-dimensional inverse FFT.
    ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
        of array.

    Notes
    -----
    See `numpy.fft` for definitions and conventions used.

    Zero-padding, analogously with `ifft`, is performed by appending zeros to
    the input along the specified dimension.  Although this is the common
    approach, it might lead to surprising results.  If another form of zero
    padding is desired, it must be performed before `ifftn` is called.

    Examples
    --------
    >>> a = np.eye(4)
    >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
    array([[ 1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j],
           [ 0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j],
           [ 0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
           [ 0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j]])

    Create and plot an image with band-limited frequency content:

    >>> import matplotlib.pyplot as plt
    >>> n = np.zeros((200,200), dtype=complex)
    >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
    >>> im = np.fft.ifftn(n).real
    >>> plt.imshow(im)
    <matplotlib.image.AxesImage object at 0x...>
    >>> plt.show()

    """
    return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
    """
    Compute the 2-dimensional discrete Fourier Transform.

    This function computes the *n*-dimensional discrete Fourier Transform
    over any axes in an *M*-dimensional array by means of the
    Fast Fourier Transform (FFT).  By default, the transform is computed over
    the last two axes of the input array, i.e., a 2-dimensional FFT.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        This corresponds to ``n`` for ``fft(x, n)``.
        Along each axis, if the given shape is smaller than that of the input,
        the input is cropped.  If it is larger, the input is padded with
        zeros.  If `s` is not given, the shape of the input along the axes
        specified by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  If not given, the last two
        axes are used.  A repeated index in `axes` means the transform over
        that axis is performed multiple times.  A one-element sequence means
        that a one-dimensional FFT is performed.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or the last two axes if `axes` is not given.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given and
        ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with definitions
        and conventions used.
    ifft2 : The inverse two-dimensional FFT.
    fft : The one-dimensional FFT.
    fftn : The *n*-dimensional FFT.
    fftshift : Shifts zero-frequency terms to the center of the array.
        For two-dimensional input, swaps first and third quadrants, and second
        and fourth quadrants.

    Notes
    -----
    `fft2` is just `fftn` with a different default for `axes`.

    The output, analogously to `fft`, contains the term for zero frequency in
    the low-order corner of the transformed axes, the positive frequency terms
    in the first half of these axes, the term for the Nyquist frequency in the
    middle of the axes and the negative frequency terms in the second half of
    the axes, in order of decreasingly negative frequency.

    See `fftn` for details and a plotting example, and `numpy.fft` for
    definitions and conventions used.

    Examples
    --------
    >>> a = np.mgrid[:5, :5][0]
    >>> np.fft.fft2(a)
    array([[ 50.0 +0.j        ,   0.0 +0.j        ,   0.0 +0.j        ,
              0.0 +0.j        ,   0.0 +0.j        ],
           [-12.5+17.20477401j,   0.0 +0.j        ,   0.0 +0.j        ,
              0.0 +0.j        ,   0.0 +0.j        ],
           [-12.5 +4.0614962j ,   0.0 +0.j        ,   0.0 +0.j        ,
              0.0 +0.j        ,   0.0 +0.j        ],
           [-12.5 -4.0614962j ,   0.0 +0.j        ,   0.0 +0.j        ,
              0.0 +0.j        ,   0.0 +0.j        ],
           [-12.5-17.20477401j,   0.0 +0.j        ,   0.0 +0.j        ,
              0.0 +0.j        ,   0.0 +0.j        ]])

    """
    return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
    """
    Compute the 2-dimensional inverse discrete Fourier Transform.

    This function computes the inverse of the 2-dimensional discrete Fourier
    Transform over any number of axes in an M-dimensional array by means of
    the Fast Fourier Transform (FFT).  In other words, ``ifft2(fft2(a)) == a``
    to within numerical accuracy.  By default, the inverse transform is
    computed over the last two axes of the input array.

    The input, analogously to `ifft`, should be ordered in the same way as is
    returned by `fft2`, i.e. it should have the term for zero frequency
    in the low-order corner of the two axes, the positive frequency terms in
    the first half of these axes, the term for the Nyquist frequency in the
    middle of the axes and the negative frequency terms in the second half of
    both axes, in order of decreasingly negative frequency.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
        ``s[1]`` to axis 1, etc.).  This corresponds to `n` for ``ifft(x, n)``.
        Along each axis, if the given shape is smaller than that of the input,
        the input is cropped.  If it is larger, the input is padded with
        zeros.  If `s` is not given, the shape of the input along the axes
        specified by `axes` is used.  See notes for issue on `ifft` zero
        padding.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  If not given, the last two
        axes are used.  A repeated index in `axes` means the transform over
        that axis is performed multiple times.  A one-element sequence means
        that a one-dimensional FFT is performed.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or the last two axes if `axes` is not given.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given and
        ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with definitions
        and conventions used.
    fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
    ifftn : The inverse of the *n*-dimensional FFT.
    fft : The one-dimensional FFT.
    ifft : The one-dimensional inverse FFT.

    Notes
    -----
    `ifft2` is just `ifftn` with a different default for `axes`.

    See `ifftn` for details and a plotting example, and `numpy.fft` for
    definition and conventions used.

    Zero-padding, analogously with `ifft`, is performed by appending zeros to
    the input along the specified dimension.  Although this is the common
    approach, it might lead to surprising results.  If another form of zero
    padding is desired, it must be performed before `ifft2` is called.

    Examples
    --------
    >>> a = 4 * np.eye(4)
    >>> np.fft.ifft2(a)
    array([[ 1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j],
           [ 0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j],
           [ 0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
           [ 0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j]])

    """
    return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
    """
    Compute the N-dimensional discrete Fourier Transform for real input.

    This function computes the N-dimensional discrete Fourier Transform over
    any number of axes in an M-dimensional real array by means of the Fast
    Fourier Transform (FFT).  By default, all axes are transformed, with the
    real transform performed over the last axis, while the remaining
    transforms are complex.

    Parameters
    ----------
    a : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape (length along each transformed axis) to use from the input.
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
        for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped.  If it is larger, the input is padded with
        zeros.  If `s` is not given, the shape of the input along the axes
        specified by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
        as explained in the parameters section above.
        The length of the last axis transformed will be ``s[-1]//2+1``,
        while the remaining transformed axes will have lengths according to
        `s`, or unchanged from the input.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional
        FFT of real input.
    fft : The one-dimensional FFT, with definitions and conventions used.
    rfft : The one-dimensional FFT of real input.
    fftn : The n-dimensional FFT.
    rfft2 : The two-dimensional FFT of real input.

    Notes
    -----
    The transform for real input is performed over the last transformation
    axis, as by `rfft`, then the transform over the remaining axes is
    performed as by `fftn`.  The order of the output is as for `rfft` for the
    final transformation axis, and as for `fftn` for the remaining
    transformation axes.

    See `fft` for details, definitions and conventions used.

    Examples
    --------
    >>> a = np.ones((2, 2, 2))
    >>> np.fft.rfftn(a)
    array([[[ 8.+0.j,  0.+0.j],
            [ 0.+0.j,  0.+0.j]],
           [[ 0.+0.j,  0.+0.j],
            [ 0.+0.j,  0.+0.j]]])

    >>> np.fft.rfftn(a, axes=(2, 0))
    array([[[ 4.+0.j,  0.+0.j],
            [ 4.+0.j,  0.+0.j]],
           [[ 0.+0.j,  0.+0.j],
            [ 0.+0.j,  0.+0.j]]])

    """
    # Work on a private float copy (the copy may be required for
    # multithreading) and let _cook_nd_args normalise `s` and `axes`
    # into matching sequences.
    out = array(a, copy=True, dtype=float)
    s, axes = _cook_nd_args(out, s, axes)
    # Real-to-complex transform along the final axis first ...
    out = rfft(out, s[-1], axes[-1], norm)
    # ... then ordinary complex transforms over the remaining axes.
    for length, axis in zip(s[:-1], axes[:-1]):
        out = fft(out, length, axis, norm)
    return out
def rfft2(a, s=None, axes=(-2, -1), norm=None):
    """
    Compute the 2-dimensional FFT of a real array.

    Parameters
    ----------
    a : array
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape of the FFT (length of each transformed axis).
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  Default: the last two axes.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : ndarray
        The result of the real 2-D FFT.

    See Also
    --------
    rfftn : Compute the N-dimensional discrete Fourier Transform for real
            input.

    Notes
    -----
    This is really just `rfftn` with different default behavior.
    For more details see `rfftn`.

    """
    return rfftn(a, s, axes, norm)
def irfftn(a, s=None, axes=None, norm=None):
    """
    Compute the inverse of the N-dimensional FFT of real input.

    This function computes the inverse of the N-dimensional discrete
    Fourier Transform for real input over any number of axes in an
    M-dimensional array by means of the Fast Fourier Transform (FFT).  In
    other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
    accuracy.  (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
    and for the same reason.)

    The input should be ordered in the same way as is returned by `rfftn`,
    i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
    along all the other axes.

    Parameters
    ----------
    a : array_like
        Input array.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).  `s` is also
        the number of input points used along this axis, except for the last
        axis, where ``s[-1]//2+1`` points of the input are used.
        Along any axis, if the shape indicated by `s` is smaller than that of
        the input, the input is cropped.  If it is larger, the input is padded
        with zeros.  If `s` is not given, the shape of the input along the
        axes specified by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT.  If not given, the last
        `len(s)` axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` means that the inverse transform over that
        axis is performed multiple times.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
        as explained in the parameters section above.
        The length of each transformed axis is as given by the corresponding
        element of `s`, or the length of the input in every axis except for
        the last one if `s` is not given.  In the final transformed axis the
        length of the output when `s` is not given is ``2*(m-1)`` where ``m``
        is the length of the final transformed axis of the input.  To get an
        odd number of output points in the final axis, `s` must be specified.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    rfftn : The forward n-dimensional FFT of real input,
            of which `ifftn` is the inverse.
    fft : The one-dimensional FFT, with definitions and conventions used.
    irfft : The inverse of the one-dimensional FFT of real input.
    irfft2 : The inverse of the two-dimensional FFT of real input.

    Notes
    -----
    See `fft` for definitions and conventions used.

    See `rfft` for definitions and conventions used for real input.

    Examples
    --------
    >>> a = np.zeros((3, 2, 2))
    >>> a[0, 0, 0] = 3 * 2 * 2
    >>> np.fft.irfftn(a)
    array([[[ 1.,  1.],
            [ 1.,  1.]],
           [[ 1.,  1.],
            [ 1.,  1.]],
           [[ 1.,  1.],
            [ 1.,  1.]]])

    """
    # Work on a private complex copy (the copy may be required for
    # multithreading); invreal=1 tells _cook_nd_args that the last axis
    # of the *output* is the real one.
    out = array(a, copy=True, dtype=complex)
    s, axes = _cook_nd_args(out, s, axes, invreal=1)
    # Inverse complex transforms over all but the final axis ...
    for length, axis in zip(s[:-1], axes[:-1]):
        out = ifft(out, length, axis, norm)
    # ... and the complex-to-real inverse transform along the final axis.
    out = irfft(out, s[-1], axes[-1], norm)
    return out
def irfft2(a, s=None, axes=(-2, -1), norm=None):
    """
    Compute the 2-dimensional inverse FFT of a real array.

    Parameters
    ----------
    a : array_like
        The input array.
    s : sequence of ints, optional
        Shape of the inverse FFT (length of each transformed axis).
    axes : sequence of ints, optional
        The axes over which to compute the inverse fft.
        Default is the last two axes.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.

    See Also
    --------
    irfftn : Compute the inverse of the N-dimensional FFT of real input.

    Notes
    -----
    This is really `irfftn` with different defaults.
    For more details see `irfftn`.

    """
    return irfftn(a, s, axes, norm)
| bsd-3-clause |
sanjayankur31/nest-simulator | extras/ConnPlotter/tcd_nest.py | 20 | 6959 | # -*- coding: utf-8 -*-
#
# tcd_nest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
"""
Interface routines to extract synapse information from NEST.
This file provides the interface to NEST required to plot effective
kernel connectivity as total charge deposited (TCD) as a function of
mean membrane potential.
In order to use TCD plots, you need to create an instance of class
TCD. The constructor will import NEST to obtain all necessary
information. TCD can then be obtained by calling the generated object.
NB: At present, TCD is supported only for the ht_model. NMDA charge
deposition is based on steady-state value for open channels at given
voltage.
"""
# ----------------------------------------------------------------------------
import numpy as np
# Public API: the module defines class ``TCD``; exporting the stale
# ``TCD_NEST`` name (which does not exist) broke ``from tcd_nest import *``.
__all__ = ['TCD']
# ----------------------------------------------------------------------------
class TCD(object):
    """
    Access total charge deposited (TCD) information for NEST neurons.

    Create one instance of this class and call it to obtain charge
    information.

    NB: The constructor for this class imports NEST.

    NB: Currently, only ht_model is supported, with synapse types
    AMPA, NMDA, GABA_A, GABA_B.
    """

    # ------------------------------------------------------------------------

    def __init__(self, modelList):
        """
        Create TCD computer for given modelList.

        The constructor instantiates NEST, including a call to
        ResetKernel() and instantiates all models in modelList.
        From all models derived from ht_model, synapse information
        is extracted and stored. Afterward, ResetKernel() is called
        once more.

        modelList: tuples of (parent, model, dict)

        Note: nest must have been imported before and all necessary modules
        loaded.
        """
        import nest
        nest.ResetKernel()
        # keep "list" over all models derived from ht_neuron
        ht_kids = set(["ht_neuron"])
        for parent, model, props in modelList:
            if parent in ht_kids and model not in ht_kids:
                nest.CopyModel(parent, model, props)
                ht_kids.add(model)
        # ht_kids now contains all models derived from ht_neuron
        # We collect in _tcd_info a mapping from (targetmodel, synapstype)
        # to an object containing all required information for TCD computation.
        self._tcd_info = {}
        for mod in ht_kids:
            props = nest.GetDefaults(mod)
            # beta-function synapses can be pre-integrated analytically ...
            for syn in ['AMPA', 'GABA_A', 'GABA_B']:
                self._tcd_info[(mod, syn)] = self._TcdBeta(syn, props)
            # ... while NMDA needs the voltage-dependent activation term.
            self._tcd_info[(mod, 'NMDA')] = self._TcdNMDA(props)
        # delete models we created
        nest.ResetKernel()

    # ------------------------------------------------------------------------

    def __call__(self, syn_type, target, V):
        """
        Return total charge deposited by a single spike through
        synapse of syn_type with syn_wght onto target, given that
        target has membrane potential V.

        Arguments:
        syn_type    synapse type (string: AMPA, NMDA, GABA_A, GABA_B)
        target      name of target neuron model (string)
        V           membrane potential (double)

        Returns:
        charge (double)
        """
        return self._tcd_info[(target, syn_type)](V)

    # ------------------------------------------------------------------------

    class _TcdBeta(object):
        """
        Class representing plain beta-function synapse model.
        """

        def __init__(self, syn, props):
            """
            syn is name of synapse type.
            props is property dictionary of ht_neuron.
            """
            td = props['tau_decay_' + syn]  # decay time
            tr = props['tau_rise_' + syn]  # rise time
            # integral over g(t) -- closed form for the beta function,
            # normalised so that the peak conductance is g_peak.
            self._int_g = (props['g_peak_' + syn] * (td - tr) /
                           ((tr / td) ** (tr / (td - tr)) -
                            (tr / td) ** (td / (td - tr))))
            self._e_rev = props['E_rev_' + syn]

        def __call__(self, V):
            """
            V is membrane potential.
            """
            # TCD = -(integral of g) * driving force at potential V
            return -self._int_g * (V - self._e_rev)

        def __str__(self):
            return "_int_g = %f, _e_rev = %f" % (self._int_g, self._e_rev)

    # ------------------------------------------------------------------------

    class _TcdNMDA(object):
        """
        Class representing NMDA synapse model in ht_neuron.

        Note: NMDA charge deposition is based on steady-state value
        for open channels at given voltage.
        """

        def __init__(self, props):
            """
            props is property dictionary of ht_neuron.
            """
            td = props['tau_decay_NMDA']  # decay time
            tr = props['tau_rise_NMDA']  # rise time
            # integral over g(t) -- same beta-function integral as above
            self._int_g = (props['g_peak_NMDA'] * (td - tr) /
                           ((tr / td) ** (tr / (td - tr)) -
                            (tr / td) ** (td / (td - tr))))
            self._e_rev = props['E_rev_NMDA']
            self._v_act = props['V_act_NMDA']
            self._s_act = props['S_act_NMDA']

        def __call__(self, V):
            """
            V is membrane potential.
            """
            # Driving-force term scaled by the sigmoidal steady-state
            # voltage activation of the NMDA channel.
            return (-self._int_g * (V - self._e_rev) /
                    (1. + np.exp((self._v_act - V) / self._s_act)))

        def __str__(self):
            return "_int_g = %f, _e_rev = %f, _v_act = %f, _s_act = %f" \
                % (self._int_g, self._e_rev, self._v_act, self._s_act)
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    # Ad-hoc demo: build a Hill-Tononi network definition, create a TCD
    # computer from its model list, and plot TCD vs. membrane potential
    # for each synapse type onto the 'Relay' model.
    import matplotlib.pyplot as plt
    import sys
    # NOTE(review): hard-coded developer path -- this demo only runs on the
    # original author's machine; adjust or remove before reuse.
    sys.path.append('/Users/plesser/Projects/hill-model/scripts')
    import ht_def_new_sq
    import ht_params
    htl, htc, htm = ht_def_new_sq.hill_tononi(ht_params.Params)
    tcd = TCD(htm)
    v = np.linspace(-90, 0, 100)
    syns = ['AMPA', 'NMDA', 'GABA_A', 'GABA_B']
    for s in syns:
        g = np.array([tcd(s, 'Relay', vm) for vm in v])
        plt.plot(v, g)
    plt.legend(syns)
    plt.show()
| gpl-2.0 |
DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/Segmentation.py | 1 | 7058 | import pylab as pl
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from scipy.ndimage import measurements
from scipy import optimize
import EqnLine as line
from skimage import io
from skimage import measure, color
from skimage.morphology import watershed
from skimage.feature import peak_local_max
from skimage.filter import threshold_otsu, sobel
from skimage.filter import denoise_tv_chambolle
from skimage.util import img_as_ubyte
from scipy.ndimage.filters import median_filter, gaussian_filter
from peak_detect import *
import pickle
def save_data(filename, data):
    """Serialize `data` to `filename` with pickle.

    Parameters
    ----------
    filename : str
        Path of the file to (over)write.
    data : object
        Any picklable Python object.
    """
    print("Saving data")
    # Pickle is a binary format: the file must be opened in binary mode
    # (text-mode 'w' breaks under Python 3), and the context manager
    # guarantees the handle is closed even if pickling fails.
    with open(filename, 'wb') as f:
        pickle.dump(data, f)
def crop_box_3D(image, touch_pt, centres, size=30):
    """
    Crop a cubic region of half-width `size` around each touch point.

    Parameters
    ----------
    image : ndarray
        3-D volume to crop from.
    touch_pt : sequence of 3-sequences
        (x, y, z) coordinates of the contact points between spheres.
    centres : sequence
        Sphere-centre pairs matching `touch_pt`; kept for interface
        compatibility but not used by the cropping itself.
    size : int, optional
        Half-width of the cube; each crop spans 2*size voxels per axis.

    Returns
    -------
    list of ndarray
        One sub-volume per touch point (clipped by numpy slicing rules
        if the box runs past the upper edge of the volume).
    """
    crops = []
    for x, y, z in touch_pt:
        x, y, z = int(x), int(y), int(z)
        crops.append(image[x - size:x + size,
                           y - size:y + size,
                           z - size:z + size])
    return crops
def watershed_3d(sphere):
    """
    Segment a 3-D volume with scipy's IFT watershed.

    Markers should be int8
    Image should be uint8

    Returns the labelled volume produced by `measurements.watershed_ift`.
    """
    # Denoise, binarise with Otsu's threshold, then keep only the edges.
    sphere = median_filter(sphere, 3)
    thresh = threshold_otsu(sphere)
    sphere = (sphere >= thresh) * 1
    # NOTE(review): sobel returns floats; casting to uint16 below truncates
    # the gradient values -- confirm this is intended.
    sphere = sobel(sphere)
    size = (sphere.shape[0], sphere.shape[1], sphere.shape[2])
    marker = np.zeros(size, dtype=np.int16)
    pl.imshow(sphere[:, :, 50])
    pl.show()
    # mark everything outside as background
    marker[5, :, :] = -1
    marker[size[0] - 5, :, :] = -1
    marker[:, :, 5] = -1
    marker[:, :, size[2] - 5] = -1
    marker[:, 5, :] = -1
    marker[:, size[1] - 5, :] = -1
    marker[:, 0, 0] = -1
    # mark everything inside as a sphere
    # (integer floor division: the original float indices `size[0] / 2.`
    # raise IndexError under modern numpy)
    marker[size[0] // 2, size[1] // 2, size[2] // 2] = 5
    result = measurements.watershed_ift(sphere.astype(dtype=np.uint16), marker)
    pl.imshow(result[:, :, 50])
    pl.show()
    return result
def watershed_segmentation(image):
    """
    Separate touching objects in a binary 2-D image via watershed.

    Seeds are the local maxima of the Euclidean distance-to-background
    transform, so overlapping convex objects are split along their necks.

    Parameters
    ----------
    image : ndarray
        Binary (0/1) 2-D image.

    Returns
    -------
    labels : ndarray of int
        Same shape as `image`; 0 is background, 1..K index the objects.
    """
    # #threshold
    # image = median_filter(image, 5)
    #
    # filter = threshold_otsu(image)
    # image = (image > filter) * 1
    # Now we want to separate the two objects in image
    # Generate the markers as local maxima of the distance to the background
    distance = ndi.distance_transform_edt(image)
    local_maxi = peak_local_max(distance, indices=False, footprint=np.ones((3, 3)),
                                labels=image)
    markers = ndi.label(local_maxi)[0]
    labels = watershed(-distance, markers, mask=image)
    # fig, axes = plt.subplots(ncols=3, figsize=(8, 2.7))
    # ax0, ax1, ax2 = axes
    #
    # ax0.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
    # ax0.set_title('Overlapping objects')
    # ax1.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
    # ax1.set_title('Distances')
    # ax2.imshow(labels, cmap=plt.cm.spectral, interpolation='nearest')
    # ax2.set_title('Separated objects')
    #
    # for ax in axes:
    #     ax.axis('off')
    #
    # fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
    #                     right=1)
    # plt.show()
    return labels
def centres_of_mass_2D(image):
    """
    Collect per-label geometry from a labelled 2-D image.

    Parameters
    ----------
    image : ndarray of int
        Labelled image (e.g. the output of `watershed_segmentation`).

    Returns
    -------
    centroids : list of (row, col) tuples
        Centre of mass of each labelled region.
    areas : list of ndarray
        Bounding-box crops of `image`, one per label.
    bords : list of (minr, minc, maxr, maxc) tuples
        Bounding boxes, padded by `margin` (currently 0).
    radius : list of float
        Half the equivalent diameter of each region.

    NOTE(review): the property-name-list call form of
    ``measure.regionprops`` is an old scikit-image API that later releases
    removed -- confirm the pinned scikit-image version before upgrading.
    """
    centroids = []
    bords = []
    areas = []
    radius = []
    for info in measure.regionprops(image, ['Centroid', 'BoundingBox', 'equivalent_diameter']):
        centre = info['Centroid']
        minr, minc, maxr, maxc = info['BoundingBox']
        D = info['equivalent_diameter']
        margin = 0
        radius.append((D / 2.0))
        bords.append((minr-margin, minc-margin, maxr+margin, maxc+margin))
        areas.append(image[minr-margin:maxr+margin,minc-margin:maxc+margin].copy())
        centroids.append(centre)
    return centroids, areas, bords, radius
def watershed_slicing(image):
    """
    Run the 2-D watershed slice by slice along axis 2 of a volume.

    Each z-slice is segmented with `watershed_segmentation` and the
    per-slice centroids and radii of the labelled regions are collected.

    Parameters
    ----------
    image : ndarray
        3-D volume; it is denoised and Otsu-binarised before slicing.

    Returns
    -------
    slice_centroids : list of list of (row, col) tuples
        One list of centroids per z-slice.
    slice_radius : list of list of float
        One list of region radii per z-slice.
    """
    image = median_filter(image, 3)
    thresh = threshold_otsu(image)
    image = (image > thresh) * 1
    # Iterate over the slicing axis (axis 2), not axis 0: len(image) is
    # image.shape[0], which is wrong for non-cubic volumes.
    N = image.shape[2]
    slice_centroids = []
    slice_radius = []
    for i in range(N):
        plane = image[:, :, i]
        labels_slice = watershed_segmentation(plane)
        centroids, areas, bords, radius = centres_of_mass_2D(labels_slice)
        slice_centroids.append(centroids)
        slice_radius.append(radius)
    return slice_centroids, slice_radius
################# DRAW TEST DATA ######################################
def draw_sphere(N=100, centres=((30, 30, 50), (30, 69, 50)), radii=(20, 20)):
    """
    Build a binary 3-D test volume containing filled spheres.

    Parameters
    ----------
    N : int, optional
        Edge length of the cubic volume.
    centres : sequence of (x, y, z) tuples, optional
        Sphere centres.  The defaults give two radius-20 spheres that just
        touch, reproducing the original hard-coded test volume exactly.
    radii : sequence of float, optional
        One radius per centre; voxels strictly inside the radius are set.

    Returns
    -------
    ndarray
        (N, N, N) float array: 1.0 inside any sphere, 0.0 elsewhere.
    """
    sphere = np.zeros((N, N, N))
    # meshgrid with default 'xy' indexing: the *second* output varies along
    # axis 0, hence the Y, X, Z unpacking order used here.
    Y, X, Z = np.meshgrid(np.arange(N), np.arange(N), np.arange(N))
    for (xc, yc, zc), r in zip(centres, radii):
        mask = ((X - xc) ** 2 + (Y - yc) ** 2 + (Z - zc) ** 2) < r ** 2
        sphere[mask] = 1
    return sphere
def add_noise(np_image, amount):
    """
    Add scaled Gaussian noise to an array.

    The noise field is normalised by its own maximum, then scaled by
    `amount` times the maximum of `np_image`, so `amount` is roughly the
    noise amplitude relative to the brightest voxel.

    Parameters
    ----------
    np_image : ndarray
        Input array of any dimensionality (the original version required
        exactly three dimensions).
    amount : float
        Relative noise amplitude; 0 returns values equal to the input.

    Returns
    -------
    ndarray
        `np_image` plus the noise field; the input is not modified.
    """
    noise = np.random.randn(*np_image.shape)
    norm_noise = noise / np.max(noise)
    return np_image + norm_noise * np.max(np_image) * amount
#############################################################################
# --- Driver script: synthetic end-to-end test of the pipeline ---
# Build two touching noisy spheres, segment them slice by slice, fit
# centres/radii, locate the contact point, and analyse the contact line.
from test_analysis import test_analyse
sphere = draw_sphere()
sphere = add_noise(sphere, 0.3)
#sphere = gaussian_filter(sphere, 3)
centroids, radii = watershed_slicing(sphere)
rad, cent = test_analyse.analyse(radii, centroids)
# tol is the allowed gap (in voxels) when deciding that two spheres touch
touch_pt, centres = line.find_contact_3D(cent, rad, tol = 2)
#crop_img, slope = crop_box_3D(sphere, touch_pt, centres, size = 30)
pt1 = cent[0]
pt2 = cent[1]
line.touch_lines_3D(pt1, pt2, sphere)
# image = io.imread("test_slice.tif")
# sphere = np.load('sphere1.npy')
# centroids, radii = watershed_slicing(sphere)
# save_data("test_analysis/centroids.dat", centroids)
# save_data("test_analysis/radii.dat", radii)
# labels = watershed_segmentation(image)
#
# centroids, areas, bords, radius, radius2 = centres_of_mass_2D(labels)
#
# # leastsq_circle_fit(areas, centroids, bords, radius)
# # leastsq_whole(image, centroids)
# touch, centres = find_contact(centroids, radius2)
#
# crop_img, slopes = crop_box(image, touch, centres)
#
# line.eqn_line(crop_img[0], slopes[0])
# line.eqn_line(crop_img[0], slopes[0]) | apache-2.0 |
SeldonIO/seldon-server | python/seldon/anomaly/AnomalyDetection.py | 2 | 10901 | import numpy as np
import pandas as pd
import scipy.spatial.distance as ssd
from sklearn.utils import check_array
import logging
from time import time
# Module-level logger: our own stream handler emits INFO-and-above, and
# propagation is disabled so messages are not duplicated by root handlers.
# NOTE(review): attaching a handler at import time is unusual for library
# code and adds one handler per (re)load -- confirm this is intentional.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
class iNNEDetector(object):
"""
Create an ensemble classifier for anomaly detection based on iNNE method (cite iNNE paper)
Parameters
----------
ensemble_size : int
Number of ensembles for the classifier
sample_size : int
Number of samples on each ensemble
metric : str
Metric used by iNNE. Default 'euclid'
verbose : bool
default True
"""
    def __init__(self,ensemble_size=100,sample_size=32,metric='euclid',verbose=True):
        """Store the ensemble configuration; no work happens until `fit`."""
        # See the class docstring for parameter semantics: the detector
        # builds `ensemble_size` sets of hyperspheres, each derived from a
        # random sample of `sample_size` points under metric `metric`.
        self.ensemble_size = ensemble_size
        self.sample_size = sample_size
        self.metric = metric
        self.verbose = verbose
def _D(self,x,y,metric):
"""
Calculates the distance between x and y according to metric 'metric'
Parameters
----------
x : numpy array
1-d vector of dimension d
y : numpy array
1-d vector of dimension d
metric: str
specify the metric used (default euclidian metric)
Returns
-------
D(x | y) : Distance between x and y according to metric
"""
if metric == 'euclid' or metric == 'Euclid':
return np.linalg.norm(x-y)
if metric == 'kolmogorov' or metric == 'Kolmogorov':
#check normalization
norm_x = np.around(np.linalg.norm(x),decimals=10)
norm_y = np.around(np.linalg.norm(y),decimals=10)
if norm_x == 1 and norm_y == 1:
return np.sqrt(1 - np.around(np.absolute(np.dot(x,y))),decimals=10)
else:
raise NameError('%s metric supports only normalized vectors'
% metric)
if metric == 'chebyshev' or metric == 'Chebyshev':
return ssd.chebyshev(x,y)
else:
raise NameError('%s metric not supported'
% metric)
    def _generate_spheres(self,X_s):
        """
        Generates set of hyperspheres from sample X_s

        Each sample point becomes a sphere centre whose radius is the
        distance to its nearest neighbour within the sample.

        Parameters
        ----------
        X_s : numpy array
            dimensions: sample_size X nb_features

        Returns
        -------
        spheres : list
            list of tuples storing sphere's center, radius and nearest
            neighbour index (an index into X_s, and hence into this list)
        """
        spheres = []
        for i in range(X_s.shape[0]):
            # start from a random point other than i as the provisional
            # nearest neighbour ...
            k = int(np.random.randint(X_s.shape[0],size=1))
            while k==i:
                k = int(np.random.randint(X_s.shape[0],size=1))
            radius = self._D(X_s[i],X_s[k],self.metric)
            nn_index = k
            # ... then scan the whole sample for the true nearest neighbour
            for j in range(X_s.shape[0]):
                if self._D(X_s[i],X_s[j],self.metric) < radius and j!=i:
                    radius = self._D(X_s[i],X_s[j],self.metric)
                    nn_index = j
            spheres.append((X_s[i], radius, nn_index))
        return spheres
def _score(self,y,spheres):
"""
Returns the anomaly score for vector y based on the given set of spheres
Parameters
----------
y : numpy array
1-d vector of dimension d to score
spheres : list
list of 3-d tuples where each tuple contain sphere center, radius and nearest neighbour index
Returns
-------
score : float
anomaly score
"""
spheres_in=[]
for sphere in spheres:
if self._D(y,sphere[0],self.metric) <= sphere[1]:
spheres_in.append(sphere)
if len(spheres_in) == 0:
B = ()
elif len(spheres_in) != 0:
B = spheres_in[int(np.random.randint(len(spheres_in),size=1))]
for sphere_in in spheres_in:
if sphere_in[1] < B[1]:
B = sphere_in
if B == ():
score = 1
else:
score = 1 - (float(spheres[B[2]][1])/float(B[1]))
return score
def fit(self,X,y=None):
"""
Generates sets of hyper-spheres for anomaly scores
Parameters
----------
X : numpy array (nb_samples, nb_features)
data set
Returns
-------
self
"""
t_0 = time()
check_array(X)
self._sets_of_spheres = []
if self.verbose:
logger.info('generating sets of spheres...')
for j in range(self.ensemble_size):
X_s = np.random.permutation(X)[:self.sample_size,:]
spheres = self._generate_spheres(X_s)
self._sets_of_spheres.append(spheres)
t_f = time() - t_0
m,s = divmod(t_f, 60)
h,m = divmod(m, 60)
if self.verbose:
logger.info('Total run time: %i:%i:%i'
% (h,m,s))
return self
def fit_transform(self,X,y=None):
"""
Generates sets of hyper-spheres for anomaly scores
Parameters
----------
X : numpy array (nb_samples, nb_features)
data set
Returns
-------
self
"""
t_0 = time()
check_array(X)
self._sets_of_spheres = []
if self.verbose:
logger.info('generating sets of spheres...')
for j in range(self.ensemble_size):
X_s = np.random.permutation(X)[:self.sample_size,:]
spheres = self._generate_spheres(X_s)
self._sets_of_spheres.append(spheres)
t_f = time() - t_0
m,s = divmod(t_f, 60)
h,m = divmod(m, 60)
if self.verbose:
logger.info('Total run time: %i:%i:%i'
% (h,m,s))
return self
    def fit_score(self,X,y=None):
        """
        Generate set of hyper-sphere and return anomaly score for all points in dataset
        Parameters
        ----------
        X : numpy array
            data set
        y : ignored
        Return
        ------
        scores : numpy array
            1-d vector with the anomaly scores for all data points
        """
        t_0 = time()
        check_array(X)
        # Build the ensemble exactly as in fit().
        self._sets_of_spheres = []
        if self.verbose:
            logger.info('generating sets of spheres...')
        for j in range(self.ensemble_size):
            X_s = np.random.permutation(X)[:self.sample_size,:]
            spheres = self._generate_spheres(X_s)
            self._sets_of_spheres.append(spheres)
        # Score every data point against every ensemble member; the final
        # score is the mean over the members.
        scores = np.zeros(X.shape[0])
        for i in range(X.shape[0]):
            # Periodic progress logging (every 1000 points).
            if i % 1000 == 0 and self.verbose:
                logger.info('Getting anomaly score for data point %i'
                            % i)
                logger.info('X shape: %i X %i'
                            % X.shape)
            scores_i = []
            j=0
            for spheres in self._sets_of_spheres:
                score = self._score(X[i],spheres)
                if i % 1000 == 0 and j % 10 ==0 and self.verbose:
                    logger.info('Anomaly score for data point %i from estimator %i: %f'
                                % (i,j,score))
                scores_i.append(score)
                j+=1
            scores[i] = np.mean(scores_i)
        # NOTE(review): X_scored is only assigned the first time fit_score is
        # called on this instance, so a second call leaves it stale while
        # returning fresh scores -- confirm whether this is intentional.
        if 'X_scored' not in dir(self):
            self.X_scored = np.column_stack((X,scores))
        t_f = time() - t_0
        m,s = divmod(t_f, 60)
        h,m = divmod(m, 60)
        if self.verbose:
            logger.info('Total run time: %i:%i:%i'
                        % (h,m,s))
        return scores
def get_all_scores(self):
"""
Returns the dataset with the anomaly scores stored in the last column
Parameters
----------
None
Returns
-------
X_scored : numpy array
the dataset with anomaly scores stored in the last column
"""
if 'X_scored' in dir(self):
return self.X_scored
else:
raise NameError('method get_all_scores returns scores only if method fit_score has been previously called')
return self
def get_score(self,X):
"""
Calculates the anomaly score for a new data point X
Parameters
----------
y : numpy array
1-d vector to score
Returns
-------
score : tuple
tuple where first element is the anomaly score and the second element is True if the point is lab elled as anomalous and False if is labelled as non-anomalous based on the decision threshold
"""
if X.ndim == 1:
s = np.zeros(2)
scores = []
for spheres in self._sets_of_spheres:
score_s = self._score(X,spheres)
scores.append(score_s)
score_mean = np.mean(scores)
s[0]=score_mean
s[1]=1-score_mean
return s
elif X.ndim == 2:
s = np.zeros((X.shape[0],2))
for i in range(X.shape[0]):
scores = []
for spheres in self._sets_of_spheres:
score_s = self._score(X,spheres)
scores.append(score_s)
score_mean = np.mean(scores)
s[i,0] = score_mean
s[i,1] = 1-score_mean
return s
def get_anomalies(self,decision_threshold=1):
"""
Returns the data points whose anomaly score is above the decision_threshold
Parameters
----------
decition_threshold : float
anomaly decision threshold. Default 0.5
Returns
-------
X_anom: numpy array (nb_anomalies, nb_features + 1)
anomalous data points with anomaly scores stored in the last column
"""
if 'X_scored' in dir(self):
X_tmp = self.X_scored[:,:-1]
scores_tmp = self.X_scored[:,-1]
X_an = X_tmp[scores_tmp>=decision_threshold]
anom_scores = scores_tmp[scores_tmp>=decision_threshold]
self.X_anom = np.column_stack((X_an,anom_scores))
return self.X_anom
else:
raise NameError('method get_anomalies returns scores only if method fit_score has been previously called')
return self
| apache-2.0 |
projectchrono/chrono | src/demos/python/irrlicht/demo_IRR_crank_plot.py | 4 | 5790 | #------------------------------------------------------------------------------
# Name: pychrono example
# Purpose:
#
# Author: Alessandro Tasora
#
# Created: 1/01/2019
# Copyright: (c) ProjectChrono 2019
#------------------------------------------------------------------------------
import pychrono.core as chrono
import pychrono.irrlicht as chronoirr
import matplotlib.pyplot as plt
import numpy as np
print ("Example: create a slider crank and plot results");
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')
# ---------------------------------------------------------------------
#
#  Create the simulation system and add items
#
mysystem = chrono.ChSystemNSC()
# Some data shared in the following (crank geometry and rod length, meters)
crank_center = chrono.ChVectorD(-1,0.5,0)
crank_rad = 0.4
crank_thick = 0.1
rod_length = 1.5
# Create four rigid bodies: the truss, the crank, the rod, the piston.
# Create the floor truss
mfloor = chrono.ChBodyEasyBox(3, 1, 3, 1000)
mfloor.SetPos(chrono.ChVectorD(0,-0.5,0))
mfloor.SetBodyFixed(True)
mysystem.Add(mfloor)
# Create the flywheel crank
mcrank = chrono.ChBodyEasyCylinder(crank_rad, crank_thick, 1000)
mcrank.SetPos(crank_center + chrono.ChVectorD(0, 0, -0.1))
# Since ChBodyEasyCylinder creates a vertical (y up) cylinder, here rotate it:
mcrank.SetRot(chrono.Q_ROTATE_Y_TO_Z)
mysystem.Add(mcrank)
# Create a stylized rod
mrod = chrono.ChBodyEasyBox(rod_length, 0.1, 0.1, 1000)
mrod.SetPos(crank_center + chrono.ChVectorD(crank_rad+rod_length/2 , 0, 0))
mysystem.Add(mrod)
# Create a stylized piston
mpiston = chrono.ChBodyEasyCylinder(0.2, 0.3, 1000)
mpiston.SetPos(crank_center + chrono.ChVectorD(crank_rad+rod_length, 0, 0))
mpiston.SetRot(chrono.Q_ROTATE_Y_TO_X)
mysystem.Add(mpiston)
# Now create constraints and motors between the bodies.
# Create crank-truss joint: a motor that spins the crank flywheel
my_motor = chrono.ChLinkMotorRotationSpeed()
my_motor.Initialize(mcrank,   # the first connected body
                    mfloor,   # the second connected body
                    chrono.ChFrameD(crank_center)) # where to create the motor in abs.space
my_angularspeed = chrono.ChFunction_Const(chrono.CH_C_PI) # ang.speed: 180°/s
my_motor.SetMotorFunction(my_angularspeed)
mysystem.Add(my_motor)
# Create crank-rod joint
mjointA = chrono.ChLinkLockRevolute()
mjointA.Initialize(mrod,
                   mcrank,
                   chrono.ChCoordsysD( crank_center + chrono.ChVectorD(crank_rad,0,0) ))
mysystem.Add(mjointA)
# Create rod-piston joint
mjointB = chrono.ChLinkLockRevolute()
mjointB.Initialize(mpiston,
                   mrod,
                   chrono.ChCoordsysD( crank_center + chrono.ChVectorD(crank_rad+rod_length,0,0) ))
mysystem.Add(mjointB)
# Create piston-truss joint: prismatic guide aligned with the global X axis
mjointC = chrono.ChLinkLockPrismatic()
mjointC.Initialize(mpiston,
                   mfloor,
                   chrono.ChCoordsysD(
                       crank_center + chrono.ChVectorD(crank_rad+rod_length,0,0),
                       chrono.Q_ROTATE_Z_TO_X)
                   )
mysystem.Add(mjointC)
# ---------------------------------------------------------------------
#
#  Create an Irrlicht application to visualize the system
#
myapplication = chronoirr.ChIrrApp(mysystem, 'PyChrono example', chronoirr.dimension2du(1024,768))
myapplication.AddTypicalSky()
myapplication.AddTypicalLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
myapplication.AddTypicalCamera(chronoirr.vector3df(1,1,3), chronoirr.vector3df(0,1,0))
myapplication.AddTypicalLights()
# ==IMPORTANT!== Use this function for adding a ChIrrNodeAsset to all items
# in the system. These ChIrrNodeAsset assets are 'proxies' to the Irrlicht meshes.
# If you need a finer control on which item really needs a visualization proxy in
# Irrlicht, just use application.AssetBind(myitem); on a per-item basis.
myapplication.AssetBindAll();
# ==IMPORTANT!== Use this function for 'converting' into Irrlicht meshes the assets
# that you added to the bodies into 3D shapes, they can be visualized by Irrlicht!
myapplication.AssetUpdateAll();
# ---------------------------------------------------------------------
#
#  Run the simulation
#
# Initialize these lists to store values to plot.
array_time = []
array_angle = []
array_pos = []
array_speed = []
myapplication.SetTimestep(0.005)
myapplication.SetTryRealtime(True)
# Run the interactive simulation loop
while(myapplication.GetDevice().run()):
    # for plotting, append instantaneous values:
    array_time.append(mysystem.GetChTime())
    array_angle.append(my_motor.GetMotorRot())
    array_pos.append(mpiston.GetPos().x)
    array_speed.append(mpiston.GetPos_dt().x)
    # here happens the visualization and step time integration
    myapplication.BeginScene()
    myapplication.DrawAll()
    myapplication.DoStep()
    myapplication.EndScene()
    # stop simulation after 20 seconds (the old comment said 2 seconds,
    # contradicting the condition below)
    if mysystem.GetChTime() > 20:
        myapplication.GetDevice().closeDevice()
# Use matplotlib to make two plots when simulation ended:
fig, (ax1, ax2) = plt.subplots(2, sharex = True)
ax1.plot(array_angle, array_pos)
ax1.set(ylabel='position [m]')
ax1.grid()
ax2.plot(array_angle, array_speed, 'r--')
ax2.set(ylabel='speed [m]',xlabel='angle [rad]')
ax2.grid()
# trick to plot \pi on x axis of plots instead of 1 2 3 4 etc.
plt.xticks(np.linspace(0, 2*np.pi, 5),['0','$\pi/2$','$\pi$','$3\pi/2$','$2\pi$'])
| bsd-3-clause |
costypetrisor/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
    """
    Expand the dataset to 5 times its original size.

    The original 8x8 images in X are kept, and four shifted copies (one
    pixel up, left, right and down) are appended; Y is tiled to match.
    """
    # One 3x3 convolution kernel per shift direction.
    direction_vectors = [
        [[0, 1, 0],
         [0, 0, 0],
         [0, 0, 0]],
        [[0, 0, 0],
         [1, 0, 0],
         [0, 0, 0]],
        [[0, 0, 0],
         [0, 0, 1],
         [0, 0, 0]],
        [[0, 0, 0],
         [0, 0, 0],
         [0, 1, 0]]]

    def shift(x, w):
        # Convolve a flattened 8x8 image with kernel w, then flatten again.
        return convolve(x.reshape((8, 8)), mode='constant', weights=w).ravel()

    shifted_copies = [np.apply_along_axis(shift, 1, X, vector)
                      for vector in direction_vectors]
    X = np.concatenate([X] + shifted_copies)
    Y = np.concatenate([Y] * 5, axis=0)
    return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
# Augment with 1-pixel shifts: 5x more labeled samples.
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001)  # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
                                                    test_size=0.2,
                                                    random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
# Pipeline: RBM feature extraction followed by logistic regression.
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression on raw pixels, for comparison.
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
    metrics.classification_report(
        Y_test,
        classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
    metrics.classification_report(
        Y_test,
        logistic_classifier.predict(X_test))))
###############################################################################
# Plotting: visualize the 100 learned RBM components as 8x8 images.
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
    plt.subplot(10, 10, i + 1)
    plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
gotomypc/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
    from mpl_toolkits.basemap import Basemap
    basemap = True
except ImportError:
    basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
                    data['train']['dd long']]).T
# Label: 1 for Microryzomys observations, 0 for Bradypus.
ytrain = np.array([d.decode('ascii').startswith('micro')
                   for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180.  # Convert lat/long to radians
# Set up the data grid for the contour plot (subsampled by 5 for speed)
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
    plt.subplot(1, 2, i + 1)
    # construct a kernel density estimate of the distribution
    print(" - computing KDE in spherical coordinates")
    kde = KernelDensity(bandwidth=0.04, metric='haversine',
                        kernel='gaussian', algorithm='ball_tree')
    kde.fit(Xtrain[ytrain == i])
    # evaluate only on the land: -9999 indicates ocean
    Z = -9999 + np.zeros(land_mask.shape[0])
    Z[land_mask] = np.exp(kde.score_samples(xy))
    Z = Z.reshape(X.shape)
    # plot contours of the density
    levels = np.linspace(0, Z.max(), 25)
    plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
    if basemap:
        print(" - plot coastlines using basemap")
        m = Basemap(projection='cyl', llcrnrlat=Y.min(),
                    urcrnrlat=Y.max(), llcrnrlon=X.min(),
                    urcrnrlon=X.max(), resolution='c')
        m.drawcoastlines()
        m.drawcountries()
    else:
        print(" - plot coastlines from coverage")
        plt.contour(X, Y, land_reference,
                    levels=[-9999], colors="k",
                    linestyles="solid")
    plt.xticks([])
    plt.yticks([])
    plt.title(species_names[i])
plt.show()
| bsd-3-clause |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/GeneralisedEigen/MHDallatonce.py | 4 | 9242 | import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
from dolfin import tic, toc
import HiptmairSetup
import PETScIO as IO
import scipy.sparse as sp
import matplotlib.pylab as plt
import MatrixOperations as MO
class BaseMyPC(object):
    """Skeleton for a petsc4py shell preconditioner.

    Subclasses must override apply(); every other hook defaults to a no-op
    or to delegating back to apply()/applyS().
    """
    def setup(self, pc):
        pass
    def reset(self, pc):
        pass
    def apply(self, pc, x, y):
        # Must compute y = M^{-1} x; subclasses are required to override.
        raise NotImplementedError
    def applyT(self, pc, x, y):
        # Transpose application defaults to the plain application.
        self.apply(pc, x, y)
    def applyS(self, pc, x, y):
        # Symmetric application defaults to the plain application.
        self.apply(pc, x, y)
    def applySL(self, pc, x, y):
        self.applyS(pc, x, y)
    def applySR(self, pc, x, y):
        self.applyS(pc, x, y)
    def applyRich(self, pc, x, y, w, tols):
        # Richardson-iteration hook; defaults to one plain application.
        self.apply(pc, x, y)
class InnerOuterWITHOUT2inverse(BaseMyPC):
    """Block preconditioner for the coupled MHD saddle-point system.

    The monolithic vector is split into velocity (u), magnetic field (b),
    pressure (p) and Lagrange-multiplier (r) blocks; apply() solves the
    blocks in sequence with the KSP solvers supplied at construction.
    NOTE(review): Python 2 print statements throughout -- this module is
    Python 2 only.
    """
    def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,F):
        # W: list of the four function spaces; their dims define the index sets.
        self.W = W
        self.kspF = kspF
        self.kspA = kspA
        self.kspQ = kspQ
        self.Fp = Fp
        self.kspScalar = kspScalar
        self.kspCGScalar = kspCGScalar
        self.kspVector = kspVector
        # self.Bt = Bt
        self.HiptmairIts = 0
        self.CGits = 0
        self.F = F
        # print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
        # ss
        self.P = P
        self.G = G
        self.AA = A
        self.tol = Hiptmairtol
        # Index sets splitting the monolithic vector into u, b, p and r blocks.
        self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
        self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
        self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
                                                   self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
        self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
                                                   self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
    def create(self, pc):
        print "Create"
    def setUp(self, pc):
        # Extract the coupling sub-blocks from the assembled system matrix
        # and configure every sub-solver as a direct (LU) solve.
        A, P = pc.getOperators()
        print A.size
        self.Ct = A.getSubMatrix(self.u_is,self.b_is)
        self.C = A.getSubMatrix(self.b_is,self.u_is)
        self.D = A.getSubMatrix(self.r_is,self.b_is)
        self.Bt = A.getSubMatrix(self.u_is,self.p_is)
        self.B = A.getSubMatrix(self.p_is,self.u_is)
        self.Dt = A.getSubMatrix(self.b_is,self.r_is)
        # print self.Ct.view()
        #CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
        #print CFC.shape
        #CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
        #print CFC.size, self.AA.size
        #MX = self.AA+self.F
        MX = self.F # MO.StoreMatrix(B,"A")
        # print FC.todense()
        self.kspF.setType('preonly')
        self.kspF.getPC().setType('lu')
        self.kspF.setFromOptions()
        self.kspF.setPCSide(0)
        self.kspA.setType('preonly')
        self.kspA.getPC().setType('lu')
        self.kspA.setFromOptions()
        self.kspA.setPCSide(0)
        self.kspQ.setType('preonly')
        self.kspQ.getPC().setType('lu')
        self.kspQ.setFromOptions()
        self.kspQ.setPCSide(0)
        self.kspScalar.setType('preonly')
        self.kspScalar.getPC().setType('lu')
        self.kspScalar.setFromOptions()
        self.kspScalar.setPCSide(0)
        # Direct solver for the magnetic block (here MX = F, without the
        # curl-curl contribution -- see the commented-out MX = self.AA+self.F).
        kspMX = PETSc.KSP()
        kspMX.create(comm=PETSc.COMM_WORLD)
        pcMX = kspMX.getPC()
        kspMX.setType('preonly')
        pcMX.setType('lu')
        kspMX.setOperators(MX,MX)
        OptDB = PETSc.Options()
        #OptDB["pc_factor_mat_ordering_type"] = "rcm"
        #OptDB["pc_factor_mat_solver_package"] = "mumps"
        kspMX.setFromOptions()
        self.kspMX = kspMX
        # self.kspCGScalar.setType('preonly')
        # self.kspCGScalar.getPC().setType('lu')
        # self.kspCGScalar.setFromOptions()
        # self.kspCGScalar.setPCSide(0)
        self.kspVector.setType('preonly')
        self.kspVector.getPC().setType('lu')
        self.kspVector.setFromOptions()
        self.kspVector.setPCSide(0)
        print "setup"
    def apply(self, pc, x, y):
        # Block back-substitution: multiplier (r), pressure (p), magnetic (b)
        # and finally velocity (u), feeding each solved block into the next.
        br = x.getSubVector(self.r_is)
        xr = br.duplicate()
        self.kspScalar.solve(br, xr)
        # print self.D.size
        x2 = x.getSubVector(self.p_is)
        y2 = x2.duplicate()
        y3 = x2.duplicate()
        xp = x2.duplicate()
        # Pressure convection-diffusion (PCD) style solve: Q^{-1} Fp A^{-1}.
        self.kspA.solve(x2,y2)
        self.Fp.mult(y2,y3)
        self.kspQ.solve(y3,xp)
        # self.kspF.solve(bu1-bu4-bu2,xu)
        bb = x.getSubVector(self.b_is)
        bb = bb - self.Dt*xr
        xb = bb.duplicate()
        self.kspMX.solve(bb,xb)
        bu1 = x.getSubVector(self.u_is)
        bu2 = self.Bt*xp
        bu4 = self.Ct*xb
        XX = bu1.duplicate()
        xu = XX.duplicate()
        self.kspF.solve(bu1-bu4-bu2,xu)
        #self.kspF.solve(bu1,xu)
        y.array = (np.concatenate([xu.array, xb.array,xp.array,xr.array]))
    def ITS(self):
        # Iteration/time counters; CGtime and HiptmairTime are expected to be
        # set externally -- NOTE(review): they are never assigned here.
        return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
class InnerOuterMAGNETICinverse(BaseMyPC):
    """Variant of InnerOuterWITHOUT2inverse that inverts the full magnetic
    operator.

    Differs from the WITHOUT2inverse class only in setUp(): kspF gets its
    operators set explicitly from F, and the magnetic solve uses the full
    curl-curl matrix AA instead of F.
    """
    def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,F):
        # W: list of the four function spaces; their dims define the index sets.
        self.W = W
        self.kspF = kspF
        self.kspA = kspA
        self.kspQ = kspQ
        self.Fp = Fp
        self.kspScalar = kspScalar
        self.kspCGScalar = kspCGScalar
        self.kspVector = kspVector
        # self.Bt = Bt
        self.HiptmairIts = 0
        self.CGits = 0
        self.F = F
        # print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
        # ss
        self.P = P
        self.G = G
        self.AA = A
        self.tol = Hiptmairtol
        # Index sets splitting the monolithic vector into u, b, p and r blocks.
        self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
        self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
        self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
                                                   self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
        self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
                                                   self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
    def create(self, pc):
        print "Create"
    def setUp(self, pc):
        # Extract the coupling sub-blocks from the assembled system matrix
        # and configure every sub-solver as a direct (LU) solve.
        A, P = pc.getOperators()
        print A.size
        self.Ct = A.getSubMatrix(self.u_is,self.b_is)
        self.C = A.getSubMatrix(self.b_is,self.u_is)
        self.D = A.getSubMatrix(self.r_is,self.b_is)
        self.Bt = A.getSubMatrix(self.u_is,self.p_is)
        self.B = A.getSubMatrix(self.p_is,self.u_is)
        self.Dt = A.getSubMatrix(self.b_is,self.r_is)
        # print self.Ct.view()
        #CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
        #print CFC.shape
        #CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
        #print CFC.size, self.AA.size
        FF = self.F
        # MO.StoreMatrix(B,"A")
        # print FC.todense()
        self.kspF.setOperators(FF,FF)
        self.kspF.setType('preonly')
        self.kspF.getPC().setType('lu')
        self.kspF.setFromOptions()
        self.kspF.setPCSide(0)
        self.kspA.setType('preonly')
        self.kspA.getPC().setType('lu')
        self.kspA.setFromOptions()
        self.kspA.setPCSide(0)
        self.kspQ.setType('preonly')
        self.kspQ.getPC().setType('lu')
        self.kspQ.setFromOptions()
        self.kspQ.setPCSide(0)
        self.kspScalar.setType('preonly')
        self.kspScalar.getPC().setType('lu')
        self.kspScalar.setFromOptions()
        self.kspScalar.setPCSide(0)
        # Direct solver for the magnetic block; unlike the WITHOUT2inverse
        # variant this uses the full matrix AA.
        kspMX = PETSc.KSP()
        kspMX.create(comm=PETSc.COMM_WORLD)
        pcMX = kspMX.getPC()
        kspMX.setType('preonly')
        pcMX.setType('lu')
        OptDB = PETSc.Options()
        kspMX.setOperators(self.AA,self.AA)
        self.kspMX = kspMX
        # self.kspCGScalar.setType('preonly')
        # self.kspCGScalar.getPC().setType('lu')
        # self.kspCGScalar.setFromOptions()
        # self.kspCGScalar.setPCSide(0)
        self.kspVector.setType('preonly')
        self.kspVector.getPC().setType('lu')
        self.kspVector.setFromOptions()
        self.kspVector.setPCSide(0)
        print "setup"
    def apply(self, pc, x, y):
        # Block back-substitution: multiplier (r), pressure (p), magnetic (b)
        # and finally velocity (u), feeding each solved block into the next.
        br = x.getSubVector(self.r_is)
        xr = br.duplicate()
        self.kspScalar.solve(br, xr)
        # print self.D.size
        x2 = x.getSubVector(self.p_is)
        y2 = x2.duplicate()
        y3 = x2.duplicate()
        xp = x2.duplicate()
        # Pressure convection-diffusion (PCD) style solve: Q^{-1} Fp A^{-1}.
        self.kspA.solve(x2,y2)
        self.Fp.mult(y2,y3)
        self.kspQ.solve(y3,xp)
        # self.kspF.solve(bu1-bu4-bu2,xu)
        bb = x.getSubVector(self.b_is)
        bb = bb - self.Dt*xr
        xb = bb.duplicate()
        self.kspMX.solve(bb,xb)
        bu1 = x.getSubVector(self.u_is)
        bu2 = self.Bt*xp
        bu4 = self.Ct*xb
        XX = bu1.duplicate()
        xu = XX.duplicate()
        self.kspF.solve(bu1-bu4-bu2,xu)
        #self.kspF.solve(bu1,xu)
        y.array = (np.concatenate([xu.array, xb.array,xp.array,xr.array]))
    def ITS(self):
        # Iteration/time counters; CGtime and HiptmairTime are expected to be
        # set externally -- NOTE(review): they are never assigned here.
        return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
| mit |
MattWellie/PAGE_MPO | tsv_gene_names_grab.py | 1 | 3555 | import csv, cPickle
import numpy as np
import matplotlib.pyplot as plt
"""
Something quick to get a set of genes from a csv file
"""
# Input/output file names (script is Python 2: cPickle, print >> syntax).
file_in = 'batch_query_no_infertile.tsv'
field = 'human_gene_symbol'
ddg2p = 'DDG2P.csv'
annotations = 'annotations.cPickle'
all_output = 'tsv_names_summary_out.txt'
gene_set = set()
gene_duplicates = set()
printed_lines = []
# Import the file
with open(file_in, 'rU') as handle:
    # NOTE(review): 'dict' shadows the builtin; left unchanged here.
    dict = csv.DictReader(handle, delimiter='\t')
    for row in dict:
        gene_list = row[field].split('|')
        printed_lines.append('{} - {}: {}'.format(row['mp_id'], row['mp_definition'], len(gene_list)))
        for gene in gene_list:
            # Track genes seen in more than one phenotype category.
            if gene in gene_set:
                gene_duplicates.add(gene)
            else:
                gene_set.add(gene)
printed_lines.append('Unique genes found: {}'.format(len(gene_set)))
printed_lines.append('{} genes were present in multiple categories:\n'.format(len(gene_duplicates)))
printed_lines.append(gene_duplicates)
# Dump the gene set to a pickle file
with open('genes_of_interest.cPickle', 'w') as handle:
    cPickle.dump(gene_set, handle)
# Grab all the gene names from the DDG2P input file
ddg2p_set = set()
first_line = True
with open(ddg2p, 'r') as handle:
    for line in handle:
        # Skip the CSV header row; gene symbol is the first column.
        if first_line:
            first_line = False
        else:
            ddg2p_set.add(line.split(',')[0])
# Identify any overlapping genes:
ddg2p_overlap = set()
for gene in gene_set:
    if gene in ddg2p_set:
        ddg2p_overlap.add(gene)
# Dump the gene set to a pickle file
with open('ddg2p_overlap_genes.cPickle', 'w') as handle:
    cPickle.dump(ddg2p_overlap, handle)
printed_lines.append('Total phenotype genes overlapping DDG2P: {}'.format(len(ddg2p_overlap)))
printed_lines.append(ddg2p_overlap)
# Import and use the pickled set of annotations from the DDD project
# This contains the HI, HS, and phenotype details where available
with open(annotations, 'r') as handle:
    anno_dict = cPickle.load(handle)
# Create a list to hold all the HI scores found for the overlap genes.
hi_scores = []
annotated_genes = set()
not_found = set()
for gene in ddg2p_overlap:
    found = False
    # anno_dict is keyed by chromosome, then by gene symbol.
    for chromosome in anno_dict:
        if gene in anno_dict[chromosome]:
            found = True
            annotated_genes.add(gene)
            printed_lines.append('\nHI Gene Annotations for {}'.format(gene))
            ann_keys = anno_dict[chromosome][gene].keys()
            if 'hi_score' in ann_keys:
                printed_lines.append('\tHI: {}'.format(anno_dict[chromosome][gene]['hi_score']))
                hi_scores.append(float(anno_dict[chromosome][gene]['hi_score']))
            if 'hs_score' in ann_keys:
                printed_lines.append('\tHS: {}'.format(anno_dict[chromosome][gene]['hs_score']))
            if 'diseases' in ann_keys:
                for disease in anno_dict[chromosome][gene]['diseases']:
                    printed_lines.append('\t{}'.format(disease))
    if not found:
        not_found.add(gene)
printed_lines.append('\n{}/{} Genes had annotations available'.format(len(annotated_genes), len(ddg2p_overlap)))
printed_lines.append('{} Genes didn\'t have annotations:'.format(len(not_found)))
printed_lines.append(not_found)
# Write the collected report lines (Python 2 print-to-file syntax).
with open(all_output, 'wb') as handle:
    for line in printed_lines:
        print >>handle, line
# Maybe try and plot this as a graph
line = plt.figure()
plt.plot(sorted(hi_scores), 'o')
plt.ylabel('HI Score')
plt.xlabel('Gene (sorted by HI score)')
plt.title('A scatter plot of all HI scores')
plt.show()
asurve/arvind-sysml2 | scripts/perftest/python/google_docs/update.py | 15 | 4666 | #!/usr/bin/env python3
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import sys
import os.path
import argparse
import pandas as pd
from oauth2client.service_account import ServiceAccountCredentials
import gspread
# Update data to google sheets
def parse_data(file_path):
    """
    Parse one perf-test output file into (key, time) series.

    Skips the first row (header banner) and the last row (footer).  The key
    for each record is "<algorithm>_<run_type>_<intercept>_<matrix_type>_<data_shape>",
    where the algorithm name is the last ':'-separated token of the
    'INFO:root:algorithm' column.
    """
    frame = pd.read_csv(file_path, sep=',', skiprows=1, skipfooter=1, engine='python')
    algo_names = frame['INFO:root:algorithm'].str.split(':').str[-1]
    composite_key = (algo_names + '_' + frame['run_type'] + '_' +
                     frame['intercept'] + '_' + frame['matrix_type'] + '_' +
                     frame['data_shape'])
    return composite_key, frame['time_sec']
def auth(path, sheet_name):
    """
    Authorize against the Google Sheets API and open the target worksheet.

    Parameters
    ----------
    path : str
        Path to the service-account JSON key file.
    sheet_name : str
        Name of the worksheet inside the "Perf" spreadsheet.
    """
    scope = ['https://spreadsheets.google.com/feeds']
    credentials = ServiceAccountCredentials.from_json_keyfile_name(path, scope)
    client = gspread.authorize(credentials)
    return client.open("Perf").worksheet(sheet_name)
def insert_pair(algo, time, start_col, tag):
    """
    Wrapper function that calls insert_values to insert algo and time

    Writes the algorithm keys into column start_col (header 'algo_<tag>')
    and the timings into the next column (header 'time_<tag>').

    NOTE(review): relies on the module-level ``sheet`` created in the
    __main__ block instead of taking the worksheet as a parameter --
    confirm before reusing outside this script.
    """
    insert_values(sheet, algo, start_col, 'algo_{}'.format(tag))
    insert_values(sheet, time, start_col + 1, 'time_{}'.format(tag))
    print('Writing Complete')
def insert_values(sheet, key, col_num, header):
    """
    Write one column to the worksheet: ``header`` in row 1, then the values
    of ``key`` in rows 2..len(key)+1 of column ``col_num``.
    """
    # Column header goes in the first row.
    sheet.update_cell(1, col_num, header)
    # Data rows start at row 2 (enumerate with start=2 replaces the
    # original index + 2 arithmetic; 'id' no longer shadows the builtin).
    for row_num, val in enumerate(key, start=2):
        sheet.update_cell(row_num, col_num, val)
def get_dim(sheet):
    """
    Get the dimensions of the data already present in the worksheet.

    Returns
    -------
    (row, col) : tuple of int
        Number of data records and number of columns.  On an API error the
        original fallback of (1, 0) is preserved; a worksheet that responds
        but has no records yields (0, 0).
    """
    try:
        col_count = sheet.get_all_records()
    except Exception:  # was a bare except; narrowed so Ctrl-C still works
        col_count = [[]]
    row = len(col_count)
    # BUG FIX: when get_all_records() succeeds but returns [], the original
    # 'len(col_count[0])' raised IndexError; guard the empty case.
    col = len(col_count[0]) if col_count else 0
    return row, col
def row_append(data_frame, file):
    """
    Concatenate the columns of an existing results CSV onto data_frame.

    file -- path of a CSV previously written by this script.
    Returns a new DataFrame with ``data_frame``'s columns first,
    followed by the columns read from ``file``.
    """
    existing = pd.read_csv(file)
    return pd.concat([data_frame, existing], axis=1)
# Example Usage
# ./update.py --file ../temp/test.out --exec-type singlenode --auth client_json.json --tag 3.0
if __name__ == '__main__':
    execution_mode = ['hybrid_spark', 'singlenode']
    cparser = argparse.ArgumentParser(description='System-ML Update / Stat Script')
    cparser.add_argument('--file', help='Location of the current perf test outputs',
                         required=True, metavar='')
    cparser.add_argument('--exec-type', help='Backend Type', choices=execution_mode,
                         required=True, metavar='')
    cparser.add_argument('--tag', help='Tagging header value',
                         required=True, metavar='')
    cparser.add_argument('--auth', help='Location to read auth file', metavar='')
    cparser.add_argument('--append', help='Location to append the outputs', metavar='')
    args = cparser.parse_args()

    # At least one output destination (sheet or local CSV) is required.
    if args.auth is None and args.append is None:
        sys.exit('Both --auth and --append cannot be empty')

    # Parse once up front; the original re-parsed the file a second
    # time inside the --auth branch.
    algo, time = parse_data(args.file)

    if args.append is not None:
        schema_df = {'algo_{}'.format(args.tag): algo,
                     'time_{}'.format(args.tag): time}
        data_frame = pd.DataFrame(schema_df)
        if os.path.isfile(args.append):
            # Existing file: append the new columns alongside it.
            append_data = row_append(data_frame, args.append)
            append_data.to_csv(args.append, sep=',', index=False)
        else:
            data_frame.to_csv(args.append, sep=',', index=False)

    if args.auth is not None:
        # Authenticate, measure the sheet's current width, then write
        # the new column pair just after the last used column.
        sheet = auth(args.auth, args.exec_type)
        row, col = get_dim(sheet)
        insert_pair(algo, time, col + 1, args.tag)
| apache-2.0 |
pratapvardhan/pandas | pandas/tests/series/test_constructors.py | 2 | 43384 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime, timedelta
from collections import OrderedDict
from numpy import nan
import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas.api.types import CategoricalDtype
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64tz_dtype)
from pandas import (Index, Series, isna, date_range, Timestamp,
NaT, period_range, timedelta_range, MultiIndex,
IntervalIndex, Categorical, DataFrame)
from pandas._libs import lib
from pandas._libs.tslib import iNaT
from pandas.compat import lrange, range, zip, long, PY36
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesConstructors(TestData):
def test_invalid_dtype(self):
    # GH15520: objects that are not real dtypes must raise a TypeError
    # whose message contains 'not understood'.
    bad_dtypes = (pd.Timestamp, 'pd.Timestamp', list)
    for bad in bad_dtypes:
        with tm.assert_raises_regex(TypeError, 'not understood'):
            Series([], name='time', dtype=bad)
def test_scalar_conversion(self):
    # Passing a scalar yields a Series, never a bare float.
    wrapped = Series(0.5)
    assert not isinstance(wrapped, float)

    # A one-element Series coerces to the matching Python scalars.
    one = Series([1.])
    assert float(one) == 1.0
    assert int(one) == 1
    assert long(one) == 1
def test_constructor(self):
    """Smoke-test the basic Series construction paths."""
    # the fixture time series carries a DatetimeIndex
    assert self.ts.index.is_all_dates
    # Pass in Series
    derived = Series(self.ts)
    assert derived.index.is_all_dates
    assert tm.equalContents(derived.index, self.ts.index)
    # Ensure new index is not created
    assert id(self.ts.index) == id(derived.index)
    # Mixed type Series
    mixed = Series(['hello', np.NaN], index=[0, 1])
    assert mixed.dtype == np.object_
    # NaN is stored as-is (identity preserved) in an object Series
    assert mixed[1] is np.NaN
    assert not self.empty.index.is_all_dates
    assert not Series({}).index.is_all_dates
    # a 2-D ndarray is not valid Series data
    pytest.raises(Exception, Series, np.random.randn(3, 3),
                  index=np.arange(3))
    # the name attribute survives re-construction from a Series
    mixed.name = 'Series'
    rs = Series(mixed).name
    xp = 'Series'
    assert rs == xp
    # raise on MultiIndex GH4187
    m = MultiIndex.from_arrays([[1, 2], [3, 4]])
    pytest.raises(NotImplementedError, Series, m)
@pytest.mark.parametrize('input_class', [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
    """Empty containers and no data at all build equivalent Series."""
    empty = Series()
    empty2 = Series(input_class())
    # these are Index() and RangeIndex() which don't compare type equal
    # but are just .equals
    assert_series_equal(empty, empty2, check_index_type=False)
    # With explicit dtype:
    empty = Series(dtype='float64')
    empty2 = Series(input_class(), dtype='float64')
    assert_series_equal(empty, empty2, check_index_type=False)
    # GH 18515 : with dtype=category:
    empty = Series(dtype='category')
    empty2 = Series(input_class(), dtype='category')
    assert_series_equal(empty, empty2, check_index_type=False)
    if input_class is not list:
        # With index: dict-like inputs can align against an index
        empty = Series(index=lrange(10))
        empty2 = Series(input_class(), index=lrange(10))
        assert_series_equal(empty, empty2)
        # With index and dtype float64:
        empty = Series(np.nan, index=lrange(10))
        empty2 = Series(input_class(), index=lrange(10), dtype='float64')
        assert_series_equal(empty, empty2)
        # GH 19853 : with empty string, index and dtype str
        empty = Series('', dtype=str, index=range(3))
        empty2 = Series('', index=range(3))
        assert_series_equal(empty, empty2)
@pytest.mark.parametrize('input_arg', [np.nan, float('nan')])
def test_constructor_nan(self, input_arg):
    # A NaN scalar broadcast over an index equals an empty float64
    # Series built with the same index.
    expected = Series(dtype='float64', index=lrange(10))
    result = Series(input_arg, index=lrange(10))
    assert_series_equal(expected, result, check_index_type=False)
@pytest.mark.parametrize('dtype', [
    'f8', 'i8', 'M8[ns]', 'm8[ns]', 'category', 'object',
    'datetime64[ns, UTC]',
])
@pytest.mark.parametrize('index', [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
    # GH-20865: dtype alone (no data) must be honored and produce an
    # empty Series of that dtype.
    ser = pd.Series(dtype=dtype, index=index)
    assert ser.dtype == dtype
    assert len(ser) == 0
def test_constructor_no_data_index_order(self):
    # Index order must be preserved when no data is supplied.
    ser = pd.Series(index=['b', 'a', 'c'])
    assert list(ser.index) == ['b', 'a', 'c']
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(['x', None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(['x', np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
    # Re-indexing through the constructor matches an explicit sort.
    original_index = ['d', 'b', 'a', 'c']
    base = Series([4, 7, -5, 3], index=original_index)
    reindexed = Series(base, index=sorted(original_index))
    assert_series_equal(reindexed, base.sort_index())
def test_constructor_iterator(self):
    # A lazy range and the equivalent materialized list build the
    # same int64 Series.
    from_range = Series(range(10), dtype='int64')
    from_list = Series(list(range(10)), dtype='int64')
    assert_series_equal(from_range, from_list)
def test_constructor_list_like(self):
    # Lists, tuples and ndarrays must all coerce to a standard,
    # platform-independent int64 dtype.
    expected = Series([1, 2, 3], dtype='int64')
    candidates = ([1, 2, 3], (1, 2, 3),
                  np.array([1, 2, 3], dtype='int64'))
    for data in candidates:
        assert_series_equal(Series(data, index=[0, 1, 2]), expected)
@pytest.mark.parametrize('input_vals', [
([1, 2]),
(['1', '2']),
(list(pd.date_range('1/1/2011', periods=2, freq='H'))),
(list(pd.date_range('1/1/2011', periods=2, freq='H',
tz='US/Eastern'))),
([pd.Interval(left=0, right=5)]),
])
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(['1.0', '2.0', np.nan], dtype=object)
assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
    # Generators are consumed into a default-indexed Series ...
    exp = Series(lrange(10))
    result = Series(i for i in range(10))
    assert_series_equal(result, exp)

    # ... and also respect an explicitly supplied index.
    exp.index = lrange(10, 20)
    result = Series((i for i in range(10)), index=lrange(10, 20))
    assert_series_equal(result, exp)
def test_constructor_map(self):
    # GH8909: map objects behave like any other iterator input.
    identity = lambda value: value
    exp = Series(lrange(10))
    result = Series(map(identity, range(10)))
    assert_series_equal(result, exp)

    exp.index = lrange(10, 20)
    result = Series(map(identity, range(10)), index=lrange(10, 20))
    assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'],
fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# GH12574
pytest.raises(
ValueError, lambda: Series(pd.Categorical([1, 2, 3]),
dtype='int64'))
cat = Series(pd.Categorical([1, 2, 3]), dtype='category')
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype='category')
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
# test basic creation / coercion of categoricals
s = Series(factor, name='A')
assert s.dtype == 'category'
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == 'B'
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
[1, 'John P. Doe']],
columns=['person_id', 'person_name'])
x['person_name'] = Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(['a', 'b'],
dtype=CategoricalDtype(['a', 'b', 'c'],
ordered=True))
assert is_categorical_dtype(result) is True
tm.assert_index_equal(result.cat.categories, pd.Index(['a', 'b', 'c']))
assert result.cat.ordered
result = pd.Series(['a', 'b'], dtype=CategoricalDtype(['b', 'a']))
assert is_categorical_dtype(result)
tm.assert_index_equal(result.cat.categories, pd.Index(['b', 'a']))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series('a', index=[0, 1],
dtype=CategoricalDtype(['a', 'b'], ordered=True))
expected = Series(['a', 'a'], index=[0, 1],
dtype=CategoricalDtype(['a', 'b'], ordered=True))
tm.assert_series_equal(result, expected, check_categorical=True)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(['a', 'b', 'c'],
dtype=CategoricalDtype(['a', 'b']))
right = pd.Series(pd.Categorical(['a', 'b', np.nan],
categories=['a', 'b']))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3, ), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
assert_series_equal(result, expected)
data = ma.masked_all((3, ), dtype=int)
result = Series(data)
expected = Series([nan, nan, nan], dtype=float)
assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0, nan, 2], index=index, dtype=float)
assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
assert_series_equal(result, expected)
data = ma.masked_all((3, ), dtype=bool)
result = Series(data)
expected = Series([nan, nan, nan], dtype=object)
assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([True, nan, False], index=index, dtype=object)
assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
assert_series_equal(result, expected)
data = ma.masked_all((3, ), dtype='M8[ns]')
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype='M8[ns]')
assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), iNaT,
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), datetime(2001, 1, 2),
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
    # With no index given, a 0..n-1 integer index is generated.
    ser = Series([0, 1, 2])
    tm.assert_index_equal(ser.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize('input', [[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(['a', 'b', 'a']),
(i for i in range(3)),
map(lambda x: x, range(3))])
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = 'Length of passed values is 3, index implies 4'
with pytest.raises(ValueError, message=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype='int64')
expected = Series(100, index=np.arange(4), dtype='int64')
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
pytest.raises(ValueError, Series, ['foo'], index=['a', 'b', 'c'])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1., 1., 8.]), dtype='i8')
assert s.dtype == np.dtype('i8')
s = Series(np.array([1., 1., np.nan]), copy=True, dtype='i8')
assert s.dtype == np.dtype('f8')
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.], np.array([1.])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.
assert not x.equals(y)
assert x[0] == 2.
assert y[0] == 1.
@pytest.mark.parametrize(
"index",
[
pd.date_range('20170101', periods=3, tz='US/Eastern'),
pd.date_range('20170101', periods=3),
pd.timedelta_range('1 day', periods=3),
pd.period_range('2012Q1', periods=3, freq='Q'),
pd.Index(list('abc')),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3)],
ids=lambda x: type(x).__name__)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._data.blocks[0].values is not index
def test_constructor_pass_none(self):
s = Series(None, index=lrange(5))
assert s.dtype == np.float64
s = Series(None, index=lrange(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == 'datetime64[ns]'
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with tm.assert_raises_regex(ValueError, msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = 'Trying to coerce negative values to unsigned integers'
with tm.assert_raises_regex(OverflowError, msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with tm.assert_raises_regex(ValueError, msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
# incorrectly inferring on dateimelike looking when object dtype is
# specified
s = Series([Timestamp('20130101'), 'NOV'], dtype=object)
assert s.iloc[0] == Timestamp('20130101')
assert s.iloc[1] == 'NOV'
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
# even thought the blocks are mixed
belly = '216 3T19'.split()
wing1 = '2T15 4H19'.split()
wing2 = '416 4T20'.split()
mat = pd.to_datetime('2016-01-22 2019-09-07'.split())
df = pd.DataFrame(
{'wing1': wing1,
'wing2': wing2,
'mat': mat}, index=belly)
result = df.loc['3T19']
assert result.dtype == object
result = df.loc['216']
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [np.array([None, None, None, None,
datetime.now(), None]),
np.array([None, None, datetime.now(), None])]:
result = Series(arr)
assert result.dtype == 'M8[ns]'
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype='M8[ns]', index=lrange(5))
assert isna(s).all()
# in theory this should be all nulls, but since
# we are not specifying a dtype is ambiguous
s = Series(iNaT, index=lrange(5))
assert not isna(s).all()
s = Series(nan, dtype='M8[ns]', index=lrange(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype='M8[ns]')
assert isna(s[1])
assert s.dtype == 'M8[ns]'
s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]')
assert isna(s[1])
assert s.dtype == 'M8[ns]'
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == 'M8[ns]'
s.iloc[0] = np.nan
assert s.dtype == 'M8[ns]'
# GH3414 related
pytest.raises(TypeError, lambda x: Series(
Series(dates).astype('int') / 1000000, dtype='M8[ms]'))
pytest.raises(TypeError,
lambda x: Series(dates, dtype='datetime64'))
# invalid dates can be help as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp('20130101'), 1], index=['a', 'b'])
assert result['a'] == Timestamp('20130101')
assert result['b'] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range('01-Jan-2015', '01-Dec-2015', freq='M')
values2 = dates.view(np.ndarray).astype('datetime64[ns]')
expected = Series(values2, index=dates)
for dtype in ['s', 'D', 'ms', 'us', 'ns']:
values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
result = Series(values1, dates)
assert_series_equal(result, expected)
# GH 13876
# coerce to non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ['s', 'D', 'ms', 'us', 'ns']:
values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
result = Series(values1, index=dates, dtype=object)
assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()],
dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001'])
assert s.dtype == 'datetime64[ns]'
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101', periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range('20130101', periods=3, tz='UTC')
assert str(Series(dr).iloc[0].tz) == 'UTC'
dr = date_range('20130101', periods=3, tz='US/Eastern')
assert str(Series(dr).iloc[0].tz) == 'US/Eastern'
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == 'object'
assert s[2] is pd.NaT
assert 'NaT' in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == 'object'
assert s[2] is pd.NaT
assert 'NaT' in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == 'object'
assert s[2] is np.nan
assert 'NaN' in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr)
assert s.dtype.name == 'datetime64[ns, US/Eastern]'
assert s.dtype == 'datetime64[ns, US/Eastern]'
assert is_datetime64tz_dtype(s.dtype)
assert 'datetime64[ns, US/Eastern]' in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == 'datetime64[ns]'
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize('UTC').tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern', freq='D')
result = s[0]
assert result == Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern', freq='D')
result = s[Series([True, True, False], index=s.index)]
assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
assert_series_equal(result, s)
# short str
assert 'datetime64[ns, US/Eastern]' in str(s)
# formatting with NaT
result = s.shift()
assert 'datetime64[ns, US/Eastern]' in str(result)
assert 'NaT' in str(result)
# long str
t = Series(date_range('20130101', periods=1000, tz='US/Eastern'))
assert 'datetime64[ns, US/Eastern]' in str(t)
result = pd.DatetimeIndex(s, freq='infer')
tm.assert_index_equal(result, dr)
# inference
s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')])
assert s.dtype == 'datetime64[ns, US/Pacific]'
assert lib.infer_dtype(s) == 'datetime64'
s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')])
assert s.dtype == 'object'
assert lib.infer_dtype(s) == 'datetime'
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype='datetime64[ns, US/Eastern]')
expected = Series(pd.DatetimeIndex(['NaT', 'NaT'], tz='US/Eastern'))
assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D'])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('arg',
['2013-01-01 00:00:00', pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype='datetime64[ns, CET]')
expected = Series(pd.Timestamp(arg)).dt.tz_localize('CET')
assert_series_equal(result, expected)
def test_construction_interval(self):
# construction from interval & array of intervals
index = IntervalIndex.from_breaks(np.arange(3), closed='right')
result = Series(index)
repr(result)
str(result)
tm.assert_index_equal(Index(result.values), index)
result = Series(index.values)
tm.assert_index_equal(Index(result.values), index)
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range('20130101', periods=3, tz='US/Eastern'))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert('UTC'), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range('20130101', periods=5, freq='D')
s = Series(pi)
expected = Series(pi.astype(object))
assert_series_equal(s, expected)
assert s.dtype == 'object'
def test_constructor_dict(self):
    # Dict values are aligned to the supplied index; keys missing from
    # the dict come out as NaN.
    d = {'a': 0., 'b': 1., 'c': 2.}
    result = Series(d, index=['b', 'c', 'd', 'a'])
    expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
    assert_series_equal(result, expected)

    # The same alignment works with PeriodIndex keys.
    pidx = tm.makePeriodIndex(100)
    result = Series({pidx[0]: 0, pidx[1]: 1}, index=pidx)
    expected = Series(np.nan, pidx)
    expected.iloc[0] = 0
    expected.iloc[1] = 1
    assert_series_equal(result, expected)
def test_constructor_dict_order(self):
    # GH19018: insertion order is preserved on Python >= 3.6;
    # otherwise keys are sorted.
    data = {'b': 1, 'a': 0, 'c': 2}
    result = Series(data)
    if PY36:
        expected = Series([1, 0, 2], index=list('bac'))
    else:
        expected = Series([0, 1, 2], index=list('abc'))
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18480
d = {1: 'a', value: 'b', float('nan'): 'c', 4: 'd'}
result = Series(d).sort_values()
expected = Series(['a', 'b', 'c', 'd'], index=[1, value, np.nan, 4])
assert_series_equal(result, expected)
# MultiIndex:
d = {(1, 1): 'a', (2, np.nan): 'b', (3, value): 'c'}
result = Series(d).sort_values()
expected = Series(['a', 'b', 'c'],
index=Index([(1, 1), (2, np.nan), (3, value)]))
assert_series_equal(result, expected)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
assert_series_equal(result_datetime64, expected)
assert_series_equal(result_datetime, expected)
assert_series_equal(result_Timestamp, expected)
def test_constructor_list_of_tuples(self):
    # Tuples are kept intact as elements, not unpacked into columns.
    data = [(1, 1), (2, 2), (2, 3)]
    ser = Series(data)
    assert [element for element in ser] == data
def test_constructor_tuple_of_tuples(self):
    # Same as the list case: nested tuples survive a round-trip.
    data = ((1, 1), (2, 2), (2, 3))
    ser = Series(data)
    assert tuple(element for element in ser) == data
def test_constructor_dict_of_tuples(self):
    # Tuple keys become a MultiIndex (None is a valid level value).
    data = {(1, 2): 3, (None, 5): 6}
    result = Series(data).sort_values()
    expected = Series(
        [3, 6], index=MultiIndex.from_tuples([(1, 2), (None, 5)]))
    tm.assert_series_equal(result, expected)
def test_constructor_set(self):
    # Sets are unordered, so constructing a Series from one must raise.
    values = {1, 2, 3, 4, 5}
    pytest.raises(TypeError, Series, values)
    pytest.raises(TypeError, Series, frozenset(values))
def test_fromDict(self):
    # Plain dict input: the resulting index is sorted.
    series = Series({'a': 0, 'b': 1, 'c': 2, 'd': 3})
    assert tm.is_sorted(series.index)

    # Mixed value types force object dtype.
    series = Series({'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()})
    assert series.dtype == np.object_

    series = Series({'a': 0, 'b': '1', 'c': '2', 'd': '3'})
    assert series.dtype == np.object_

    # ... unless an explicit dtype coerces the values.
    series = Series({'a': '0', 'b': '1'}, dtype=float)
    assert series.dtype == np.float64
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
assert nans.dtype == np.float_
assert len(nans) == len(self.ts)
strings = Series('foo', index=self.ts.index)
assert strings.dtype == np.object_
assert len(strings) == len(self.ts)
d = datetime.now()
dates = Series(d, index=self.ts.index)
assert dates.dtype == 'M8[ns]'
assert len(dates) == len(self.ts)
# GH12336
# Test construction of categorical series from value
categorical = Series(0, index=self.ts.index, dtype="category")
expected = Series(0, index=self.ts.index).astype("category")
assert categorical.dtype == 'category'
assert len(categorical) == len(self.ts)
tm.assert_series_equal(categorical, expected)
def test_constructor_dtype_timedelta64(self):
    """Series construction infers ``timedelta64[ns]`` from timedelta-like input.

    Covers plain ``timedelta`` objects, ``np.timedelta64`` values, mixtures
    with ``NaT``/``np.nan``, string inputs like ``'1 Day'``, and the cases
    that intentionally stay ``object`` (``iNaT`` is a plain int, and mixing
    in a non-timedelta string blocks inference).
    """
    # basic
    td = Series([timedelta(days=i) for i in range(3)])
    assert td.dtype == 'timedelta64[ns]'

    td = Series([timedelta(days=1)])
    assert td.dtype == 'timedelta64[ns]'

    td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(
        1, 's')])
    assert td.dtype == 'timedelta64[ns]'

    # mixed with NaT: missing values do not defeat the explicit m8 dtype
    td = Series([timedelta(days=1), NaT], dtype='m8[ns]')
    assert td.dtype == 'timedelta64[ns]'

    td = Series([timedelta(days=1), np.nan], dtype='m8[ns]')
    assert td.dtype == 'timedelta64[ns]'

    td = Series([np.timedelta64(300000000), pd.NaT], dtype='m8[ns]')
    assert td.dtype == 'timedelta64[ns]'

    # improved inference (no explicit dtype needed)
    # GH5689
    td = Series([np.timedelta64(300000000), NaT])
    assert td.dtype == 'timedelta64[ns]'

    # because iNaT is int, not coerced to timedelta
    td = Series([np.timedelta64(300000000), iNaT])
    assert td.dtype == 'object'

    td = Series([np.timedelta64(300000000), np.nan])
    assert td.dtype == 'timedelta64[ns]'

    td = Series([pd.NaT, np.timedelta64(300000000)])
    assert td.dtype == 'timedelta64[ns]'

    td = Series([np.timedelta64(1, 's')])
    assert td.dtype == 'timedelta64[ns]'

    # these are frequency conversion astypes
    # for t in ['s', 'D', 'us', 'ms']:
    #    pytest.raises(TypeError, td.astype, 'm8[%s]' % t)

    # valid astype
    td.astype('int64')

    # invalid casting
    pytest.raises(TypeError, td.astype, 'int32')

    # this is an invalid casting: a non-timedelta string cannot be coerced
    def f():
        Series([timedelta(days=1), 'foo'], dtype='m8[ns]')
    pytest.raises(Exception, f)

    # leave as object here (mixed timedelta / string without explicit dtype)
    td = Series([timedelta(days=i) for i in range(3)] + ['foo'])
    assert td.dtype == 'object'

    # these will correctly infer a timedelta from None/NaT/NaN plus a
    # parseable timedelta string
    s = Series([None, pd.NaT, '1 Day'])
    assert s.dtype == 'timedelta64[ns]'

    s = Series([np.nan, pd.NaT, '1 Day'])
    assert s.dtype == 'timedelta64[ns]'

    s = Series([pd.NaT, None, '1 Day'])
    assert s.dtype == 'timedelta64[ns]'

    s = Series([pd.NaT, np.nan, '1 Day'])
    assert s.dtype == 'timedelta64[ns]'
# GH 16406
def test_constructor_mixed_tz(self):
    """Mixing tz-naive and tz-aware Timestamps falls back to object dtype."""
    naive = Timestamp('20130101')
    aware = Timestamp('20130101', tz='US/Eastern')
    result = Series([naive, aware])
    expected = Series([naive, aware], dtype='object')
    assert_series_equal(result, expected)
def test_NaT_scalar(self):
    """Reading and re-assigning a missing datetime value round-trips as NaT."""
    # iNaT is the integer sentinel for "not a time"; through an M8[ns]
    # Series it is presented as NaT.
    series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')

    val = series[3]
    assert isna(val)

    # assigning the NaT scalar back into another position keeps it missing
    series[2] = val
    assert isna(series[2])
def test_NaT_cast(self):
    """Casting an all-NaN float Series to datetime64 yields NaT (GH10747)."""
    casted = Series([np.nan]).astype('M8[ns]')
    assert_series_equal(casted, Series([NaT]))
def test_constructor_name_hashable(self):
for n in [777, 777., 'name', datetime(2001, 11, 11), (1, ), u"\u05D0"]:
for data in [[1, 2, 3], np.ones(3), {'a': 0, 'b': 1}]:
s = Series(data, name=n)
assert s.name == n
def test_constructor_name_unhashable(self):
    """Unhashable objects are rejected as Series names with TypeError."""
    unhashables = [['name_list'], np.ones(2), {1: 2}]
    for bad_name in unhashables:
        for payload in unhashables:
            pytest.raises(TypeError, Series, payload, name=bad_name)
def test_auto_conversion(self):
series = Series(list(date_range('1/1/2000', periods=10)))
assert series.dtype == 'M8[ns]'
def test_convert_non_ns(self):
    """Non-nanosecond numpy datetime64/timedelta64 inputs convert to [ns].

    NOTE(review): newer pandas preserves non-ns resolutions instead of
    upcasting to [ns]; these expectations are tied to the pandas version
    this suite was written for -- confirm before porting.
    """
    # convert from a numpy array of non-ns timedelta64
    arr = np.array([1, 2, 3], dtype='timedelta64[s]')
    s = Series(arr)
    expected = Series(pd.timedelta_range('00:00:01', periods=3, freq='s'))
    assert_series_equal(s, expected)

    # convert from a numpy array of non-ns datetime64
    # note that creating a numpy datetime64 is in LOCAL time!!!!
    # seems to work for M8[D], but not for M8[s]
    s = Series(np.array(['2013-01-01', '2013-01-02',
                         '2013-01-03'], dtype='datetime64[D]'))
    assert_series_equal(s, Series(date_range('20130101', periods=3,
                                             freq='D')))

    # s = Series(np.array(['2013-01-01 00:00:01','2013-01-01
    # 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]'))
    # assert_series_equal(s,date_range('20130101
    # 00:00:01',period=3,freq='s'))
@pytest.mark.parametrize(
    "index",
    [
        date_range('1/1/2000', periods=10),
        timedelta_range('1 day', periods=10),
        period_range('2000-Q1', periods=10, freq='Q')],
    ids=lambda x: type(x).__name__)
def test_constructor_cant_cast_datetimelike(self, index):
    """Datetime-like indexes cannot be cast to float, but int64 works."""
    # floats are not ok
    msg = "Cannot cast {} to ".format(type(index).__name__)
    with tm.assert_raises_regex(TypeError, msg):
        Series(index, dtype=float)

    # ints are ok
    # we test with np.int64 to get similar results on
    # windows / 32-bit platforms
    result = Series(index, dtype=np.int64)
    expected = Series(index.astype(np.int64))
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    "index",
    [
        date_range('1/1/2000', periods=10),
        timedelta_range('1 day', periods=10),
        period_range('2000-Q1', periods=10, freq='Q')],
    ids=lambda x: type(x).__name__)
def test_constructor_cast_object(self, index):
    """dtype=object construction matches an astype(object) round-trip."""
    for source in (index,
                   pd.Index(index, dtype=object),
                   index.astype(object)):
        s = Series(source, dtype=object)
        exp = Series(index).astype(object)
        tm.assert_series_equal(s, exp)
def test_constructor_generic_timestamp_deprecated(self):
    """Generic np.datetime64/np.timedelta64 dtypes warn; bad units raise.

    see gh-15524
    """
    # passing the *generic* numpy type (no unit) is deprecated but still
    # resolves to the [ns] unit
    with tm.assert_produces_warning(FutureWarning):
        dtype = np.timedelta64
        s = Series([], dtype=dtype)

        assert s.empty
        assert s.dtype == 'm8[ns]'

    with tm.assert_produces_warning(FutureWarning):
        dtype = np.datetime64
        s = Series([], dtype=dtype)

        assert s.empty
        assert s.dtype == 'M8[ns]'

    # These timestamps have the wrong frequencies,
    # so an Exception should be raised now.
    msg = "cannot convert timedeltalike"
    with tm.assert_raises_regex(TypeError, msg):
        Series([], dtype='m8[ps]')

    msg = "cannot convert datetimelike"
    with tm.assert_raises_regex(TypeError, msg):
        Series([], dtype='M8[ps]')
@pytest.mark.parametrize('dtype', [None, 'uint8', 'category'])
def test_constructor_range_dtype(self, dtype):
    """range() input is materialized like an explicit list (GH 16804)."""
    result = Series(range(5), dtype=dtype)
    expected = Series([0, 1, 2, 3, 4], dtype=dtype or 'int64')
    tm.assert_series_equal(result, expected)
def test_constructor_tz_mixed_data(self):
    """Mixed naive/aware timestamps infer object dtype (GH 13051)."""
    stamps = [Timestamp('2016-05-01 02:03:37'),
              Timestamp('2016-04-30 19:03:37-0700', tz='US/Pacific')]
    result = Series(stamps)
    expected = Series(stamps, dtype=object)
    tm.assert_series_equal(result, expected)
| bsd-3-clause |
PatrickChrist/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier

# Three base classifiers plus a soft-voting ensemble that weights the
# random forest 5x as heavily as the other two.
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])

eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                        voting='soft',
                        weights=[1, 1, 5])

# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]

# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]


# plotting

N = 4  # number of groups
ind = np.arange(N)  # group positions
width = 0.35  # bar width

fig, ax = plt.subplots()

# bars for classifier 1-3 (the ensemble's fourth slot is zeroed out here)
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')

# bars for VotingClassifier (only its own slot is non-zero)
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')

# plot annotations: dashed line separates base classifiers from the ensemble
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
                    'GaussianNB\nweight 1',
                    'RandomForestClassifier\nweight 5',
                    'VotingClassifier\n(average probabilities)'],
                   rotation=40,
                   ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
mattweirick/mattweirick.github.io | markdown_generator/talks.py | 199 | 4000 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
# Load the talks table; tab separation avoids collisions with the commas
# that naturally occur inside titles/descriptions.
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks  # bare expression: displays the table in a notebook, no-op as a script
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
# Map of characters that are unsafe inside YAML front matter to their
# HTML entity equivalents.  (The entity strings were corrupted to literal
# characters in a previous HTML-decoding pass, which even broke the quote
# entry syntactically; restored here per the comment above.)
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}


def html_escape(text):
    """Escape ``&``, ``"`` and ``'`` in *text* for YAML/HTML embedding.

    Non-string input (e.g. a pandas NaN float for a missing field) yields
    the literal string ``"False"``, preserving the generator's historical
    behavior.
    """
    if type(text) is str:
        return "".join(html_escape_table.get(c, c) for c in text)
    else:
        return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}

# For each TSV row, assemble the YAML front matter plus optional body and
# write it to ../_talks/YYYY-MM-DD-<url_slug>.md.
for row, item in talks.iterrows():
    md_filename = str(item.date) + "-" + item.url_slug + ".md"
    html_filename = str(item.date) + "-" + item.url_slug

    md = "---\ntitle: \"" + item.title + '"\n'
    md += "collection: talks" + "\n"

    # len(str(...)) > 3 filters out empty cells, which pandas reads as NaN
    # (str(nan) == 'nan', length 3).
    if len(str(item.type)) > 3:
        md += 'type: "' + item.type + '"\n'
    else:
        md += 'type: "Talk"\n'

    md += "permalink: /talks/" + html_filename + "\n"

    if len(str(item.venue)) > 3:
        md += 'venue: "' + item.venue + '"\n'

    # BUG FIX: the date line was previously emitted only when `location`
    # was non-empty (copy-pasted condition).  `date` is a required field,
    # so always write it.
    md += "date: " + str(item.date) + "\n"

    if len(str(item.location)) > 3:
        md += 'location: "' + str(item.location) + '"\n'

    md += "---\n"

    if len(str(item.talk_url)) > 3:
        md += "\n[More information here](" + item.talk_url + ")\n"

    if len(str(item.description)) > 3:
        md += "\n" + html_escape(item.description) + "\n"

    md_filename = os.path.basename(md_filename)

    with open("../_talks/" + md_filename, 'w') as f:
        f.write(md)
| mit |
apache/arrow | dev/archery/setup.py | 3 | 1985 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import operator
import sys

from setuptools import setup, find_packages

if sys.version_info < (3, 6):
    sys.exit('Python < 3.6 is not supported')

# For pathlib.Path compatibility
jinja_req = 'jinja2>=2.11'

# Optional dependency groups, installable as ``pip install archery[<name>]``.
extras = {
    'lint': ['numpydoc==1.1.0', 'autopep8', 'flake8', 'cmake_format==0.6.13'],
    'benchmark': ['pandas'],
    'docker': ['ruamel.yaml', 'python-dotenv'],
    'release': [jinja_req, 'jira', 'semver', 'gitpython'],
    'crossbow': ['github3.py', jinja_req, 'pygit2>=1.6.0', 'ruamel.yaml',
                 'setuptools_scm'],
}
# 'bot' builds on 'crossbow'; 'all' is the de-duplicated union of all groups.
extras['bot'] = extras['crossbow'] + ['pygithub', 'jira']
extras['all'] = list(set(functools.reduce(operator.add, extras.values())))

setup(
    name='archery',
    version="0.1.0",
    description='Apache Arrow Developers Tools',
    url='http://github.com/apache/arrow',
    maintainer='Arrow Developers',
    maintainer_email='dev@arrow.apache.org',
    packages=find_packages(),
    include_package_data=True,
    install_requires=['click>=7'],
    tests_require=['pytest', 'responses'],
    extras_require=extras,
    entry_points='''
        [console_scripts]
        archery=archery.cli:archery
    '''
)
| apache-2.0 |
ycasg/PyNLO | src/validation/Old and Partial Tests/ppln_generate_stepped_apodized_design.py | 2 | 3351 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 23 15:54:36 2014
This file is part of pyNLO.
pyNLO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyNLO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyNLO. If not, see <http://www.gnu.org/licenses/>.
"""
# NOTE(review): legacy Python 2 code (print statements below); run under
# python2 or port the prints before reuse.
import numpy as np
import matplotlib.pyplot as plt
from pynlo.media.crystals.XTAL_PPLN import PPLN
from scipy import integrate

plt.close('all')

npoints = 2**6              # grid size (not used below)
crystallength = 40*1e-3     # crystal length [m]
crystal = PPLN(45, length = crystallength)  # PPLN crystal at 45 C

pump_wl = 1064.             # pump wavelength [nm]
# Pole the crystal for 1064 nm pump -> 1540 nm signal DFG.
crystal.set_pp(crystal.calculate_poling_period(pump_wl, 1540, None))

sgnl_stop_wl = 1700
NPTS = 1000
# Phasematching bandwidth across the signal band, and the idler wavelength
# from energy conservation (1/idler = 1/pump - 1/signal).
mix_bw = crystal.calculate_mix_phasematching_bw(1064, np.linspace(1300, sgnl_stop_wl,NPTS))
idler = 1.0/(1.0/1064 - 1.0/np.linspace(1300, sgnl_stop_wl,NPTS))

print crystal.invert_dfg_qpm_to_signal_wl(1064, 24e-6)

# ODE for finding 'ideal' QPM structure
# dLambda/dz = 1/phasematching BW
# scale = 4.65e-9 # for propto BW
#scale = 1.3e5 # for propto 1/BW
scale = 7e-6 / (1e3*crystallength) # for linear chirp 10 um / crystal length
def dLdz(L, z):
    """Chirp law dL/dz for the poling period L at position z.

    NOTE(review): the signal/bandwidth computation below is currently dead
    code -- only the constant ``scale`` (linear chirp) is returned.  The
    commented returns are the alternative bandwidth-proportional chirp
    laws that were explored.
    """
    signal = crystal.invert_dfg_qpm_to_signal_wl(pump_wl, L)
    bw = crystal.calculate_mix_phasematching_bw(pump_wl, signal)
    #return 1.0/(scale*bw)
    #return (scale*bw)
    return scale
# Step along the crystal: start at a 32 um poling period and, one grating
# section at a time, advance the target signal wavelength by the section's
# optical bandwidth until the period falls below 24.5 um.
z = 0
L = 32e-6 # period to start at
period_len = 1e-3*10.0/5.0

print("Begin APPLN design")
design = [ [z+period_len/2, L] ]
while L > 24.5e-6:
    signal = crystal.invert_dfg_qpm_to_signal_wl(pump_wl, L)
    bw_invm_m = crystal.calculate_mix_phasematching_bw(pump_wl, signal)
    # bandwidth for a section of length period_len
    optical_bw = bw_invm_m / period_len
    print optical_bw
    z += period_len
    # shift the signal by one bandwidth (work in inverse meters)
    signal2 = 1.0e9/ ( 1/(signal*1e-9) + optical_bw)
    print "signal %f->%f"%(signal, signal2)
    L = crystal.calculate_poling_period(pump_wl, signal2, None)[0]
    print L
    design.append([z+period_len/2,L])
design = np.array(design)
print design

# Following Journal of the Optical Society of America B Vol. 26, Issue 12, pp. 2315-2322 (2009)
# doi: 10.1364/JOSAB.26.002315
# Use tanh apodization
# f(z) = \frac{1}{2} tanh\left(\frac{2az}{L}\right), 0\leq z \leq L/2
# f(z) = \frac{1}{2} tanh\left(\frac{2a(L-z)}{L}\right), L/2 < z \leq L

# Generate apodization function for one unit cell (grating period,)
# then concatenate together to form waveguide description
apod_zs = np.linspace(0, period_len/2.0, 1024)
apod_a = 7
apod_fs = np.tanh(2*apod_a*apod_zs / period_len)

grating_zs = []
grating_ps = []
for p in design:
    # rising half of the window before the section center ...
    grating_zs.append(p[0] - apod_zs[::-1])
    grating_ps.append(apod_fs * p[1])
    # ... and the falling half after it
    grating_zs.append(p[0] + apod_zs)
    grating_ps.append(apod_fs[::-1] * p[1])

grating_zs = np.array(grating_zs).flatten() * 1e3  # m -> mm
grating_ps = np.array(grating_ps).flatten()
# clamp impractically short periods to a 10 um floor
grating_ps[grating_ps < 10*1e-6] = 10*1e-6

plt.plot(grating_zs, grating_ps)
plt.show()
np.savetxt('h:\\ppln_wg_apod.dat', np.vstack((grating_zs, grating_ps)).T) | gpl-3.0 |
shangwuhencc/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
# Deprecation shim: warn once at import time; the real implementations live
# in sklearn.metrics and are simply re-exported below.
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
              "0.18. Please import from sklearn.metrics",
              DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
stevenbergner/stevenbergner.github.io | Teaching/cmpt767/lab2/code/elevation_grid.py | 1 | 5397 | #!/usr/bin/env python3
"""
Efficient, local elevation lookup using intermediate tile representation
of world-wide SRTM elevation data.
Examples:
import elevation_grid as eg
import numpy as np
el = eg.get_elevation(50, -123.1)
print("A place near Whistler, BC is {} m above sea level".format(el))
import matplotlib.pyplot as plt
lats, lons = np.meshgrid(np.arange(-90,90,.5),np.arange(-180,180,.5))
elevs = [eg.get_elevations(np.array([late,lone]).T) for late,lone in zip(lats,lons)]
plt.pcolormesh(lons,lats,elevs,cmap='terrain')
plt.colorbar()
# plt.show()
"""
import numpy as np
import os
scriptpath = os.path.dirname(os.path.realpath(__file__))
elev_fname = os.path.join(scriptpath, 'elevations_latlon.npy')
tiles_fname = os.path.join(scriptpath, 'tiles_latlon.npy')
tile_size = 100
tile_degrees = 10
lat_ranges = np.arange(-90,90,tile_degrees)
lon_ranges = np.arange(-180,180,tile_degrees)
elevgrid = None
def make_elevation_grid():
    """ Uses SRTM.py to create an intermediate elevation tile representation and
    concatenates the tiles into a single array that can be indexed via latitude and longitude.

    Note, this takes a long time and downloads about 62 GB of data.
    Don't run this if the elevation grid is already available.
    """
    def cleanup_elevation_grid():
        """Concatenate tiles_latlon into a single array and replace NaNs with 0"""
        ta = [np.concatenate(tr, axis=1) for tr in tiles_latlon]
        ta = np.concatenate(ta)
        ta[np.isnan(ta)] = 0
        print('Saving elevation array to {}'.format(elev_fname))
        np.save(elev_fname, ta)

    try:
        import srtm
    except ImportError:  # was a bare except: don't mask unrelated errors
        print('Install SRTM.py via\n'
              'pip3 install git+https://github.com/tkrajina/srtm.py.git')
        raise

    try:
        print('Resuming construction of tiles from {}'.format(tiles_fname))
        # allow_pickle=True is required since NumPy 1.16.3 because the tile
        # checkpoint is an object array (None placeholders / per-tile arrays).
        tiles_latlon = np.load(tiles_fname, allow_pickle=True)
    except (IOError, ValueError):  # no/unreadable checkpoint -> start fresh
        print('Creating list of empty tiles')
        tiles_latlon = [[None for _ in range(len(lon_ranges))]
                        for _ in range(len(lat_ranges))]

    for k, lati in enumerate(lat_ranges):
        ed = srtm.get_data()
        for l, loti in enumerate(lon_ranges):
            print(lati, loti)
            if tiles_latlon[k][l] is None:  # only compute what we don't yet have
                try:
                    tiles_latlon[k][l] = ed.get_image((tile_size, tile_size),
                                                      (lati, lati + tile_degrees),
                                                      (loti, loti + tile_degrees),
                                                      10000,
                                                      mode='array')
                except Exception:  # keep going; a failed tile stays None
                    print('Error producing tile {}, {}'.format(lati, loti))
        # Checkpoint so an interrupted run can resume.
        # NOTE(review): the original indentation was ambiguous in this copy;
        # saving once per latitude band here -- confirm against upstream.
        np.save(tiles_fname, tiles_latlon)

    cleanup_elevation_grid()
# The overall SRTM tile data in ~/.cache/srtm is about 52 GB. It was impossible to download these few:
# broken_tiles = ['N21E035.hgt', 'N22E035.hgt', 'N24E035.hgt', 'N25E035.hgt', 'N26E035.hgt',
# 'N27E035.hgt', 'N28E035.hgt', 'N27E039.hgt', 'N28E035.hgt', 'N28E039.hgt',
# 'N29E039.hgt', ]
# load the preprocess elevation array (about 50 MB uncompressed)
import gzip

# Load the preprocessed elevation array (about 50 MB uncompressed); prefer
# the gzip-compressed copy and fall back to the raw .npy file.
try:
    try:
        fh = gzip.open(elev_fname + '.gz', 'rb')
    except (IOError, OSError):  # no readable .gz -> try the raw file
        fh = open(elev_fname, 'rb')
    try:
        elevgrid = np.load(fh)
    finally:
        fh.close()  # close even when np.load fails (handle leaked before)
except Exception:  # was a bare except: keep the warning, don't swallow Ctrl-C
    print("Warning: There was a problem initializing the elevation array from {}[.gz]".format(elev_fname))
    print(" Consider to run make_elevation_grid()")
def get_elevations(latlons):
    """Vectorized elevation lookup against the in-memory world grid.

    *latlons* is an (N, 2) array of (latitude, longitude) pairs; returns a
    length-N array of the corresponding elevations in meters.
    """
    cells_per_degree = float(tile_size) / tile_degrees
    # Shift lat/lon into [0, 180) x [0, 360) and scale to grid cells.
    grid_idx = ((latlons + (90, 180)) * cells_per_degree).astype(int)
    return elevgrid[grid_idx[:, 0], grid_idx[:, 1]]
def get_elevation(lat, lon, get_elevations=get_elevations):
    """Lookup the elevation in meters for a single (lat, lon) point."""
    point = np.array([[lat, lon]])
    return get_elevations(point)[0]
import requests
def request_elevations(latlons):
    """Fetch elevations for (lat, lon) pairs from open-elevation.com."""
    locations = [{'latitude': float(lat), 'longitude': float(lon)}
                 for lat, lon in latlons]
    reqjson = {'locations': locations}
    r = requests.post('https://api.open-elevation.com/api/v1/lookup', json=reqjson)
    assert r, "Error making open elevation bulk request"
    results = r.json()['results']
    return [entry['elevation'] for entry in results]
#-----------------------------------------------------------------------------
import unittest
# from command line: python -m unittest elevation_grid.py
class TestElevationLookups(unittest.TestCase):
    """Sanity check of the local grid against a public elevation service.

    NOTE(review): requires network access (open-elevation.com) and the
    preprocessed elevation array to be loaded.
    """

    def test_elevations(self):
        """ Compare SRTM against open-elevation.com info """
        # allow up to 100 m disagreement between the two data sources
        tol_m = 100
        lats, lons = np.meshgrid(np.arange(48, 53, 1), np.arange(118, 122, 1));
        latlons = np.stack([lats.flatten(), lons.flatten()]).T;
        # probe both integer coordinates and slightly offset ones
        latlons = np.concatenate([latlons, latlons + .1])
        rev = np.array(request_elevations(latlons))
        gev = get_elevations(latlons)
        np.set_printoptions(suppress=True)
        # on failure, print the per-point comparison table
        self.assertTrue(abs((rev - gev)).max() < tol_m, np.stack((latlons[:, 0], latlons[:, 1], rev, gev, rev - gev)).T)
        print(' lat', ' lon', 'open-elev.', 'srtm-array', 'difference')
        print(np.stack((latlons[:, 0], latlons[:, 1], rev, gev, rev - gev)).T)
| mit |
olafhauk/mne-python | examples/time_frequency/plot_source_power_spectrum.py | 19 | 1959 | """
======================================================
Compute source power spectral density (PSD) in a label
======================================================
Returns an STC file containing the PSD (in dB) of each of the sources
within a label.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)

import matplotlib.pyplot as plt

import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd

print(__doc__)

###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'

# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # known bad channels

# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
                       stim=False, exclude='bads')

tmin, tmax = 0, 120  # use the first 120s of data
fmin, fmax = 4, 100  # look at frequencies between 4 and 100Hz
n_fft = 2048  # the FFT size (n_fft). Ideally a power of 2
label = mne.read_label(fname_label)

# dSPM estimate of the PSD restricted to the sources inside the label
stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
                         tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                         pick_ori="normal", n_fft=n_fft, label=label,
                         dB=True)

stc.save('psd_dSPM')

###############################################################################
# View PSD of sources in label
plt.plot(stc.times, stc.data.T)
plt.xlabel('Frequency (Hz)')
plt.ylabel('PSD (dB)')
plt.title('Source Power Spectrum (PSD)')
plt.show()
| bsd-3-clause |
eickenberg/scikit-learn | sklearn/preprocessing/__init__.py | 3 | 1041 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import Normalizer
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
from ._weights import balance_weights
# Explicit public API of sklearn.preprocessing: the names imported above are
# re-exported here for ``from sklearn.preprocessing import *``.
__all__ = [
    'Binarizer',
    'Imputer',
    'KernelCenterer',
    'LabelBinarizer',
    'LabelEncoder',
    'MultiLabelBinarizer',
    'MinMaxScaler',
    'Normalizer',
    'OneHotEncoder',
    'StandardScaler',
    'add_dummy_feature',
    'PolynomialFeatures',
    'balance_weights',
    'binarize',
    'normalize',
    'scale',
    'label_binarize',
]
| bsd-3-clause |
ntung/ramp | gaussian_process_no_normalization_of_inputs.py | 1 | 34661 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.utils import check_random_state, check_array, check_X_y
from sklearn.utils.validation import check_is_fitted
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
    """
    Computes the nonzero componentwise L1 cross-distances between the vectors
    in X.

    Parameters
    ----------
    X: array_like
        An array with shape (n_samples, n_features)

    Returns
    -------
    D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
        The array of componentwise L1 cross-distances.

    ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
        The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
    """
    X = check_array(X)
    n_samples, n_features = X.shape
    n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
    # ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; it always
    # aliased the builtin ``int``, which is the exact drop-in replacement.
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)
    D = np.zeros((n_nonzero_cross_dist, n_features))
    ll_1 = 0
    # Fill the condensed (upper-triangular) distance list row block by row
    # block: block k holds the distances from X[k] to every later sample.
    for k in range(n_samples - 1):
        ll_0 = ll_1
        ll_1 = ll_0 + n_samples - k - 1
        ij[ll_0:ll_1, 0] = k
        ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
        D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])

    return D, ij
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
# Lookup tables mapping the string arguments accepted by the ``regr``,
# ``corr`` and ``optimizer`` constructor parameters to their implementations.
_regression_types = {
    'constant': regression.constant,
    'linear': regression.linear,
    'quadratic': regression.quadratic}

_correlation_types = {
    'absolute_exponential': correlation.absolute_exponential,
    'squared_exponential': correlation.squared_exponential,
    'generalized_exponential': correlation.generalized_exponential,
    'cubic': correlation.cubic,
    'linear': correlation.linear}

_optimizer_types = [
    'fmin_cobyla',
    'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
             storage_mode='full', verbose=False, theta0=1e-1,
             thetaL=None, thetaU=None, optimizer='fmin_cobyla',
             random_start=1, normalize=True,
             nugget=10. * MACHINE_EPSILON, random_state=None):
    """Store the constructor arguments unmodified.

    Following the scikit-learn estimator convention, no validation or
    conversion happens here; ``_check_params`` (called from ``fit`` and
    ``predict``) performs all checking and canonicalization.
    """
    self.regr = regr
    self.corr = corr
    self.beta0 = beta0
    self.storage_mode = storage_mode
    self.verbose = verbose
    self.theta0 = theta0
    self.thetaL = thetaL
    self.thetaU = thetaU
    self.normalize = normalize
    self.nugget = nugget
    self.optimizer = optimizer
    self.random_start = random_start
    self.random_state = random_state
def fit(self, X, y):
    """
    The Gaussian Process model fitting method.

    Parameters
    ----------
    X : double array_like
        An array with shape (n_samples, n_features) with the input at which
        observations were made.
    y : double array_like
        An array with shape (n_samples, ) or shape (n_samples, n_targets)
        with the observations of the output to be predicted.

    Returns
    -------
    gp : self
        A fitted Gaussian Process model object awaiting data to perform
        predictions.
    """
    # Run input checks
    self._check_params()

    self.random_state = check_random_state(self.random_state)

    # Force data to 2D numpy.array
    X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
    self.y_ndim_ = y.ndim
    if y.ndim == 1:
        y = y[:, np.newaxis]

    # Check shapes of DOE & observations
    n_samples, n_features = X.shape
    _, n_targets = y.shape

    # Run input checks again now that n_samples is known, so the nugget
    # shape can be validated as well.
    self._check_params(n_samples)

    # Normalize data or don't
    if self.normalize:
        X_mean = np.mean(X, axis=0)
        X_std = np.std(X, axis=0)
        y_mean = np.mean(y, axis=0)
        y_std = np.std(y, axis=0)
        X_std[X_std == 0.] = 1.
        y_std[y_std == 0.] = 1.
        # center and scale X if necessary
        # X = (X - X_mean) / X_std  # Rémi: cancel normalization
        y = (y - y_mean) / y_std
    else:
        # BUG FIX: X_mean and X_std were previously left unbound on this
        # branch, raising NameError at the attribute assignment below
        # whenever normalize=False. Use neutral statistics instead.
        X_mean = np.zeros(1)
        X_std = np.ones(1)
        y_mean = np.zeros(1)
        y_std = np.ones(1)

    # Calculate matrix of distances D between samples
    D, ij = l1_cross_distances(X)
    if (np.min(np.sum(D, axis=1)) == 0.
            and self.corr != correlation.pure_nugget):
        raise Exception("Multiple input features cannot have the same"
                        " target value.")

    # Regression matrix and parameters
    F = self.regr(X)
    n_samples_F = F.shape[0]
    if F.ndim > 1:
        p = F.shape[1]
    else:
        p = 1
    if n_samples_F != n_samples:
        raise Exception("Number of rows in F and X do not match. Most "
                        "likely something is going wrong with the "
                        "regression model.")
    if p > n_samples_F:
        raise Exception(("Ordinary least squares problem is undetermined "
                         "n_samples=%d must be greater than the "
                         "regression model size p=%d.") % (n_samples, p))
    if self.beta0 is not None:
        if self.beta0.shape[0] != p:
            raise Exception("Shapes of beta0 and F do not match.")

    # Set attributes
    self.X = X
    self.y = y
    self.D = D
    self.ij = ij
    self.F = F
    self.X_mean, self.X_std = X_mean, X_std
    self.y_mean, self.y_std = y_mean, y_std

    # Determine Gaussian Process model parameters
    if self.thetaL is not None and self.thetaU is not None:
        # Maximum Likelihood Estimation of the parameters
        if self.verbose:
            print("Performing Maximum Likelihood Estimation of the "
                  "autocorrelation parameters...")
        self.theta_, self.reduced_likelihood_function_value_, par = \
            self._arg_max_reduced_likelihood_function()
        if np.isinf(self.reduced_likelihood_function_value_):
            raise Exception("Bad parameter region. "
                            "Try increasing upper bound")
    else:
        # Given parameters
        if self.verbose:
            print("Given autocorrelation parameters. "
                  "Computing Gaussian Process model parameters...")
        self.theta_ = self.theta0
        self.reduced_likelihood_function_value_, par = \
            self.reduced_likelihood_function()
        if np.isinf(self.reduced_likelihood_function_value_):
            raise Exception("Bad point. Try increasing theta0.")

    self.beta = par['beta']
    self.gamma = par['gamma']
    self.sigma2 = par['sigma2']
    self.C = par['C']
    self.Ft = par['Ft']
    self.G = par['G']

    if self.storage_mode == 'light':
        # Delete heavy data (it will be computed again if required)
        # (it is required only when MSE is wanted in self.predict)
        if self.verbose:
            print("Light storage mode specified. "
                  "Flushing autocorrelation matrix...")
        self.D = None
        self.ij = None
        self.F = None
        self.C = None
        self.Ft = None
        self.G = None

    return self
def predict(self, X, eval_MSE=False, batch_size=None):
    """
    This function evaluates the Gaussian Process model at x.

    Parameters
    ----------
    X : array_like
        An array with shape (n_eval, n_features) giving the point(s) at
        which the prediction(s) should be made.
    eval_MSE : boolean, optional
        A boolean specifying whether the Mean Squared Error should be
        evaluated or not.
        Default assumes evalMSE = False and evaluates only the BLUP (mean
        prediction).
    batch_size : integer, optional
        An integer giving the maximum number of points that can be
        evaluated simultaneously (depending on the available memory).
        Default is None so that all given points are evaluated at the same
        time.

    Returns
    -------
    y : array_like, shape (n_samples, ) or (n_samples, n_targets)
        An array with shape (n_eval, ) if the Gaussian Process was trained
        on an array of shape (n_samples, ) or an array with shape
        (n_eval, n_targets) if the Gaussian Process was trained on an array
        of shape (n_samples, n_targets) with the Best Linear Unbiased
        Prediction at x.
    MSE : array_like, optional (if eval_MSE == True)
        An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
        with the Mean Squared Error at x.
    """
    check_is_fitted(self, "X")

    # Check input shapes
    X = check_array(X)
    n_eval, _ = X.shape
    n_samples, n_features = self.X.shape
    n_samples_y, n_targets = self.y.shape

    # Run input checks
    self._check_params(n_samples)

    if X.shape[1] != n_features:
        raise ValueError(("The number of features in X (X.shape[1] = %d) "
                          "should match the number of features used "
                          "for fit() "
                          "which is %d.") % (X.shape[1], n_features))

    if batch_size is None:
        # No memory management
        # (evaluates all given points in a single batch run)

        # Input normalization is deliberately disabled in this modified
        # copy (see fit()):
        # X = (X - self.X_mean) / self.X_std

        # Initialize output
        y = np.zeros(n_eval)
        if eval_MSE:
            MSE = np.zeros(n_eval)

        # Get pairwise componentwise L1-distances to the input training set
        dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
        # Get regression function and correlation
        f = self.regr(X)
        r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)

        # Scaled predictor
        y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)

        # Predictor (undo the output scaling applied in fit())
        y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)

        if self.y_ndim_ == 1:
            y = y.ravel()

        # Mean Squared Error
        if eval_MSE:
            C = self.C
            if C is None:
                # Light storage mode (need to recompute C, F, Ft and G)
                if self.verbose:
                    print("This GaussianProcess used 'light' storage mode "
                          "at instantiation. Need to recompute "
                          "autocorrelation matrix...")
                reduced_likelihood_function_value, par = \
                    self.reduced_likelihood_function()
                self.C = par['C']
                self.Ft = par['Ft']
                self.G = par['G']

            rt = linalg.solve_triangular(self.C, r.T, lower=True)

            if self.beta0 is None:
                # Universal Kriging
                u = linalg.solve_triangular(self.G.T,
                                            np.dot(self.Ft.T, rt) - f.T,
                                            lower=True)
            else:
                # Ordinary Kriging
                u = np.zeros((n_targets, n_eval))

            MSE = np.dot(self.sigma2.reshape(n_targets, 1),
                         (1. - (rt ** 2.).sum(axis=0)
                          + (u ** 2.).sum(axis=0))[np.newaxis, :])
            MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)

            # Mean Squared Error might be slightly negative depending on
            # machine precision: force to zero!
            MSE[MSE < 0.] = 0.

            if self.y_ndim_ == 1:
                MSE = MSE.ravel()

            return y, MSE
        else:
            return y
    else:
        # Memory management: evaluate in chunks of batch_size points,
        # recursing with batch_size=None for each chunk.
        if type(batch_size) is not int or batch_size <= 0:
            raise Exception("batch_size must be a positive integer")

        # BUG FIX: the old loop used ``range(max(1, n_eval / batch_size))``
        # which passes a float under Python 3 (TypeError), and its slice
        # bounds carried a spurious ``+ 1`` that, combined with floor
        # division, could leave the last partial batch unevaluated.
        # Integer ceil-division with exact bounds covers every point once.
        n_batches = max(1, -(-n_eval // batch_size))

        if eval_MSE:
            y, MSE = np.zeros(n_eval), np.zeros(n_eval)
            for k in range(n_batches):
                batch_from = k * batch_size
                batch_to = min((k + 1) * batch_size, n_eval)
                y[batch_from:batch_to], MSE[batch_from:batch_to] = \
                    self.predict(X[batch_from:batch_to],
                                 eval_MSE=eval_MSE, batch_size=None)
            return y, MSE
        else:
            y = np.zeros(n_eval)
            for k in range(n_batches):
                batch_from = k * batch_size
                batch_to = min((k + 1) * batch_size, n_eval)
                y[batch_from:batch_to] = \
                    self.predict(X[batch_from:batch_to],
                                 eval_MSE=eval_MSE, batch_size=None)
            return y
def reduced_likelihood_function(self, theta=None):
    """
    This function determines the BLUP parameters and evaluates the reduced
    likelihood function for the given autocorrelation parameters theta.

    Maximizing this function wrt the autocorrelation parameters theta is
    equivalent to maximizing the likelihood of the assumed joint Gaussian
    distribution of the observations y evaluated onto the design of
    experiments X.

    Parameters
    ----------
    theta : array_like, optional
        An array containing the autocorrelation parameters at which the
        Gaussian Process model parameters should be determined.
        Default uses the built-in autocorrelation parameters
        (ie ``theta = self.theta_``).

    Returns
    -------
    reduced_likelihood_function_value : double
        The value of the reduced likelihood function associated to the
        given autocorrelation parameters theta.
    par : dict
        A dictionary containing the requested Gaussian Process model
        parameters:

        sigma2
            Gaussian Process variance.
        beta
            Generalized least-squares regression weights for
            Universal Kriging or given beta0 for Ordinary
            Kriging.
        gamma
            Gaussian Process weights.
        C
            Cholesky decomposition of the correlation matrix [R].
        Ft
            Solution of the linear equation system : [R] x Ft = F
        G
            QR decomposition of the matrix Ft.
    """
    check_is_fitted(self, "X")

    if theta is None:
        # Use built-in autocorrelation parameters
        theta = self.theta_

    # Initialize output: -inf signals an infeasible theta to the caller.
    reduced_likelihood_function_value = - np.inf
    par = {}

    # Retrieve data
    n_samples = self.X.shape[0]
    D = self.D
    ij = self.ij
    F = self.F

    if D is None:
        # Light storage mode (need to recompute D, ij and F)
        D, ij = l1_cross_distances(self.X)
        if (np.min(np.sum(D, axis=1)) == 0.
                and self.corr != correlation.pure_nugget):
            raise Exception("Multiple X are not allowed")
        F = self.regr(self.X)

    # Set up R: nugget on the diagonal, correlation r elsewhere
    # (symmetric, filled from the condensed pairwise vector r).
    r = self.corr(theta, D)
    R = np.eye(n_samples) * (1. + self.nugget)
    R[ij[:, 0], ij[:, 1]] = r
    R[ij[:, 1], ij[:, 0]] = r

    # Cholesky decomposition of R
    try:
        C = linalg.cholesky(R, lower=True)
    except linalg.LinAlgError:
        # R is not positive definite for this theta: report -inf.
        return reduced_likelihood_function_value, par

    # Get generalized least squares solution
    Ft = linalg.solve_triangular(C, F, lower=True)
    try:
        # scipy older than 0.9 spelled economy-mode QR as ``econ=True``.
        Q, G = linalg.qr(Ft, econ=True)
    except TypeError:
        # BUG FIX: this handler was a bare ``except:`` (with a stray
        # ``pass``) that also swallowed unrelated errors such as
        # LinAlgError or KeyboardInterrupt. Newer scipy removed the
        # ``econ`` keyword (raising TypeError) in favour of
        # ``mode='economic'``.
        Q, G = linalg.qr(Ft, mode='economic')

    sv = linalg.svd(G, compute_uv=False)
    rcondG = sv[-1] / sv[0]
    if rcondG < 1e-10:
        # Check F
        sv = linalg.svd(F, compute_uv=False)
        condF = sv[0] / sv[-1]
        if condF > 1e15:
            raise Exception("F is too ill conditioned. Poor combination "
                            "of regression model and observations.")
        else:
            # Ft is too ill conditioned, get out (try different theta)
            return reduced_likelihood_function_value, par

    Yt = linalg.solve_triangular(C, self.y, lower=True)
    if self.beta0 is None:
        # Universal Kriging
        beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
    else:
        # Ordinary Kriging
        beta = np.array(self.beta0)

    rho = Yt - np.dot(Ft, beta)
    sigma2 = (rho ** 2.).sum(axis=0) / n_samples
    # The determinant of R is equal to the squared product of the diagonal
    # elements of its Cholesky decomposition C
    detR = (np.diag(C) ** (2. / n_samples)).prod()

    # Compute/Organize output
    reduced_likelihood_function_value = - sigma2.sum() * detR
    par['sigma2'] = sigma2 * self.y_std ** 2.
    par['beta'] = beta
    par['gamma'] = linalg.solve_triangular(C.T, rho)
    par['C'] = C
    par['Ft'] = Ft
    par['G'] = G

    return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
    """
    This function estimates the autocorrelation parameters theta as the
    maximizer of the reduced likelihood function.
    (Minimization of the opposite reduced likelihood function is used for
    convenience)

    Parameters
    ----------
    self : All parameters are stored in the Gaussian Process model object.

    Returns
    -------
    optimal_theta : array_like
        The best set of autocorrelation parameters (the sought maximizer of
        the reduced likelihood function).
    optimal_reduced_likelihood_function_value : double
        The optimal reduced likelihood function value.
    optimal_par : dict
        The BLUP parameters associated to thetaOpt.
    """
    # Initialize output
    best_optimal_theta = []
    best_optimal_rlf_value = []
    best_optimal_par = []

    if self.verbose:
        print("The chosen optimizer is: " + str(self.optimizer))
        if self.random_start > 1:
            print(str(self.random_start) + " random starts are required.")

    percent_completed = 0.

    # Force optimizer to fmin_cobyla if the model is meant to be isotropic
    if self.optimizer == 'Welch' and self.theta0.size == 1:
        self.optimizer = 'fmin_cobyla'

    if self.optimizer == 'fmin_cobyla':
        # COBYLA minimizes, so negate the reduced likelihood; theta is
        # searched in log10-space.
        def minus_reduced_likelihood_function(log10t):
            return - self.reduced_likelihood_function(
                theta=10. ** log10t)[0]

        # Box constraints thetaL <= theta <= thetaU, expressed in
        # log10-space; ``i=i`` binds the loop index per lambda.
        constraints = []
        for i in range(self.theta0.size):
            constraints.append(lambda log10t, i=i:
                               log10t[i] - np.log10(self.thetaL[0, i]))
            constraints.append(lambda log10t, i=i:
                               np.log10(self.thetaU[0, i]) - log10t[i])

        for k in range(self.random_start):
            if k == 0:
                # Use specified starting point as first guess
                theta0 = self.theta0
            else:
                # Generate a random starting point log10-uniformly
                # distributed between bounds
                log10theta0 = np.log10(self.thetaL) \
                    + self.random_state.rand(self.theta0.size).reshape(
                        self.theta0.shape) * np.log10(self.thetaU
                                                      / self.thetaL)
                theta0 = 10. ** log10theta0

            # Run Cobyla
            # NOTE(review): ``iprint`` was deprecated in favour of
            # ``disp`` in newer scipy — confirm the targeted scipy
            # version supports it.
            try:
                log10_optimal_theta = \
                    optimize.fmin_cobyla(minus_reduced_likelihood_function,
                                         np.log10(theta0), constraints,
                                         iprint=0)
            except ValueError as ve:
                print("Optimization failed. Try increasing the ``nugget``")
                raise ve

            optimal_theta = 10. ** log10_optimal_theta
            optimal_rlf_value, optimal_par = \
                self.reduced_likelihood_function(theta=optimal_theta)

            # Compare the new optimizer to the best previous one
            if k > 0:
                if optimal_rlf_value > best_optimal_rlf_value:
                    best_optimal_rlf_value = optimal_rlf_value
                    best_optimal_par = optimal_par
                    best_optimal_theta = optimal_theta
            else:
                # First start: unconditionally record as current best.
                best_optimal_rlf_value = optimal_rlf_value
                best_optimal_par = optimal_par
                best_optimal_theta = optimal_theta
            if self.verbose and self.random_start > 1:
                # Coarse 5%-step progress reporting over random starts.
                if (20 * k) / self.random_start > percent_completed:
                    percent_completed = (20 * k) / self.random_start
                    print("%s completed" % (5 * percent_completed))

        optimal_rlf_value = best_optimal_rlf_value
        optimal_par = best_optimal_par
        optimal_theta = best_optimal_theta

    elif self.optimizer == 'Welch':
        # Backup of the given attributes; they are temporarily mutated
        # below and restored at the end.
        theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
        corr = self.corr
        verbose = self.verbose

        # This will iterate over fmin_cobyla optimizer
        self.optimizer = 'fmin_cobyla'
        self.verbose = False

        # Initialize under isotropy assumption
        if verbose:
            print("Initialize under isotropy assumption...")
        self.theta0 = check_array(self.theta0.min())
        self.thetaL = check_array(self.thetaL.min())
        self.thetaU = check_array(self.thetaU.max())
        theta_iso, optimal_rlf_value_iso, par_iso = \
            self._arg_max_reduced_likelihood_function()
        optimal_theta = theta_iso + np.zeros(theta0.shape)

        # Iterate over all dimensions of theta allowing for anisotropy
        if verbose:
            print("Now improving allowing for anisotropy...")
        for i in self.random_state.permutation(theta0.size):
            if verbose:
                print("Proceeding along dimension %d..." % (i + 1))
            self.theta0 = check_array(theta_iso)
            self.thetaL = check_array(thetaL[0, i])
            self.thetaU = check_array(thetaU[0, i])

            # Correlation restricted to dimension i: the other entries
            # of theta are frozen at their current optimal values.
            def corr_cut(t, d):
                return corr(check_array(np.hstack([optimal_theta[0][0:i],
                                                   t[0],
                                                   optimal_theta[0][(i +
                                                                     1)::]])),
                            d)

            self.corr = corr_cut
            optimal_theta[0, i], optimal_rlf_value, optimal_par = \
                self._arg_max_reduced_likelihood_function()

        # Restore the given attributes
        self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
        self.corr = corr
        self.optimizer = 'Welch'
        self.verbose = verbose

    else:
        raise NotImplementedError("This optimizer ('%s') is not "
                                  "implemented yet. Please contribute!"
                                  % self.optimizer)

    return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
    """Validate and canonicalize the constructor parameters.

    Called from ``fit`` and ``predict``. Resolves string model names to
    callables, coerces parameters to canonical types/shapes, and raises
    ValueError on inconsistent settings.

    Parameters
    ----------
    n_samples : int, optional
        When given, additionally validates that the nugget is either a
        scalar or an array of length n_samples.
    """
    # Check regression model
    if not callable(self.regr):
        if self.regr in self._regression_types:
            self.regr = self._regression_types[self.regr]
        else:
            raise ValueError("regr should be one of %s or callable, "
                             "%s was given."
                             % (self._regression_types.keys(), self.regr))

    # Check regression weights if given (Ordinary Kriging)
    if self.beta0 is not None:
        self.beta0 = check_array(self.beta0)
        if self.beta0.shape[1] != 1:
            # Force to column vector
            self.beta0 = self.beta0.T

    # Check correlation model
    if not callable(self.corr):
        if self.corr in self._correlation_types:
            self.corr = self._correlation_types[self.corr]
        else:
            raise ValueError("corr should be one of %s or callable, "
                             "%s was given."
                             % (self._correlation_types.keys(), self.corr))

    # Check storage mode
    if self.storage_mode != 'full' and self.storage_mode != 'light':
        raise ValueError("Storage mode should either be 'full' or "
                         "'light', %s was given." % self.storage_mode)

    # Check correlation parameters
    self.theta0 = check_array(self.theta0)
    lth = self.theta0.size

    if self.thetaL is not None and self.thetaU is not None:
        self.thetaL = check_array(self.thetaL)
        self.thetaU = check_array(self.thetaU)
        if self.thetaL.size != lth or self.thetaU.size != lth:
            raise ValueError("theta0, thetaL and thetaU must have the "
                             "same length.")
        if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
            # BUG FIX: the message previously read "O < thetaL" with the
            # letter O instead of zero.
            raise ValueError("The bounds must satisfy 0 < thetaL <= "
                             "thetaU.")

    elif self.thetaL is None and self.thetaU is None:
        if np.any(self.theta0 <= 0):
            raise ValueError("theta0 must be strictly positive.")

    elif self.thetaL is None or self.thetaU is None:
        raise ValueError("thetaL and thetaU should either be both or "
                         "neither specified.")

    # Force verbose type to bool
    self.verbose = bool(self.verbose)

    # Force normalize type to bool
    self.normalize = bool(self.normalize)

    # Check nugget value
    self.nugget = np.asarray(self.nugget)
    # BUG FIX: was ``np.any(self.nugget) < 0.`` which compares a bool to
    # zero and is therefore never True, letting negative nuggets pass
    # validation silently.
    if np.any(self.nugget < 0.):
        raise ValueError("nugget must be positive or zero.")
    if (n_samples is not None
            and self.nugget.shape not in [(), (n_samples,)]):
        raise ValueError("nugget must be either a scalar "
                         "or array of length n_samples.")

    # Check optimizer
    if self.optimizer not in self._optimizer_types:
        raise ValueError("optimizer should be one of %s"
                         % self._optimizer_types)

    # Force random_start type to int
    self.random_start = int(self.random_start)
| bsd-3-clause |
pastas/pastas | pastas/rfunc.py | 1 | 30984 | # coding=utf-8
"""This module contains all the response functions available in Pastas.
"""
import numpy as np
from pandas import DataFrame
from scipy.integrate import quad
from scipy.special import (gammainc, gammaincinv, k0, k1,
exp1, erfc, lambertw, erfcinv)
__all__ = ["Gamma", "Exponential", "Hantush", "Polder", "FourParam",
"DoubleExponential", "One", "Edelman", "HantushWellModel"]
class RfuncBase:
    """Base class for the response functions in this module.

    Subclasses provide ``get_init_parameters``, ``get_tmax`` and
    ``step``; the shared ``block`` and ``get_t`` helpers live here.

    Parameters
    ----------
    up: bool or None
        whether a positive stress moves the head up (True), down (False),
        or either way (None).
    meanstress: float
        mean value of the stress, used by subclasses to scale the initial
        parameter values.
    cutoff: float
        proportion after which the step response is considered finished.
    """
    _name = "RfuncBase"

    def __init__(self, up, meanstress, cutoff):
        self.up = up
        # Guard against division by zero later on: clip tiny positive
        # mean stresses to an arbitrary floor, and flip the sign of a
        # negative mean stress when the response is upward.
        if 0 < meanstress < 1e-8:
            meanstress = 1e-8
        elif up is True and meanstress < 0:
            meanstress = -meanstress
        self.meanstress = meanstress
        self.cutoff = cutoff

    def get_init_parameters(self, name):
        """Return the initial parameters and bounds for the solver.

        Implemented by the subclasses; called by the stressmodel.

        Parameters
        ----------
        name : str
            Name of the stressmodel.

        Returns
        -------
        parameters : pandas DataFrame
            The initial parameters and parameter bounds used by the
            solver.
        """
        pass

    def get_tmax(self, p, cutoff=None):
        """Return the response time for a certain cutoff.

        Parameters
        ----------
        p: array_like
            array_like object with the values as floats representing the
            model parameters.
        cutoff: float, optional
            float between 0 and 1.

        Returns
        -------
        tmax: float
            Number of days after which the given proportion of the
            response has effectuated (e.g. 99.9% for cutoff=0.999).
        """
        pass

    def step(self, p, dt=1, cutoff=None, maxtmax=None):
        """Return the step response.

        Parameters
        ----------
        p: array_like
            array_like object with the values as floats representing the
            model parameters.
        dt: float
            timestep as a multiple of a day.
        cutoff: float, optional
            float between 0 and 1.
        maxtmax: int, optional
            Maximum timestep to compute the response for.

        Returns
        -------
        s: numpy.array
            Array with the step response.
        """
        pass

    def block(self, p, dt=1, cutoff=None, maxtmax=None):
        """Return the block response: first differences of the step
        response, starting at the first step value.

        Parameters
        ----------
        p: array_like
            array_like object with the values as floats representing the
            model parameters.
        dt: float
            timestep as a multiple of a day.
        cutoff: float, optional
            float between 0 and 1.
        maxtmax: int, optional
            Maximum timestep to compute the response for.

        Returns
        -------
        s: numpy.array
            Array with the block response.
        """
        step = self.step(p, dt, cutoff, maxtmax)
        return np.append(step[0], np.diff(step))

    def get_t(self, p, dt, cutoff, maxtmax=None):
        """Determine the times at which to evaluate the step response,
        from t=0.

        Parameters
        ----------
        p: array_like
            array_like object with the values as floats representing the
            model parameters.
        dt: float
            timestep as a multiple of a day; an ndarray is interpreted
            as explicit evaluation times and returned unchanged.
        cutoff: float
            float between 0 and 1 that determines which part of the
            step response is taken into account.
        maxtmax: float, optional
            The maximum time of the response, usually the simulation
            length.

        Returns
        -------
        t: numpy.array
            Array with the times.
        """
        if isinstance(dt, np.ndarray):
            # The caller supplied the evaluation times explicitly.
            return dt
        tmax = self.get_tmax(p, cutoff)
        if maxtmax is not None:
            tmax = min(tmax, maxtmax)
        # Always evaluate at least three timesteps.
        return np.arange(dt, max(tmax, 3 * dt), dt)
class Gamma(RfuncBase):
    """Gamma response function with three parameters: A, a and n.

    Parameters
    ----------
    up: bool or None, optional
        whether a positive stress moves the head up (True, default),
        down (False), or either way (None).
    meanstress: float
        mean value of the stress, used to scale the initial value of A
        such that the final step times the mean stress equals 1.
    cutoff: float
        proportion after which the step function is cut off. default is
        0.999.

    Notes
    -----
    The impulse response function may be written as:

    .. math:: \\theta(t) = At^{n-1} e^{-t/a}
    """
    _name = "Gamma"

    def __init__(self, up=True, meanstress=1, cutoff=0.999):
        RfuncBase.__init__(self, up, meanstress, cutoff)
        self.nparam = 3

    def get_init_parameters(self, name):
        parameters = DataFrame(
            columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
        # Gain A: sign and bounds depend on the response direction.
        if self.up:
            init, pmin, pmax = (1 / self.meanstress, 1e-5,
                                100 / self.meanstress)
        elif self.up is False:
            init, pmin, pmax = (-1 / self.meanstress,
                                -100 / self.meanstress, -1e-5)
        else:
            init, pmin, pmax = 1 / self.meanstress, np.nan, np.nan
        parameters.loc[name + '_A'] = (init, pmin, pmax, True, name)
        # A very small n makes the response function vanishingly short.
        parameters.loc[name + '_n'] = (1, 0.1, 100, True, name)
        parameters.loc[name + '_a'] = (10, 0.01, 1e4, True, name)
        return parameters

    def get_tmax(self, p, cutoff=None):
        cutoff = self.cutoff if cutoff is None else cutoff
        # Invert the regularized incomplete gamma function, scaled by a.
        return gammaincinv(p[1], cutoff) * p[2]

    def gain(self, p):
        return p[0]

    def step(self, p, dt=1, cutoff=None, maxtmax=None):
        t = self.get_t(p, dt, cutoff, maxtmax)
        # The regularized incomplete gamma function is the normalized
        # step response; A scales it.
        return p[0] * gammainc(p[1], t / p[2])
class Exponential(RfuncBase):
    """Exponential response function with two parameters: A and a.

    Parameters
    ----------
    up: bool or None, optional
        whether a positive stress moves the head up (True, default),
        down (False), or either way (None).
    meanstress: float
        mean value of the stress, used to scale the initial value of A
        such that the final step times the mean stress equals 1.
    cutoff: float
        proportion after which the step function is cut off. default is
        0.999.

    Notes
    -----
    The impulse response function may be written as:

    .. math:: \\theta(t) = A e^{-t/a}
    """
    _name = "Exponential"

    def __init__(self, up=True, meanstress=1, cutoff=0.999):
        RfuncBase.__init__(self, up, meanstress, cutoff)
        self.nparam = 2

    def get_init_parameters(self, name):
        parameters = DataFrame(
            columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
        # Gain A: sign and bounds depend on the response direction.
        if self.up:
            init, pmin, pmax = (1 / self.meanstress, 1e-5,
                                100 / self.meanstress)
        elif self.up is False:
            init, pmin, pmax = (-1 / self.meanstress,
                                -100 / self.meanstress, -1e-5)
        else:
            init, pmin, pmax = 1 / self.meanstress, np.nan, np.nan
        parameters.loc[name + '_A'] = (init, pmin, pmax, True, name)
        parameters.loc[name + '_a'] = (10, 0.01, 1000, True, name)
        return parameters

    def get_tmax(self, p, cutoff=None):
        cutoff = self.cutoff if cutoff is None else cutoff
        # Solve 1 - exp(-tmax / a) = cutoff for tmax.
        return -p[1] * np.log(1 - cutoff)

    def gain(self, p):
        return p[0]

    def step(self, p, dt=1, cutoff=None, maxtmax=None):
        t = self.get_t(p, dt, cutoff, maxtmax)
        return p[0] * (1.0 - np.exp(-t / p[1]))
class HantushWellModel(RfuncBase):
    """
    A special implementation of the Hantush well function for multiple wells.

    Parameters
    ----------
    up: bool, optional
        indicates whether a positive stress will cause the head to go up
        (True, default) or down (False)
    meanstress: float
        mean value of the stress, used to set the initial value such that
        the final step times the mean stress equals 1
    cutoff: float
        proportion after which the step function is cut off. Default is 0.999.

    Notes
    -----
    The Hantush well function is explained in [hantush_1955]_,
    [veling_2010]_ and [asmuth_2008]_. The impulse response function may be
    written as:

    .. math:: \\theta(t) = \\frac{A}{t} K_0 \\left( \\frac{r^2}{4 \\lambda^2} \\right) \\exp(-t/a - ab/t)
    .. math:: p[0] = A = \\frac{1}{4 \\pi T}
    .. math:: p[1] = a = cS
    .. math:: p[2] = b = 1^2 / (4 \\lambda^2)
    .. math:: p[3] = r \, \\text{(not optimized)}

    where :math:`\\lambda = \\sqrt{Tc}`

    The parameter r (distance from the well to the observation point)
    is passed as a known value, and is used to scale the response function.
    The optimized parameters are slightly different from the original
    Hantush implementation:

    - A: in the original Hantush parameter A is the gain. Now the gain is
      equal to :math:`\\text{gain} = A K_0 ( \\sqrt(4 r^2 b) )`
    - a: is the same :math:`a = cS`
    - b: is the same, but :math:`r` is set to 1 if passed separately,
      :math:`b = 1^2 / (4 \\lambda^2)`
    """
    _name = "HantushWellModel"

    def __init__(self, up=False, meanstress=1, cutoff=0.999, distances=1.0):
        RfuncBase.__init__(self, up, meanstress, cutoff)
        # Only A, a and b are optimized; the distance r travels along as
        # an optional fourth entry of the parameter vector p.
        self.nparam = 3
        # Distance(s) between observation well and stress(es); used to
        # scale the initial value and bounds of parameter b.
        self.distances = distances

    def get_init_parameters(self, name):
        parameters = DataFrame(
            columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
        if self.up:
            # divide by k0(2) to get same initial value as ps.Hantush
            parameters.loc[name + '_A'] = (1 / (self.meanstress * k0(2)),
                                           0, np.nan, True, name)
        elif self.up is False:
            # divide by k0(2) to get same initial value as ps.Hantush
            parameters.loc[name + '_A'] = (-1 / (self.meanstress * k0(2)),
                                           np.nan, 0, True, name)
        else:
            parameters.loc[name + '_A'] = (1 / self.meanstress, np.nan,
                                           np.nan, True, name)
        parameters.loc[name + '_a'] = (100, 1e-3, 1e4, True, name)
        # set initial and bounds for b taking into account distances
        binit = 1.0 / np.mean(self.distances) ** 2
        bmin = 1e-4 / np.max(self.distances) ** 2
        bmax = 25. / np.max(self.distances) ** 2
        parameters.loc[name + '_b'] = (binit, bmin, bmax, True, name)
        return parameters

    def get_tmax(self, p, cutoff=None):
        # r is only present when a fourth entry is included in p
        # (presumably appended by WellModel — confirm against the
        # stressmodel that uses this rfunc); default to 1 otherwise.
        r = 1.0 if len(p) == 3 else p[3]
        # approximate formula for tmax
        if cutoff is None:
            cutoff = self.cutoff
        cS = p[1]
        rho = np.sqrt(4 * r ** 2 * p[2])
        k0rho = k0(rho)
        return lambertw(1 / ((1 - cutoff) * k0rho)).real * cS

    @staticmethod
    def gain(p):
        # For this rfunc A alone is NOT the gain: gain = A * K0(rho).
        r = 1.0 if len(p) == 3 else p[3]
        rho = np.sqrt(4 * r ** 2 * p[2])
        return p[0] * k0(rho)

    def step(self, p, dt=1, cutoff=None, maxtmax=None):
        r = 1.0 if len(p) == 3 else p[3]
        cS = p[1]
        rho = np.sqrt(4 * r ** 2 * p[2])
        k0rho = k0(rho)
        t = self.get_t(p, dt, cutoff, maxtmax)
        tau = t / cS
        # Split the evaluation at tau = rho / 2; the two branches below
        # use different exp1-based expansions.
        tau1 = tau[tau < rho / 2]
        tau2 = tau[tau >= rho / 2]
        # Weight blending the two expansions.
        w = (exp1(rho) - k0rho) / (exp1(rho) - exp1(rho / 2))
        F = np.zeros_like(tau)
        F[tau < rho / 2] = w * exp1(rho ** 2 / (4 * tau1)) - (w - 1) * exp1(
            tau1 + rho ** 2 / (4 * tau1))
        F[tau >= rho / 2] = 2 * k0rho - w * exp1(tau2) + (w - 1) * exp1(
            tau2 + rho ** 2 / (4 * tau2))
        return p[0] * F / 2

    @staticmethod
    def variance_gain(A, b, var_A, var_b, cov_Ab, r=1.0):
        """Calculate variance of the gain from parameters A and b.

        Variance of the gain is calculated based on propagation of
        uncertainty using optimal values and the variances of A and b
        and the covariance between A and b.

        Parameters
        ----------
        A : float
            optimal value of parameter A, (e.g. ml.parameters.optimal)
        b : float
            optimal value of parameter b, (e.g. ml.parameters.optimal)
        var_A : float
            variance of parameter A, can be obtained from the diagonal of
            the covariance matrix (e.g. ml.fit.pcov)
        var_b : float
            variance of parameter A, can be obtained from the diagonal of
            the covariance matrix (e.g. ml.fit.pcov)
        cov_Ab : float
            covariance between A and b, can be obtained from the covariance
            matrix (e.g. ml.fit.pcov)
        r : float or np.array, optional
            distance(s) between observation well and stress(es),
            default value is 1.0

        Returns
        -------
        var_gain : float or np.array
            variance of the gain calculated based on propagation of
            uncertainty of parameters A and b.
        """
        # First-order (delta-method) propagation for
        # gain = A * K0(2 sqrt(r^2 b)); K0' = -K1 supplies the b-term.
        var_gain = (
            (k0(2 * np.sqrt(r ** 2 * b))) ** 2 * var_A +
            (-A * r * k1(2 * np.sqrt(r ** 2 * b)) / np.sqrt(
                b)) ** 2 * var_b -
            2 * A * r * k0(2 * np.sqrt(r ** 2 * b)) *
            k1(2 * np.sqrt(r ** 2 * b)) / np.sqrt(b) * cov_Ab
        )
        return var_gain
class Hantush(RfuncBase):
    """
    The Hantush well function, using the standard A, a, b parameters.

    Parameters
    ----------
    up: bool or None, optional
        whether a positive stress moves the head up (True), down
        (False, default), or either way (None).
    meanstress: float
        mean value of the stress, used to scale the initial value of A.
    cutoff: float
        proportion after which the step function is cut off. default is
        0.999.

    Notes
    -----
    The Hantush well function is explained in [hantush_1955]_, [veling_2010]_
    and [asmuth_2008]_. The impulse response function may be written as:

    .. math:: \\theta(t) = \\frac{A}{t} \\exp(-t/a - ab/t)
    .. math:: p[0] = A = \\frac{1}{2 \\pi T}
    .. math:: p[1] = a = cS
    .. math:: p[2] = b = r^2 / (4 \\lambda^2)

    where :math:`\\lambda = \\sqrt{Tc}`

    References
    ----------
    .. [hantush_1955] Hantush, M. S., & Jacob, C. E. (1955). Non‐steady
       radial flow in an infinite leaky aquifer. Eos, Transactions American
       Geophysical Union, 36(1), 95-100.

    .. [veling_2010] Veling, E. J. M., & Maas, C. (2010). Hantush well function
       revisited. Journal of hydrology, 393(3), 381-388.

    .. [asmuth_2008] Von Asmuth, J. R., Maas, K., Bakker, M., & Petersen,
       J. (2008). Modeling time series of ground water head fluctuations
       subjected to multiple stresses. Ground Water, 46(1), 30-40.
    """
    _name = "Hantush"

    def __init__(self, up=False, meanstress=1, cutoff=0.999):
        RfuncBase.__init__(self, up, meanstress, cutoff)
        self.nparam = 3

    def get_init_parameters(self, name):
        parameters = DataFrame(
            columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
        # Gain A: sign and bounds depend on the response direction.
        if self.up:
            row_A = (1 / self.meanstress, 0, np.nan)
        elif self.up is False:
            row_A = (-1 / self.meanstress, np.nan, 0)
        else:
            row_A = (1 / self.meanstress, np.nan, np.nan)
        parameters.loc[name + '_A'] = row_A + (True, name)
        parameters.loc[name + '_a'] = (100, 1e-3, 1e4, True, name)
        parameters.loc[name + '_b'] = (1, 1e-6, 25, True, name)
        return parameters

    def get_tmax(self, p, cutoff=None):
        cutoff = self.cutoff if cutoff is None else cutoff
        rho = np.sqrt(4 * p[2])
        # Approximate inversion of the step response via Lambert W.
        return p[1] * lambertw(1 / ((1 - cutoff) * k0(rho))).real

    @staticmethod
    def gain(p):
        return p[0]

    def step(self, p, dt=1, cutoff=None, maxtmax=None):
        a = p[1]
        rho = np.sqrt(4 * p[2])
        k0rho = k0(rho)
        t = self.get_t(p, dt, cutoff, maxtmax)
        tau = t / a
        # Weight blending the two exp1-based expansions, which are
        # switched at tau = rho / 2.
        w = (exp1(rho) - k0rho) / (exp1(rho) - exp1(rho / 2))
        early = tau < rho / 2
        late = tau >= rho / 2
        tau_e = tau[early]
        tau_l = tau[late]
        F = np.zeros_like(tau)
        F[early] = (w * exp1(rho ** 2 / (4 * tau_e))
                    - (w - 1) * exp1(tau_e + rho ** 2 / (4 * tau_e)))
        F[late] = (2 * k0rho - w * exp1(tau_l)
                   + (w - 1) * exp1(tau_l + rho ** 2 / (4 * tau_l)))
        # Normalize so that the step approaches the gain A.
        return p[0] * F / (2 * k0rho)
class Polder(RfuncBase):
"""The Polder function, using the standard A, a, b parameters
Notes
-----
The Polder function is explained in [polder]_. The impulse response
function may be written as:
.. math:: \\theta(t) = \\exp(-\\sqrt(4b)) \\frac{A}{t^{-3/2}}
\\exp(-t/a -b/t)
.. math:: p[0] = A = \\exp(-x/\\lambda)
.. math:: p[1] = a = \\sqrt{\\frac{1}{cS}}
.. math:: p[2] = b = x^2 / (4 \\lambda^2)
where :math:`\\lambda = \\sqrt{kDc}`
References
----------
.. [polder] G.A. Bruggeman (1999). Analytical solutions of
geohydrological problems. Elsevier Science. Amsterdam, Eq. 123.32
"""
_name = "Polder"
def __init__(self, up=True, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 3
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
parameters.loc[name + '_A'] = (1, 0, 2, True, name)
parameters.loc[name + '_a'] = (10, 0.01, 1000, True, name)
parameters.loc[name + '_b'] = (1, 1e-6, 25, True, name)
return parameters
def get_tmax(self, p, cutoff=None):
if cutoff is None:
cutoff = self.cutoff
_, a, b = p
b = a * b
x = np.sqrt(b / a)
inverfc = erfcinv(2 * cutoff)
y = (-inverfc + np.sqrt(inverfc ** 2 + 4 * x)) / 2
tmax = a * y ** 2
return tmax
def gain(self, p):
# the steady state solution of Mazure
g = p[0] * np.exp(-np.sqrt(4 * p[2]))
if not self.up:
g = -g
return g
def step(self, p, dt=1, cutoff=None, maxtmax=None):
t = self.get_t(p, dt, cutoff, maxtmax)
A, a, b = p
s = A * self.polder_function(np.sqrt(b), np.sqrt(t / a))
# / np.exp(-2 * np.sqrt(b))
if not self.up:
s = -s
return s
@staticmethod
def polder_function(x, y):
s = 0.5 * np.exp(2 * x) * erfc(x / y + y) + \
0.5 * np.exp(-2 * x) * erfc(x / y - y)
return s
class One(RfuncBase):
"""Instant response with no lag and one parameter d.
Parameters
----------
up: bool or None, optional
indicates whether a positive stress will cause the head to go up
(True) or down (False), if None (default) the head can go both ways.
meanstress: float
mean value of the stress, used to set the initial value such that
the final step times the mean stress equals 1
cutoff: float
proportion after which the step function is cut off. default is 0.999.
"""
_name = "One"
def __init__(self, up=None, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 1
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
if self.up:
parameters.loc[name + '_d'] = (
self.meanstress, 0, np.nan, True, name)
elif self.up is False:
parameters.loc[name + '_d'] = (
-self.meanstress, np.nan, 0, True, name)
else:
parameters.loc[name + '_d'] = (
self.meanstress, np.nan, np.nan, True, name)
return parameters
def gain(self, p):
return p[0]
def step(self, p, dt=1, cutoff=None, maxtmax=None):
if isinstance(dt, np.ndarray):
return p[0] * np.ones(len(dt))
else:
return p[0] * np.ones(1)
def block(self, p, dt=1, cutoff=None, maxtmax=None):
return p[0] * np.ones(1)
class FourParam(RfuncBase):
"""Four Parameter response function with 4 parameters A, a, b, and n.
Parameters
----------
up: bool or None, optional
indicates whether a positive stress will cause the head to go up
(True, default) or down (False), if None the head can go both ways.
meanstress: float
mean value of the stress, used to set the initial value such that
the final step times the mean stress equals 1
cutoff: float
proportion after which the step function is cut off. default is 0.999.
Notes
-----
The impulse response function may be written as:
.. math:: \\theta(t) = At^{n-1} e^{-t/a -ab/t}
If Fourparam.quad is set to True, this response function uses np.quad to
integrate the Four Parameter response function, which requires more
calculation time.
"""
_name = "FourParam"
def __init__(self, up=True, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 4
self.quad = False
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
if self.up:
parameters.loc[name + '_A'] = (1 / self.meanstress, 0,
100 / self.meanstress, True, name)
elif self.up is False:
parameters.loc[name + '_A'] = (-1 / self.meanstress,
-100 / self.meanstress, 0, True,
name)
else:
parameters.loc[name + '_A'] = (1 / self.meanstress,
np.nan, np.nan, True, name)
parameters.loc[name + '_n'] = (1, -10, 10, True, name)
parameters.loc[name + '_a'] = (10, 0.01, 5000, True, name)
parameters.loc[name + '_b'] = (10, 1e-6, 25, True, name)
return parameters
@staticmethod
def function(t, p):
return (t ** (p[1] - 1)) * np.exp(-t / p[2] - p[2] * p[3] / t)
def get_tmax(self, p, cutoff=None):
if cutoff is None:
cutoff = self.cutoff
if self.quad:
x = np.arange(1, 10000, 1)
y = np.zeros_like(x)
func = self.function(x, p)
func_half = self.function(x[:-1] + 1 / 2, p)
y[1:] = y[0] + np.cumsum(1 / 6 *
(func[:-1] + 4 * func_half + func[1:]))
y = y / quad(self.function, 0, np.inf, args=p)[0]
return np.searchsorted(y, cutoff)
else:
t1 = -np.sqrt(3 / 5)
t2 = 0
t3 = np.sqrt(3 / 5)
w1 = 5 / 9
w2 = 8 / 9
w3 = 5 / 9
x = np.arange(1, 10000, 1)
y = np.zeros_like(x)
func = self.function(x, p)
func_half = self.function(x[:-1] + 1 / 2, p)
y[0] = 0.5 * (w1 * self.function(0.5 * t1 + 0.5, p) +
w2 * self.function(0.5 * t2 + 0.5, p) +
w3 * self.function(0.5 * t3 + 0.5, p))
y[1:] = y[0] + np.cumsum(1 / 6 *
(func[:-1] + 4 * func_half + func[1:]))
y = y / quad(self.function, 0, np.inf, args=p)[0]
return np.searchsorted(y, cutoff)
@staticmethod
def gain(p):
return p[0]
def step(self, p, dt=1, cutoff=None, maxtmax=None):
if self.quad:
t = self.get_t(p, dt, cutoff, maxtmax)
s = np.zeros_like(t)
s[0] = quad(self.function, 0, dt, args=p)[0]
for i in range(1, len(t)):
s[i] = s[i - 1] + quad(self.function, t[i - 1], t[i], args=p)[
0]
s = s * (p[0] / (quad(self.function, 0, np.inf, args=p))[0])
return s
else:
t1 = -np.sqrt(3 / 5)
t2 = 0
t3 = np.sqrt(3 / 5)
w1 = 5 / 9
w2 = 8 / 9
w3 = 5 / 9
if dt > 0.1:
step = 0.1 # step size for numerical integration
tmax = max(self.get_tmax(p, cutoff), 3 * dt)
t = np.arange(step, tmax, step)
s = np.zeros_like(t)
# for interval [0,dt] :
s[0] = (step / 2) * \
(w1 * self.function((step / 2) * t1 + (step / 2), p) +
w2 * self.function((step / 2) * t2 + (step / 2), p) +
w3 * self.function((step / 2) * t3 + (step / 2), p))
# for interval [dt,tmax]:
func = self.function(t, p)
func_half = self.function(t[:-1] + step / 2, p)
s[1:] = s[0] + np.cumsum(
step / 6 * (func[:-1] + 4 * func_half + func[1:]))
s = s * (p[0] / quad(self.function, 0, np.inf, args=p)[0])
return s[int(dt / step - 1)::int(dt / step)]
else:
t = self.get_t(p, dt, cutoff, maxtmax)
s = np.zeros_like(t)
# for interval [0,dt] Gaussian quadrate:
s[0] = (dt / 2) * \
(w1 * self.function((dt / 2) * t1 + (dt / 2), p) +
w2 * self.function((dt / 2) * t2 + (dt / 2), p) +
w3 * self.function((dt / 2) * t3 + (dt / 2), p))
# for interval [dt,tmax] Simpson integration:
func = self.function(t, p)
func_half = self.function(t[:-1] + dt / 2, p)
s[1:] = s[0] + np.cumsum(
dt / 6 * (func[:-1] + 4 * func_half + func[1:]))
s = s * (p[0] / quad(self.function, 0, np.inf, args=p)[0])
return s
class DoubleExponential(RfuncBase):
"""Gamma response function with 3 parameters A, a, and n.
Parameters
----------
up: bool or None, optional
indicates whether a positive stress will cause the head to go up
(True, default) or down (False), if None the head can go both ways.
meanstress: float
mean value of the stress, used to set the initial value such that
the final step times the mean stress equals 1
cutoff: float
proportion after which the step function is cut off. default is 0.999.
Notes
-----
The impulse response function may be written as:
.. math:: \\theta(t) = A (1 - \\alpha) e^{-t/a_1} + A \\alpha e^{-t/a_2}
"""
_name = "DoubleExponential"
def __init__(self, up=True, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 4
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
if self.up:
parameters.loc[name + '_A'] = (1 / self.meanstress, 0,
100 / self.meanstress, True, name)
elif self.up is False:
parameters.loc[name + '_A'] = (-1 / self.meanstress,
-100 / self.meanstress, 0, True,
name)
else:
parameters.loc[name + '_A'] = (1 / self.meanstress,
np.nan, np.nan, True, name)
parameters.loc[name + '_alpha'] = (0.1, 0.01, 0.99, True, name)
parameters.loc[name + '_a1'] = (10, 0.01, 5000, True, name)
parameters.loc[name + '_a2'] = (10, 0.01, 5000, True, name)
return parameters
def get_tmax(self, p, cutoff=None):
if cutoff is None:
cutoff = self.cutoff
if p[2] > p[3]: # a1 > a2
return -p[2] * np.log(1 - cutoff)
else: # a1 < a2
return -p[3] * np.log(1 - cutoff)
def gain(self, p):
return p[0]
def step(self, p, dt=1, cutoff=None, maxtmax=None):
t = self.get_t(p, dt, cutoff, maxtmax)
s = p[0] * (1 - ((1 - p[1]) * np.exp(-t / p[2]) +
p[1] * np.exp(-t / p[3])))
return s
class Edelman(RfuncBase):
"""The function of Edelman, describing the propagation of an instantaneous
water level change into an adjacent half-infinite aquifer.
Parameters
----------
up: bool or None, optional
indicates whether a positive stress will cause the head to go up
(True, default) or down (False), if None the head can go both ways.
meanstress: float
mean value of the stress, used to set the initial value such that
the final step times the mean stress equals 1
cutoff: float
proportion after which the step function is cut off. default is 0.999.
Notes
-----
The Edelman function is emplained in [5]_. The impulse response function
may be written as:
.. math:: \\text{unknown}
It's parameters are:
.. math:: p[0] = \\beta = \\frac{\\sqrt{\\frac{4kD}{S}}}{x}
References
----------
.. [5] http://grondwaterformules.nl/index.php/formules/waterloop/peilverandering
"""
_name = "Edelman"
def __init__(self, up=True, meanstress=1, cutoff=0.999):
RfuncBase.__init__(self, up, meanstress, cutoff)
self.nparam = 1
def get_init_parameters(self, name):
parameters = DataFrame(
columns=['initial', 'pmin', 'pmax', 'vary', 'name'])
beta_init = 1.0
parameters.loc[name + '_beta'] = (beta_init, 0, 1000, True, name)
return parameters
def get_tmax(self, p, cutoff=None):
if cutoff is None:
cutoff = self.cutoff
return 1. / (p[0] * erfcinv(cutoff * erfc(0))) ** 2
@staticmethod
def gain(p):
return 1.
def step(self, p, dt=1, cutoff=None, maxtmax=None):
t = self.get_t(p, dt, cutoff, maxtmax)
s = erfc(1 / (p[0] * np.sqrt(t)))
return s
| mit |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/core/indexes/category.py | 4 | 24566 | import numpy as np
from pandas._libs import index as libindex
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.generic import ABCCategorical, ABCSeries
from pandas.core.dtypes.common import (
is_categorical_dtype,
_ensure_platform_int,
is_list_like,
is_interval_dtype,
is_scalar)
from pandas.core.common import (_asarray_tuplesafe,
_values_from_object)
from pandas.core.dtypes.missing import array_equivalent
from pandas.core.algorithms import take_1d
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.config import get_option
from pandas.core.indexes.base import Index, _index_shared_docs
import pandas.core.base as base
import pandas.core.missing as missing
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(dict(target_klass='CategoricalIndex'))
class CategoricalIndex(Index, base.PandasDelegate):
"""
Immutable Index implementing an ordered, sliceable set. CategoricalIndex
represents a sparsely populated Index with an underlying Categorical.
.. versionadded:: 0.16.1
Parameters
----------
data : array-like or Categorical, (1-dimensional)
categories : optional, array-like
categories for the CategoricalIndex
ordered : boolean,
designating if the categories are ordered
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
See Also
--------
Categorical, Index
"""
_typ = 'categoricalindex'
_engine_type = libindex.Int64Engine
_attributes = ['name']
def __new__(cls, data=None, categories=None, ordered=None, dtype=None,
copy=False, name=None, fastpath=False, **kwargs):
if fastpath:
return cls._simple_new(data, name=name)
if name is None and hasattr(data, 'name'):
name = data.name
if isinstance(data, ABCCategorical):
data = cls._create_categorical(cls, data, categories, ordered)
elif isinstance(data, CategoricalIndex):
data = data._data
data = cls._create_categorical(cls, data, categories, ordered)
else:
# don't allow scalars
# if data is None, then categories must be provided
if is_scalar(data):
if data is not None or categories is None:
cls._scalar_data_error(data)
data = []
data = cls._create_categorical(cls, data, categories, ordered)
if copy:
data = data.copy()
return cls._simple_new(data, name=name)
def _create_from_codes(self, codes, categories=None, ordered=None,
name=None):
"""
*this is an internal non-public method*
create the correct categorical from codes
Parameters
----------
codes : new codes
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
name : optional name attribute, defaults to existing
Returns
-------
CategoricalIndex
"""
from pandas.core.categorical import Categorical
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
if name is None:
name = self.name
cat = Categorical.from_codes(codes, categories=categories,
ordered=self.ordered)
return CategoricalIndex(cat, name=name)
@staticmethod
def _create_categorical(self, data, categories=None, ordered=None):
"""
*this is an internal non-public method*
create the correct categorical from data and the properties
Parameters
----------
data : data for new Categorical
categories : optional categories, defaults to existing
ordered : optional ordered attribute, defaults to existing
Returns
-------
Categorical
"""
if not isinstance(data, ABCCategorical):
ordered = False if ordered is None else ordered
from pandas.core.categorical import Categorical
data = Categorical(data, categories=categories, ordered=ordered)
else:
if categories is not None:
data = data.set_categories(categories)
if ordered is not None:
data = data.set_ordered(ordered)
return data
@classmethod
def _simple_new(cls, values, name=None, categories=None, ordered=None,
**kwargs):
result = object.__new__(cls)
values = cls._create_categorical(cls, values, categories, ordered)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
result._reset_identity()
return result
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, categories=None, ordered=None,
**kwargs):
# categories and ordered can't be part of attributes,
# as these are properties
if categories is None:
categories = self.categories
if ordered is None:
ordered = self.ordered
return super(CategoricalIndex,
self)._shallow_copy(values=values, categories=categories,
ordered=ordered, **kwargs)
def _is_dtype_compat(self, other):
"""
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Raises
------
TypeError if the dtypes are not compatible
"""
if is_categorical_dtype(other):
if isinstance(other, CategoricalIndex):
other = other._values
if not other.is_dtype_equal(self):
raise TypeError("categories must match existing categories "
"when appending")
else:
values = other
if not is_list_like(values):
values = [values]
other = CategoricalIndex(self._create_categorical(
self, other, categories=self.categories, ordered=self.ordered))
if not other.isin(values).all():
raise TypeError("cannot append a non-category item to a "
"CategoricalIndex")
return other
def equals(self, other):
"""
Determines if two CategorialIndex objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
try:
other = self._is_dtype_compat(other)
return array_equivalent(self._data, other)
except (TypeError, ValueError):
pass
return False
@property
def _formatter_func(self):
return self.categories._formatter_func
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
attrs = [
('categories',
ibase.default_pprint(self.categories,
max_seq_items=max_categories)),
('ordered', self.ordered)]
if self.name is not None:
attrs.append(('name', ibase.default_pprint(self.name)))
attrs.append(('dtype', "'%s'" % self.dtype))
max_seq_items = get_option('display.max_seq_items') or len(self)
if len(self) > max_seq_items:
attrs.append(('length', len(self)))
return attrs
@property
def inferred_type(self):
return 'categorical'
@property
def values(self):
""" return the underlying data, which is a Categorical """
return self._data
def get_values(self):
""" return the underlying data as an ndarray """
return self._data.get_values()
@property
def codes(self):
return self._data.codes
@property
def categories(self):
return self._data.categories
@property
def ordered(self):
return self._data.ordered
def _reverse_indexer(self):
return self._data._reverse_indexer()
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
if self.categories._defer_to_indexing:
return key in self.categories
return key in self.values
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
if self.categories._defer_to_indexing:
return self.categories.contains(key)
return key in self.values
def __array__(self, dtype=None):
""" the array interface, return my values """
return np.array(self._data, dtype=dtype)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_interval_dtype(dtype):
from pandas import IntervalIndex
return IntervalIndex.from_intervals(np.array(self))
return super(CategoricalIndex, self).astype(dtype=dtype, copy=copy)
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
return self._data.codes == -1
@Appender(ibase._index_shared_docs['fillna'])
def fillna(self, value, downcast=None):
self._assert_can_do_op(value)
return CategoricalIndex(self._data.fillna(value), name=self.name)
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
@cache_readonly
def _engine(self):
# we are going to look things up with the codes themselves
return self._engine_type(lambda: self.codes.astype('i8'), len(self))
@cache_readonly
def is_unique(self):
return not self.duplicated().any()
@Appender(base._shared_docs['unique'] % _index_doc_kwargs)
def unique(self):
result = base.IndexOpsMixin.unique(self)
# CategoricalIndex._shallow_copy uses keeps original categories
# and ordered if not otherwise specified
return self._shallow_copy(result, categories=result.categories,
ordered=result.ordered)
@Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
from pandas._libs.hashtable import duplicated_int64
codes = self.codes.astype('i8')
return duplicated_int64(codes, keep)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.astype('object')
def get_loc(self, key, method=None):
"""
Get integer location for requested label
Parameters
----------
key : label
method : {None}
* default: exact matches only.
Returns
-------
loc : int if unique index, possibly slice or mask if not
"""
codes = self.categories.get_loc(key)
if (codes == -1):
raise KeyError(key)
return self._engine.get_loc(codes)
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
try:
k = _values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
indexer = self.get_loc(k)
return series.iloc[indexer]
except (KeyError, TypeError):
pass
# we might be a positional inexer
return super(CategoricalIndex, self).get_value(series, key)
def _can_reindex(self, indexer):
""" always allow reindexing """
pass
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
from pandas.core.categorical import Categorical
cat = Categorical(values,
categories=self.categories,
ordered=self.ordered)
return self._shallow_copy(cat, **self._get_attributes_dict())
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError("argument method is not implemented for "
"CategoricalIndex.reindex")
if level is not None:
raise NotImplementedError("argument level is not implemented for "
"CategoricalIndex.reindex")
if limit is not None:
raise NotImplementedError("argument limit is not implemented for "
"CategoricalIndex.reindex")
target = ibase._ensure_index(target)
if not is_categorical_dtype(target) and not target.is_unique:
raise ValueError("cannot reindex with a non-unique indexer")
indexer, missing = self.get_indexer_non_unique(np.array(target))
new_target = self.take(indexer)
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
new_target, indexer, _ = result._reindex_non_unique(
np.array(target))
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
new_target = self._create_from_codes(codes)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
# unless we had an inital Categorical to begin with
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
new_target = target._shallow_copy(new_target, name=self.name)
else:
new_target = Index(new_target, name=self.name)
return new_target, indexer
def _reindex_non_unique(self, target):
""" reindex from a non-unique; which CategoricalIndex's are almost
always
"""
new_target, indexer = self.reindex(target)
new_indexer = None
check = indexer == -1
if check.any():
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[check] = -1
cats = self.categories.get_indexer(target)
if not (cats == -1).any():
# .reindex returns normal Index. Revert to CategoricalIndex if
# all targets are included in my categories
new_target = self._shallow_copy(new_target)
return new_target, indexer, new_indexer
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = ibase._ensure_index(target)
if self.equals(target):
return np.arange(len(self), dtype='intp')
if method == 'pad' or method == 'backfill':
raise NotImplementedError("method='pad' and method='backfill' not "
"implemented yet for CategoricalIndex")
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
'for CategoricalIndex')
if (isinstance(target, CategoricalIndex) and
self.values.is_dtype_equal(target)):
# we have the same codes
codes = target.codes
else:
if isinstance(target, CategoricalIndex):
code_indexer = self.categories.get_indexer(target.categories)
codes = take_1d(code_indexer, target.codes, fill_value=-1)
else:
codes = self.categories.get_indexer(target)
indexer, _ = self._engine.get_indexer_non_unique(codes)
return _ensure_platform_int(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ibase._ensure_index(target)
if isinstance(target, CategoricalIndex):
target = target.categories
codes = self.categories.get_indexer(target)
return self._engine.get_indexer_non_unique(codes)
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
if self.categories._defer_to_indexing:
return self.categories._convert_scalar_indexer(key, kind=kind)
return super(CategoricalIndex, self)._convert_scalar_indexer(
key, kind=kind)
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
# Return our indexer or raise if all of the values are not included in
# the categories
if self.categories._defer_to_indexing:
indexer = self.categories._convert_list_indexer(keyarr, kind=kind)
return Index(self.codes).get_indexer_for(indexer)
indexer = self.categories.get_indexer(np.asarray(keyarr))
if (indexer == -1).any():
raise KeyError(
"a list-indexer must only "
"include values that are "
"in the categories")
return self.get_indexer(keyarr)
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = _asarray_tuplesafe(keyarr)
if self.categories._defer_to_indexing:
return keyarr
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return self._shallow_copy(keyarr)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
taken = self._assert_take_fillable(self.codes, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1)
return self._create_from_codes(taken)
take_nd = take
def map(self, mapper):
"""Apply mapper function to its categories (not codes).
Parameters
----------
mapper : callable
Function to be applied. When all categories are mapped
to different categories, the result will be a CategoricalIndex
which has the same order property as the original. Otherwise,
the result will be a Index.
Returns
-------
applied : CategoricalIndex or Index
"""
return self._shallow_copy_with_infer(self.values.map(mapper))
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._create_from_codes(np.delete(self.codes, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
Raises
------
ValueError if the item is not in the categories
"""
code = self.categories.get_indexer([item])
if (code == -1):
raise TypeError("cannot insert an item into a CategoricalIndex "
"that is not already an existing category")
codes = self.codes
codes = np.concatenate((codes[:loc], code, codes[loc:]))
return self._create_from_codes(codes)
def _append_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
ValueError if other is not in the categories
"""
to_concat = [self._is_dtype_compat(c) for c in to_concat]
codes = np.concatenate([c.codes for c in to_concat])
result = self._create_from_codes(codes, name=name)
# if name is None, _create_from_codes sets self.name
result.name = name
return result
def _codes_for_groupby(self, sort):
""" Return a Categorical adjusted for groupby """
return self.values._codes_for_groupby(sort)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
def _make_compare(op):
def _evaluate_compare(self, other):
# if we have a Categorical type, then must have the same
# categories
if isinstance(other, CategoricalIndex):
other = other._values
elif isinstance(other, Index):
other = self._create_categorical(
self, other._values, categories=self.categories,
ordered=self.ordered)
if isinstance(other, (ABCCategorical, np.ndarray,
ABCSeries)):
if len(self.values) != len(other):
raise ValueError("Lengths must match to compare")
if isinstance(other, ABCCategorical):
if not self.values.is_dtype_equal(other):
raise TypeError("categorical index comparisions must "
"have the same categories and ordered "
"attributes")
return getattr(self.values, op)(other)
return _evaluate_compare
cls.__eq__ = _make_compare('__eq__')
cls.__ne__ = _make_compare('__ne__')
cls.__lt__ = _make_compare('__lt__')
cls.__gt__ = _make_compare('__gt__')
cls.__le__ = _make_compare('__le__')
cls.__ge__ = _make_compare('__ge__')
def _delegate_method(self, name, *args, **kwargs):
""" method delegation to the ._values """
method = getattr(self._values, name)
if 'inplace' in kwargs:
raise ValueError("cannot use inplace with CategoricalIndex")
res = method(*args, **kwargs)
if is_scalar(res):
return res
return CategoricalIndex(res, name=self.name)
@classmethod
def _add_accessors(cls):
""" add in Categorical accessor methods """
from pandas.core.categorical import Categorical
CategoricalIndex._add_delegate_accessors(
delegate=Categorical, accessors=["rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered", "as_unordered",
"min", "max"],
typ='method', overwrite=True)
CategoricalIndex._add_numeric_methods_add_sub_disabled()
CategoricalIndex._add_numeric_methods_disabled()
CategoricalIndex._add_logical_methods_disabled()
CategoricalIndex._add_comparison_methods()
CategoricalIndex._add_accessors()
| mit |
Jokiva/Computational-Physics | lecture 9/Problem 1.py | 1 | 1279 | from fitting import *
import numpy as np
import matplotlib.pyplot as plt
# linear function
def f(x, coeffs):
if len(coeffs) != 2:
raise ValueError('the length of coefficient array should be two')
return coeffs[0] + coeffs[1] * x
# data 1
x_1 = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y_1 = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
# data 2
x_2 = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y_2 = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
# data 3
x_3 = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y_3 = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
# data 4
x_4 = np.array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])
y_4 = np.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])
xs = [x_1, x_2, x_3, x_4]
ys = [y_1, y_2, y_3, y_4]
for i in range(4):
# get the fit result
coeff = linear_fit(xs[i], ys[i])
# create the range for evaluation
x = np.linspace(xs[i].min(), xs[i].max())
# the fitted data
y = evaluate_linear_result(x, coeff)
# make a plot
plt.figure()
plt.title('Data ' + str(i+1))
plt.scatter(xs[i], ys[i], color='r')
plt.plot(x, y)
plt.grid()
plt.show()
| gpl-3.0 |
mmottahedi/neuralnilm_prototype | scripts/e430.py | 2 | 7309 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
425: FF auto encoder with single appliance (Fridge)
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 2000
N_SEQ_PER_BATCH = 64
SEQ_LENGTH = 512
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[200, 500, 200, 2500, 2400],
# max_input_power=200,
max_diff=200,
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.75,
skip_probability_for_first_appliance=0,
one_target_per_seq=False,
n_seq_per_batch=N_SEQ_PER_BATCH,
# subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=False,
target_is_prediction=False,
# independently_center_inputs=True,
standardise_input=True,
standardise_targets=True,
# unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False,
# two_pass=True,
# clock_type='ramp',
# clock_period=SEQ_LENGTH
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
# Training / optimisation settings shared by the experiment builders below;
# commented-out entries are alternatives from earlier experiments.
net_dict = dict(
    save_plot_interval=SAVE_PLOT_INTERVAL,
    # loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
    # loss_function=lambda x, t: mdn_nll(x, t).mean(),
    # loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
    loss_function=lambda x, t: mse(x, t).mean(),
    # loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
    # loss_function=partial(scaled_cost, loss_func=mse),
    # loss_function=ignore_inactive,
    # loss_function=partial(scaled_cost3, ignore_inactive=False),
    # updates_func=momentum,
    updates_func=clipped_nesterov_momentum,
    updates_kwargs={'clip_range': (0, 10)},
    learning_rate=1e-1,
    # Step the learning rate down as training progresses.
    learning_rate_changes_by_iteration={
        2000: 1e-2,
        10000: 1e-3
    },
    do_save_activations=True,
    auto_reshape=False,
    # plotter=CentralOutputPlotter
    plotter=Plotter(n_seq_to_plot=20)
)
def exp_a(name):
    """Build experiment 'a': a feed-forward autoencoder over power sequences.

    Two same-length Conv1D layers extract local features, then a stack of
    dense layers narrows to a SEQ_LENGTH // 32 bottleneck and mirrors back
    out to SEQ_LENGTH linear outputs.

    Parameters
    ----------
    name : str
        Full experiment name, stored on the net (used for logging/plots).

    Returns
    -------
    Net
        Configured (untrained) network wired to a fresh RealApplianceSource.
    """
    # Module-level reference so the data source outlives this call.
    global source
    source_dict_copy = deepcopy(source_dict)
    source = RealApplianceSource(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source
    ))
    net_dict_copy['layers_config'] = [
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1) # (batch, features, time)
        },
        {
            'type': Conv1DLayer, # convolve over the time axis
            'num_filters': 16,
            'filter_length': 4,
            'stride': 1,
            'nonlinearity': rectify,
            'border_mode': 'same'
        },
        {
            'type': Conv1DLayer, # convolve over the time axis
            'num_filters': 16,
            'filter_length': 4,
            'stride': 1,
            'nonlinearity': rectify,
            'border_mode': 'same'
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1) # back to (batch, time, features)
        },
        {
            'type': DenseLayer,
            'num_units': SEQ_LENGTH,
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': SEQ_LENGTH // 4,
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': SEQ_LENGTH // 8,
            'nonlinearity': rectify
        },
        {
            # Bottleneck layer of the autoencoder.
            'type': DenseLayer,
            'num_units': SEQ_LENGTH // 32,
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': SEQ_LENGTH // 8,
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': SEQ_LENGTH // 4,
            'nonlinearity': rectify
        },
        {
            # Linear output layer: one reconstructed value per time step.
            'type': DenseLayer,
            'num_units': SEQ_LENGTH,
            'nonlinearity': None
        }
    ]
    net = Net(**net_dict_copy)
    return net
def main():
    """Run each experiment letter in turn, logging failures per experiment."""
    EXPERIMENTS = list('a')
    for experiment in EXPERIMENTS:
        full_exp_name = NAME + experiment
        # init_experiment presumably returns the source text of the builder
        # call to make (e.g. "exp_a('e430a')") -- verify; it is eval'd below.
        func_call = init_experiment(PATH, experiment, full_exp_name)
        logger = logging.getLogger(full_exp_name)
        try:
            # NOTE(review): eval of a generated string; trusted input only.
            net = eval(func_call)
            run_experiment(net, epochs=None)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            # Log the traceback but keep going with the next experiment.
            logger.exception("Exception")
            # raise
        else:
            # Free the large cached activations before the next run.
            del net.source.train_activations
            gc.collect()
        finally:
            # NOTE(review): this runs after *every* iteration, so logging is
            # shut down while the loop may still continue -- confirm intended.
            logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e430.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
murali-munna/scikit-learn | sklearn/neighbors/base.py | 115 | 29783 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
# Metric names each backend can handle; the tree lists are delegated to the
# tree classes themselves.
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
                     kd_tree=KDTree.valid_metrics,
                     # The following list comes from the
                     # sklearn.metrics.pairwise doc string
                     brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
                            ['braycurtis', 'canberra', 'chebyshev',
                             'correlation', 'cosine', 'dice', 'hamming',
                             'jaccard', 'kulsinski', 'mahalanobis',
                             'matching', 'minkowski', 'rogerstanimoto',
                             'russellrao', 'seuclidean', 'sokalmichener',
                             'sokalsneath', 'sqeuclidean',
                             'yule', 'wminkowski']))
# Sparse input is only supported by the brute-force backend (see _fit below).
VALID_METRICS_SPARSE = dict(ball_tree=[],
                            kd_tree=[],
                            brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
    """Warning category used by the neighbors module."""
    pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
    """Base class for nearest neighbors estimators."""
    @abstractmethod
    def __init__(self):
        pass
    def _init_params(self, n_neighbors=None, radius=None,
                     algorithm='auto', leaf_size=30, metric='minkowski',
                     p=2, metric_params=None, **kwargs):
        # Validate and store the constructor parameters shared by all
        # neighbors-based estimators; called from subclass __init__.
        if kwargs:
            warnings.warn("Passing additional arguments to the metric "
                          "function as **kwargs is deprecated "
                          "and will no longer be supported in 0.18. "
                          "Use metric_params instead.",
                          DeprecationWarning, stacklevel=3)
            if metric_params is None:
                metric_params = {}
            metric_params.update(kwargs)
        self.n_neighbors = n_neighbors
        self.radius = radius
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.metric = metric
        self.metric_params = metric_params
        self.p = p
        if algorithm not in ['auto', 'brute',
                             'kd_tree', 'ball_tree']:
            raise ValueError("unrecognized algorithm: '%s'" % algorithm)
        # 'auto' is validated against the ball_tree metric list, since that
        # is the most permissive tree backend.
        if algorithm == 'auto':
            alg_check = 'ball_tree'
        else:
            alg_check = algorithm
        if callable(metric):
            if algorithm == 'kd_tree':
                # callable metric is only valid for brute force and ball_tree
                raise ValueError(
                    "kd_tree algorithm does not support callable metric '%s'"
                    % metric)
        elif metric not in VALID_METRICS[alg_check]:
            raise ValueError("Metric '%s' not valid for algorithm '%s'"
                             % (metric, algorithm))
        if self.metric_params is not None and 'p' in self.metric_params:
            # metric_params['p'] wins over the p keyword argument.
            warnings.warn("Parameter p is found in metric_params. "
                          "The corresponding parameter from __init__ "
                          "is ignored.", SyntaxWarning, stacklevel=3)
            effective_p = metric_params['p']
        else:
            effective_p = self.p
        if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
            # NOTE(review): the check is p < 1 (p == 1 allowed) although the
            # message says "greater than one" -- confirm intended wording.
            raise ValueError("p must be greater than one for minkowski metric")
        self._fit_X = None
        self._tree = None
        self._fit_method = None
    def _fit(self, X):
        # Resolve the effective metric and build the internal index
        # (ball tree, kd-tree, or nothing for brute force).  X may also be a
        # fitted NeighborsBase or a pre-built tree, which is adopted as-is.
        if self.metric_params is None:
            self.effective_metric_params_ = {}
        else:
            self.effective_metric_params_ = self.metric_params.copy()
        effective_p = self.effective_metric_params_.get('p', self.p)
        if self.metric in ['wminkowski', 'minkowski']:
            self.effective_metric_params_['p'] = effective_p
        self.effective_metric_ = self.metric
        # For minkowski distance, use more efficient methods where available
        if self.metric == 'minkowski':
            p = self.effective_metric_params_.pop('p', 2)
            if p < 1:
                raise ValueError("p must be greater than one "
                                 "for minkowski metric")
            elif p == 1:
                self.effective_metric_ = 'manhattan'
            elif p == 2:
                self.effective_metric_ = 'euclidean'
            elif p == np.inf:
                self.effective_metric_ = 'chebyshev'
            else:
                self.effective_metric_params_['p'] = p
        if isinstance(X, NeighborsBase):
            # Share the training data and index of an already-fitted model.
            self._fit_X = X._fit_X
            self._tree = X._tree
            self._fit_method = X._fit_method
            return self
        elif isinstance(X, BallTree):
            self._fit_X = X.data
            self._tree = X
            self._fit_method = 'ball_tree'
            return self
        elif isinstance(X, KDTree):
            self._fit_X = X.data
            self._tree = X
            self._fit_method = 'kd_tree'
            return self
        X = check_array(X, accept_sparse='csr')
        n_samples = X.shape[0]
        if n_samples == 0:
            raise ValueError("n_samples must be greater than 0")
        if issparse(X):
            # Sparse input forces the brute-force method.
            if self.algorithm not in ('auto', 'brute'):
                warnings.warn("cannot use tree with sparse input: "
                              "using brute force")
            if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
                raise ValueError("metric '%s' not valid for sparse input"
                                 % self.effective_metric_)
            self._fit_X = X.copy()
            self._tree = None
            self._fit_method = 'brute'
            return self
        self._fit_method = self.algorithm
        self._fit_X = X
        if self._fit_method == 'auto':
            # A tree approach is better for small number of neighbors,
            # and KDTree is generally faster when available
            if (self.n_neighbors is None
                    or self.n_neighbors < self._fit_X.shape[0] // 2):
                if self.effective_metric_ in VALID_METRICS['kd_tree']:
                    self._fit_method = 'kd_tree'
                else:
                    self._fit_method = 'ball_tree'
            else:
                self._fit_method = 'brute'
        if self._fit_method == 'ball_tree':
            self._tree = BallTree(X, self.leaf_size,
                                  metric=self.effective_metric_,
                                  **self.effective_metric_params_)
        elif self._fit_method == 'kd_tree':
            self._tree = KDTree(X, self.leaf_size,
                                metric=self.effective_metric_,
                                **self.effective_metric_params_)
        elif self._fit_method == 'brute':
            self._tree = None
        else:
            raise ValueError("algorithm = '%s' not recognized"
                             % self.algorithm)
        return self
class KNeighborsMixin(object):
    """Mixin for k-neighbors searches"""
    def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
        """Finds the K-neighbors of a point.
        Returns distance
        Parameters
        ----------
        X : array-like, last dimension same as that of fit data, optional
            The query point or points.
            If not provided, neighbors of each indexed point are returned.
            In this case, the query point is not considered its own neighbor.
        n_neighbors : int
            Number of neighbors to get (default is the value
            passed to the constructor).
        return_distance : boolean, optional. Defaults to True.
            If False, distances will not be returned
        Returns
        -------
        dist : array
            Array representing the lengths to points, only present if
            return_distance=True
        ind : array
            Indices of the nearest points in the population matrix.
        Examples
        --------
        In the following example, we construct a NeighborsClassifier
        class from an array representing our data set and ask who's
        the closest point to [1,1,1]
        >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
        >>> from sklearn.neighbors import NearestNeighbors
        >>> neigh = NearestNeighbors(n_neighbors=1)
        >>> neigh.fit(samples) # doctest: +ELLIPSIS
        NearestNeighbors(algorithm='auto', leaf_size=30, ...)
        >>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
        (array([[ 0.5]]), array([[2]]...))
        As you can see, it returns [[0.5]], and [[2]], which means that the
        element is at distance 0.5 and is the third element of samples
        (indexes start at 0). You can also query for multiple points:
        >>> X = [[0., 1., 0.], [1., 0., 1.]]
        >>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
        array([[1],
               [2]]...)
        """
        if self._fit_method is None:
            raise NotFittedError("Must fit neighbors before querying.")
        if n_neighbors is None:
            n_neighbors = self.n_neighbors
        if X is not None:
            query_is_train = False
            X = check_array(X, accept_sparse='csr')
        else:
            # No X given: query the training set against itself.
            query_is_train = True
            X = self._fit_X
            # Include an extra neighbor to account for the sample itself being
            # returned, which is removed later
            n_neighbors += 1
        train_size = self._fit_X.shape[0]
        if n_neighbors > train_size:
            raise ValueError(
                "Expected n_neighbors <= n_samples, "
                " but n_samples = %d, n_neighbors = %d" %
                (train_size, n_neighbors)
            )
        n_samples, _ = X.shape
        # Column vector of row indices, used for fancy-indexing below.
        sample_range = np.arange(n_samples)[:, None]
        if self._fit_method == 'brute':
            # for efficiency, use squared euclidean distances
            if self.effective_metric_ == 'euclidean':
                dist = pairwise_distances(X, self._fit_X, 'euclidean',
                                          squared=True)
            else:
                dist = pairwise_distances(X, self._fit_X,
                                          self.effective_metric_,
                                          **self.effective_metric_params_)
            # Partial sort: the n_neighbors smallest distances per row.
            neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
            neigh_ind = neigh_ind[:, :n_neighbors]
            # argpartition doesn't guarantee sorted order, so we sort again
            neigh_ind = neigh_ind[
                sample_range, np.argsort(dist[sample_range, neigh_ind])]
            if return_distance:
                if self.effective_metric_ == 'euclidean':
                    # Undo the squared-distance optimization before returning.
                    result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
                else:
                    result = dist[sample_range, neigh_ind], neigh_ind
            else:
                result = neigh_ind
        elif self._fit_method in ['ball_tree', 'kd_tree']:
            if issparse(X):
                raise ValueError(
                    "%s does not work with sparse matrices. Densify the data, "
                    "or set algorithm='brute'" % self._fit_method)
            result = self._tree.query(X, n_neighbors,
                                      return_distance=return_distance)
        else:
            raise ValueError("internal: _fit_method not recognized")
        if not query_is_train:
            return result
        else:
            # If the query data is the same as the indexed data, we would like
            # to ignore the first nearest neighbor of every sample, i.e
            # the sample itself.
            if return_distance:
                dist, neigh_ind = result
            else:
                neigh_ind = result
            sample_mask = neigh_ind != sample_range
            # Corner case: When the number of duplicates are more
            # than the number of neighbors, the first NN will not
            # be the sample, but a duplicate.
            # In that case mask the first duplicate.
            dup_gr_nbrs = np.all(sample_mask, axis=1)
            sample_mask[:, 0][dup_gr_nbrs] = False
            # Drop one entry per row (the sample itself or its duplicate).
            neigh_ind = np.reshape(
                neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
            if return_distance:
                dist = np.reshape(
                    dist[sample_mask], (n_samples, n_neighbors - 1))
                return dist, neigh_ind
            return neigh_ind
    def kneighbors_graph(self, X=None, n_neighbors=None,
                         mode='connectivity'):
        """Computes the (weighted) graph of k-Neighbors for points in X
        Parameters
        ----------
        X : array-like, last dimension same as that of fit data, optional
            The query point or points.
            If not provided, neighbors of each indexed point are returned.
            In this case, the query point is not considered its own neighbor.
        n_neighbors : int
            Number of neighbors for each sample.
            (default is value passed to the constructor).
        mode : {'connectivity', 'distance'}, optional
            Type of returned matrix: 'connectivity' will return the
            connectivity matrix with ones and zeros, in 'distance' the
            edges are Euclidean distance between points.
        Returns
        -------
        A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
            n_samples_fit is the number of samples in the fitted data
            A[i, j] is assigned the weight of edge that connects i to j.
        Examples
        --------
        >>> X = [[0], [3], [1]]
        >>> from sklearn.neighbors import NearestNeighbors
        >>> neigh = NearestNeighbors(n_neighbors=2)
        >>> neigh.fit(X) # doctest: +ELLIPSIS
        NearestNeighbors(algorithm='auto', leaf_size=30, ...)
        >>> A = neigh.kneighbors_graph(X)
        >>> A.toarray()
        array([[ 1.,  0.,  1.],
               [ 0.,  1.,  1.],
               [ 1.,  0.,  1.]])
        See also
        --------
        NearestNeighbors.radius_neighbors_graph
        """
        if n_neighbors is None:
            n_neighbors = self.n_neighbors
        # kneighbors does the None handling.
        if X is not None:
            X = check_array(X, accept_sparse='csr')
            n_samples1 = X.shape[0]
        else:
            n_samples1 = self._fit_X.shape[0]
        n_samples2 = self._fit_X.shape[0]
        n_nonzero = n_samples1 * n_neighbors
        # Every row of the graph stores exactly n_neighbors entries.
        A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
        # construct CSR matrix representation of the k-NN graph
        if mode == 'connectivity':
            A_data = np.ones(n_samples1 * n_neighbors)
            A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
        elif mode == 'distance':
            A_data, A_ind = self.kneighbors(
                X, n_neighbors, return_distance=True)
            A_data = np.ravel(A_data)
        else:
            raise ValueError(
                'Unsupported mode, must be one of "connectivity" '
                'or "distance" but got "%s" instead' % mode)
        kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
                                      shape=(n_samples1, n_samples2))
        return kneighbors_graph
class RadiusNeighborsMixin(object):
    """Mixin for radius-based neighbors searches"""
    def radius_neighbors(self, X=None, radius=None, return_distance=True):
        """Finds the neighbors within a given radius of a point or points.
        Return the indices and distances of each point from the dataset
        lying in a ball with size ``radius`` around the points of the query
        array. Points lying on the boundary are included in the results.
        The result points are *not* necessarily sorted by distance to their
        query point.
        Parameters
        ----------
        X : array-like, (n_samples, n_features), optional
            The query point or points.
            If not provided, neighbors of each indexed point are returned.
            In this case, the query point is not considered its own neighbor.
        radius : float
            Limiting distance of neighbors to return.
            (default is the value passed to the constructor).
        return_distance : boolean, optional. Defaults to True.
            If False, distances will not be returned
        Returns
        -------
        dist : array, shape (n_samples,) of arrays
            Array representing the distances to each point, only present if
            return_distance=True. The distance values are computed according
            to the ``metric`` constructor parameter.
        ind : array, shape (n_samples,) of arrays
            An array of arrays of indices of the approximate nearest points
            from the population matrix that lie within a ball of size
            ``radius`` around the query points.
        Examples
        --------
        In the following example, we construct a NeighborsClassifier
        class from an array representing our data set and ask who's
        the closest point to [1, 1, 1]:
        >>> import numpy as np
        >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
        >>> from sklearn.neighbors import NearestNeighbors
        >>> neigh = NearestNeighbors(radius=1.6)
        >>> neigh.fit(samples) # doctest: +ELLIPSIS
        NearestNeighbors(algorithm='auto', leaf_size=30, ...)
        >>> rng = neigh.radius_neighbors([1., 1., 1.])
        >>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
        [ 1.5  0.5]
        >>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
        [1 2]
        The first array returned contains the distances to all points which
        are closer than 1.6, while the second array returned contains their
        indices.  In general, multiple points can be queried at the same time.
        Notes
        -----
        Because the number of neighbors of each point is not necessarily
        equal, the results for multiple query points cannot be fit in a
        standard data array.
        For efficiency, `radius_neighbors` returns arrays of objects, where
        each object is a 1D array of indices or distances.
        """
        if self._fit_method is None:
            raise NotFittedError("Must fit neighbors before querying.")
        if X is not None:
            query_is_train = False
            X = check_array(X, accept_sparse='csr')
        else:
            # No X given: query the training set against itself.
            query_is_train = True
            X = self._fit_X
        if radius is None:
            radius = self.radius
        n_samples = X.shape[0]
        if self._fit_method == 'brute':
            # for efficiency, use squared euclidean distances
            if self.effective_metric_ == 'euclidean':
                dist = pairwise_distances(X, self._fit_X, 'euclidean',
                                          squared=True)
                # Square the radius too, so the comparison stays consistent.
                radius *= radius
            else:
                dist = pairwise_distances(X, self._fit_X,
                                          self.effective_metric_,
                                          **self.effective_metric_params_)
            neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
            # See https://github.com/numpy/numpy/issues/5456
            # if you want to understand why this is initialized this way.
            neigh_ind = np.empty(n_samples, dtype='object')
            neigh_ind[:] = neigh_ind_list
            if return_distance:
                dist_array = np.empty(n_samples, dtype='object')
                if self.effective_metric_ == 'euclidean':
                    # Undo the squared-distance optimization per row.
                    dist_list = [np.sqrt(d[neigh_ind[i]])
                                 for i, d in enumerate(dist)]
                else:
                    dist_list = [d[neigh_ind[i]]
                                 for i, d in enumerate(dist)]
                dist_array[:] = dist_list
                results = dist_array, neigh_ind
            else:
                results = neigh_ind
        elif self._fit_method in ['ball_tree', 'kd_tree']:
            if issparse(X):
                raise ValueError(
                    "%s does not work with sparse matrices. Densify the data, "
                    "or set algorithm='brute'" % self._fit_method)
            results = self._tree.query_radius(X, radius,
                                              return_distance=return_distance)
            if return_distance:
                # Reverse so the (dist, ind) order matches the brute branch.
                results = results[::-1]
        else:
            raise ValueError("internal: _fit_method not recognized")
        if not query_is_train:
            return results
        else:
            # If the query data is the same as the indexed data, we would like
            # to ignore the first nearest neighbor of every sample, i.e
            # the sample itself.
            if return_distance:
                dist, neigh_ind = results
            else:
                neigh_ind = results
            for ind, ind_neighbor in enumerate(neigh_ind):
                mask = ind_neighbor != ind
                neigh_ind[ind] = ind_neighbor[mask]
                if return_distance:
                    dist[ind] = dist[ind][mask]
            if return_distance:
                return dist, neigh_ind
            return neigh_ind
    def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
        """Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted the points at a distance lower than
        radius.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features], optional
            The query point or points.
            If not provided, neighbors of each indexed point are returned.
            In this case, the query point is not considered its own neighbor.
        radius : float
            Radius of neighborhoods.
            (default is the value passed to the constructor).
        mode : {'connectivity', 'distance'}, optional
            Type of returned matrix: 'connectivity' will return the
            connectivity matrix with ones and zeros, in 'distance' the
            edges are Euclidean distance between points.
        Returns
        -------
        A : sparse matrix in CSR format, shape = [n_samples, n_samples]
            A[i, j] is assigned the weight of edge that connects i to j.
        Examples
        --------
        >>> X = [[0], [3], [1]]
        >>> from sklearn.neighbors import NearestNeighbors
        >>> neigh = NearestNeighbors(radius=1.5)
        >>> neigh.fit(X) # doctest: +ELLIPSIS
        NearestNeighbors(algorithm='auto', leaf_size=30, ...)
        >>> A = neigh.radius_neighbors_graph(X)
        >>> A.toarray()
        array([[ 1.,  0.,  1.],
               [ 0.,  1.,  0.],
               [ 1.,  0.,  1.]])
        See also
        --------
        kneighbors_graph
        """
        if X is not None:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
        n_samples2 = self._fit_X.shape[0]
        if radius is None:
            radius = self.radius
        # construct CSR matrix representation of the NN graph
        if mode == 'connectivity':
            A_ind = self.radius_neighbors(X, radius,
                                          return_distance=False)
            A_data = None
        elif mode == 'distance':
            dist, A_ind = self.radius_neighbors(X, radius,
                                                return_distance=True)
            A_data = np.concatenate(list(dist))
        else:
            raise ValueError(
                'Unsupported mode, must be one of "connectivity", '
                'or "distance" but got %s instead' % mode)
        n_samples1 = A_ind.shape[0]
        # Rows have varying numbers of neighbors, so build indptr from the
        # cumulative per-row counts.
        n_neighbors = np.array([len(a) for a in A_ind])
        A_ind = np.concatenate(list(A_ind))
        if A_data is None:
            A_data = np.ones(len(A_ind))
        A_indptr = np.concatenate((np.zeros(1, dtype=int),
                                   np.cumsum(n_neighbors)))
        return csr_matrix((A_data, A_ind, A_indptr),
                          shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
    """Mixin providing ``fit`` for real-valued (regression) targets."""
    def fit(self, X, y):
        """Fit the model on training data X with float target values y.
        Parameters
        ----------
        X : {array-like, sparse matrix, BallTree, KDTree}
            Training data. If array or matrix, shape = [n_samples, n_features]
        y : {array-like, sparse matrix}
            Target values, array of float values, shape = [n_samples]
            or [n_samples, n_outputs]
        """
        already_indexed = isinstance(X, (KDTree, BallTree))
        if not already_indexed:
            # Validate/convert only raw array input; trees are used as-is.
            X, y = check_X_y(X, y, "csr", multi_output=True)
        self._y = y
        return self._fit(X)
class SupervisedIntegerMixin(object):
    """Mixin providing ``fit`` for integer (classification) targets."""
    def fit(self, X, y):
        """Fit the model using X as training data and y as target values
        Parameters
        ----------
        X : {array-like, sparse matrix, BallTree, KDTree}
            Training data. If array or matrix, shape = [n_samples, n_features]
        y : {array-like, sparse matrix}
            Target values of shape = [n_samples] or [n_samples, n_outputs]
        """
        if not isinstance(X, (KDTree, BallTree)):
            X, y = check_X_y(X, y, "csr", multi_output=True)
        if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
            # Single output: accept both (n_samples,) and (n_samples, 1),
            # but warn about the column-vector form.
            if y.ndim != 1:
                warnings.warn("A column-vector y was passed when a 1d array "
                              "was expected. Please change the shape of y to "
                              "(n_samples, ), for example using ravel().",
                              DataConversionWarning, stacklevel=2)
            self.outputs_2d_ = False
            y = y.reshape((-1, 1))
        else:
            self.outputs_2d_ = True
        # Encode each output column as integer indices into its own sorted
        # array of class labels (stored in self.classes_).
        self.classes_ = []
        # Use the builtin ``int`` here: ``np.int`` was a deprecated alias for
        # it and has been removed in NumPy >= 1.24, so the old spelling now
        # raises AttributeError.  Behavior is unchanged on older NumPy.
        self._y = np.empty(y.shape, dtype=int)
        for k in range(self._y.shape[1]):
            classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes)
        if not self.outputs_2d_:
            # Collapse the single-output case back to 1-D for convenience.
            self.classes_ = self.classes_[0]
            self._y = self._y.ravel()
        return self._fit(X)
class UnsupervisedMixin(object):
    """Mixin providing a ``fit`` that uses the data alone (no targets)."""
    def fit(self, X, y=None):
        """Fit the model using X as training data
        Parameters
        ----------
        X : {array-like, sparse matrix, BallTree, KDTree}
            Training data. If array or matrix, shape = [n_samples, n_features]
        """
        # y is accepted (and ignored) so the signature matches the common
        # scikit-learn estimator API.
        return self._fit(X)
| bsd-3-clause |
KT12/hands_on_machine_learning | time_series_rnn_without_wrapper.py | 1 | 3226 | # Predict time series w/o using OutputProjectWrapper
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Synthetic target signal: a slowly growing sine ramp plus a faster
# oscillation, sampled every `resolution` time units on [t_min, t_max].
t_min, t_max = 0, 30
resolution = 0.1
def time_series(t):
    """Evaluate the target signal at time(s) ``t`` (scalar or ndarray)."""
    slow = t * np.sin(t) / 3
    fast = 2 * np.sin(t * 5)
    return slow + fast
def next_batch(batch_size, n_steps):
    """Sample ``batch_size`` random windows of ``n_steps`` consecutive points.

    Returns ``(X, y)``, each of shape (batch_size, n_steps, 1), where ``y``
    is ``X`` shifted one step into the future.
    """
    starts = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)
    times = starts + np.arange(0., n_steps + 1) * resolution
    series = time_series(times)
    inputs = series[:, :-1].reshape(-1, n_steps, 1)
    targets = series[:, 1:].reshape(-1, n_steps, 1)
    return inputs, targets
# Time axis for the full series.
# NOTE(review): (t_max - t_min) // resolution is float floor division
# (-> 299.0); newer NumPy requires an integer `num` for linspace -- confirm.
t = np.linspace(t_min, t_max, (t_max - t_min) // resolution)
n_steps = 20
# Fixed window used for the one-step-ahead prediction demo below.
t_instance = np.linspace(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)
n_inputs = 1
n_neurons = 100
n_outputs = 1
# Sequence-to-sequence RNN.  Instead of OutputProjectionWrapper, the
# 100-unit cell outputs are projected to one value per step by a shared
# dense layer applied to the flattened outputs.
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons,
                                        activation=tf.nn.relu)
rnn_outputs, states = tf.nn.dynamic_rnn(basic_cell, X,
                                        dtype=tf.float32)
learning_rate = 0.001
# Flatten (batch, steps, neurons) -> (batch*steps, neurons), apply one
# dense layer, then restore the (batch, steps, n_outputs) shape.
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
loss = tf.reduce_sum(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
n_iterations = 1000
batch_size = 50
with tf.Session() as sess:
    init.run()
    for k in range(n_iterations):
        X_batch, y_batch = next_batch(batch_size, n_steps)
        sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        if k % 100 == 0:
            mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
            print(k, "\tMSE: ", mse)
    # Predict one step ahead over the fixed window.
    X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
    y_pred = sess.run(outputs, feed_dict={X: X_new})
    print(y_pred)
# Generate a creative new sequence (retrain, then free-run the network).
n_iterations = 2000
batch_size = 50
with tf.Session() as sess:
    init.run()
    for k in range(n_iterations):
        X_batch, y_batch = next_batch(batch_size, n_steps)
        sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        if k % 100 == 0:
            mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
            print(k, "\tMSE: ", mse)
    # Seed with zeros and let the net free-run: feed its own output back in.
    sequence1 = [0. for j in range(n_steps)]
    for k in range(len(t) - n_steps):
        X_batch = np.array(sequence1[-n_steps:]).reshape(1, n_steps, 1)
        y_pred = sess.run(outputs, feed_dict={X: X_batch})
        sequence1.append(y_pred[0, -1, 0])
    # Seed with real signal values instead of zeros.
    # NOTE(review): precedence makes this t_max - (t_min / 3); the intended
    # offset was probably (t_max - t_min) / 3 -- equal only because
    # t_min == 0 here; confirm.
    sequence2 = [time_series(i * resolution + t_min + (t_max-t_min/3)) for i in range(n_steps)]
    for j in range(len(t) - n_steps):
        X_batch = np.array(sequence2[-n_steps:]).reshape(1, n_steps, 1)
        y_pred = sess.run(outputs, feed_dict={X: X_batch})
        sequence2.append(y_pred[0, -1, 0])
# Plot the two generated sequences; the thick prefix marks the seed window.
plt.figure(figsize=(11,4))
plt.subplot(121)
plt.plot(t, sequence1, 'b-')
plt.plot(t[:n_steps],sequence1[:n_steps], 'b-', linewidth=3)
plt.xlabel('Time')
plt.ylabel('Value')
plt.subplot(122)
plt.plot(t, sequence2, 'b-')
plt.plot(t[:n_steps], sequence2[:n_steps], 'b-', linewidth=3)
plt.xlabel('Time')
plt.show() | mit |
MartinSavc/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
    # Benchmark grid: 5 training-set sizes x 3 feature counts.
    list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
    list_n_features = [10, 100, 1000]
    n_test = 1000
    noise = 0.1
    alpha = 0.01
    # results[i, j] holds (test MSE, fit time) per (n_train, n_features).
    sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    for i, n_train in enumerate(list_n_samples):
        for j, n_features in enumerate(list_n_features):
            X, y, coef = make_regression(
                n_samples=n_train + n_test, n_features=n_features,
                noise=noise, coef=True)

            X_train = X[:n_train]
            y_train = y[:n_train]
            X_test = X[n_train:]
            y_test = y[n_train:]

            print("=======================")
            print("Round %d %d" % (i, j))
            print("n_features:", n_features)
            print("n_samples:", n_train)

            # Shuffle data (fixed seed keeps runs reproducible)
            idx = np.arange(n_train)
            np.random.seed(13)
            np.random.shuffle(idx)
            X_train = X_train[idx]
            y_train = y_train[idx]

            # Standardize features and targets using training statistics.
            std = X_train.std(axis=0)
            mean = X_train.mean(axis=0)
            X_train = (X_train - mean) / std
            X_test = (X_test - mean) / std

            std = y_train.std(axis=0)
            mean = y_train.mean(axis=0)
            y_train = (y_train - mean) / std
            y_test = (y_test - mean) / std

            # Collect garbage before timing so GC pauses don't skew results.
            gc.collect()
            print("- benchmarking ElasticNet")
            clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
            tstart = time()
            clf.fit(X_train, y_train)
            elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                        y_test)
            elnet_results[i, j, 1] = time() - tstart

            gc.collect()
            print("- benchmarking SGD")
            # Scale epochs so each run sees roughly 10^4 samples in total.
            n_iter = np.ceil(10 ** 4.0 / n_train)
            clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
                               n_iter=n_iter, learning_rate="invscaling",
                               eta0=.01, power_t=0.25)
            tstart = time()
            clf.fit(X_train, y_train)
            sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                      y_test)
            sgd_results[i, j, 1] = time() - tstart

            gc.collect()
            print("n_iter", n_iter)
            print("- benchmarking A-SGD")
            n_iter = np.ceil(10 ** 4.0 / n_train)
            # average=... turns on weight averaging halfway through training.
            clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
                               n_iter=n_iter, learning_rate="invscaling",
                               eta0=.002, power_t=0.05,
                               average=(n_iter * n_train // 2))
            tstart = time()
            clf.fit(X_train, y_train)
            asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                       y_test)
            asgd_results[i, j, 1] = time() - tstart

            gc.collect()
            print("- benchmarking RidgeRegression")
            clf = Ridge(alpha=alpha, fit_intercept=False)
            tstart = time()
            clf.fit(X_train, y_train)
            ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                        y_test)
            ridge_results[i, j, 1] = time() - tstart

    # Plot results: left column RMSE vs n_train, right column fit time.
    i = 0
    m = len(list_n_features)
    pl.figure('scikit-learn SGD regression benchmark results',
              figsize=(5 * 2, 4 * m))
    for j in range(m):
        pl.subplot(m, 2, i + 1)
        pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
                label="ElasticNet")
        pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
                label="SGDRegressor")
        pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
                label="A-SGDRegressor")
        pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
                label="Ridge")
        pl.legend(prop={"size": 10})
        pl.xlabel("n_train")
        pl.ylabel("RMSE")
        pl.title("Test error - %d features" % list_n_features[j])
        i += 1

        pl.subplot(m, 2, i + 1)
        # NOTE(review): sqrt is also applied to the timing column although the
        # axis is labelled "Time [sec]" -- confirm whether this is intended.
        pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
                label="ElasticNet")
        pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
                label="SGDRegressor")
        pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
                label="A-SGDRegressor")
        pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
                label="Ridge")
        pl.legend(prop={"size": 10})
        pl.xlabel("n_train")
        pl.ylabel("Time [sec]")
        pl.title("Training time - %d features" % list_n_features[j])
        i += 1

    pl.subplots_adjust(hspace=.30)

    pl.show()
| bsd-3-clause |
chetan51/nupic | examples/opf/clients/cpu/cpu.py | 17 | 3151 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""A simple client to read CPU usage and predict it in real time."""
from collections import deque
import time
import psutil
import matplotlib.pyplot as plt
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.modelfactory import ModelFactory
import model_params
# Seconds between successive CPU samples (and model steps).
SECONDS_PER_STEP = 2
# Number of recent points kept on the rolling plot.
WINDOW = 60

# turn matplotlib interactive mode on (ion) so the chart updates live
plt.ion()
fig = plt.figure()
# plot title, axis labels, etc.
plt.title('CPU prediction example')
plt.xlabel('time [s]')
plt.ylabel('CPU usage [%]')
def runCPU():
  """Poll CPU usage, make predictions, and plot the results.

  Runs forever (until interrupted): samples CPU usage every
  SECONDS_PER_STEP seconds, feeds it into the OPF model, and live-plots
  actual vs. predicted values for the last WINDOW samples.
  """
  # Create the model for predicting CPU usage.
  model = ModelFactory.create(model_params.MODEL_PARAMS)
  model.enableInference({'predictedField': 'cpu'})
  # The shifter will align prediction and actual values.
  shifter = InferenceShifter()
  # Keep the last WINDOW predicted and actual values for plotting.
  # Fix: use WINDOW for maxlen instead of a duplicated literal 60, so the
  # deques stay in sync with the plot window if WINDOW is ever changed.
  actHistory = deque([0.0] * WINDOW, maxlen=WINDOW)
  predHistory = deque([0.0] * WINDOW, maxlen=WINDOW)

  # Initialize the plot lines that we will update with each new record.
  actline, = plt.plot(range(WINDOW), actHistory)
  predline, = plt.plot(range(WINDOW), predHistory)
  # Set the y-axis range (CPU percentage).
  actline.axes.set_ylim(0, 100)
  predline.axes.set_ylim(0, 100)

  while True:
    # Get the CPU usage.
    cpu = psutil.cpu_percent()

    # Run the input through the model and shift the resulting prediction
    # so it lines up with the actual value it predicts.
    modelInput = {'cpu': cpu}
    result = shifter.shift(model.run(modelInput))

    # Update the trailing predicted and actual value deques.
    inference = result.inferences['multiStepBestPredictions'][5]
    if inference is not None:
      actHistory.append(result.rawInput['cpu'])
      predHistory.append(inference)

    # Redraw the chart with the new data.
    actline.set_ydata(actHistory)  # update the data
    predline.set_ydata(predHistory)  # update the data
    plt.draw()
    plt.legend(('actual', 'predicted'))

    # Pause between iterations.
    # NOTE(review): the pause does not subtract the model-compute time, so
    # each iteration actually takes slightly more than SECONDS_PER_STEP.
    try:
      plt.pause(SECONDS_PER_STEP)
    except Exception:
      # Fix: the original bare `except:` also swallowed SystemExit and
      # KeyboardInterrupt, making the loop impossible to stop cleanly.
      # plt.pause can raise when the figure window is closed; ignore only
      # ordinary exceptions.
      pass
if __name__ == "__main__":
  # Entry point: run the live CPU prediction loop until interrupted.
  runCPU()
| gpl-3.0 |
matthiasplappert/motion_classification | src/evaluate_features.py | 1 | 7928 | # coding=utf8
from collections import namedtuple
from argparse import ArgumentParser
import timeit
import os
import logging
from itertools import chain, combinations
import csv
import numpy as np
from sklearn.cross_validation import ShuffleSplit
from sklearn.preprocessing import MinMaxScaler
from toolkit.hmm.impl_hmmlearn import GaussianHMM
from toolkit.dataset.base import load_manifest
import toolkit.metrics as metrics
import toolkit.dataset.mmm as mmm
import toolkit.dataset.vicon as vicon
Dataset = namedtuple('Dataset', 'X y target_names groups lengths')
def get_parser():
    """Build the command-line parser for the feature-evaluation script."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument('dataset', help='path to the dataset')
    arg_parser.add_argument('--output', type=str, default=None)
    arg_parser.add_argument('--topology',
                            choices=['left-to-right', 'left-to-right-cycle',
                                     'bakis', 'full'],
                            default='left-to-right')
    arg_parser.add_argument('--n-training-iterations', type=int, default=10)
    arg_parser.add_argument('--n-iterations', type=int, default=10)
    arg_parser.add_argument('--test-size', type=float, default=0.1)
    arg_parser.add_argument('--n-states', type=int, default=10)
    arg_parser.add_argument('--disable-cache', action='store_true')
    arg_parser.add_argument('--normalize', action='store_true')
    arg_parser.add_argument('--preprocessing', nargs='*', choices=['scale'],
                            default=['scale'])
    # Features from both data representations are selectable; all by default.
    feature_choices = mmm.FEATURE_NAMES + vicon.FEATURE_NAMES
    arg_parser.add_argument('--features', choices=feature_choices, nargs='+',
                            default=feature_choices)
    return arg_parser
def evaluate(X, args):
    """Train HMMs over repeated shuffle-splits of X and score them.

    Returns (train_mean, train_std, test_mean, test_std) of per-sequence
    log-likelihoods, aggregated over all iterations.
    """
    splits = ShuffleSplit(len(X), n_iter=args.n_iterations,
                          test_size=args.test_size)
    train_scores, test_scores = [], []
    for train_idx, test_idx in splits:
        train_set = [X[i] for i in train_idx]
        test_set = [X[i] for i in test_idx]
        train_set, test_set = preprocess_datasets(train_set, test_set, args)

        model = GaussianHMM(n_states=args.n_states,
                            n_training_iterations=args.n_training_iterations,
                            topology=args.topology)
        model.fit(train_set)
        train_scores.extend(model.loglikelihood(seq) for seq in train_set)
        test_scores.extend(model.loglikelihood(seq) for seq in test_set)

    def _stats(scores):
        # Mean and standard deviation of a score list as plain floats.
        arr = np.array(scores)
        return float(np.mean(arr)), float(np.std(arr))

    train_mean, train_std = _stats(train_scores)
    test_mean, test_std = _stats(test_scores)
    return train_mean, train_std, test_mean, test_std
def load_dataset(path, motion_type, feature_names, args):
    """Load one manifest from `path` and bundle the result in a Dataset."""
    print('Loading data set "%s" ...' % path)
    samples, labels, target_names, groups, lengths = load_manifest(
        path, motion_type, feature_names=feature_names,
        use_cache=not args.disable_cache, normalize=args.normalize)
    # One label row per sample sequence.
    assert len(samples) == len(labels)
    return Dataset(samples, labels, target_names, groups, lengths)
def feature_indexes_from_set(all_features, feature_set, lengths):
    """Return the flat column indexes covered by the selected features.

    ``all_features`` and ``lengths`` run in parallel: feature k occupies
    ``lengths[k]`` consecutive columns.  Only columns whose feature name
    appears in ``feature_set`` are returned, in ascending order.
    """
    # Prefix sums give each feature's starting column.
    starts = []
    offset = 0
    for width in lengths:
        starts.append(offset)
        offset += width
    return [column
            for name, begin, width in zip(all_features, starts, lengths)
            if name in feature_set
            for column in range(begin, begin + width)]
def preprocess_datasets(X_train, X_test, args):
    """Optionally scale train/test sequences to [-1, 1] feature-wise.

    The scaler is fit on the stacked training sequences only, then applied
    to both splits.  Returns the (possibly transformed) pair.
    """
    if 'scale' not in args.preprocessing:
        # No preprocessing requested; pass the splits through untouched.
        return X_train, X_test
    print('Scaling features to range [-1,1] ...')
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler.fit(np.vstack(X_train))
    return ([scaler.transform(seq) for seq in X_train],
            [scaler.transform(seq) for seq in X_test])
def main(args):
    """Evaluate every non-empty subset of the selected features.

    For each feature combination, trains Gaussian HMMs via repeated
    shuffle-splits (see ``evaluate``) and reports train/test log-likelihood
    statistics; optionally writes all results to a CSV at args.output.
    """
    start = timeit.default_timer()

    # Validate that paths exist so that we don't need to check that whenever
    # we use them.
    if not os.path.exists(args.dataset):
        exit('data set at path "%s" does not exist' % args.dataset)
    print('Arguments: %s\n' % args)

    # Load both representations of the dataset and combine them.
    all_features = args.features
    mmm_features = [feature for feature in all_features if feature in mmm.FEATURE_NAMES]
    vicon_features = [feature for feature in all_features if feature in vicon.FEATURE_NAMES]
    mmm_data = load_dataset(args.dataset, 'mmm-nlopt', mmm_features, args)
    vicon_data = load_dataset(args.dataset, 'vicon', vicon_features, args)
    assert mmm_data.y.shape == vicon_data.y.shape
    assert mmm_data.y.shape == (len(mmm_data.X), 1)  # only one class per data set
    assert len(vicon_data.X) == len(mmm_data.X)
    lengths = mmm_data.lengths + vicon_data.lengths

    # Concatenate the per-sample feature matrices column-wise.
    # Fix: use range() instead of the Python-2-only xrange() so the script
    # also runs under Python 3 (the rest of the file is 2/3-compatible).
    X = []
    for idx in range(len(mmm_data.X)):
        X.append(np.hstack((mmm_data.X[idx], vicon_data.X[idx])))
    assert len(mmm_data.X) == len(X)

    # Calculate power set of all features
    # (source: http://stackoverflow.com/questions/10342939/power-set-and-cartesian-product-of-a-set-python)
    all_features_power_set = []
    for z in chain.from_iterable(combinations(all_features, r) for r in range(len(all_features)+1)):
        if len(z) == 0:
            # Skip the empty set
            continue
        all_features_power_set.append(z)

    print('\nEvaluating %d feature combinations on %d samples ...' % (len(all_features_power_set), len(X)))
    train_means, train_stds = [], []
    test_means, test_stds = [], []
    for idx, feature_set in enumerate(all_features_power_set):
        print('(%.3d/%.3d): evaluating %s ...' % (idx+1, len(all_features_power_set), feature_set, ))
        current_set_start = timeit.default_timer()
        # Select only the columns belonging to this feature subset.
        indexes = feature_indexes_from_set(all_features, feature_set, lengths)
        X_curr = [X_curr[:, indexes] for X_curr in X]  # right now this makes a copy of the sub array
        assert len(X_curr) == len(X)
        assert X_curr[0].shape[1] == len(indexes)
        train_mean, train_std, test_mean, test_std = evaluate(X_curr, args)
        print('train: %f +-%f' % (train_mean, train_std))
        print('test: %f +-%f' % (test_mean, test_std))
        print('shape: %s' % str(X_curr[0].shape))
        print('done, took %fs\n' % (timeit.default_timer() - current_set_start))

        # Bookkeeping
        train_means.append(train_mean)
        train_stds.append(train_std)
        test_means.append(test_mean)
        test_stds.append(test_std)
    assert len(train_means) == len(train_stds) == len(test_means) == len(test_stds) == len(all_features_power_set)

    # Calculate best feature set and report
    best_train_idx = np.argmax(np.array(train_means))
    best_test_idx = np.argmax(np.array(test_means))
    print('Results:')
    print('total time: %fs' % (timeit.default_timer() - start))
    print('best feature set on train set with score %f: %s' % (train_means[best_train_idx], all_features_power_set[best_train_idx]))
    print('best feature set on test set with score %f: %s' % (test_means[best_test_idx], all_features_power_set[best_test_idx]))

    # Save results
    if args.output:
        print('\nSaving results to "%s" ...' % args.output)
        fieldnames = ['features', 'train_mean', 'train_std', 'test_mean', 'test_std']
        with open(args.output, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            for features, train_mean, train_std, test_mean, test_std in zip(all_features_power_set, train_means,
                                                                            train_stds, test_means, test_stds):
                writer.writerow({'features': str(features),
                                 'train_mean': train_mean,
                                 'train_std': train_std,
                                 'test_mean': test_mean,
                                 'test_std': test_std})
        print('done')
if __name__ == '__main__':
    # Debug-level logging surfaces training progress from the toolkit.
    logging.basicConfig(level=logging.DEBUG)
    main(get_parser().parse_args())
| mit |
oduwa/Wheat-Count | PicNumero/build_classifier.py | 2 | 8319 | import Display
import Helper
from skimage.color import rgb2gray
import numpy as np
from scipy import misc
from sklearn import svm, grid_search, metrics
from sklearn.neural_network import MLPClassifier
from skimage.feature import greycomatrix, greycoprops
from skimage import img_as_ubyte, io
from sklearn import decomposition
import matplotlib.pyplot as plt
import string
import random
import os, sys
import tqdm
# The name of the file where we will store the serialized classifier.
MLP_FILE = '../Models/MLP_glcmdistance1.data'#'../Models/MLP.data'
def get_textural_features(img):
    """Compute a 4-element GLCM texture descriptor for an image.

    The image is converted to 8-bit greyscale, a grey-level co-occurrence
    matrix is built (distance 1, angle 0), and the dissimilarity,
    correlation, homogeneity and energy properties are returned, in that
    order, as a numpy array.
    """
    grey = img_as_ubyte(rgb2gray(img))
    glcm = greycomatrix(grey, [1], [0], 256, symmetric=True, normed=True)
    properties = ('dissimilarity', 'correlation', 'homogeneity', 'energy')
    return np.array([greycoprops(glcm, prop)[0, 0] for prop in properties])
## featureRepresentation = {'image', 'pca', 'glcm'}
def main(featureRepresentation='image'):
    """Train a grid-searched MLP grain classifier and keep the best model.

    featureRepresentation selects the image encoding: 'image' (raw pixels),
    'pca' (PCA-reduced pixels) or 'glcm' (4 GLCM texture features).  The new
    model is serialized to MLP_FILE only if it beats the previously saved
    model on the test set.
    """
    # --- Load training data: positives labelled 1, negatives 0 -----------
    # NOTE(review): the "-1" assumes a .DS_Store entry exists in each
    # directory (macOS); on other systems the label counts will be off by one.
    train_filenames = []
    for filename in os.listdir("../train/positive"):
        if filename != ".DS_Store":
            train_filenames.append("../train/positive/" + filename)
    train_targets = [1] * (len(os.listdir("../train/positive")) - 1)

    for filename in os.listdir("../train/negative"):
        if filename != ".DS_Store":
            train_filenames.append("../train/negative/" + filename)
    train_targets = train_targets + [0] * (len(os.listdir("../train/negative")) - 1)

    n_train_samples = len(train_filenames)
    # GLCM descriptors have 4 values; otherwise images are 20x20 pixels.
    if featureRepresentation == 'glcm':
        sample_size = 4
    else:
        sample_size = 20 * 20
    train_data = np.zeros((n_train_samples, sample_size))
    i = 0
    for filename in train_filenames:
        img = io.imread(filename)
        if featureRepresentation == 'image':
            train_data[i] = img.flatten()
        elif featureRepresentation == 'pca':
            # Fix: 'ecomposition' was a NameError -- the sklearn module is
            # imported as 'decomposition'.
            # NOTE(review): fit_transform on a single flattened vector is
            # dubious (PCA expects a 2-D (n_samples, n_features) array);
            # confirm the intended PCA usage.
            train_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
        elif featureRepresentation == 'glcm':
            train_data[i] = get_textural_features(img)
        i = i + 1

    # --- Load test data ---------------------------------------------------
    # Fix: test images are read from "../test/", so list that same directory
    # (previously os.listdir("test") pointed at a different location).
    test_filenames = []
    expected = []
    for filename in os.listdir("../test"):
        if filename != ".DS_Store":
            test_filenames.append("../test/" + filename)
            # Ground-truth label is encoded in the filename: name_<label>.ext
            expected.append(int(filename.split('_')[1].split('.')[0]))

    n_test_samples = len(test_filenames)
    test_data = np.zeros((n_test_samples, sample_size))
    i = 0
    for filename in test_filenames:
        img = io.imread(filename)
        if featureRepresentation == 'image':
            test_data[i] = img.flatten()
        elif featureRepresentation == 'pca':
            test_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
        elif featureRepresentation == 'glcm':
            test_data[i] = get_textural_features(img)
        i = i + 1

    # --- Fit classifier via grid search over MLP hyper-parameters ---------
    param_grid = {"algorithm": ["l-bfgs", "sgd", "adam"],
                  "activation": ["logistic", "relu", "tanh"],
                  "hidden_layer_sizes": [(5, 2), (5), (100), (150), (200)]}
    clf = grid_search.GridSearchCV(MLPClassifier(), param_grid)
    clf.fit(train_data, train_targets)
    print(clf)
    classifier = clf

    # --- Compare against the previously serialized model ------------------
    serialized_classifier = Helper.unserialize(MLP_FILE)
    predictions = serialized_classifier.predict(test_data)
    confusion_matrix = metrics.confusion_matrix(expected, predictions)
    print("Old Confusion matrix:\n%s" % confusion_matrix)
    serialized_n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]

    predictions = classifier.predict(test_data)
    confusion_matrix = metrics.confusion_matrix(expected, predictions)
    print("New Confusion matrix:\n%s" % confusion_matrix)
    n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]

    # Persist the new model only if it classifies more test samples correctly.
    if n_correct > serialized_n_correct:
        Helper.serialize(MLP_FILE, classifier)
        print("SAVED MODEL")
## featureRepresentation = {'image', 'pca', 'glcm'}
def generate_model(featureRepresentation='image', iters=10):
    """Repeatedly grid-search MLP classifiers, keeping the best on disk.

    Runs ``iters`` build iterations; after each, the new model replaces the
    one serialized in MLP_FILE only if it classifies more test samples
    correctly.  featureRepresentation: 'image', 'pca' or 'glcm' (see main()).
    """
    # --- Load training data: positives labelled 1, negatives 0 -----------
    # NOTE(review): the "-1" assumes a .DS_Store entry in each directory.
    train_filenames = []
    for filename in os.listdir("../train/positive"):
        if filename != ".DS_Store":
            train_filenames.append("../train/positive/" + filename)
    train_targets = [1] * (len(os.listdir("../train/positive")) - 1)

    for filename in os.listdir("../train/negative"):
        if filename != ".DS_Store":
            train_filenames.append("../train/negative/" + filename)
    train_targets = train_targets + [0] * (len(os.listdir("../train/negative")) - 1)

    n_train_samples = len(train_filenames)
    if featureRepresentation == 'glcm':
        sample_size = 4
    else:
        sample_size = 20 * 20
    train_data = np.zeros((n_train_samples, sample_size))
    i = 0
    for filename in train_filenames:
        img = io.imread(filename)
        if featureRepresentation == 'image':
            train_data[i] = img.flatten()
        elif featureRepresentation == 'pca':
            # Fix: 'ecomposition' was a NameError ('decomposition' is the
            # imported sklearn module).
            train_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
        elif featureRepresentation == 'glcm':
            train_data[i] = get_textural_features(img)
        i = i + 1

    # --- Load test data ---------------------------------------------------
    # Fix: list "../test" to match the "../test/" paths used for reading
    # (previously os.listdir("test") pointed at a different location).
    test_filenames = []
    expected = []
    for filename in os.listdir("../test"):
        if filename != ".DS_Store":
            test_filenames.append("../test/" + filename)
            expected.append(int(filename.split('_')[1].split('.')[0]))

    n_test_samples = len(test_filenames)
    test_data = np.zeros((n_test_samples, sample_size))
    i = 0
    for filename in test_filenames:
        img = io.imread(filename)
        if featureRepresentation == 'image':
            test_data[i] = img.flatten()
        elif featureRepresentation == 'pca':
            test_data[i] = decomposition.PCA(n_components=8).fit_transform(img.flatten())
        elif featureRepresentation == 'glcm':
            test_data[i] = get_textural_features(img)
        i = i + 1

    # --- Perform build iterations ------------------------------------------
    for i in tqdm.tqdm(range(0, iters)):
        # Build classifier via grid search over MLP hyper-parameters.
        param_grid = {"algorithm": ["l-bfgs", "sgd", "adam"],
                      "activation": ["logistic", "relu", "tanh"],
                      "hidden_layer_sizes": [(5, 2), (5), (100), (150), (200)]}
        classifier = grid_search.GridSearchCV(MLPClassifier(), param_grid)
        classifier.fit(train_data, train_targets)

        # Get previous classifier (if any) and keep whichever scores better
        # on the test set.
        serialized_classifier = Helper.unserialize(MLP_FILE)
        if serialized_classifier:
            predictions = serialized_classifier.predict(test_data)
            confusion_matrix = metrics.confusion_matrix(expected, predictions)
            serialized_n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]

            predictions = classifier.predict(test_data)
            confusion_matrix = metrics.confusion_matrix(expected, predictions)
            n_correct = confusion_matrix[0][0] + confusion_matrix[1][1]
            if n_correct > serialized_n_correct:
                Helper.serialize(MLP_FILE, classifier)
        else:
            Helper.serialize(MLP_FILE, classifier)

    # Display final (persisted) model performance.
    serialized_classifier = Helper.unserialize(MLP_FILE)
    predictions = serialized_classifier.predict(test_data)
    confusion_matrix = metrics.confusion_matrix(expected, predictions)
    print("Old Confusion matrix:\n%s" % confusion_matrix)
#main('glcm');
# Module-level entry point: refresh the serialized GLCM-based MLP model.
generate_model('glcm', iters=20)
| mit |
eickenberg/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':

    import pylab as pl

    # Time Ridge, OLS and LassoLars fits on growing random square problems.
    n_iter = 40

    time_ridge = np.empty(n_iter)
    time_ols = np.empty(n_iter)
    time_lasso = np.empty(n_iter)

    dimensions = 500 * np.arange(1, n_iter + 1)

    for i in range(n_iter):
        print('Iteration %s of %s' % (i, n_iter))

        # Square problem: as many samples as features.
        n_samples, n_features = 10 * i + 3, 10 * i + 3

        X = np.random.randn(n_samples, n_features)
        Y = np.random.randn(n_samples)

        start = datetime.now()
        ridge = linear_model.Ridge(alpha=1.)
        ridge.fit(X, Y)
        time_ridge[i] = total_seconds(datetime.now() - start)

        start = datetime.now()
        ols = linear_model.LinearRegression()
        ols.fit(X, Y)
        time_ols[i] = total_seconds(datetime.now() - start)

        start = datetime.now()
        lasso = linear_model.LassoLars()
        lasso.fit(X, Y)
        time_lasso[i] = total_seconds(datetime.now() - start)

    # NOTE(review): the x axis uses `dimensions` (500*k) while the problems
    # are actually sized 10*i+3 -- the axis scale does not match the real
    # problem sizes; confirm intent before relying on the plot.
    pl.figure('scikit-learn GLM benchmark results')
    pl.xlabel('Dimensions')
    pl.ylabel('Time (s)')
    pl.plot(dimensions, time_ridge, color='r')
    pl.plot(dimensions, time_ols, color='g')
    pl.plot(dimensions, time_lasso, color='b')

    pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
    pl.axis('tight')
    pl.show()
| bsd-3-clause |
MJuddBooth/pandas | pandas/core/groupby/ops.py | 1 | 29237 | """
Provide classes to perform the groupby aggregate operations.
These are not exposed to the user and provide implementations of the grouping
operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
are contained *in* the SeriesGroupBy and DataFrameGroupBy objects.
"""
import collections
import numpy as np
from pandas._libs import NaT, groupby as libgroupby, iNaT, lib, reduction
from pandas.compat import lzip, range, zip
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
ensure_float64, ensure_int64, ensure_int64_or_float64, ensure_object,
ensure_platform_int, is_bool_dtype, is_categorical_dtype, is_complex_dtype,
is_datetime64_any_dtype, is_integer_dtype, is_numeric_dtype,
is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import _maybe_fill, isna
import pandas.core.algorithms as algorithms
from pandas.core.base import SelectionMixin
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
from pandas.core.index import Index, MultiIndex, ensure_index
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index, decons_obs_group_ids, get_flattened_iterator,
get_group_index, get_group_index_sorter, get_indexer_dict)
def generate_bins_generic(values, binner, closed):
    """
    Generate bin edge offsets for one sorted array using another sorted
    array of bin-edge values.

    Parameters
    ----------
    values : array of values (sorted)
    binner : comparable sorted array of bin edges; 'values' end-points must
        fall within 'binner' end-points.
    closed : which end of each bin is closed; left (default), right

    Returns
    -------
    bins : int64 array of offsets (into 'values') of bin boundaries.
        Zero and last edge are excluded, so the first bin is
        values[0:bins[0]] and the last is values[bins[-1]:].
    """
    n_values = len(values)
    n_edges = len(binner)

    # Guard against empty inputs and values outside the binner's range.
    if n_values <= 0 or n_edges <= 0:
        raise ValueError("Invalid length for values or for binner")
    if values[0] < binner[0]:
        raise ValueError("Values falls before first bin")
    if values[n_values - 1] > binner[n_edges - 1]:
        raise ValueError("Values falls after last bin")

    bins = np.empty(n_edges - 1, dtype=np.int64)

    right_closed = closed == 'right'
    pos = 0  # cursor into values; never moves backwards (single linear scan)
    for bin_num in range(n_edges - 1):
        right_edge = binner[bin_num + 1]
        # Advance past every value belonging to the current bin.
        while pos < n_values:
            v = values[pos]
            if v < right_edge or (right_closed and v == right_edge):
                pos += 1
            else:
                break
        bins[bin_num] = pos

    return bins
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds
the generated groups
Parameters
----------
axis : int
the axis to group
groupings : array of grouping
all the grouping instances to handle in this grouper
for example for grouper list to groupby, need to pass the list
sort : boolean, default True
whether this grouper will give sorted result or not
group_keys : boolean, default True
mutated : boolean, default False
indexer : intp array, optional
the indexer created by Grouper
some groupers (TimeGrouper) will sort its axis and its
group_info is also sorted, so need the indexer to reorder
"""
    def __init__(self, axis, groupings, sort=True, group_keys=True,
                 mutated=False, indexer=None):
        # With several groupings we may produce unobserved/empty groups that
        # need filtering; with exactly one the result is not "compressed".
        self._filter_empty_groups = self.compressed = len(groupings) != 1
        self.axis = axis                # the axis (Index) being grouped
        self.groupings = groupings      # list of Grouping instances
        self.sort = sort                # whether result keys come out sorted
        self.group_keys = group_keys
        self.mutated = mutated          # True once apply() detects mutation
        self.indexer = indexer          # sorter from Grouper (e.g. TimeGrouper)
    @property
    def shape(self):
        """Tuple with the number of groups per grouping level."""
        return tuple(ping.ngroups for ping in self.groupings)

    def __iter__(self):
        # Iterate over the group keys (keys of the indices dict).
        return iter(self.indices)

    @property
    def nkeys(self):
        """Number of grouping levels."""
        return len(self.groupings)
    def get_iterator(self, data, axis=0):
        """
        Groupby iterator

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        splitter = self._get_splitter(data, axis=axis)
        keys = self._get_group_keys()
        for key, (i, group) in zip(keys, splitter):
            yield key, group

    def _get_splitter(self, data, axis=0):
        # Return a splitter yielding (position, group chunk) pairs for data.
        comp_ids, _, ngroups = self.group_info
        return get_splitter(data, comp_ids, ngroups, axis=axis)

    def _get_group_keys(self):
        # Group keys in iteration order; for multiple groupings the keys are
        # tuples assembled lazily from each level.
        if len(self.groupings) == 1:
            return self.levels[0]
        else:
            comp_ids, _, ngroups = self.group_info

            # provide "flattened" iterator for multi-group setting
            return get_flattened_iterator(comp_ids,
                                          ngroups,
                                          self.levels,
                                          self.labels)
    def apply(self, f, data, axis=0):
        """Apply f group-wise over data; return (keys, results, mutated).

        Attempts the cython fast-apply path first (non-plotting callables,
        axis 0); falls back to a pure-python loop otherwise or when the
        fast path detects the function mutated its group.
        """
        mutated = self.mutated
        splitter = self._get_splitter(data, axis=axis)
        group_keys = self._get_group_keys()

        # oh boy
        f_name = com.get_callable_name(f)
        if (f_name not in base.plotting_methods and
                hasattr(splitter, 'fast_apply') and axis == 0):
            try:
                values, mutated = splitter.fast_apply(f, group_keys)
                return group_keys, values, mutated
            except reduction.InvalidApply:
                # we detect a mutation of some kind
                # so take slow path
                pass
            except Exception:
                # raise this error to the caller
                # NOTE(review): this actually swallows the error and falls
                # through to the slow path rather than raising -- confirm.
                pass

        result_values = []
        for key, (i, group) in zip(group_keys, splitter):
            # expose the group key to f via the .name attribute
            object.__setattr__(group, 'name', key)

            # group might be modified
            group_axes = _get_axes(group)
            res = f(group)
            if not _is_indexed_like(res, group_axes):
                mutated = True
            result_values.append(res)

        return group_keys, result_values, mutated
    @cache_readonly
    def indices(self):
        """ dict {group name -> group indices} """
        if len(self.groupings) == 1:
            return self.groupings[0].indices
        else:
            # Combine the per-level codes into positions per key tuple.
            label_list = [ping.labels for ping in self.groupings]
            keys = [com.values_from_object(ping.group_index)
                    for ping in self.groupings]
            return get_indexer_dict(label_list, keys)

    @property
    def labels(self):
        """Integer codes, one array per grouping level."""
        return [ping.labels for ping in self.groupings]

    @property
    def levels(self):
        """Unique group values (Index), one per grouping level."""
        return [ping.group_index for ping in self.groupings]

    @property
    def names(self):
        """Name of each grouping level."""
        return [ping.name for ping in self.groupings]
    def size(self):
        """
        Compute group sizes

        Returns
        -------
        Series of int64 counts indexed by the result_index.
        """
        ids, _, ngroup = self.group_info
        ids = ensure_platform_int(ids)
        if ngroup:
            # bincount over the observed ids; -1 marks NA/dropped rows.
            out = np.bincount(ids[ids != -1], minlength=ngroup)
        else:
            out = ids
        return Series(out,
                      index=self.result_index,
                      dtype='int64')

    @cache_readonly
    def groups(self):
        """ dict {group name -> group labels} """
        if len(self.groupings) == 1:
            return self.groupings[0].groups
        else:
            # For multiple groupings, group the axis by tuples of the
            # per-level values.
            to_groupby = lzip(*(ping.grouper for ping in self.groupings))
            to_groupby = Index(to_groupby)
            return self.axis.groupby(to_groupby)

    @cache_readonly
    def is_monotonic(self):
        # return if my group orderings are monotonic
        return Index(self.group_info[0]).is_monotonic
    @cache_readonly
    def group_info(self):
        """Tuple of (comp_ids, obs_group_ids, ngroups)."""
        comp_ids, obs_group_ids = self._get_compressed_labels()

        ngroups = len(obs_group_ids)
        comp_ids = ensure_int64(comp_ids)
        return comp_ids, obs_group_ids, ngroups

    @cache_readonly
    def label_info(self):
        # return the labels of items in original grouped axis
        labels, _, _ = self.group_info
        if self.indexer is not None:
            # undo the sort applied by the Grouper so the labels line up
            # with the original axis order
            sorter = np.lexsort((labels, self.indexer))
            labels = labels[sorter]
        return labels

    def _get_compressed_labels(self):
        # Return (comp_ids, obs_group_ids); multiple groupings are folded
        # into a single observed-group id space.
        all_labels = [ping.labels for ping in self.groupings]
        if len(all_labels) > 1:
            group_index = get_group_index(all_labels, self.shape,
                                          sort=True, xnull=True)
            return compress_group_index(group_index, sort=self.sort)

        ping = self.groupings[0]
        return ping.labels, np.arange(len(ping.group_index))

    @cache_readonly
    def ngroups(self):
        """Number of groups appearing in the result."""
        return len(self.result_index)

    @property
    def recons_labels(self):
        # Reconstruct the per-level codes from the compressed observed ids.
        comp_ids, obs_ids, _ = self.group_info
        labels = (ping.labels for ping in self.groupings)
        return decons_obs_group_ids(
            comp_ids, obs_ids, self.shape, labels, xnull=True)
    @cache_readonly
    def result_index(self):
        """Index (or MultiIndex) labelling the rows of the grouped result."""
        if not self.compressed and len(self.groupings) == 1:
            return self.groupings[0].result_index.rename(self.names[0])

        codes = self.recons_labels
        levels = [ping.result_index for ping in self.groupings]
        result = MultiIndex(levels=levels,
                            codes=codes,
                            verify_integrity=False,
                            names=self.names)
        return result

    def get_group_levels(self):
        # Return, per grouping level, the level values aligned with the
        # observed groups (used when building result indexes/columns).
        if not self.compressed and len(self.groupings) == 1:
            return [self.groupings[0].result_index]

        name_list = []
        for ping, labels in zip(self.groupings, self.recons_labels):
            labels = ensure_platform_int(labels)
            levels = ping.result_index.take(labels)

            name_list.append(levels)

        return name_list
# ------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'aggregate': {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d, e: func(a, b, c, d, 1, -1)
},
'last': 'group_last',
'ohlc': 'group_ohlc',
},
'transform': {
'cumprod': 'group_cumprod',
'cumsum': 'group_cumsum',
'cummin': 'group_cummin',
'cummax': 'group_cummax',
'rank': {
'name': 'group_rank',
'f': lambda func, a, b, c, d, **kwargs: func(
a, b, c, d,
kwargs.get('ties_method', 'average'),
kwargs.get('ascending', True),
kwargs.get('pct', False),
kwargs.get('na_option', 'keep')
)
}
}
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
    def _is_builtin_func(self, arg):
        """
        if we define a builtin function for this argument, return it,
        otherwise return the arg
        """
        return SelectionMixin._builtin_table.get(arg, arg)

    def _get_cython_function(self, kind, how, values, is_numeric):
        """Resolve the cython kernel for (kind, how) given values' dtype.

        Tries the fused-type kernel first (numeric data only), then a
        dtype-specific variant, finally the object fallback; kernels
        declared with an 'f' entry are wrapped to curry extra arguments.
        Raises NotImplementedError if no kernel exists for this dtype.
        """

        dtype_str = values.dtype.name

        def get_func(fname):
            # see if there is a fused-type version of function
            # only valid for numeric
            f = getattr(libgroupby, fname, None)
            if f is not None and is_numeric:
                return f

            # otherwise find dtype-specific version, falling back to object
            for dt in [dtype_str, 'object']:
                f = getattr(libgroupby, "{fname}_{dtype_str}".format(
                    fname=fname, dtype_str=dt), None)
                if f is not None:
                    return f

        ftype = self._cython_functions[kind][how]

        if isinstance(ftype, dict):
            func = afunc = get_func(ftype['name'])

            # a sub-function
            f = ftype.get('f')
            if f is not None:

                def wrapper(*args, **kwargs):
                    return f(afunc, *args, **kwargs)

                # need to curry our sub-function
                func = wrapper

        else:
            func = get_func(ftype)

        if func is None:
            raise NotImplementedError(
                "function is not implemented for this dtype: "
                "[how->{how},dtype->{dtype_str}]".format(how=how,
                                                         dtype_str=dtype_str))

        return func
def _cython_operation(self, kind, values, how, axis, min_count=-1,
                      **kwargs):
    """Run the cython kernel named ``how`` over ``values``.

    kind : {'aggregate', 'transform'}
        aggregate -> one output row per group;
        transform -> output has the same shape as the input.
    Returns (result, names) where ``names`` is non-None only for ops
    that expand to several columns (currently only 'ohlc').

    NOTE(review): the dtype-coercion cascade below is order-sensitive;
    code is left byte-identical and only annotated.
    """
    assert kind in ['transform', 'aggregate']

    # can we do this operation with our cython functions
    # if not raise NotImplementedError

    # we raise NotImplemented if this is an invalid operation
    # entirely, e.g. adding datetimes

    # categoricals are only 1d, so we
    # are not setup for dim transforming
    if is_categorical_dtype(values):
        raise NotImplementedError(
            "categoricals are not support in cython ops ATM")
    elif is_datetime64_any_dtype(values):
        if how in ['add', 'prod', 'cumsum', 'cumprod']:
            raise NotImplementedError(
                "datetime64 type does not support {} "
                "operations".format(how))
    elif is_timedelta64_dtype(values):
        if how in ['prod', 'cumprod']:
            raise NotImplementedError(
                "timedelta64 type does not support {} "
                "operations".format(how))

    arity = self._cython_arity.get(how, 1)

    vdim = values.ndim
    swapped = False
    if vdim == 1:
        # promote to 2-D so the kernels see a uniform shape
        values = values[:, None]
        out_shape = (self.ngroups, arity)
    else:
        if axis > 0:
            # kernels always group along axis 0; swap and restore later
            swapped = True
            values = values.swapaxes(0, axis)
        if arity > 1:
            raise NotImplementedError("arity of more than 1 is not "
                                      "supported for the 'how' argument")
        out_shape = (self.ngroups,) + values.shape[1:]

    is_datetimelike = needs_i8_conversion(values.dtype)
    is_numeric = is_numeric_dtype(values.dtype)

    if is_datetimelike:
        # operate on the underlying int64 representation
        values = values.view('int64')
        is_numeric = True
    elif is_bool_dtype(values.dtype):
        values = ensure_float64(values)
    elif is_integer_dtype(values):
        # we use iNaT for the missing value on ints
        # so pre-convert to guard this condition
        if (values == iNaT).any():
            values = ensure_float64(values)
        else:
            values = ensure_int64_or_float64(values)
    elif is_numeric and not is_complex_dtype(values):
        values = ensure_float64(values)
    else:
        values = values.astype(object)

    try:
        func = self._get_cython_function(
            kind, how, values, is_numeric)
    except NotImplementedError:
        # retry once after a float64 upcast; re-raise for non-numerics
        if is_numeric:
            values = ensure_float64(values)
            func = self._get_cython_function(
                kind, how, values, is_numeric)
        else:
            raise

    if how == 'rank':
        out_dtype = 'float'
    else:
        if is_numeric:
            out_dtype = '{kind}{itemsize}'.format(
                kind=values.dtype.kind, itemsize=values.dtype.itemsize)
        else:
            out_dtype = 'object'

    labels, _, _ = self.group_info

    if kind == 'aggregate':
        result = _maybe_fill(np.empty(out_shape, dtype=out_dtype),
                             fill_value=np.nan)
        counts = np.zeros(self.ngroups, dtype=np.int64)
        result = self._aggregate(
            result, counts, values, labels, func, is_numeric,
            is_datetimelike, min_count)
    elif kind == 'transform':
        result = _maybe_fill(np.empty_like(values, dtype=out_dtype),
                             fill_value=np.nan)

        # TODO: min_count
        result = self._transform(
            result, values, labels, func, is_numeric, is_datetimelike,
            **kwargs)

    if is_integer_dtype(result) and not is_datetimelike:
        # int results cannot hold NaN; upcast if missing sentinel present
        mask = result == iNaT
        if mask.any():
            result = result.astype('float64')
            result[mask] = np.nan

    if (kind == 'aggregate' and
            self._filter_empty_groups and not counts.all()):
        if result.ndim == 2:
            try:
                result = lib.row_bool_subset(
                    result, (counts > 0).view(np.uint8))
            except ValueError:
                result = lib.row_bool_subset_object(
                    ensure_object(result),
                    (counts > 0).view(np.uint8))
        else:
            result = result[counts > 0]

    if vdim == 1 and arity == 1:
        # undo the earlier 2-D promotion
        result = result[:, 0]

    if how in self._name_functions:
        # TODO
        names = self._name_functions[how]()
    else:
        names = None

    if swapped:
        result = result.swapaxes(0, axis)

    return result, names
def aggregate(self, values, how, axis=0, min_count=-1):
    """Public entry point: run ``how`` as a cython aggregation."""
    return self._cython_operation('aggregate', values, how, axis,
                                  min_count=min_count)
def transform(self, values, how, axis=0, **kwargs):
    """Public entry point: run ``how`` as a cython transform."""
    return self._cython_operation('transform', values, how, axis, **kwargs)
def _aggregate(self, result, counts, values, comp_ids, agg_func,
               is_numeric, is_datetimelike, min_count=-1):
    """Invoke ``agg_func`` over ``values`` group-wise, writing into the
    pre-allocated ``result`` and per-group ``counts`` arrays in place.
    3-D input is aggregated one 2-D slice at a time.
    """
    if values.ndim > 3:
        # punting for now
        raise NotImplementedError("number of dimensions is currently "
                                  "limited to 3")
    elif values.ndim > 2:
        for i, chunk in enumerate(values.transpose(2, 0, 1)):
            chunk = chunk.squeeze()
            agg_func(result[:, :, i], counts, chunk, comp_ids,
                     min_count)
    else:
        agg_func(result, counts, values, comp_ids, min_count)

    return result
def _transform(self, result, values, comp_ids, transform_func,
               is_numeric, is_datetimelike, **kwargs):
    """Invoke ``transform_func`` group-wise, writing into the
    pre-allocated ``result`` array in place.

    The ``comp_ids`` argument is kept for signature symmetry with
    ``_aggregate`` but is immediately replaced by the cached codes from
    ``self.group_info``.  ``ngroups`` is unpacked but unused here.
    """
    comp_ids, _, ngroups = self.group_info
    if values.ndim > 3:
        # punting for now
        raise NotImplementedError("number of dimensions is currently "
                                  "limited to 3")
    elif values.ndim > 2:
        for i, chunk in enumerate(values.transpose(2, 0, 1)):
            # BUG FIX: previously the full ``values`` array was passed
            # here and the per-slice ``chunk`` was computed but never
            # used; each 2-D slice must be transformed against its own
            # chunk, mirroring _aggregate above.
            transform_func(result[:, :, i], chunk,
                           comp_ids, is_datetimelike, **kwargs)
    else:
        transform_func(result, values, comp_ids, is_datetimelike, **kwargs)

    return result
def agg_series(self, obj, func):
    """Aggregate a Series group-wise with ``func``.

    Tries the cython fast path first; the broad ``except`` is a
    deliberate best-effort fallback to the pure-python implementation
    (the fast path can fail for many unrelated reasons).
    """
    try:
        return self._aggregate_series_fast(obj, func)
    except Exception:
        return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
    """Cython-backed group aggregation of a Series.

    Sorts obj by group so each group is a contiguous slice, then lets
    reduction.SeriesGrouper drive ``func`` over the slices.
    Returns (result, counts).
    """
    func = self._is_builtin_func(func)

    if obj.index._has_complex_internals:
        # e.g. MultiIndex — the cython path cannot handle it
        raise TypeError('Incompatible index for Cython grouper')

    group_index, _, ngroups = self.group_info

    # avoids object / Series creation overhead
    dummy = obj._get_values(slice(None, 0)).to_dense()
    indexer = get_group_index_sorter(group_index, ngroups)
    obj = obj._take(indexer).to_dense()
    group_index = algorithms.take_nd(
        group_index, indexer, allow_fill=False)
    grouper = reduction.SeriesGrouper(obj, func, group_index, ngroups,
                                      dummy)
    result, counts = grouper.get_result()
    return result, counts
def _aggregate_series_pure_python(self, obj, func):
    """Pure-python fallback for group aggregation of a Series.

    Applies ``func`` to each group; rejects functions that do not
    reduce to a scalar.  Returns (result, counts).
    """
    group_index, _, ngroups = self.group_info

    counts = np.zeros(ngroups, dtype=int)
    result = None

    splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)

    for label, group in splitter:
        res = func(group)
        if result is None:
            # first group decides whether func is a genuine reduction
            if (isinstance(res, (Series, Index, np.ndarray))):
                raise ValueError('Function does not reduce')
            result = np.empty(ngroups, dtype='O')

        counts[label] = group.shape[0]
        result[label] = res

    # downcast object array where safe (ints stay ints, etc.)
    result = lib.maybe_convert_objects(result, try_float=0)
    return result, counts
class BinGrouper(BaseGrouper):
    """
    This is an internal Grouper class

    Parameters
    ----------
    bins : the split index of binlabels to group the item of axis
    binlabels : the label list
    filter_empty : boolean, default False
    mutated : boolean, default False
    indexer : a intp array

    Examples
    --------
    bins: [2, 4, 6, 8, 10]
    binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',
        '2005-01-05', '2005-01-07', '2005-01-09'],
        dtype='datetime64[ns]', freq='2D')

    the group_info, which contains the label of each item in grouped
    axis, the index of label in label list, group number, is

    (array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)

    means that, the grouped axis has 10 items, can be grouped into 5
    labels, the first and second items belong to the first label, the
    third and forth items belong to the second label, and so on
    """

    def __init__(self, bins, binlabels, filter_empty=False, mutated=False,
                 indexer=None):
        # bins[i] is the exclusive end position of group i in the axis
        self.bins = ensure_int64(bins)
        self.binlabels = ensure_index(binlabels)
        self._filter_empty_groups = filter_empty
        self.mutated = mutated
        self.indexer = indexer

    @cache_readonly
    def groups(self):
        """ dict {group name -> group labels} """

        # this is mainly for compat
        # GH 3881
        result = {key: value for key, value in zip(self.binlabels, self.bins)
                  if key is not NaT}
        return result

    @property
    def nkeys(self):
        # binning always groups on a single key
        return 1

    def get_iterator(self, data, axis=0):
        """
        Groupby iterator

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        if isinstance(data, NDFrame):
            slicer = lambda start, edge: data._slice(
                slice(start, edge), axis=axis)
            length = len(data.axes[axis])
        else:
            slicer = lambda start, edge: data[slice(start, edge)]
            length = len(data)

        start = 0
        for edge, label in zip(self.bins, self.binlabels):
            if label is not NaT:
                # NaT-labelled bins are dropped from iteration
                yield label, slicer(start, edge)
            start = edge

        if start < length:
            # trailing items past the last bin edge go to the last label
            yield self.binlabels[-1], slicer(start, None)

    @cache_readonly
    def indices(self):
        # {label -> positional indices of that label's items}
        indices = collections.defaultdict(list)

        i = 0
        for label, bin in zip(self.binlabels, self.bins):
            if i < bin:
                if label is not NaT:
                    indices[label] = list(range(i, bin))
                i = bin
        return indices

    @cache_readonly
    def group_info(self):
        # (comp_ids, obs_group_ids, ngroups) — see class docstring example
        ngroups = self.ngroups
        obs_group_ids = np.arange(ngroups)
        # bin widths: number of items per group
        rep = np.diff(np.r_[0, self.bins])

        rep = ensure_platform_int(rep)
        if ngroups == len(self.bins):
            comp_ids = np.repeat(np.arange(ngroups), rep)
        else:
            # leading items before the first bin get the -1 sentinel
            comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)

        return (comp_ids.astype('int64', copy=False),
                obs_group_ids.astype('int64', copy=False),
                ngroups)

    @cache_readonly
    def result_index(self):
        # drop a leading NaN/NaT label from the result index
        if len(self.binlabels) != 0 and isna(self.binlabels[0]):
            return self.binlabels[1:]

        return self.binlabels

    @property
    def levels(self):
        return [self.binlabels]

    @property
    def names(self):
        return [self.binlabels.name]

    @property
    def groupings(self):
        # synthesize Grouping objects for API compatibility with
        # BaseGrouper consumers
        from pandas.core.groupby.grouper import Grouping
        return [Grouping(lvl, lvl, in_axis=False, level=None, name=name)
                for lvl, name in zip(self.levels, self.names)]

    def agg_series(self, obj, func):
        # bins are contiguous by construction, so the cython bin grouper
        # can be used directly (no sorting needed)
        dummy = obj[:0]
        grouper = reduction.SeriesBinGrouper(obj, func, self.bins, dummy)
        return grouper.get_result()
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
# ----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
    """Base class that yields (group_number, group_chunk) pairs by
    sorting the data so each group occupies one contiguous slice."""

    def __init__(self, data, labels, ngroups, axis=0):
        self.data = data
        self.labels = ensure_int64(labels)  # group code per row
        self.ngroups = ngroups

        self.axis = axis

    @cache_readonly
    def slabels(self):
        # Sorted labels
        return algorithms.take_nd(self.labels, self.sort_idx, allow_fill=False)

    @cache_readonly
    def sort_idx(self):
        # Counting sort indexer
        return get_group_index_sorter(self.labels, self.ngroups)

    def __iter__(self):
        sdata = self._get_sorted_data()

        if self.ngroups == 0:
            # we are inside a generator, rather than raise StopIteration
            # we merely return signal the end
            return

        starts, ends = lib.generate_slices(self.slabels, self.ngroups)

        for i, (start, end) in enumerate(zip(starts, ends)):
            # Since I'm now compressing the group ids, it's now not "possible"
            # to produce empty slices because such groups would not be observed
            # in the data
            # if start >= end:
            #     raise AssertionError('Start %s must be less than end %s'
            #                          % (str(start), str(end)))
            yield i, self._chop(sdata, slice(start, end))

    def _get_sorted_data(self):
        return self.data._take(self.sort_idx, axis=self.axis)

    def _chop(self, sdata, slice_obj):
        # subclasses override for type-specific slicing
        return sdata.iloc[slice_obj]

    def apply(self, f):
        raise AbstractMethodError(self)
class SeriesSplitter(DataSplitter):
    """DataSplitter for Series: chops via the low-level _get_values
    path (densifying sparse data) instead of .iloc."""

    def _chop(self, sdata, slice_obj):
        return sdata._get_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
    """DataSplitter for DataFrame, with a cython-accelerated apply."""

    def fast_apply(self, f, names):
        # must return keys::list, values::list, mutated::bool
        # NOTE(review): the failure branch actually returns a 2-tuple
        # ([], True) matching the (results, mutated) success shape —
        # the comment above appears stale; verify against callers.
        try:
            starts, ends = lib.generate_slices(self.slabels, self.ngroups)
        except Exception:
            # fails when all -1
            return [], True

        sdata = self._get_sorted_data()
        results, mutated = reduction.apply_frame_axis0(sdata, f, names,
                                                       starts, ends)

        return results, mutated

    def _chop(self, sdata, slice_obj):
        if self.axis == 0:
            return sdata.iloc[slice_obj]
        else:
            return sdata._slice(slice_obj, axis=1)  # .loc[:, slice_obj]
class NDFrameSplitter(DataSplitter):
    """DataSplitter fallback for higher-dimensional NDFrames; works at
    the BlockManager level and rebuilds objects via the constructor."""

    def __init__(self, data, labels, ngroups, axis=0):
        super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)

        self.factory = data._constructor

    def _get_sorted_data(self):
        # this is the BlockManager
        data = self.data._data

        # this is sort of wasteful but...
        sorted_axis = data.axes[self.axis].take(self.sort_idx)
        sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)

        return sorted_data

    def _chop(self, sdata, slice_obj):
        return self.factory(sdata.get_slice(slice_obj, axis=self.axis))
def get_splitter(data, *args, **kwargs):
    """Instantiate the DataSplitter subclass matching the type of
    *data* (Series, DataFrame, or any other NDFrame)."""
    if isinstance(data, Series):
        return SeriesSplitter(data, *args, **kwargs)
    if isinstance(data, DataFrame):
        return FrameSplitter(data, *args, **kwargs)
    return NDFrameSplitter(data, *args, **kwargs)
| bsd-3-clause |
dsquareindia/scikit-learn | examples/ensemble/plot_random_forest_regression_multioutput.py | 46 | 2640 | """
============================================================
Comparing random forests and the multi-output meta estimator
============================================================
An example to compare multi-output regression with random forest and
the :ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator.
This example illustrates the use of the
:ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator
to perform multi-output regression. A random forest regressor is used,
which supports multi-output regression natively, so the results can be
compared.
The random forest regressor will only ever predict values within the
range of observations or closer to zero for each of the targets. As a
result the predictions are biased towards the centre of the circle.
Using a single underlying feature the model learns both the
x and y coordinate as output.
"""
print(__doc__)
# Author: Tim Head <betatim@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
# Create a random dataset
# Single feature in [-100, 100); two noisy sinusoidal targets.
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))  # additive uniform noise in (-0.5, 0.5]

X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    train_size=400,
                                                    random_state=4)

max_depth = 30
# One forest per target via the meta-estimator ...
regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth,
                                                          random_state=0))
regr_multirf.fit(X_train, y_train)

# ... versus a single forest handling both targets natively.
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
regr_rf.fit(X_train, y_train)

# Predict on new data
y_multirf = regr_multirf.predict(X_test)
y_rf = regr_rf.predict(X_test)

# Plot the results
plt.figure()
s = 50
a = 0.4
plt.scatter(y_test[:, 0], y_test[:, 1],
            c="navy", s=s, marker="s", alpha=a, label="Data")
plt.scatter(y_multirf[:, 0], y_multirf[:, 1],
            c="cornflowerblue", s=s, alpha=a,
            label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test))
plt.scatter(y_rf[:, 0], y_rf[:, 1],
            c="c", s=s, marker="^", alpha=a,
            label="RF score=%.2f" % regr_rf.score(X_test, y_test))
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Comparing random forests and the multi-output meta estimator")
plt.legend()
plt.show()
| bsd-3-clause |
wangyum/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 62 | 3753 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
    """Create a dataframe populated with some test columns.

    Columns "a" and "c" share the first transform output; "b" uses the
    second — all built from the same mock two-output transform.
    """
    df = learn.DataFrame()
    df["a"] = learn.TransformedSeries(
        [mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
        mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
    df["b"] = learn.TransformedSeries(
        [mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
        mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
    df["c"] = learn.TransformedSeries(
        [mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
        mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
    return df
class DataFrameTest(test.TestCase):
    """Test of `DataFrame`."""

    def test_create(self):
        df = setup_test_df()
        self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))

    def test_select_columns(self):
        df = setup_test_df()
        df2 = df.select_columns(["a", "c"])
        self.assertEqual(df2.columns(), frozenset(["a", "c"]))

    def test_exclude_columns(self):
        df = setup_test_df()
        df2 = df.exclude_columns(["a", "c"])
        self.assertEqual(df2.columns(), frozenset(["b"]))

    def test_get_item(self):
        df = setup_test_df()
        c1 = df["b"]
        # "b" was registered with the transform's second output
        self.assertEqual(
            mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())

    def test_del_item_column(self):
        df = setup_test_df()
        self.assertEqual(3, len(df))
        del df["b"]
        self.assertEqual(2, len(df))
        self.assertEqual(df.columns(), frozenset(["a", "c"]))

    def test_set_item_column(self):
        df = setup_test_df()
        self.assertEqual(3, len(df))
        col1 = mocks.MockSeries("QuackColumn",
                                mocks.MockTensor("Tensor ", dtypes.int32))
        df["quack"] = col1
        self.assertEqual(4, len(df))
        col2 = df["quack"]
        self.assertEqual(col1, col2)

    def test_set_item_column_multi(self):
        # multi-assignment: two keys set from a list of two columns
        df = setup_test_df()
        self.assertEqual(3, len(df))
        col1 = mocks.MockSeries("QuackColumn", [])
        col2 = mocks.MockSeries("MooColumn", [])
        df["quack", "moo"] = [col1, col2]
        self.assertEqual(5, len(df))
        col3 = df["quack"]
        self.assertEqual(col1, col3)
        col4 = df["moo"]
        self.assertEqual(col2, col4)

    def test_set_item_pandas(self):
        # TODO(jamieas)
        pass

    def test_set_item_numpy(self):
        # TODO(jamieas)
        pass

    def test_build(self):
        df = setup_test_df()
        result = df.build()
        expected = {
            "a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
            "b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
            "c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
        }
        self.assertEqual(expected, result)
# Standard TF test entry point.
if __name__ == "__main__":
    test.main()
| apache-2.0 |
luckyharryji/smoking-modeling | smoking/map/test_map.py | 1 | 1030 | from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
import json
class Map(object):
    """Thin wrapper around a Basemap Lambert-conformal US map that can
    plot point sets loaded from JSON files."""

    def __init__(self):
        # Corner lon/lats roughly bound the continental US; lcc =
        # Lambert conformal projection with standard parallels 33/45.
        self.m = Basemap(llcrnrlon=-119,llcrnrlat=22,urcrnrlon=-64,urcrnrlat=49,projection='lcc',lat_1=33,lat_2=45,lon_0=-95,resolution='c')
        self.min_marker_size = 2.5

    def initial_map(self):
        # Draw base geography layers.
        self.m.drawcoastlines()
        self.m.drawstates()
        self.m.drawcountries()

    def plot_point(self, URL, r, marker):
        """Plot points read from JSON file *URL* (expects a top-level
        'center' list of [lon, lat] pairs); marker size scales with r.
        """
        with open(URL) as f_in:
            _points = json.load(f_in)['center']
        for geo in _points:
            # project lon/lat to map coordinates
            x,y = self.m(geo[0], geo[1])
            msize = r * self.min_marker_size
            marker_string = marker
            self.m.plot(x, y, marker_string, markersize=msize)
# Demo: overlay random (green) vs. smoking (red) centers on a US map.
if __name__ == '__main__' :
    plt.figure(figsize=(10,8))
    new_map = Map()
    new_map.initial_map()
    new_map.plot_point('random_center.json',3,'go')
    new_map.plot_point('smoke_center.json',4,'ro')
    plt.title('Smoking')
    plt.show()
| mit |
aymen82/kaggler-competitions-scripts | dev/rossman/rossmann.py | 1 | 11046 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
%matplotlib inline
import pandas as pd
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
# <codecell>
def rmspe(tru, pred):
    """Squared percentage error of one prediction.

    Returns 0.0 for rows that cannot be scored: a zero or string
    ground truth, or a NaN on either side (order of the checks
    matters — the string test short-circuits before np.isnan).
    """
    unusable = (tru == 0.0 or isinstance(tru, str)
                or np.isnan(tru) or np.isnan(pred))
    if unusable:
        return 0.0
    relative_error = float(pred - tru) / tru
    return relative_error ** 2
def eval(trues, preds):
    # Mean of per-row squared percentage errors (RMSPE without the
    # final sqrt).  NOTE(review): shadows the `eval` builtin — renaming
    # would be better but is left as-is since the notebook cells below
    # call it by this name.
    return np.mean([rmspe(t,p) for t,p in zip(trues, preds)])
# <codecell>
# NOTE(review): this is an IPython/spyder notebook export (Python 2
# print statements, `# <codecell>` markers).  Cells are exploratory;
# code left as-is with review annotations only.

# Hard-coded local paths to the Kaggle Rossmann CSVs.
train_file = "/home/gv/ashabou/data/ds/rossmann/train.csv"
test_file = "/home/gv/ashabou/data/ds/rossmann/test.csv"
store_file = "/home/gv/ashabou/data/ds/rossmann/store.csv"

# <codecell>

train = pd.read_csv( train_file )
test = pd.read_csv( test_file )
store = pd.read_csv( store_file )

# <codecell>
# Quick-look at shapes and contents of the three tables.
print train.head(5)
print test.head(5)
print train.count()
print test.count()
print store.head(10)
print store.count()

# <codecell>

train['Store'].hist()
print train['Store'].min()
print train['Store'].max()
test['Store'].hist()
print test['Store'].min()
print test['Store'].max()

# <codecell>

train['DayOfWeek'].hist()
test['DayOfWeek'].hist()

# <codecell>
# Distribution of non-zero sales; count of zero-sales rows.
train['Sales'].loc[train['Sales']>0].hist(bins=100)
print (train['Sales']==0).sum()

# <codecell>

train['Customers'].loc[train['Customers']>0].hist(bins=100)
print (train['Customers']==0).sum()
print train['Customers'].loc[train['Customers']>0].describe()

# <codecell>

#0 sales while there are some customers
train.loc[(train['Customers']>0) & (train['Sales']==0)]

# <codecell>

plt.figure()
train[['Sales', 'Customers']].plot(x='Sales', y='Customers', style='bo')
plt.show()

# <codecell>

#from pandas.tools.plotting import scatter_matrix
#_ = scatter_matrix(train[['Sales', 'Store', 'Customers']], figsize=(14, 10))

# <codecell>

plt.figure()
train.query('Open==1')[['Sales', 'Store']].plot(x='Sales', y='Store', style='bo')
plt.show()

# <codecell>

train.query('Open==1')[['Sales', 'DayOfWeek']].plot(x='Sales', y='DayOfWeek', style='bo')

# <codecell>

train.query('Open==1')[['Sales', 'Promo']].plot(x='Sales', y='Promo', style='bo')

# <codecell>

train.query('Open==1')[['Store', 'Promo']].plot(x='Store', y='Promo', style='bo')

# <codecell>

train.query('Open==1')[['Store', 'Promo']].groupby('Store').sum().plot()
print train.query('Open==1')[['Store', 'Promo']].groupby('Store').sum().sort('Promo',ascending=0).head(10)

# <codecell>
# NOTE(review): DataFrame.sort() is the deprecated pre-0.17 API
# (today: sort_values); kept since this notebook targets old pandas.
train.query('Open==1')[['Store', 'Sales']].groupby('Store').sum().plot()
print train.query('Open==1')[['Store', 'Sales']].groupby('Store').sum().sort('Sales',ascending=0).head(5)

# <codecell>

#not stores that have more propmo have more sales
print train.query('Open==1')[['Store', 'Sales','Promo']].groupby('Store').sum().sort('Sales',ascending=0).head(5)

# <codecell>

train.query('Open==1')[['Store', 'Sales','DayOfWeek']].groupby(['Store','DayOfWeek']).sum()

# <codecell>
# Split the 'YYYY-MM-DD' Date string into numeric Year/Month/Day.
train['Year'] = train['Date'].map(lambda x: int(x.split("-")[0]))
train['Month'] = train['Date'].map(lambda x: int(x.split("-")[1]))
train['Day'] = train['Date'].map(lambda x: int(x.split("-")[2]))
train.head(5)

# <codecell>

print set(train['Year'].tolist())

# <codecell>

print set(train.query("Open==1")['Month'].tolist())

# <codecell>

print "2013-->", set(train.query("Open==1 & Year==2013")['Month'].tolist())
print "2014-->", set(train.query("Open==1 & Year==2014")['Month'].tolist())
print "2015-->", set(train.query("Open==1 & Year==2015")['Month'].tolist())

# <codecell>

test['Year'] = test['Date'].map(lambda x: int(x.split("-")[0]))
test['Month'] = test['Date'].map(lambda x: int(x.split("-")[1]))
test['Day'] = test['Date'].map(lambda x: int(x.split("-")[2]))
test.head(5)

# <codecell>

print set(test['Year'].tolist())

# <codecell>

print set(test.query("Open==1")['Month'].tolist())

# <codecell>
# June totals per store, by year.
data = train.query("Open==1").query('Month==6')[['Sales','Year','Store']].groupby(['Store','Year']).sum()
print data.head(20)
data.query('Year==2013').plot()
data.query('Year==2014').plot()
data.query('Year==2015').plot()

# <codecell>
# Baseline 1: per-(store, month) median of Aug/Sep sales.
data = train.query("Open==1").query('Month==8 | Month==9')[['Sales','Store','Month','Year']].groupby(['Store','Month']).median()
data.rename(columns={'Sales': 'Median'}, inplace=True)
print data.head(10)

# <codecell>
# Validate on Aug/Sep 2014; tmp masks closed days to zero.
valid = train.copy().query('(Month==8 | Month==9) & Year==2014')
valid['tmp'] = train['Open'].map(lambda x: 1 if x == 1 else 0)
print valid.head(5)

# <codecell>

ttrain = train.query("Open==1").query('(Month==8 | Month==9) & Year==2013')[['Sales','Store','Month']].groupby(['Store','Month']).median()
ttrain.rename(columns={'Sales': 'Median'}, inplace=True)
print ttrain.head(10)
out = valid.join(ttrain, on=['Store','Month'])
print out.head(5)

# <codecell>

out['preds'] = out['tmp']*out['Median']
print out.shape
print out[['Sales','preds']].head(5)
print eval(out['Sales'].tolist(), out['preds'].tolist())

# <codecell>

test['tmp'] = test['Open'].map(lambda x: 1 if x == 1 else 0)
print test.head(5)

# <codecell>
# First submission: join medians onto test and zero-out closed days.
out = test[['Id','Store','tmp','Month']].join(data, on=['Store','Month'])#.sort('Id')
print out.head(10)
print out.query('Store==1 & Month==8').head(10)
print out.query('Store==1 & Month==9').head(10)

# <codecell>

out['Sales'] = out['tmp']*out['Median']
print out.head(10)
print out.query('Store==1 & Month==8').head(10)
print out.query('Store==1 & Month==9').head(10)

# <codecell>

out = out[['Id','Sales']]
print out.head(10)

# <codecell>

out.to_csv('/home/gv/ashabou/mycsv.csv', index=False)

# <codecell>
# Baseline 2: align 2013 vs 2014 Aug/Sep daily sales per store.
data_2013 = train.query('(Month==8 | Month==9) & Year==2013')[['Sales','Store', 'Day','Month']].sort(['Store','Month', 'Day'])
data_2013.index=range(1, len(data_2013) + 1)
print data_2013.head(10)
data_2014 = train.query('(Month==8 | Month==9) & Year==2014')[['Sales','Store','Day','Month']].sort(['Store','Month', 'Day'])
data_2014.index=range(1, len(data_2014) + 1)
print data_2014.head(10)

# <codecell>

join1314 = data_2013.join(data_2014.groupby(['Store','Month','Day']).mean().rename(columns={'Sales': 'Median'}), on=['Store','Month','Day'])
print join1314.head(5)

# <codecell>
# Lag-1 comparison: yesterday's 2014 value vs today's 2013 value.
sales = join1314['Sales'][1:]
sales.index =range(1, len(sales) + 1)
median = join1314['Median'][:-1]
median.index =range(1, len(median) + 1)
ct = pd.concat([sales, median], axis=1)
print ct.head(5)
ct.plot()
print sales.describe()
print median.describe()
print eval(sales.tolist(), median.tolist())

# <codecell>

join_indexed = join1314.groupby(['Store','Month','Day']).median()
print join_indexed.head(5)

# <codecell>

out = test[['Id','Store','tmp','Month','Day']].join(join_indexed, on=['Store','Month','Day'])#.sort('Id')
out.rename(columns={'Sales': 'y2013', 'Median':'y2014'}, inplace=True)
print out.head(10)
print out.query('Store==1 & Month==8').head(10)
print out.query('Store==1 & Month==9').head(10)

# <codecell>

out['Sales']=[0]*len(out['tmp'])
print out.head(30)
def computeValue(x,y):
    """Blend two yearly estimates: the mean of x and y, falling back to
    the other value whenever one of them is NaN or zero (treated as
    'missing')."""
    x_missing = np.isnan(x) or x == 0
    if x_missing:
        return y
    y_missing = np.isnan(y) or y == 0
    if y_missing:
        return x
    return (x + y) / 2.0
# Blend 2013/2014 estimates shifted by one row; closed days zeroed.
out = out.sort(['Store','Month','Day'])
out['Sales']=[computeValue(x,y) for x,y in zip(out['y2014'].tolist()[1:-1], out['y2013'].tolist()[2:])]+\
[computeValue(x,y) for x,y in zip(out['y2014'].tolist()[-2:], out['y2013'].tolist()[-2:])]
out['Sales'] = out['Sales']*out['tmp']
print out.head(1000)

# <codecell>

print out.query('tmp==0').shape
print out.query('y2013==0').shape
print out.query('y2014==0').shape
print out.query('Sales==0').shape

# <codecell>

out_csv = out[['Id','Sales']].sort('Id')
print out_csv.head(10)

# <codecell>

out_csv.to_csv('/home/gv/ashabou/mycsv2.csv', index=False)

# <codecell>
# Baseline 3: median by (store, weekday, promo, holiday flags).
data = train.query('Year==2013 | Year==2014')[['Sales','Store', 'DayOfWeek','Promo','SchoolHoliday','StateHoliday']].sort(['Store','DayOfWeek'])
print data.head(10)
print set(data['Promo'].tolist()), set(data['SchoolHoliday'].tolist()), set(data['StateHoliday'].tolist())
# NOTE(review): chained assignment below — works on old pandas but
# would need .loc on modern versions.  Normalizes mixed 0/'0' values.
data['StateHoliday'][data['StateHoliday']==0]='0'
print set(data['Promo'].tolist()), set(data['SchoolHoliday'].tolist()), set(data['StateHoliday'].tolist())

# <codecell>

data_indexed = data.groupby(['Store','DayOfWeek','Promo','SchoolHoliday','StateHoliday']).median()
data_indexed.rename(columns={'Sales': 'Median'}, inplace=True)
print data_indexed.head(5)
data_valid = data.join(data_indexed, on=['Store','DayOfWeek','Promo','SchoolHoliday','StateHoliday'])
print data_valid.head(10)
print eval(data_valid['Sales'].tolist(), data_valid['Median'].tolist())
out = test[['Id','Store','tmp','Day','DayOfWeek','Promo','SchoolHoliday','StateHoliday']].join(data_indexed, on=['Store','DayOfWeek','Promo','SchoolHoliday','StateHoliday'])#.sort('Id')
print out.head(10)
print set(test['Promo'].tolist()), set(test['SchoolHoliday'].tolist()), set(test['StateHoliday'].tolist())

# <codecell>
# Backfill keys unseen in training with a coarser (store, weekday,
# promo) median.
idx_nan = pd.isnull(out['Median']).nonzero()[0]
print idx_nan
data_indexed2 = data[['Store','DayOfWeek','Promo','Sales']].groupby(['Store','DayOfWeek','Promo']).median()
data_indexed2.rename(columns={'Sales': 'Median'}, inplace=True)
print data_indexed2.head(5)
out2 = test[['Id','Store','tmp','Day','DayOfWeek','Promo','SchoolHoliday','StateHoliday']].join(data_indexed2, on=['Store','DayOfWeek','Promo'])
print pd.isnull(out2['Median']).nonzero()
out3 = out.copy()
out3['Median'][idx_nan] = out2['Median'][idx_nan]
print pd.isnull(out3['Median']).nonzero()

# <codecell>

out3['Sales'] = out3['Median']*out3['tmp']
print out3.sort('Store').head(10)

# <codecell>

out3[['Id','Sales']].to_csv('/home/gv/ashabou/mycsv3.csv', index=False)

# <codecell>

print store.head(10)
print store.count()
print 'nb stores=', len(train['Store'].unique())

# <codecell>
# Baseline 4: enrich with store metadata before grouping.
data = train[['Store', 'DayOfWeek','Promo','Sales']].merge(store[['Store','StoreType','Assortment','Promo2']], on='Store', how='left')
print data.head(10)

# <codecell>

data_indexed = data.groupby(['Store','DayOfWeek','Promo','StoreType','Assortment','Promo2']).median()
data_indexed.rename(columns={'Sales': 'Median'}, inplace=True)
print data_indexed.head(5)
data_valid = data.join(data_indexed, on=['Store','DayOfWeek','Promo','StoreType','Assortment','Promo2'])
print data_valid.head(10)
print eval(data_valid['Sales'].tolist(), data_valid['Median'].tolist())
#out = test[['Id','Store','tmp','Day','DayOfWeek','Promo','SchoolHoliday','StateHoliday']].join(data_indexed, on=['Store','DayOfWeek','Promo','SchoolHoliday','StateHoliday'])#.sort('Id')
#print out.head(10)
#print set(test['Promo'].tolist()), set(test['SchoolHoliday'].tolist()), set(test['StateHoliday'].tolist())

# <codecell>

data_test = test[['Id', 'Store', 'tmp', 'DayOfWeek','Promo']].merge(store[['Store','StoreType','Assortment','Promo2']], on='Store', how='left')
print data.head(10)
out = data_test.join(data_indexed, on=['Store','DayOfWeek','Promo','StoreType','Assortment','Promo2'])#.sort('Id')
print out.head(10)
out['Sales'] = out['Median']*out['tmp']
print out.sort('Store').head(10)
print 'NAN=', pd.isnull(out['Median']).nonzero()[0]
out[['Id','Sales']].to_csv('/home/gv/ashabou/mycsv4.csv', index=False)

# <codecell>
| bsd-3-clause |
AIML/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
nmayorov/scikit-learn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
# Load all ten digit classes; X is (n_samples, 64) flattened 8x8 images.
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape

# Fix the RNG seed so the random image shifts below are reproducible.
np.random.seed(0)
def nudge_images(X, y):
    """Double the dataset by appending a randomly shifted copy of each image.

    Having a larger dataset shows more clearly the behavior of the methods,
    but we multiply the size of the dataset only by 2, as the cost of the
    hierarchical clustering methods are strongly super-linear in n_samples.
    """
    def jitter(flat):
        # Translate the 8x8 image by a small random offset, then flatten back.
        shifted = ndimage.shift(flat.reshape((8, 8)),
                                .3 * np.random.normal(size=2),
                                mode='constant')
        return shifted.ravel()

    augmented = np.apply_along_axis(jitter, 1, X)
    X_out = np.concatenate([X, augmented])
    y_out = np.concatenate([y, y], axis=0)
    return X_out, y_out
# Double the dataset with randomly shifted copies (see nudge_images above).
X, y = nudge_images(X, y)


#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
    # NOTE(review): the ``X`` parameter is unused, and the digit labels are
    # read from the module-global ``y`` rather than an argument — confirm
    # this is intentional before refactoring.
    x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
    # Rescale the 2D embedding to the unit square for plotting.
    X_red = (X_red - x_min) / (x_max - x_min)

    plt.figure(figsize=(6, 4))
    for i in range(X_red.shape[0]):
        # Draw each sample as its digit, colored by its cluster label.
        # NOTE(review): plt.cm.spectral was removed in newer matplotlib
        # (nipy_spectral is the modern name) — confirm before upgrading.
        plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
                 color=plt.cm.spectral(labels[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})

    plt.xticks([])
    plt.yticks([])
    if title is not None:
        plt.title(title, size=17)
    plt.axis('off')
    plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")

# Imported mid-script in the original; kept in place to preserve execution
# order.
from sklearn.cluster import AgglomerativeClustering

# Cluster the 2D embedding with each linkage strategy, timing each fit,
# and plot the labelled embedding for visual comparison.
for linkage in ('ward', 'average', 'complete'):
    clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
    t0 = time()
    clustering.fit(X_red)
    print("%s : %.2fs" % (linkage, time() - t0))
    plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)

plt.show()
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/tools/tests/test_tile.py | 1 | 7578 | import os
import nose
import numpy as np
from pandas.compat import zip
from pandas import DataFrame, Series, unique
import pandas.util.testing as tm
from pandas.util.testing import assertRaisesRegexp
import pandas.core.common as com
from pandas.core.algorithms import quantile
from pandas.tools.tile import cut, qcut
import pandas.tools.tile as tmod
from numpy.testing import assert_equal, assert_almost_equal
class TestCut(tm.TestCase):
    """Tests for pandas.tools.tile.cut / qcut binning.

    NOTE(review): written against an old pandas API — ``result.labels`` /
    ``result.levels`` and the deprecated unittest aliases ``assert_`` /
    ``assertEquals`` predate modern pandas/unittest; verify versions before
    modernizing.
    """

    def test_simple(self):
        # A constant array still lands every value in a single labelled bin.
        data = np.ones(5)
        result = cut(data, 4, labels=False)
        desired = [1, 1, 1, 1, 1]
        assert_equal(result, desired)

    def test_bins(self):
        data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])
        result, bins = cut(data, 3, retbins=True)
        assert_equal(result.labels, [0, 0, 0, 1, 2, 0])
        # Left edge is padded below the data minimum so min is included.
        assert_almost_equal(bins, [0.1905, 3.36666667, 6.53333333, 9.7])

    def test_right(self):
        # right=True: intervals are closed on the right.
        data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
        result, bins = cut(data, 4, right=True, retbins=True)
        assert_equal(result.labels, [0, 0, 0, 2, 3, 0, 0])
        assert_almost_equal(bins, [0.1905, 2.575, 4.95, 7.325, 9.7])

    def test_noright(self):
        # right=False: intervals are closed on the left instead.
        data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
        result, bins = cut(data, 4, right=False, retbins=True)
        assert_equal(result.labels, [0, 0, 0, 2, 3, 0, 1])
        assert_almost_equal(bins, [0.2, 2.575, 4.95, 7.325, 9.7095])

    def test_arraylike(self):
        # A plain list behaves the same as an ndarray input.
        data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
        result, bins = cut(data, 3, retbins=True)
        assert_equal(result.labels, [0, 0, 0, 1, 2, 0])
        assert_almost_equal(bins, [0.1905, 3.36666667, 6.53333333, 9.7])

    def test_bins_not_monotonic(self):
        data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
        self.assertRaises(ValueError, cut, data, [0.1, 1.5, 1, 10])

    def test_wrong_num_labels(self):
        # len(labels) must equal the number of bins (2 here, 3 given).
        data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
        self.assertRaises(ValueError, cut, data, [0, 1, 10],
                          labels=['foo', 'bar', 'baz'])

    def test_cut_corner(self):
        # Empty input and a fractional bin count are both invalid.
        self.assertRaises(ValueError, cut, [], 2)
        self.assertRaises(ValueError, cut, [1, 2, 3], 0.5)

    def test_cut_out_of_range_more(self):
        # #1511
        s = Series([0, -1, 0, 1, -3])
        ind = cut(s, [0, 1], labels=False)
        exp = [np.nan, np.nan, np.nan, 0, np.nan]
        assert_almost_equal(ind, exp)

    def test_labels(self):
        arr = np.tile(np.arange(0, 1.01, 0.1), 4)
        result, bins = cut(arr, 4, retbins=True)
        ex_levels = ['(-0.001, 0.25]', '(0.25, 0.5]', '(0.5, 0.75]',
                     '(0.75, 1]']
        self.assert_(np.array_equal(result.levels, ex_levels))
        result, bins = cut(arr, 4, retbins=True, right=False)
        ex_levels = ['[0, 0.25)', '[0.25, 0.5)', '[0.5, 0.75)',
                     '[0.75, 1.001)']
        self.assert_(np.array_equal(result.levels, ex_levels))

    def test_cut_pass_series_name_to_factor(self):
        s = Series(np.random.randn(100), name='foo')
        factor = cut(s, 4)
        self.assertEquals(factor.name, 'foo')

    def test_label_precision(self):
        # precision controls the number of significant digits in the labels.
        arr = np.arange(0, 0.73, 0.01)
        result = cut(arr, 4, precision=2)
        ex_levels = ['(-0.00072, 0.18]', '(0.18, 0.36]', '(0.36, 0.54]',
                     '(0.54, 0.72]']
        self.assert_(np.array_equal(result.levels, ex_levels))

    def test_na_handling(self):
        # NaNs in the input stay NaN in the binned output.
        arr = np.arange(0, 0.75, 0.01)
        arr[::3] = np.nan
        result = cut(arr, 4)
        result_arr = np.asarray(result)
        ex_arr = np.where(com.isnull(arr), np.nan, result_arr)
        tm.assert_almost_equal(result_arr, ex_arr)
        result = cut(arr, 4, labels=False)
        ex_result = np.where(com.isnull(arr), np.nan, result)
        tm.assert_almost_equal(result, ex_result)

    def test_inf_handling(self):
        # Infinite bin edges are rendered as 'inf' in the level labels.
        data = np.arange(6)
        data_ser = Series(data, dtype='int64')
        result = cut(data, [-np.inf, 2, 4, np.inf])
        result_ser = cut(data_ser, [-np.inf, 2, 4, np.inf])
        ex_levels = ['(-inf, 2]', '(2, 4]', '(4, inf]']
        np.testing.assert_array_equal(result.levels, ex_levels)
        np.testing.assert_array_equal(result_ser.levels, ex_levels)
        self.assertEquals(result[5], '(4, inf]')
        self.assertEquals(result[0], '(-inf, 2]')
        self.assertEquals(result_ser[5], '(4, inf]')
        self.assertEquals(result_ser[0], '(-inf, 2]')

    def test_qcut(self):
        # qcut with an integer is equivalent to cut at the sample quantiles.
        arr = np.random.randn(1000)
        labels, bins = qcut(arr, 4, retbins=True)
        ex_bins = quantile(arr, [0, .25, .5, .75, 1.])
        assert_almost_equal(bins, ex_bins)
        ex_levels = cut(arr, ex_bins, include_lowest=True)
        self.assert_(np.array_equal(labels, ex_levels))

    def test_qcut_bounds(self):
        arr = np.random.randn(1000)
        factor = qcut(arr, 10, labels=False)
        self.assert_(len(np.unique(factor)) == 10)

    def test_qcut_specify_quantiles(self):
        arr = np.random.randn(100)
        factor = qcut(arr, [0, .25, .5, .75, 1.])
        expected = qcut(arr, 4)
        self.assert_(factor.equals(expected))

    def test_qcut_all_bins_same(self):
        # All-identical data produces duplicate quantile edges -> error.
        assertRaisesRegexp(ValueError, "edges.*unique", qcut,
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)

    def test_cut_out_of_bounds(self):
        # Values outside the explicit bins get label -1.
        arr = np.random.randn(100)
        result = cut(arr, [-1, 0, 1])
        mask = result.labels == -1
        ex_mask = (arr < -1) | (arr > 1)
        self.assert_(np.array_equal(mask, ex_mask))

    def test_cut_pass_labels(self):
        arr = [50, 5, 10, 15, 20, 30, 70]
        bins = [0, 25, 50, 100]
        labels = ['Small', 'Medium', 'Large']
        result = cut(arr, bins, labels=labels)
        exp = cut(arr, bins)
        exp.levels = labels
        self.assert_(result.equals(exp))

    def test_qcut_include_lowest(self):
        # The first interval is closed on the left to include the minimum.
        values = np.arange(10)
        cats = qcut(values, 4)
        ex_levels = ['[0, 2.25]', '(2.25, 4.5]', '(4.5, 6.75]', '(6.75, 9]']
        self.assert_((cats.levels == ex_levels).all())

    def test_qcut_nas(self):
        arr = np.random.randn(100)
        arr[:20] = np.nan
        result = qcut(arr, 4)
        self.assert_(com.isnull(result[:20]).all())

    def test_label_formatting(self):
        self.assertEquals(tmod._trim_zeros('1.000'), '1')
        # it works
        result = cut(np.arange(11.), 2)
        result = cut(np.arange(11.) / 1e10, 2)
        # #1979, negative numbers
        result = tmod._format_label(-117.9998, precision=3)
        self.assertEquals(result, '-118')
        result = tmod._format_label(117.9998, precision=3)
        self.assertEquals(result, '118')

    def test_qcut_binning_issues(self):
        # #1978, 1979
        path = os.path.join(curpath(), 'cut_data.csv')
        arr = np.loadtxt(path)
        result = qcut(arr, 20)
        starts = []
        ends = []
        for lev in result.levels:
            # Parse "(start, end]" labels back into floats.
            s, e = lev[1:-1].split(',')
            self.assertTrue(s != e)
            starts.append(float(s))
            ends.append(float(e))
        # Consecutive intervals must be strictly increasing and disjoint.
        for (sp, sn), (ep, en) in zip(zip(starts[:-1], starts[1:]),
                                      zip(ends[:-1], ends[1:])):
            self.assertTrue(sp < sn)
            self.assertTrue(ep < en)
            self.assertTrue(ep <= sn)
def curpath():
    """Return the absolute directory containing this test module."""
    return os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
    # Run this module's tests under nose, dropping into pdb on error/failure.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| gpl-3.0 |
Dhivyap/ansible | hacking/aws_config/build_iam_policy_framework.py | 25 | 11861 | # Requires pandas, bs4, html5lib, and lxml
#
# Call script with the output from aws_resource_actions callback, e.g.
# python build_iam_policy_framework.py ['ec2:AuthorizeSecurityGroupEgress', 'ec2:AuthorizeSecurityGroupIngress', 'sts:GetCallerIdentity']
#
# The sample output:
# {
# "Version": "2012-10-17",
# "Statement": [
# {
# "Sid": "AnsibleEditor0",
# "Effect": "Allow",
# "Action": [
# "ec2:AuthorizeSecurityGroupEgress",
# "ec2:AuthorizeSecurityGroupIngress"
# ],
# "Resource": "arn:aws:ec2:${Region}:${Account}:security-group/${SecurityGroupId}"
# },
# {
# "Sid": "AnsibleEditor1",
# "Effect": "Allow",
# "Action": [
# "sts:GetCallerIdentity"
# ],
# "Resource": "*"
# }
# ]
# }
#
# Policy troubleshooting:
# - If there are more actions in the policy than you provided, AWS has documented dependencies for some of your actions and
# those have been added to the policy.
# - If there are fewer actions in the policy than you provided, some of your actions are not in the IAM table of actions for
# that service. For example, the API call s3:DeleteObjects does not actually correlate to the permission needed in a policy.
# In this case s3:DeleteObject is the permission required to allow both the s3:DeleteObjects action and the s3:DeleteObject action.
# - The policies output are only as accurate as the AWS documentation. If the policy does not permit the
# necessary actions, look for undocumented dependencies. For example, redshift:CreateCluster requires ec2:DescribeVpcs,
# ec2:DescribeSubnets, ec2:DescribeSecurityGroups, and ec2:DescribeInternetGateways, but AWS does not document this.
#
import json
import requests
import sys
# Optional third-party parsers: record what is missing instead of failing at
# import time, so the CLI can print one consolidated error message.
missing_dependencies = []

try:
    import pandas as pd
except ImportError:
    missing_dependencies.append('pandas')

try:
    import bs4
except ImportError:
    missing_dependencies.append('bs4')

try:
    import html5lib
except ImportError:
    missing_dependencies.append('html5lib')

try:
    import lxml
except ImportError:
    missing_dependencies.append('lxml')


# Maps IAM action prefixes to the doc-page name fragment when the two differ
# (the docs URL is built from the service's marketing name, not the prefix).
irregular_service_names = {
    'a4b': 'alexaforbusiness',
    'appstream': 'appstream2.0',
    'acm': 'certificatemanager',
    'acm-pca': 'certificatemanagerprivatecertificateauthority',
    'aws-marketplace-management': 'marketplacemanagementportal',
    'ce': 'costexplorerservice',
    'cognito-identity': 'cognitoidentity',
    'cognito-sync': 'cognitosync',
    'cognito-idp': 'cognitouserpools',
    'cur': 'costandusagereport',
    'dax': 'dynamodbacceleratordax',
    'dlm': 'datalifecyclemanager',
    'dms': 'databasemigrationservice',
    'ds': 'directoryservice',
    'ec2messages': 'messagedeliveryservice',
    'ecr': 'ec2containerregistry',
    'ecs': 'elasticcontainerservice',
    'eks': 'elasticcontainerserviceforkubernetes',
    'efs': 'elasticfilesystem',
    'es': 'elasticsearchservice',
    'events': 'cloudwatchevents',
    'firehose': 'kinesisfirehose',
    'fms': 'firewallmanager',
    'health': 'healthapisandnotifications',
    'importexport': 'importexportdiskservice',
    'iot1click': 'iot1-click',
    'kafka': 'managedstreamingforkafka',
    'kinesisvideo': 'kinesisvideostreams',
    'kms': 'keymanagementservice',
    'license-manager': 'licensemanager',
    'logs': 'cloudwatchlogs',
    'opsworks-cm': 'opsworksconfigurationmanagement',
    'mediaconnect': 'elementalmediaconnect',
    'mediaconvert': 'elementalmediaconvert',
    'medialive': 'elementalmedialive',
    'mediapackage': 'elementalmediapackage',
    'mediastore': 'elementalmediastore',
    'mgh': 'migrationhub',
    'mobiletargeting': 'pinpoint',
    'pi': 'performanceinsights',
    'pricing': 'pricelist',
    'ram': 'resourceaccessmanager',
    'resource-groups': 'resourcegroups',
    'sdb': 'simpledb',
    'servicediscovery': 'cloudmap',
    'serverlessrepo': 'serverlessapplicationrepository',
    'sms': 'servermigrationservice',
    'sms-voice': 'pinpointsmsandvoiceservice',
    'sso-directory': 'ssodirectory',
    'ssm': 'systemsmanager',
    'ssmmessages': 'sessionmanagermessagegatewayservice',
    'states': 'stepfunctions',
    'sts': 'securitytokenservice',
    'swf': 'simpleworkflowservice',
    'tag': 'resourcegrouptaggingapi',
    'transfer': 'transferforsftp',
    'waf-regional': 'wafregional',
    'wam': 'workspacesapplicationmanager',
    'xray': 'x-ray'
}

# Services whose documentation does not follow the standard URL templates at
# all; these are looked up directly instead of being generated.
irregular_service_links = {
    'apigateway': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_manageamazonapigateway.html'
    ],
    'aws-marketplace': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsmarketplace.html',
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsmarketplacemeteringservice.html',
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsprivatemarketplace.html'
    ],
    'discovery': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_applicationdiscovery.html'
    ],
    'elasticloadbalancing': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_elasticloadbalancing.html',
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_elasticloadbalancingv2.html'
    ],
    'globalaccelerator': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_globalaccelerator.html'
    ]
}
def get_docs_by_prefix(prefix):
    """Return the candidate IAM documentation URLs for a service prefix."""
    # Some services have hand-curated link lists that the URL templates
    # cannot produce.
    if prefix in irregular_service_links:
        return irregular_service_links[prefix]

    # Otherwise build both the "amazon<name>" and "aws<name>" page URLs,
    # translating prefixes whose page name differs from the API prefix.
    name = irregular_service_names.get(prefix, prefix)
    templates = (
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazon{0}.html',
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_aws{0}.html',
    )
    return [template.format(name) for template in templates]
def get_html(links):
    """Fetch each URL and parse any HTML tables found on the page.

    Returns a list with one entry per page that contained tables; each entry
    is the list of DataFrames pandas extracted from that page.
    """
    parsed_pages = []
    for url in links:
        payload = requests.get(url).content
        try:
            parsed_pages.append(pd.read_html(payload))
        except ValueError as err:
            # Pages without tables raise ValueError('No tables found');
            # that simply means there is no data for this URL.
            if 'No tables found' not in str(err):
                raise
    return parsed_pages
def get_tables(service):
    """Download and split the IAM docs tables for *service*.

    Returns ``(action_tables, arn_tables)``: row lists (header removed) for
    the "Actions" tables and the "Resource Types"/ARN tables respectively.

    Action table columns:
    0: Action, 1: Description, 2: Access level, 3: Resource type,
    4: Condition keys, 5: Dependent actions
    ARN table columns:
    0: Resource type, 1: ARN template, 2: Condition keys
    """
    pages = get_html(get_docs_by_prefix(service))
    action_tables = []
    arn_tables = []
    for tables_on_page in pages:
        for frame in tables_on_page:
            as_json = json.loads(frame.to_json(orient='split'))
            header = as_json['data'][0]
            rows = as_json['data'][1::]
            # Classify each table by its header row.
            if 'Actions' in header and 'Resource Types (*required)' in header:
                action_tables.append(rows)
            elif 'Resource Types' in header and 'ARN' in header:
                arn_tables.append(rows)
    return action_tables, arn_tables
def add_dependent_action(resources, dependency):
    """Record *dependency* (a "service:Action" string) in *resources*.

    Parameters
    ----------
    resources : dict mapping service prefix -> list of action names
    dependency : str of the form "service:Action"

    Returns the same dict (mutated in place) for call-chaining.
    """
    resource, action = dependency.split(':')
    # setdefault replaces the explicit "key present?" branching.
    resources.setdefault(resource, []).append(action)
    return resources
def get_dependent_actions(resources):
    """Expand *resources* with AWS-documented dependent actions.

    For every service:action already present, scan the docs action tables
    and add any actions listed in the "Dependent actions" column (index 5).
    Returns the (mutated) resources dict.
    """
    # Iterate over a snapshot of the keys: add_dependent_action may insert
    # new services while we walk the original ones.
    for service in dict(resources):
        action_tables, arn_tables = get_tables(service)
        for found_action_table in action_tables:
            for action_stuff in found_action_table:
                if action_stuff is None:
                    continue
                if action_stuff[0] in resources[service] and action_stuff[5]:
                    # str.split() always returns a list, so iterate directly;
                    # the old isinstance(dependencies, list) branch was
                    # unreachable dead code.
                    for dependency in action_stuff[5].split():
                        resources = add_dependent_action(resources, dependency)
    return resources
def get_actions_by_service(resources):
    """Map each service to a dict of {action name: ARN template or None}.

    For every service in *resources*, download its docs tables, build a
    lookup from resource type to ARN template, then resolve each documented
    action to the ARN template of its resource type (None when the action
    has no resource-level restriction).

    The removed ``dependencies = {}`` local was never used (dead code).
    """
    service_action_dict = {}
    for service in resources:
        action_tables, arn_tables = get_tables(service)

        # Create dict of the resource type to the corresponding ARN.
        # The "*" suffix matches the "(*required)" marker used in the
        # action tables' resource-type column.
        arn_dict = {}
        for found_arn_table in arn_tables:
            for arn_stuff in found_arn_table:
                arn_dict["{0}*".format(arn_stuff[0])] = arn_stuff[1]

        # Create dict of the action to the corresponding ARN.
        action_dict = {}
        for found_action_table in action_tables:
            for action_stuff in found_action_table:
                if action_stuff[0] is None:
                    continue
                if arn_dict.get(action_stuff[3]):
                    action_dict[action_stuff[0]] = arn_dict[action_stuff[3]]
                else:
                    action_dict[action_stuff[0]] = None
        service_action_dict[service] = action_dict
    return service_action_dict
def get_resource_arns(aws_actions, action_dict):
    """Group "service:Action" strings by their resolved resource ARN.

    Parameters
    ----------
    aws_actions : iterable of "service:Action" strings
    action_dict : dict mapping action name -> ARN template (or None)

    Actions missing from *action_dict* are skipped; a None template maps to
    the wildcard resource "*".
    """
    resource_arns = {}
    for resource_action in aws_actions:
        # Only the action part matters here; the prefix was previously
        # bound to a variable that was always overwritten (dead value).
        _, action = resource_action.split(':')
        if action not in action_dict:
            continue
        arn_template = action_dict[action]
        if arn_template is None:
            resource = "*"
        else:
            resource = arn_template.replace("${Partition}", "aws")
        resource_arns.setdefault(resource, []).append(resource_action)
    return resource_arns
def get_resources(actions):
    """Split "service:Action" strings into {service: [action names]}."""
    grouped = {}
    for item in actions:
        service, action = item.split(':')
        grouped.setdefault(service, []).append(action)
    return grouped
def combine_arn_actions(resources, service_action_arn_dict):
    """Merge per-service ARN groupings into one {ARN: [actions]} dict.

    Bug fix: the original body read the module-global ``aws_actions`` and
    ignored the ``resources`` parameter entirely; it only worked because the
    sole caller happened to pass that same global. The parameter is used now.
    """
    arn_actions = {}
    for service in service_action_arn_dict:
        service_arn_actions = get_resource_arns(
            resources, service_action_arn_dict[service])
        for resource in service_arn_actions:
            if resource in arn_actions:
                arn_actions[resource].extend(service_arn_actions[resource])
            else:
                arn_actions[resource] = service_arn_actions[resource]
    return arn_actions
def combine_actions_and_dependent_actions(resources):
    """Flatten {service: [actions]} into a set of "service:Action" strings."""
    return {'{0}:{1}'.format(service, action)
            for service, action_list in resources.items()
            for action in action_list}
def get_actions_restricted_by_arn(aws_actions):
    """Resolve *aws_actions* to {resource ARN: [actions]} via the AWS docs."""
    # Group by service, pull in documented dependent actions, then look up
    # the ARN template for every action.
    by_service = get_resources(aws_actions)
    by_service = get_dependent_actions(by_service)
    arn_templates_by_service = get_actions_by_service(by_service)
    expanded_actions = combine_actions_and_dependent_actions(by_service)
    return combine_arn_actions(expanded_actions, arn_templates_by_service)
def main(aws_actions):
    """Build an IAM policy covering *aws_actions* and print it as JSON."""
    arn_actions = get_actions_restricted_by_arn(aws_actions)
    # One Allow statement per distinct resource restriction.
    statement = [
        {
            "Sid": "AnsibleEditor{0}".format(index),
            "Effect": "Allow",
            "Action": grouped_actions,
            "Resource": resource_restriction,
        }
        for index, (resource_restriction, grouped_actions)
        in enumerate(arn_actions.items())
    ]
    policy = {"Version": "2012-10-17", "Statement": statement}
    print(json.dumps(policy, indent=4))
if __name__ == '__main__':
    # Fail fast if any of the optional parsing libraries are absent.
    if missing_dependencies:
        sys.exit('Missing Python libraries: {0}'.format(', '.join(missing_dependencies)))
    # Accept either space-separated arguments or one comma-separated string,
    # e.g. the "['ec2:Foo', 's3:Bar']" output pasted from the callback.
    actions = sys.argv[1:]
    if len(actions) == 1:
        actions = sys.argv[1].split(',')
    # Strip list punctuation and quotes left over from a pasted repr.
    aws_actions = [action.strip('[], "\'') for action in actions]
    main(aws_actions)
| gpl-3.0 |
arahuja/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 3 | 5300 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
    """Minkowski distance of order *p* (used as a user-defined test metric)."""
    diff = x1 - x2
    return np.sum(diff ** p) ** (1. / p)
def cmp_version(version1, version2):
    """Three-way compare (-1/0/1) of the first two numeric version parts."""
    major_minor1 = tuple(map(int, version1.split('.')[:2]))
    major_minor2 = tuple(map(int, version2.split('.')[:2]))
    # Standard cmp idiom: True/False subtract to 1/-1/0.
    return (major_minor1 > major_minor2) - (major_minor1 < major_minor2)
class TestMetrics:
    """nose-style generator tests comparing DistanceMetric to scipy's cdist.

    ``test_cdist``/``test_pdist`` yield (check, args) tuples — the old nose
    generator-test protocol — one per metric/parameter combination.
    """

    def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
                 rseed=0, dtype=np.float64):
        # Two random sample sets of d-dimensional points.
        np.random.seed(rseed)
        self.X1 = np.random.random((n1, d)).astype(dtype)
        self.X2 = np.random.random((n2, d)).astype(dtype)

        # make boolean arrays: ones and zeros
        self.X1_bool = self.X1.round(0)
        self.X2_bool = self.X2.round(0)

        # V V^T gives a symmetric positive semi-definite matrix for the
        # mahalanobis VI parameter.
        V = np.random.random((d, d))
        VI = np.dot(V, V.T)

        # Metric name -> dict of parameter grids (each value is a tuple of
        # settings to try; empty dict means no parameters).
        self.metrics = {'euclidean': {},
                        'cityblock': {},
                        'minkowski': dict(p=(1, 1.5, 2, 3)),
                        'chebyshev': {},
                        'seuclidean': dict(V=(np.random.random(d),)),
                        'wminkowski': dict(p=(1, 1.5, 3),
                                           w=(np.random.random(d),)),
                        'mahalanobis': dict(VI=(VI,)),
                        'hamming': {},
                        'canberra': {},
                        'braycurtis': {}}

        # Metrics that operate on boolean input arrays.
        self.bool_metrics = ['matching', 'jaccard', 'dice',
                             'kulsinski', 'rogerstanimoto', 'russellrao',
                             'sokalmichener', 'sokalsneath']

    def test_cdist(self):
        # Cross-distances between two different sample sets.
        for metric, argdict in self.metrics.items():
            keys = argdict.keys()
            # Cartesian product over all parameter settings of this metric.
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                D_true = cdist(self.X1, self.X2, metric, **kwargs)
                yield self.check_cdist, metric, kwargs, D_true
        for metric in self.bool_metrics:
            D_true = cdist(self.X1_bool, self.X2_bool, metric)
            yield self.check_cdist_bool, metric, D_true

    def check_cdist(self, metric, kwargs, D_true):
        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
            raise SkipTest("Canberra distance incorrect in scipy < 0.9")
        dm = DistanceMetric.get_metric(metric, **kwargs)
        D12 = dm.pairwise(self.X1, self.X2)
        assert_array_almost_equal(D12, D_true)

    def check_cdist_bool(self, metric, D_true):
        dm = DistanceMetric.get_metric(metric)
        D12 = dm.pairwise(self.X1_bool, self.X2_bool)
        assert_array_almost_equal(D12, D_true)

    def test_pdist(self):
        # Self-distances within one sample set (reference computed with
        # cdist(X1, X1), which equals the pairwise matrix).
        for metric, argdict in self.metrics.items():
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                D_true = cdist(self.X1, self.X1, metric, **kwargs)
                yield self.check_pdist, metric, kwargs, D_true
        for metric in self.bool_metrics:
            D_true = cdist(self.X1_bool, self.X1_bool, metric)
            yield self.check_pdist_bool, metric, D_true

    def check_pdist(self, metric, kwargs, D_true):
        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
            raise SkipTest("Canberra distance incorrect in scipy < 0.9")
        dm = DistanceMetric.get_metric(metric, **kwargs)
        # Single-argument pairwise computes self-distances.
        D12 = dm.pairwise(self.X1)
        assert_array_almost_equal(D12, D_true)

    def check_pdist_bool(self, metric, D_true):
        dm = DistanceMetric.get_metric(metric)
        D12 = dm.pairwise(self.X1_bool)
        assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
    """Haversine pairwise distances match a direct formula transcription."""
    def haversine_slow(x1, x2):
        return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
                                     + np.cos(x1[0]) * np.cos(x2[0]) *
                                     np.sin(0.5 * (x1[1] - x2[1])) ** 2))

    X = np.random.random((10, 2))

    haversine = DistanceMetric.get_metric("haversine")
    D_fast = haversine.pairwise(X)
    D_slow = np.array([[haversine_slow(p, q) for q in X] for p in X])

    assert_array_almost_equal(D_fast, D_slow)
    # rdist is the "reduced" distance: sin^2(d / 2) for haversine.
    assert_array_almost_equal(haversine.dist_to_rdist(D_fast),
                              np.sin(0.5 * D_slow) ** 2)
def test_pyfunc_metric():
    """A python-callable metric matches the builtin euclidean metric,
    before and after a pickle round-trip."""
    X = np.random.random((10, 3))

    builtin = DistanceMetric.get_metric("euclidean")
    custom = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)

    # Check if both callable metric and predefined metric initialized
    # DistanceMetric object is picklable
    builtin_copy = pickle.loads(pickle.dumps(builtin))
    custom_copy = pickle.loads(pickle.dumps(custom))

    assert_array_almost_equal(builtin.pairwise(X), custom.pairwise(X))
    assert_array_almost_equal(builtin_copy.pairwise(X),
                              custom_copy.pairwise(X))
if __name__ == '__main__':
    # Allow running this test module directly via nose.
    import nose
    nose.runmodule()
| bsd-3-clause |
Tong-Chen/scikit-learn | sklearn/tree/tree.py | 1 | 29287 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Licence: BSD 3 clause
from __future__ import division
import numbers
import numpy as np
from abc import ABCMeta, abstractmethod
from warnings import warn
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import array2d, check_random_state
from ..utils.fixes import unique
from ..utils.validation import check_arrays
from ._tree import Criterion, Splitter, Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
# dtype aliases re-exported from the Cython extension; used when converting
# X (DTYPE) and y / sample_weight (DOUBLE) in fit/predict.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE

# Split-quality criteria, keyed by the string accepted by ``criterion``.
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE}

# Node-splitting strategies, keyed by the string accepted by ``splitter``.
SPLITTERS = {"best": _tree.BestSplitter,
             "presort-best": _tree.PresortBestSplitter,
             "random": _tree.RandomSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
                                          _LearntSelectorMixin)):
    """Base class for decision trees.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 max_features,
                 random_state):
        # Hyper-parameters are stored verbatim (sklearn convention:
        # validation happens in fit, not in __init__).
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.max_features = max_features
        self.random_state = random_state

        # Fitted state, populated by fit():
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None

    def fit(self, X, y, sample_mask=None, X_argsorted=None, check_input=True,
            sample_weight=None):
        """Build a decision tree from the training set (X, y).

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The training input samples. Use ``dtype=np.float32`` for maximum
            efficiency.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (integers that correspond to classes in
            classification, real numbers in regression).
            Use ``dtype=np.float64`` and ``order='C'`` for maximum
            efficiency.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)

        # Deprecations
        if sample_mask is not None:
            warn("The sample_mask parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        if X_argsorted is not None:
            warn("The X_argsorted parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)

        # Convert data to the C-contiguous float32 layout the Cython tree
        # builder expects.
        if check_input:
            X, = check_arrays(X, dtype=DTYPE, sparse_format="dense",
                              check_ccontiguous=True)

        # Determine output settings
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)

        y = np.atleast_1d(y)

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        if is_classification:
            # Encode class labels as integer indices, one output at a time.
            y = np.copy(y)

            self.classes_ = []
            self.n_classes_ = []

            for k in xrange(self.n_outputs_):
                classes_k, y[:, k] = unique(y[:, k], return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])

        else:
            # Regression: no classes; one "pseudo-class" per output.
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_

        self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        # Check parameters
        # (2**31 - 1 acts as the "unlimited depth" sentinel.)
        max_depth = (2 ** 31) - 1 if self.max_depth is None else self.max_depth

        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    # "auto" means all features for regression trees.
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float: interpreted as a fraction of n_features
            max_features = int(self.max_features * self.n_features_)

        self.max_features_ = max_features

        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if self.min_samples_split <= 0:
            raise ValueError("min_samples_split must be greater than zero.")
        if self.min_samples_leaf <= 0:
            raise ValueError("min_samples_leaf must be greater than zero.")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")

        if sample_weight is not None:
            # The Cython builder requires contiguous float64 weights.
            if (getattr(sample_weight, "dtype", None) != DOUBLE or
                    not sample_weight.flags.contiguous):
                sample_weight = np.ascontiguousarray(
                    sample_weight, dtype=DOUBLE)
            if len(sample_weight.shape) > 1:
                raise ValueError("Sample weights array has more "
                                 "than one dimension: %d" %
                                 len(sample_weight.shape))
            if len(sample_weight) != n_samples:
                raise ValueError("Number of weights=%d does not match "
                                 "number of samples=%d" %
                                 (len(sample_weight), n_samples))

        # Set min_samples_split sensibly: a split only makes sense if both
        # children can satisfy min_samples_leaf.
        min_samples_split = max(self.min_samples_split,
                                2 * self.min_samples_leaf)

        # Build tree: resolve string parameters to the Cython criterion and
        # splitter objects (instances are passed through unchanged).
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            if is_classification:
                criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
                                                         self.n_classes_)
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_)

        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](criterion,
                                                self.max_features_,
                                                self.min_samples_leaf,
                                                random_state)

        self.tree_ = Tree(self.n_features_, self.n_classes_,
                          self.n_outputs_, splitter, max_depth,
                          min_samples_split, self.min_samples_leaf)

        self.tree_.build(X, y, sample_weight=sample_weight)

        # For single-output problems, unwrap the per-output lists so that
        # classes_ / n_classes_ are scalars/arrays as documented.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]

        return self

    def predict(self, X):
        """Predict class or regression value for X.

        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predict values.
        """
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE)

        n_samples, n_features = X.shape

        if self.tree_ is None:
            raise Exception("Tree not initialized. Perform a fit first")

        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))

        # For classifiers proba holds per-class counts/probabilities; for
        # regressors it holds the raw predicted values.
        proba = self.tree_.predict(X)

        # Classification
        if isinstance(self, ClassifierMixin):
            if self.n_outputs_ == 1:
                # argmax over classes, mapped back to the original labels.
                return self.classes_.take(np.argmax(proba, axis=1), axis=0)

            else:
                predictions = np.zeros((n_samples, self.n_outputs_))

                for k in xrange(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1),
                        axis=0)

                return predictions

        # Regression
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0]

            else:
                return proba[:, :, 0]

    @property
    def feature_importances_(self):
        """Return the feature importances.

        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.tree_ is None:
            raise ValueError("Estimator not fitted, "
                             "call `fit` before `feature_importances_`.")

        return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.

    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.

    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    `tree_` : Tree object
        The underlying Tree object.

    `max_features_` : int,
        The infered value of max_features.

    `classes_` : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).

    `n_classes_` : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).

    `feature_importances_` : array of shape = [n_features]
        The feature importances. The higher, the more important the
        feature. The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance [4]_.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ...                             # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """
    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features=None,
                 random_state=None,
                 min_density=None,
                 compute_importances=None):
        # min_density and compute_importances are deprecated (0.14) and are
        # accepted for backward compatibility only; they are never forwarded
        # to the base class.
        super(DecisionTreeClassifier, self).__init__(criterion,
                                                     splitter,
                                                     max_depth,
                                                     min_samples_split,
                                                     min_samples_leaf,
                                                     max_features,
                                                     random_state)
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)
        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)

    def predict_proba(self, X):
        """Predict class probabilities of the input samples X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. Classes are ordered
            by arithmetical order.

        Raises
        ------
        ValueError
            If the estimator has not been fitted yet, or if the number of
            features of X does not match the number seen during ``fit``.
        """
        # Coerce the input to the dtype/layout the Cython tree expects.
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE)
        n_samples, n_features = X.shape
        # Raise ValueError (not a bare Exception) so the "not fitted" error
        # is consistent with the feature-count check below; ValueError is a
        # subclass of Exception, so existing callers remain compatible.
        if self.tree_ is None:
            raise ValueError("Tree not initialized. Perform a fit first.")
        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))
        proba = self.tree_.predict(X)
        if self.n_outputs_ == 1:
            # Normalize leaf counts to probabilities; all-zero rows are
            # mapped to zeros (normalizer forced to 1 to avoid 0/0).
            proba = proba[:, :self.n_classes_]
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer
            return proba
        else:
            # One probability array per output.
            all_proba = []
            for k in xrange(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)
            return all_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. Classes are
            ordered by arithmetical order.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        else:
            # Take the log of each per-output probability array in place.
            for k in xrange(self.n_outputs_):
                proba[k] = np.log(proba[k])
            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A tree regressor.
    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.
    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    `tree_` : Tree object
        The underlying Tree object.
    `max_features_` : int,
        The infered value of max_features.
    `feature_importances_` : array of shape = [n_features]
        The feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought
        by that feature. It is also known as the Gini importance [4]_.
    See also
    --------
    DecisionTreeClassifier
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
        and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
        Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
        http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ...                    # doctest: +SKIP
    ...
    array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
            0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
    """
    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features=None,
                 random_state=None,
                 min_density=None,           # deprecated since 0.14, ignored
                 compute_importances=None):  # deprecated since 0.14, ignored
        # The deprecated arguments are accepted (and warned about) for
        # backward compatibility only; they are never forwarded to the
        # base class.
        super(DecisionTreeRegressor, self).__init__(criterion,
                                                    splitter,
                                                    max_depth,
                                                    min_samples_split,
                                                    min_samples_leaf,
                                                    max_features,
                                                    random_state)
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)
        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.
    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set 1, this amounts to building a totally random
    decision tree.
    Warning: Extra-trees should only be used within ensemble methods.
    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="gini",
                 splitter="random",          # random splits: the defining extra-trees default
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features="auto",        # feature subsampling on by default
                 random_state=None,
                 min_density=None,           # deprecated since 0.14, ignored
                 compute_importances=None):  # deprecated since 0.14, ignored
        # Deprecated arguments are accepted (and warned about) but never
        # forwarded to the parent constructor.
        super(ExtraTreeClassifier, self).__init__(criterion,
                                                  splitter,
                                                  max_depth,
                                                  min_samples_split,
                                                  min_samples_leaf,
                                                  max_features,
                                                  random_state)
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)
        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.
    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set 1, this amounts to building a totally random
    decision tree.
    Warning: Extra-trees should only be used within ensemble methods.
    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="mse",
                 splitter="random",          # random splits: the defining extra-trees default
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 max_features="auto",        # feature subsampling on by default
                 random_state=None,
                 min_density=None,           # deprecated since 0.14, ignored
                 compute_importances=None):  # deprecated since 0.14, ignored
        # Deprecated arguments are accepted (and warned about) but never
        # forwarded to the parent constructor.
        super(ExtraTreeRegressor, self).__init__(criterion,
                                                 splitter,
                                                 max_depth,
                                                 min_samples_split,
                                                 min_samples_leaf,
                                                 max_features,
                                                 random_state)
        if min_density is not None:
            warn("The min_density parameter is deprecated as of version 0.14 "
                 "and will be removed in 0.16.", DeprecationWarning)
        if compute_importances is not None:
            warn("Setting compute_importances is no longer required as "
                 "version 0.14. Variable importances are now computed on the "
                 "fly when accessing the feature_importances_ attribute. "
                 "This parameter will be removed in 0.16.",
                 DeprecationWarning)
| bsd-3-clause |
Adai0808/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2]  # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0  # shared regularization strength for all classifiers
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
               'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
               'Linear SVC': SVC(kernel='linear', C=C, probability=True,
                                 random_state=0),
               'L2 logistic (Multinomial)': LogisticRegression(
                C=C, solver='lbfgs', multi_class='multinomial'
                )}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
# 100x100 evaluation grid covering the plotted range of the two features
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
# One row of subplots per classifier, one column per predicted class
for index, (name, classifier) in enumerate(classifiers.items()):
    classifier.fit(X, y)
    y_pred = classifier.predict(X)
    classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
    print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities over the full evaluation grid
    probas = classifier.predict_proba(Xfull)
    n_classes = np.unique(y_pred).size
    for k in range(n_classes):
        plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
        plt.title("Class %d" % k)
        if k == 0:
            plt.ylabel(name)
        imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
                                   extent=(3, 9, 1, 5), origin='lower')
        plt.xticks(())
        plt.yticks(())
        # Overlay the training points predicted to belong to class k
        idx = (y_pred == k)
        if idx.any():
            plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
# Shared horizontal colorbar for all of the probability maps
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
wcalvert/LPC11U_LPC13U_CodeBase | src/drivers/filters/iir/python/iir_f_noisysine_test.py | 2 | 2628 | #-------------------------------------------------------------------------------
# Name: iir_f_tester
#
# Purpose: Displays IIR output of a sine wave with optional random noise
#
# Author: K. Townsend (microBuilder.eu)
#
# Created: 05/05/2013
# Copyright: (c) K. Townsend 2013
# Licence: BSD
#
# This module requires the following libs
# matplotlib - http://matplotlib.org/
# numpy - http://www.numpy.org/
#-------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Interactively demonstrate a first-order IIR filter on a noisy sine.

    Prompts for the filter coefficient (alpha), the input noise level and the
    sample count, runs the IIR filter over a noisy sine wave, and plots the
    raw input against the filter output.
    """
    avg = 0.0
    iirvals = []
    # Get alpha (determines how 'quickly' the filter responds to changes)
    alpha = float(input("IIR alpha [0..1.0]: "))
    # Set the noise level for the input sine wave
    noiselevel = float(input("Input noise level [0..1.0]: "))
    # Set the number of samples to use. np.linspace requires an integer
    # sample count: the original float value is rejected by modern numpy,
    # so truncate to int (accepting inputs such as "100.0").
    samples = int(float(input("Number of samples: ")))
    # Clamp out-of-range inputs rather than failing
    if alpha > 1.0:
        print ('Setting alpha to 1.0')
        alpha = 1.0
    if alpha < 0:
        print ('Setting alpha to 0.0')
        alpha = 0.0
    if noiselevel > 1.0:
        print ('Setting noise level to 1.0')
        noiselevel = 1.0
    if noiselevel < 0:
        print ('Setting noise level to 0.0')
        noiselevel = 0.0
    if samples < 0:
        print ('Setting samples to 100')
        samples = 100
    # Generate a sine wave with some noise on it
    x = np.linspace(0, 4*np.pi, samples)
    sine = np.sin(x)
    noise = np.random.uniform(-1, 1, size=len(x)) * noiselevel
    noisysine = sine + noise
    # Run the IIR filter over the entire input dataset
    for current, sample in enumerate(noisysine, start=1):
        # Fold one sample into the IIR filter and record it for plotting
        avg = iirAddValue(avg, alpha, sample)
        iirvals.append(avg)
        print ("%d: %g" % (current, avg))
    # Display the results
    plt.title("Sine Wave Input vs. IIR Output \n (Alpha: %g, Noise Level: %g)"
              % (alpha, noiselevel))
    plt.xlabel('Samples')
    plt.ylabel('Values')
    plt.ylim(noisysine.min()*1.1, noisysine.max()*1.1)
    plt.grid(True)
    plt.plot(noisysine,
             color="blue",
             alpha=0.4,
             linestyle="-",
             label="Raw Input")
    plt.plot(iirvals,
             color="red",
             linewidth='1.5',
             linestyle="-",
             label="IIR Output")
    plt.legend()
    plt.show()
def iirAddValue(avg, alpha, val):
    """Fold one new sample into a first-order IIR (exponential) filter.

    The incoming sample is weighted by ``alpha`` and the running average by
    ``1 - alpha``; the updated average is returned.
    """
    weighted_new = alpha * val
    weighted_old = (1.0 - alpha) * avg
    return weighted_new + weighted_old
# Run the interactive demo only when executed as a script
if __name__ == '__main__':
    main()
| bsd-3-clause |
jjx02230808/project0223 | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"  # one matplotlib color code per class
plot_step = 0.02     # mesh step size for the decision-surface grid
# Load data
iris = load_iris()
# One subplot per unordered pair of the four iris features
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
                                [1, 2], [1, 3], [2, 3]]):
    # We only take the two corresponding features
    X = iris.data[:, pair]
    y = iris.target
    # Shuffle (fixed seed keeps the figure reproducible)
    idx = np.arange(X.shape[0])
    np.random.seed(13)
    np.random.shuffle(idx)
    X = X[idx]
    y = y[idx]
    # Standardize
    mean = X.mean(axis=0)
    std = X.std(axis=0)
    X = (X - mean) / std
    # Train
    clf = DecisionTreeClassifier().fit(X, y)
    # Plot the decision boundary
    plt.subplot(2, 3, pairidx + 1)
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                         np.arange(y_min, y_max, plot_step))
    # Classify every point of the mesh and shade the resulting regions
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
    plt.xlabel(iris.feature_names[pair[0]])
    plt.ylabel(iris.feature_names[pair[1]])
    plt.axis("tight")
    # Plot the training points
    for i, color in zip(range(n_classes), plot_colors):
        idx = np.where(y == i)
        plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
                    cmap=plt.cm.Paired)
    plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/ipykernel/pylab/config.py | 10 | 4485 | """Configurable for configuring the IPython inline backend
This module does not import anything from matplotlib.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from traitlets.config.configurable import SingletonConfigurable
from traitlets import (
Dict, Instance, Set, Bool, TraitError, Unicode
)
#-----------------------------------------------------------------------------
# Configurable for inline backend options
#-----------------------------------------------------------------------------
def pil_available():
    """Test if PIL/Pillow is available.

    Returns
    -------
    bool
        True if ``PIL.Image`` can be imported, False otherwise.
    """
    try:
        # Pillow installs under the legacy "PIL" namespace.
        from PIL import Image  # noqa: F401
    except ImportError:
        # Only a missing install should report unavailability; the original
        # bare ``except`` also swallowed unrelated errors such as
        # KeyboardInterrupt and SystemExit.
        return False
    return True
# inherit from InlineBackendConfig for deprecation purposes
class InlineBackendConfig(SingletonConfigurable):
    """Deprecated alias retained so configuration written against the old
    ``InlineBackendConfig`` name keeps working; use ``InlineBackend``.
    """
    pass
class InlineBackend(InlineBackendConfig):
    """An object to store configuration of the inline backend."""
    # The typical default figure size is too large for inline use,
    # so we shrink the figure size to 6x4, and tweak fonts to
    # make that fit.
    rc = Dict({'figure.figsize': (6.0,4.0),
        # play nicely with white background in the Qt and notebook frontend
        'figure.facecolor': (1,1,1,0),
        'figure.edgecolor': (1,1,1,0),
        # 12pt labels get cutoff on 6x4 logplots, so use 10pt.
        'font.size': 10,
        # 72 dpi matches SVG/qtconsole
        # this only affects PNG export, as SVG has no dpi setting
        'figure.dpi': 72,
        # 10pt still needs a little more room on the xlabel:
        'figure.subplot.bottom' : .125
        },
        help="""Subset of matplotlib rcParams that should be different for the
        inline backend."""
    ).tag(config=True)
    figure_formats = Set({'png'},
                          help="""A set of figure formats to enable: 'png',
                          'retina', 'jpeg', 'svg', 'pdf'.""").tag(config=True)
    def _update_figure_formatters(self):
        # Re-select the shell's display formatters so format/kwargs changes
        # take effect immediately.
        if self.shell is not None:
            from IPython.core.pylabtools import select_figure_formats
            select_figure_formats(self.shell, self.figure_formats, **self.print_figure_kwargs)
    def _figure_formats_changed(self, name, old, new):
        # Trait observer: reject JPEG formats when PIL/Pillow is missing,
        # then propagate the new format set to the formatters.
        if 'jpg' in new or 'jpeg' in new:
            if not pil_available():
                raise TraitError("Requires PIL/Pillow for JPG figures")
        self._update_figure_formatters()
    figure_format = Unicode(help="""The figure format to enable (deprecated
                         use `figure_formats` instead)""").tag(config=True)
    def _figure_format_changed(self, name, old, new):
        # Back-compat shim: mirror the deprecated single-format trait into
        # the figure_formats set.
        if new:
            self.figure_formats = {new}
    print_figure_kwargs = Dict({'bbox_inches' : 'tight'},
        help="""Extra kwargs to be passed to fig.canvas.print_figure.
        Logical examples include: bbox_inches, quality (for jpeg figures), etc.
        """
    ).tag(config=True)
    # Changing print_figure_kwargs must also re-select the formatters.
    _print_figure_kwargs_changed = _update_figure_formatters
    close_figures = Bool(True,
        help="""Close all figures at the end of each cell.
        When True, ensures that each cell starts with no active figures, but it
        also means that one must keep track of references in order to edit or
        redraw figures in subsequent cells. This mode is ideal for the notebook,
        where residual plots from other cells might be surprising.
        When False, one must call figure() to create new figures. This means
        that gcf() and getfigs() can reference figures created in other cells,
        and the active figure can continue to be edited with pylab/pyplot
        methods that reference the current active figure. This mode facilitates
        iterative editing of figures, and behaves most consistently with
        other matplotlib backends, but figure barriers between cells must
        be explicit.
        """).tag(config=True)
    # Set by the kernel; None when running outside an interactive shell.
    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
                     allow_none=True)
| gpl-3.0 |
binhqnguyen/ln | src/flow-monitor/examples/wifi-olsr-flowmon.py | 108 | 7439 | # -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <gjc@inescporto.pt>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
# Spacing between adjacent grid nodes, in meters
DISTANCE = 100 # (m)
# Default grid side; total node count is this value squared
# (overridable via the NumNodesSide command-line argument)
NUM_NODES_SIDE = 3
def main(argv):
    """Build a grid of ad hoc WiFi + OLSR nodes, run OnOff/UDP flows
    between them, and report per-flow statistics via FlowMonitor.
    (Python 2 / ns-3 bindings.)
    """
    cmd = ns.core.CommandLine()
    cmd.NumNodesSide = None
    cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
    cmd.Results = None
    cmd.AddValue("Results", "Write XML results to file")
    cmd.Plot = None
    cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
    cmd.Parse(argv)
    # WiFi devices: default PHY/channel, ad hoc MAC, ARF rate control
    wifi = ns.wifi.WifiHelper.Default()
    wifiMac = ns.wifi.NqosWifiMacHelper.Default()
    wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
    wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
    wifiPhy.SetChannel(wifiChannel.Create())
    ssid = ns.wifi.Ssid("wifi-default")
    wifi.SetRemoteStationManager("ns3::ArfWifiManager")
    wifiMac.SetType ("ns3::AdhocWifiMac",
                     "Ssid", ns.wifi.SsidValue(ssid))
    # Internet stack: static routing (priority 0) plus OLSR (priority 100)
    internet = ns.internet.InternetStackHelper()
    list_routing = ns.internet.Ipv4ListRoutingHelper()
    olsr_routing = ns.olsr.OlsrHelper()
    static_routing = ns.internet.Ipv4StaticRoutingHelper()
    list_routing.Add(static_routing, 0)
    list_routing.Add(olsr_routing, 100)
    internet.SetRoutingHelper(list_routing)
    ipv4Addresses = ns.internet.Ipv4AddressHelper()
    ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
    port = 9   # Discard port(RFC 863)
    # CBR-like traffic source; the Remote attribute is overwritten per node
    onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
                                              ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
    onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
    onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
    onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
    addresses = []
    nodes = []
    if cmd.NumNodesSide is None:
        num_nodes_side = NUM_NODES_SIDE
    else:
        num_nodes_side = int(cmd.NumNodesSide)
    # Lay the nodes out on a num_nodes_side x num_nodes_side grid
    for xi in range(num_nodes_side):
        for yi in range(num_nodes_side):
            node = ns.network.Node()
            nodes.append(node)
            internet.Install(ns.network.NodeContainer(node))
            mobility = ns.mobility.ConstantPositionMobilityModel()
            mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
            node.AggregateObject(mobility)
            devices = wifi.Install(wifiPhy, wifiMac, node)
            ipv4_interfaces = ipv4Addresses.Assign(devices)
            addresses.append(ipv4_interfaces.GetAddress(0))
    # Each node sends to the "mirror" node at the opposite end of the list
    for i, node in enumerate(nodes):
        destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
        #print i, destaddr
        onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
        app = onOffHelper.Install(ns.network.NodeContainer(node))
        # Random start time so the flows do not all begin at once
        urv = ns.core.UniformRandomVariable()
        app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
    #internet.EnablePcapAll("wifi-olsr")
    flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
    #flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
    monitor = flowmon_helper.InstallAll()
    monitor = flowmon_helper.GetMonitor()
    monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
    monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
    monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
    ns.core.Simulator.Stop(ns.core.Seconds(44.0))
    ns.core.Simulator.Run()
    def print_stats(os, st):
        # Dump one flow's FlowMonitor statistics to the stream `os`
        # (Python 2 print-chevron syntax).
        print >> os, "  Tx Bytes: ", st.txBytes
        print >> os, "  Rx Bytes: ", st.rxBytes
        print >> os, "  Tx Packets: ", st.txPackets
        print >> os, "  Rx Packets: ", st.rxPackets
        print >> os, "  Lost Packets: ", st.lostPackets
        if st.rxPackets > 0:
            print >> os, "  Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
            print >> os, "  Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
            print >> os, "  Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
        if 0:  # histogram dump disabled
            print >> os, "Delay Histogram"
            for i in range(st.delayHistogram.GetNBins () ):
                print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
                    st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
            print >> os, "Jitter Histogram"
            for i in range(st.jitterHistogram.GetNBins () ):
                print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
                    st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
            print >> os, "PacketSize Histogram"
            for i in range(st.packetSizeHistogram.GetNBins () ):
                print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
                    st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
        for reason, drops in enumerate(st.packetsDropped):
            print "  Packets dropped by reason %i: %i" % (reason, drops)
        #for reason, drops in enumerate(st.bytesDropped):
        #    print "Bytes dropped by reason %i: %i" % (reason, drops)
    monitor.CheckForLostPackets()
    classifier = flowmon_helper.GetClassifier()
    if cmd.Results is None:
        # No output file requested: print stats for every flow to stdout
        for flow_id, flow_stats in monitor.GetFlowStats():
            t = classifier.FindFlow(flow_id)
            proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
            print "FlowID: %i (%s %s/%s --> %s/%i)" % \
                (flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
            print_stats(sys.stdout, flow_stats)
    else:
        print monitor.SerializeToXmlFile(cmd.Results, True, True)
    if cmd.Plot is not None:
        import pylab
        delays = []
        for flow_id, flow_stats in monitor.GetFlowStats():
            tupl = classifier.FindFlow(flow_id)
            if tupl.protocol == 17 and tupl.sourcePort == 698:
                # skip routing control traffic (UDP port 698 — presumably OLSR)
                continue
            delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
        pylab.hist(delays, 20)
        pylab.xlabel("Delay (s)")
        pylab.ylabel("Number of Flows")
        pylab.show()
    return 0
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| gpl-2.0 |
graphistry/pygraphistry | docs/source/conf.py | 1 | 7678 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os, sys
from distutils.version import LooseVersion
# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
import graphistry
# -- Project information -----------------------------------------------------
project = 'PyGraphistry'
copyright = '2021, Graphistry, Inc.'
author = 'Graphistry, Inc.'
# The full version, including alpha/beta/rc tags.
version = LooseVersion(graphistry.__version__).vstring
# "release" is the setting name Sphinx actually reads; the previous
# "relesae" typo meant the release tag was silently never configured.
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    #'sphinx.ext.autosummary',
    #'sphinx.ext.intersphinx',
    'sphinx.ext.ifconfig',
    'sphinx_autodoc_typehints',
]
#FIXME Why is sphinx/autodoc failing here?
# Cross-reference targets Sphinx cannot resolve; listed here to silence
# nitpicky-mode warnings for them.
nitpick_ignore = [
    ('py:class', "<class 'dict'>"),
    ('py:class', "<class 'str'>"),
    ('py:class', 'ArrowUploader'),
    ('py:class', 'json.encoder.JSONEncoder'),
    ('py:class', 'pandas.DataFrame'),
    ('py:class', 'pyarrow.lib.Table'),
    ('py:class', 'requests.models.Response'),
    ('py:class', 'weakref.WeakKeyDictionary'),
    ('py:data', 'typing.Optional'),
    ('py:data', 'typing.Tuple')
]
# sphinx-autodoc-typehints options
set_type_checking_flag=True
#typehints_fully_qualified=True
always_document_param_types=True
typehints_document_rtype=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
pygments_style = 'sphinx'
todo_include_todos = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [] # '_static'
html_show_sphinx = False
html_show_sourcelink = False
htmlhelp_basename = 'PyGraphistrydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'PyGraphistry.tex', u'PyGraphistry Documentation',
     u'Graphistry, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pygraphistry', u'PyGraphistry Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyGraphistry', u'PyGraphistry Documentation',
author, 'PyGraphistry', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
texinfo_domain_indices = False
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'https://docs.python.org/': None} | bsd-3-clause |
VikParuchuri/simpsons-scripts | tasks/train.py | 1 | 16847 | from __future__ import division
from itertools import chain
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import pandas as pd
from fisher import pvalue
import re
import collections
from nltk.stem.porter import PorterStemmer
import math
from percept.tasks.base import Task
from percept.fields.base import Complex, List, Dict, Float
from inputs.inputs import SimpsonsFormats
from percept.utils.models import RegistryCategories, get_namespace
from percept.conf.base import settings
import os
from percept.tasks.train import Train
from sklearn.ensemble import RandomForestClassifier
import pickle
import random
import logging
log = logging.getLogger(__name__)
MAX_FEATURES = 500
DISTANCE_MIN=1
CHARACTER_DISTANCE_MIN = .2
RESET_SCENE_EVERY = 5
def make_df(datalist, labels, name_prefix=""):
    """Build a DataFrame whose columns are the given data series.

    Each element of *datalist* becomes one column; *labels* supplies the
    column names, optionally prefixed with *name_prefix* and normalized to
    lowercase snake_case. The index is reset to 0..n-1.
    """
    frame = pd.DataFrame(datalist).T
    if name_prefix != "":
        labels = ["%s_%s" % (name_prefix, label) for label in labels]
    frame.columns = [label.replace(" ", "_").lower() for label in labels]
    frame.index = range(frame.shape[0])
    return frame
def return_one():
    """Default count for unseen words (a picklable alternative to ``lambda: 1``)."""
    return 1
class SpellCorrector(object):
    """
    Taken and slightly adapted from peter norvig's post at http://norvig.com/spell-correct.html
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    punctuation = [".", "!", "?", ","]

    def __init__(self):
        # Fix: use open() inside a context manager instead of the Python 2-only
        # ``file()`` builtin, so the corpus handle is closed deterministically.
        with open(os.path.join(settings.PROJECT_PATH, 'data/big.txt')) as corpus:
            self.NWORDS = self.train(self.words(corpus.read()))
        self.cache = {}

    def words(self, text):
        """Tokenize *text* into lowercase alphabetic words."""
        return re.findall('[a-z]+', text.lower())

    def train(self, features):
        """Build a word -> frequency model; unseen words default to count 1."""
        model = collections.defaultdict(return_one)
        for f in features:
            model[f] += 1
        return model

    def edits1(self, word):
        """All candidate strings at edit distance 1 from *word*."""
        splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
        deletes = [a + b[1:] for a, b in splits if b]
        transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b) > 1]
        replaces = [a + c + b[1:] for a, b in splits for c in self.alphabet if b]
        inserts = [a + c + b for a, b in splits for c in self.alphabet]
        return set(deletes + transposes + replaces + inserts)

    def known_edits2(self, word):
        """Known words at edit distance 2 from *word*."""
        return set(e2 for e1 in self.edits1(word) for e2 in self.edits1(e1) if e2 in self.NWORDS)

    def known(self, words):
        """Subset of *words* present in the frequency model."""
        return set(w for w in words if w in self.NWORDS)

    def correct(self, word):
        """Return the most likely correction for *word*, preserving one
        trailing punctuation character. Results are memoized."""
        if word in self.cache:
            return self.cache[word]
        original = word
        suffix = ""
        for p in self.punctuation:
            if word.endswith(p):
                suffix = p
                word = word[:-1]
        candidates = self.known([word]) or self.known(self.edits1(word)) or self.known_edits2(word) or [word]
        newword = max(candidates, key=self.NWORDS.get) + suffix
        # Fix: cache under the word exactly as passed in. The lookup above uses
        # the unstripped form, but the old code stored the stripped form, so a
        # later correct("hello") could wrongly return the cached "hello!".
        self.cache[original] = newword
        return newword
class Vectorizer(object):
    """Bag-of-ngram feature extractor with spelling correction and stemming.

    ``fit`` selects a vocabulary of ngrams that discriminate between score
    classes (ranked by Fisher's exact test); ``get_features`` /
    ``batch_get_features`` then produce dense count matrices over that
    vocabulary. Written for Python 2 (uses ``xrange``).
    """
    def __init__(self):
        # Guards feature extraction until fit() has built the vocabulary.
        self.fit_done = False
    def fit(self, input_text, input_scores, max_features=100, min_features=3):
        """Learn a discriminative ngram vocabulary from *input_text*.

        *input_scores* are integer labels per document; *max_features* caps
        the overall vocabulary size, split across score classes.
        """
        self.spell_corrector = SpellCorrector()
        self.stemmer = PorterStemmer()
        # Append a normalized/stemmed copy of each document to the original.
        new_text = self.batch_generate_new_text(input_text)
        input_text = [input_text[i] + new_text[i] for i in xrange(0,len(input_text))]
        self.vectorizer1 = CountVectorizer(ngram_range=(1,2), min_df = min_features/len(input_text), max_df=.4, stop_words="english")
        self.vectorizer1.fit(input_text)
        self.vocab = self.get_vocab(input_text, input_scores, max_features)
        # Final vectorizer restricted to the selected vocabulary.
        self.vectorizer = CountVectorizer(ngram_range=(1,2), vocabulary=self.vocab)
        self.fit_done = True
        self.input_text = input_text
    def spell_correct_text(self, text):
        """Lowercase *text* and spell-correct each whitespace-separated token."""
        text = text.lower()
        split = text.split(" ")
        corrected = [self.spell_corrector.correct(w) for w in split]
        return corrected
    def batch_apply(self, all_tokens, applied_func):
        # Apply *applied_func* to every value of the token->token map in place.
        for key in all_tokens:
            cor = applied_func(all_tokens[key])
            all_tokens[key] = cor
        return all_tokens
    def batch_generate_new_text(self, text):
        """Return stemmed, punctuation-stripped copies of every document."""
        text = [re.sub("[^A-Za-z0-9]", " ", t.lower()) for t in text]
        text = [re.sub("\s+", " ", t) for t in text]
        t_tokens = [t.split(" ") for t in text]
        # Stem each distinct token once, then map results back per document.
        all_token_list = list(set(chain.from_iterable(t_tokens)))
        all_token_dict = {}
        for t in all_token_list:
            all_token_dict.update({t : t})
        # NOTE(review): the stemmer is applied twice here — presumably
        # deliberate double-stemming, but worth confirming.
        all_token_dict = self.batch_apply(all_token_dict, self.stemmer.stem)
        all_token_dict = self.batch_apply(all_token_dict, self.stemmer.stem)
        for i in xrange(0,len(t_tokens)):
            for j in xrange(0,len(t_tokens[i])):
                t_tokens[i][j] = all_token_dict.get(t_tokens[i][j], t_tokens[i][j])
        new_text = [" ".join(t) for t in t_tokens]
        return new_text
    def generate_new_text(self, text):
        """Single-document variant of batch_generate_new_text, with spell correction."""
        no_punctuation = re.sub("[^A-Za-z0-9]", " ", text.lower())
        no_punctuation = re.sub("\s+", " ", no_punctuation)
        corrected = self.spell_correct_text(no_punctuation)
        corrected = [self.stemmer.stem(w) for w in corrected]
        new = " ".join(corrected)
        return new
    def get_vocab(self, input_text, input_scores, max_features):
        """Select the ngrams whose presence best separates each score class.

        For every score value, columns of the count matrix are ranked by the
        two-tailed Fisher exact test p-value of present/absent counts
        in-class vs. out-of-class, and the top columns are kept.
        """
        train_mat = self.vectorizer1.transform(input_text)
        input_score_med = np.median(input_scores)
        new_scores = [0 if i<=input_score_med else 1 for i in input_scores]  # NOTE(review): computed but never used
        # Per-class share of the feature budget.
        ind_max_features = math.floor(max_features/max(input_scores))
        all_vocab = []
        all_cols = [np.asarray(train_mat.getcol(i).todense().transpose())[0] for i in xrange(0,train_mat.shape[1])]
        for s in xrange(0,max(input_scores)):
            sel_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]==s]
            out_inds = [i for i in xrange(0,len(input_scores)) if input_scores[i]!=s]
            pvalues = []
            for i in xrange(0,len(all_cols)):
                lcol = all_cols[i]
                good_lcol = lcol[sel_inds]
                bad_lcol = lcol[out_inds]
                # 2x2 contingency counts: feature present/missing, in/out of class.
                good_lcol_present = len(good_lcol[good_lcol > 0])
                good_lcol_missing = len(good_lcol[good_lcol == 0])
                bad_lcol_present = len(bad_lcol[bad_lcol > 0])
                bad_lcol_missing = len(bad_lcol[bad_lcol == 0])
                pval = pvalue(good_lcol_present, bad_lcol_present, good_lcol_missing, bad_lcol_missing)
                pvalues.append(pval.two_tail)
            col_inds = list(xrange(0,train_mat.shape[1]))
            p_frame = pd.DataFrame(np.array([col_inds, pvalues]).transpose(), columns=["inds", "pvalues"])
            # Most discriminative (smallest p-value) first.
            # NOTE(review): DataFrame.sort is the old pandas API (now sort_values).
            p_frame = p_frame.sort(['pvalues'], ascending=True)
            getVar = lambda searchList, ind: [searchList[int(i)] for i in ind]
            vocab = getVar(self.vectorizer1.get_feature_names(), p_frame['inds'][:ind_max_features+2])
            all_vocab.append(vocab)
        # De-duplicate across classes.
        return list(set(list(chain.from_iterable(all_vocab))))
    def batch_get_features(self, text):
        """Vectorize a list of documents into a dense count matrix."""
        if not self.fit_done:
            raise Exception("Vectorizer has not been created.")
        new_text = self.batch_generate_new_text(text)
        text = [text[i] + new_text[i] for i in xrange(0,len(text))]
        return (self.vectorizer.transform(text).todense())
    def get_features(self, text):
        """Vectorize a single document (or one-element list) into a dense matrix."""
        if not self.fit_done:
            raise Exception("Vectorizer has not been created.")
        itext=text
        if isinstance(text, list):
            itext = text[0]
        new_text = self.generate_new_text(itext)
        if isinstance(text, list):
            text = [text[0] + new_text]
        else:
            text = [text + new_text]
        return (self.vectorizer.transform(text).todense())
class FeatureExtractor(Task):
    """Turn parsed Simpsons scripts into a per-line training frame.

    For each voice line, builds bag-of-ngram features for the previous,
    current and next lines plus speaker-history metadata, fitting the shared
    Vectorizer along the way.
    """
    data = Complex()
    row_data = List()
    speaker_code_dict = Dict()
    speaker_codes = List()
    vectorizer = Complex()
    data_format = SimpsonsFormats.dataframe
    category = RegistryCategories.preprocessors
    namespace = get_namespace(__module__)
    help_text = "Cleanup simpsons scripts."
    args = {'scriptfile' : os.path.abspath(os.path.join(settings.DATA_PATH, "script_tasks"))}
    def train(self, data, target, **kwargs):
        """
        Used in the training phase. Override.
        """
        self.data = self.predict(data, **kwargs)
    def predict(self, data, **kwargs):
        """
        Used in the predict phase, after training. Override
        """
        scriptfile = kwargs.get('scriptfile')
        script_data = pickle.load(open(scriptfile))
        script = script_data.tasks[2].voice_lines.value
        speakers = []
        lines = []
        for s in script:
            for (i,l) in enumerate(s):
                # Context from the surrounding lines; empty strings at edges.
                if i>0:
                    previous_line = s[i-1]['line']
                    previous_speaker = s[i-1]['speaker']
                else:
                    previous_line = ""
                    previous_speaker = ""
                if i>1:
                    two_back_speaker = s[i-2]['speaker']
                else:
                    two_back_speaker = ""
                if len(s)>i+1:
                    next_line = s[i+1]['line']
                else:
                    next_line = ""
                current_line = s[i]['line']
                current_speaker = s[i]['speaker']
                lines.append(current_line)
                speakers.append(current_speaker)
                row_data = {
                    'previous_line' : previous_line,
                    'previous_speaker' : previous_speaker,
                    'next_line' : next_line,
                    'current_line' : current_line,
                    'current_speaker' : current_speaker,
                    'two_back_speaker' : two_back_speaker
                }
                self.row_data.append(row_data)
        # Integer code per distinct speaker name.
        self.speaker_code_dict = {k:i for (i,k) in enumerate(list(set(speakers)))}
        self.speaker_codes = [self.speaker_code_dict[s] for s in speakers]
        # Budget split three ways: previous / current / next line features.
        self.max_features = math.floor(MAX_FEATURES)/3
        self.vectorizer = Vectorizer()
        self.vectorizer.fit(lines, self.speaker_codes, self.max_features)
        prev_features = self.vectorizer.batch_get_features([rd['previous_line'] for rd in self.row_data])
        cur_features = self.vectorizer.batch_get_features([rd['current_line'] for rd in self.row_data])
        next_features = self.vectorizer.batch_get_features([rd['next_line'] for rd in self.row_data])
        # Sentinel code for "no speaker" (scene edges).
        self.speaker_code_dict.update({'' : -1})
        meta_features = make_df([[self.speaker_code_dict[s['two_back_speaker']] for s in self.row_data], [self.speaker_code_dict[s['previous_speaker']] for s in self.row_data], self.speaker_codes],["two_back_speaker", "previous_speaker", "current_speaker"])
        #meta_features = make_df([[self.speaker_code_dict[s['two_back_speaker']] for s in self.row_data], self.speaker_codes],["two_back_speaker", "current_speaker"])
        # Last column ("current_speaker") is the prediction target.
        train_frame = pd.concat([pd.DataFrame(prev_features),pd.DataFrame(cur_features),pd.DataFrame(next_features),meta_features],axis=1)
        train_frame.index = range(train_frame.shape[0])
        data = {
            'vectorizer' : self.vectorizer,
            'speaker_code_dict' : self.speaker_code_dict,
            'train_frame' : train_frame,
            'speakers' : make_df([speakers,self.speaker_codes, lines], ["speaker", "speaker_code", "line"]),
            'data' : data,
            'current_features' : cur_features,
        }
        return data
class RandomForestTrain(Train):
    """
    A class to train a random forest
    """
    colnames = List()
    clf = Complex()
    category = RegistryCategories.algorithms
    namespace = get_namespace(__module__)
    algorithm = RandomForestClassifier
    # Hyperparameters forwarded to RandomForestClassifier by the Train base.
    args = {'n_estimators' : 300, 'min_samples_leaf' : 4, 'compute_importances' : True}
    help_text = "Train and predict with Random Forest."
class KNNRF(Task):
    """Predict the speaker of each line in unlabeled scripts.

    Combines a random-forest classifier over line/context features with two
    nearest-neighbour overrides (per-character corpus similarity and
    per-line similarity) plus explicit character-name mentions.
    """
    data = Complex()
    predictions = Complex()
    importances = Complex()
    data_format = SimpsonsFormats.dataframe
    category = RegistryCategories.preprocessors
    namespace = get_namespace(__module__)
    args = {'algo' : RandomForestTrain}
    help_text = "Cleanup simpsons scripts."
    def train(self, data, target, **kwargs):
        """
        Used in the training phase. Override.
        """
        self.data = self.predict(data, **kwargs)
    def predict(self, data, **kwargs):
        """
        Used in the predict phase, after training. Override
        """
        from preprocess import CHARACTERS
        vec_length = math.floor(MAX_FEATURES/3)
        algo = kwargs.get('algo')
        alg = algo()
        # Last column of the training frame is the target speaker code.
        train_data = data['train_frame'].iloc[:,:-1]
        target = data['train_frame']['current_speaker']
        clf = alg.train(train_data,target, **algo.args)
        self.importances=clf.feature_importances_
        test_data = data['data']
        match_data = data['current_features']
        reverse_speaker_code_dict = {data['speaker_code_dict'][k] : k for k in data['speaker_code_dict']}
        # One concatenated document of all known lines per speaker, vectorized
        # for the per-character nearest-neighbour override below.
        speaker_list = []
        speaker_codes = reverse_speaker_code_dict.keys()
        for i in xrange(0,len(speaker_codes)):
            s_text = "\n".join(list(data['speakers'][data['speakers']['speaker']==reverse_speaker_code_dict[speaker_codes[i]]]['line']))
            speaker_list.append(s_text)
        speaker_features = data['vectorizer'].batch_get_features(speaker_list)
        self.predictions = []
        counter = 0
        for script in test_data['voice_script']:
            counter+=1
            log.info("On script {0} out of {1}".format(counter,len(test_data['voice_script'])))
            lines = script.split("\n")
            speaker_code = [-1 for i in xrange(0,len(lines))]
            for (i,line) in enumerate(lines):
                # Context resets every RESET_SCENE_EVERY lines — a crude
                # scene-boundary heuristic.
                if i>0 and i%RESET_SCENE_EVERY!=0:
                    previous_line = lines[i-1]
                    previous_speaker = speaker_code[i-1]
                else:
                    previous_line = ""
                    previous_speaker= -1
                if i>1 and i%RESET_SCENE_EVERY!=0:
                    two_back_speaker = speaker_code[i-2]
                else:
                    two_back_speaker = -1
                if i<(len(lines)-1):
                    next_line = lines[i+1]
                else:
                    next_line = ""
                prev_features = data['vectorizer'].get_features(previous_line)
                cur_features = data['vectorizer'].get_features(line)
                next_features = data['vectorizer'].get_features(next_line)
                meta_features = make_df([[two_back_speaker], [previous_speaker]],["two_back_speaker", "previous_speaker"])
                #meta_features = make_df([[two_back_speaker]],["two_back_speaker"])
                train_frame = pd.concat([pd.DataFrame(prev_features),pd.DataFrame(cur_features),pd.DataFrame(next_features), meta_features],axis=1)
                # Baseline prediction from the random forest.
                speaker_code[i] = alg.predict(train_frame)[0]
                # Override 1: line very close to a speaker's whole corpus.
                nearest_match, distance = self.find_nearest_match(cur_features, speaker_features)
                if distance<CHARACTER_DISTANCE_MIN:
                    sc = speaker_codes[nearest_match]
                    speaker_code[i] = sc
                    continue
                # Override 2: previous line mentions a character by name.
                for k in CHARACTERS:
                    for c in CHARACTERS[k]:
                        if c in previous_line:
                            speaker_code[i] = data['speaker_code_dict'][k]
                # Override 3: line nearly identical to a known training line.
                nearest_match, distance = self.find_nearest_match(cur_features,match_data)
                if distance<DISTANCE_MIN:
                    sc = data['speakers']['speaker_code'][nearest_match]
                    speaker_code[i] = sc
                    continue
            df = make_df([lines,speaker_code,[reverse_speaker_code_dict[round(s)] for s in speaker_code]],["line","speaker_code","speaker"])
            self.predictions.append(df)
        return data
    def find_nearest_match(self, features, matrix):
        """Return (row index, distance) of the row of *matrix* closest to *features*."""
        features = np.asarray(features)
        distances = [self.euclidean(u, features) for u in matrix]
        nearest_match = distances.index(min(distances))
        return nearest_match, min(distances)
    def euclidean(self, v1, v2):
        """Euclidean distance between two vectors."""
        return np.sqrt(np.sum(np.square(np.subtract(v1,v2))))
"""
p = tasks[3].predictions.value
speakers = []
lines = []
for pr in p:
speakers.append(list(pr['speaker']))
lines.append(list(pr['line']))
from itertools import chain
speakers = list(chain.from_iterable(speakers))
lines = list(chain.from_iterable(lines))
rows = []
for (s,l) in zip(speakers, lines):
rows.append({
'speaker' : s,
'line': l,
})
import json
json.dump(rows,open("/home/vik/vikparuchuri/simpsons-scripts/data/final_voice.json","w+"))
""" | apache-2.0 |
nrhine1/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimated correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
# Draw samples from a zero-mean Gaussian whose precision matrix is sparse.
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
                              smallest_coef=.4,
                              largest_coef=.7,
                              random_state=prng)
cov = linalg.inv(prec)
# Rescale the covariance to a correlation matrix (unit diagonal) and adjust
# the ground-truth precision accordingly.
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
# Standardize the time series.
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
m4734/mysql_pio | boost_1_59_0/libs/numeric/odeint/performance/plot_result.py | 43 | 2225 | """
Copyright 2011-2014 Mario Mulansky
Copyright 2011-2014 Karsten Ahnert
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or
copy at http://www.boost.org/LICENSE_1_0.txt)
"""
import numpy as np
from matplotlib import pyplot as plt
plt.rc("font", size=16)
def get_runtime_from_file(filename):
    """Parse a perf report and return its 'Minimal Runtime' value (seconds).

    Returns None when the file contains no 'Minimal Runtime:' line.
    """
    # Fix: use a context manager so the handle is always closed (the original
    # left the file open until garbage collection).
    with open(filename, 'r') as perf_file:
        for line in perf_file:
            if "Minimal Runtime:" in line:
                return float(line.split(":")[-1])
    # Make the no-match fall-through explicit.
    return None
# Minimal runtimes per machine (workbook, ariel, lyra) for each toolchain.
t_gcc = [get_runtime_from_file("perf_workbook/odeint_rk4_array_gcc.perf"),
         get_runtime_from_file("perf_ariel/odeint_rk4_array_gcc.perf"),
         get_runtime_from_file("perf_lyra/odeint_rk4_array_gcc.perf")]
t_intel = [get_runtime_from_file("perf_workbook/odeint_rk4_array_intel.perf"),
           get_runtime_from_file("perf_ariel/odeint_rk4_array_intel.perf"),
           get_runtime_from_file("perf_lyra/odeint_rk4_array_intel.perf")]
t_gfort = [get_runtime_from_file("perf_workbook/rk4_gfort.perf"),
           get_runtime_from_file("perf_ariel/rk4_gfort.perf"),
           get_runtime_from_file("perf_lyra/rk4_gfort.perf")]
t_c_intel = [get_runtime_from_file("perf_workbook/rk4_c_intel.perf"),
             get_runtime_from_file("perf_ariel/rk4_c_intel.perf"),
             get_runtime_from_file("perf_lyra/rk4_c_intel.perf")]
# NOTE: Python 2 print statement — this script targets Python 2.
print t_c_intel
ind = np.arange(3)  # the x locations for the groups
width = 0.15  # the width of the bars
# Grouped bar chart: one group per machine, one bar per toolchain.
fig = plt.figure()
ax = fig.add_subplot(111)
rects1 = ax.bar(ind, t_gcc, width, color='b', label="odeint gcc")
rects2 = ax.bar(ind+width, t_intel, width, color='g', label="odeint intel")
rects3 = ax.bar(ind+2*width, t_c_intel, width, color='y', label="C intel")
rects4 = ax.bar(ind+3*width, t_gfort, width, color='c', label="gfort")
ax.axis([-width, 2.0+5*width, 0.0, 0.85])
ax.set_ylabel('Runtime (s)')
ax.set_title('Performance for integrating the Lorenz system')
ax.set_xticks(ind + 1.5*width)
ax.set_xticklabels(('Core i5-3210M\n3.1 GHz',
                    'Xeon E5-2690\n3.8 GHz',
                    'Opteron 8431\n 2.4 GHz'))
ax.legend(loc='upper left', prop={'size': 16})
plt.savefig("perf.pdf")
plt.savefig("perf.png", dpi=50)
plt.show()
| gpl-2.0 |
hadim/spindle_tracker | spindle_tracker/tracking/begin_mitosis_tracker.py | 1 | 7324 | import gc
import logging
import numpy as np
import pandas as pd
import scipy
from skimage import measure
from ..trajectories import Trajectories
from ..tracker.solver import ByFrameSolver
from ..io import TiffFile
from ..tracking import Tracker
log = logging.getLogger(__name__)
class BeginMitosisTracker(Tracker):
    """Track spindle poles around mitosis onset and visualize the GFP signal
    along the pole-to-pole axis over time.
    """

    # Annotation name -> (default value, allowed values, type).
    # NOTE(review): tuple semantics inferred from Tracker conventions — confirm.
    ANNOTATIONS = {'start_mitosis': (-1, None, float),
                   'state': (0, [0, 1, 2], None)}

    def __init__(self, *args, **kwargs):
        """Initialize the tracker; coerce a previously-saved ``line_size``
        back into a pandas Series."""
        super().__init__(*args, **kwargs)
        if hasattr(self, 'line_size'):
            self.line_size = pd.Series(self.line_size)

    def track_poles(self, force=False):
        """Detect and track the two spindle poles, then project their
        trajectories onto the pole-pole axis. Results persist as 'poles'."""
        if force or not hasattr(self, 'poles'):
            poles = self.get_peaks_from_trackmate()
            # Keep only timepoints where exactly two peaks (the poles) exist.
            poles = poles.groupby(level='t_stamp').filter(lambda x: len(x) == 2)
            poles = Trajectories(poles)
            solver = ByFrameSolver.for_brownian_motion(poles,
                                                       max_speed=1e10, coords=['x', 'y'])
            poles = solver.track(progress_bar=True)
            poles = poles.project([0, 1], keep_first_time=False,
                                  reference=None, inplace=False, progress=True)
            self.save(poles, 'poles')

    def get_line_profiles(self, lw=0.7, force=False):
        """Extract the GFP intensity profile along the pole-to-pole line for
        every timepoint; persists 'line_profiles' and 'line_size'.

        *lw* is the profile line width in physical units (divided by
        ``PhysicalSizeX`` to get pixels — presumably µm; confirm).
        """
        if force or not hasattr(self, 'line_profiles'):
            # Load the full stack and release the TIFF handle immediately.
            tf = self.get_tif()
            im = tf.asarray()
            tf.close()
            md = self.metadata
            # Maximum-intensity projection along Z.
            id_z = md['DimensionOrder'].index('Z')
            im = im.max(axis=id_z)
            # The channel axis is taken to be the axis of length 2.
            id_c = im.shape.index(2)
            # Fix: catch only the exceptions a missing/unknown channel raises
            # (bare ``except:`` also swallowed KeyboardInterrupt etc.).
            try:
                id_ndc80 = md['Channels'].index('GFP')
            except (KeyError, ValueError):
                id_ndc80 = 0
            gfp_im = im.take(id_ndc80, axis=id_c)
            gfp_im = gfp_im / np.median(gfp_im)
            del im
            gc.collect()
            # Normalize intensities to [0, 1].
            gfp_im = (gfp_im - gfp_im.min()) / (gfp_im.max() - gfp_im.min())
            lw_pixel = lw / md['PhysicalSizeX']
            line_profiles = {}
            line_size = {}
            for t_stamp, p in self.poles.groupby(level='t_stamp'):
                a = gfp_im[t_stamp]
                # Convert pole coordinates from physical units to pixels.
                scaled_p = p.copy()
                scaled_p.loc[:, ['x', 'y', 'w']] /= md['PhysicalSizeX']
                p1 = scaled_p.iloc[0][['y', 'x']]
                p2 = scaled_p.iloc[1][['y', 'x']]
                lp = measure.profile_line(a, p1, p2, linewidth=lw_pixel)
                line_profiles[t_stamp] = lp
                # Pole-to-pole distance, converted back to physical units.
                line_size[t_stamp] = scipy.spatial.distance.cdist(np.atleast_2d(p1.values), np.atleast_2d(p2.values))[0, 0]
                line_size[t_stamp] *= self.metadata['PhysicalSizeX']
            del gfp_im
            del tf
            gc.collect()
            line_profiles = pd.DataFrame.from_dict(line_profiles, orient='index')
            line_size = pd.Series(line_size)
            self.save(line_profiles, 'line_profiles')
            self.save(line_size, 'line_size')

    def get_figure(self, figsize=(13, 8)):
        """Plot both pole trajectories over time, with the inter-pole segment
        colored by the normalized GFP line profile at each timepoint."""
        import matplotlib.pyplot as plt
        from matplotlib.collections import LineCollection
        fig, ax = plt.subplots(figsize=figsize)
        pole_1 = self.poles.loc[pd.IndexSlice[:, 0], ]
        pole_2 = self.poles.loc[pd.IndexSlice[:, 1], ]
        ax.plot(pole_1['t'], pole_1['x_proj'], c='black', marker='o')
        ax.plot(pole_2['t'], pole_2['x_proj'], c='black', marker='o')
        precision = 1000
        linewidth = 6
        alpha = 1
        norm = plt.Normalize(0.0, 1.0)
        cmap = plt.get_cmap('Reds')
        #cmap.set_gamma(2)
        for t_stamp, p in self.poles.groupby(level='t_stamp'):
            lp = self.line_profiles.loc[t_stamp]
            p1 = p.iloc[0][['x_proj']].values[0]
            p2 = p.iloc[1][['x_proj']].values[0]
            x = np.repeat(p['t'].unique()[0], precision)
            y = np.linspace(p1, p2, num=precision)
            # Normalize the profile and resample it to `precision` points.
            lp = lp.dropna().values
            lp = (lp - lp.min()) / (lp.max() - lp.min())
            x_lp = np.arange(0, len(lp))
            new_x_lp = np.linspace(0, len(lp) - 1, precision)
            z = np.interp(new_x_lp, x_lp, lp)
            # Build colored segments along the inter-pole line.
            points = np.array([x, y]).T.reshape(-1, 1, 2)
            segments = np.concatenate([points[:-1], points[1:]], axis=1)
            lc = LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha)
            ax.add_collection(lc)
        return fig

    def get_figure_publi(self, figsize, tmin, tmax, shift_time=0):
        """Publication-style version of :meth:`get_figure`, restricted to
        ``tmin < t < tmax`` and re-zeroed on the annotated mitosis start
        (time shown in minutes, optionally shifted by *shift_time*)."""
        import matplotlib
        import matplotlib.pyplot as plt
        from matplotlib.collections import LineCollection
        fig, ax = plt.subplots(figsize=figsize)
        poles = self.poles[self.poles['t'] < tmax]
        # Fix: build the boolean mask from the already-filtered frame. The old
        # code indexed ``poles`` with a mask built from ``self.poles``, which
        # relies on index alignment and can misalign or raise.
        poles = poles[poles['t'] > tmin]
        # Time relative to the annotated start of mitosis, in minutes.
        poles['t'] = poles['t'] - self.poles.loc[self.annotations['start_mitosis'], 't'].iloc[0]
        poles['t'] = poles['t'] / 60
        poles['t'] -= shift_time
        poles = poles[poles['t'] > 0]
        poles = poles[poles['t'] < 7]
        pole_1 = poles.loc[pd.IndexSlice[:, 0], ]
        pole_2 = poles.loc[pd.IndexSlice[:, 1], ]
        times = pole_1['t'].values
        ax.plot(times, pole_1['x_proj'], color='#000000', marker='o')
        ax.plot(times, pole_2['x_proj'], color='#000000', marker='o')
        precision = 1000
        linewidth = 6
        alpha = 1
        norm = plt.Normalize(0.0, 1.0)
        cmap = plt.get_cmap('Reds')
        for t_stamp, p in poles.groupby(level='t_stamp'):
            lp = self.line_profiles.loc[t_stamp]
            p1 = p.iloc[0][['x_proj']].values[0]
            p2 = p.iloc[1][['x_proj']].values[0]
            x = np.repeat(p['t'].unique()[0], precision)
            y = np.linspace(p1, p2, num=precision)
            # Normalize the profile and resample it to `precision` points.
            lp = lp.dropna().values
            lp = (lp - lp.min()) / (lp.max() - lp.min())
            x_lp = np.arange(0, len(lp))
            new_x_lp = np.linspace(0, len(lp) - 1, precision)
            z = np.interp(new_x_lp, x_lp, lp)
            points = np.array([x, y]).T.reshape(-1, 1, 2)
            segments = np.concatenate([points[:-1], points[1:]], axis=1)
            lc = LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha)
            ax.add_collection(lc)
        # Publication styling: hide tick labels, thicken spines, add a grid.
        ax.set_xticks(np.arange(times[0], times[-1], 2))
        ax.set_xlim(times[0], times[-1])
        ax.set_yticks(np.arange(-2, 2, 1))
        ax.set_ylim(-1.5, 1.5)
        nullform = matplotlib.ticker.FuncFormatter(lambda x, y: "")
        ax.xaxis.set_major_formatter(nullform)
        ax.yaxis.set_major_formatter(nullform)
        ax.xaxis.set_ticks_position('none')
        ax.yaxis.set_ticks_position('none')
        for i in ax.spines.values():
            i.set_linewidth(4)
            i.set_color('black')
        ax.grid(b=True, which='major', color='#000000', linestyle='-', alpha=0.2, lw=2)
        plt.tight_layout()
        return fig
| bsd-3-clause |
datapythonista/pandas | pandas/tests/indexes/timedeltas/test_searchsorted.py | 4 | 1040 | import numpy as np
import pytest
from pandas import (
Series,
TimedeltaIndex,
Timestamp,
array,
)
import pandas._testing as tm
class TestSearchSorted:
    """searchsorted behaviour of TimedeltaIndex."""

    @pytest.mark.parametrize("klass", [list, np.array, array, Series])
    def test_searchsorted_different_argument_classes(self, klass):
        # The index and its underlying array must give identical results
        # no matter which container type wraps the searched values.
        tdi = TimedeltaIndex(["1 day", "2 days", "3 days"])
        result = tdi.searchsorted(klass(tdi))
        expected = np.arange(len(tdi), dtype=result.dtype)
        tm.assert_numpy_array_equal(result, expected)

        result = tdi._data.searchsorted(klass(tdi))
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize(
        "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2]
    )
    def test_searchsorted_invalid_argument_dtype(self, arg):
        # Anything that is not timedelta-like must raise a TypeError.
        tdi = TimedeltaIndex(["1 day", "2 days", "3 days"])
        msg = "value should be a 'Timedelta', 'NaT', or array of those. Got"
        with pytest.raises(TypeError, match=msg):
            tdi.searchsorted(arg)
| bsd-3-clause |
binghongcha08/pyQMD | GWP/2D/1.0.2/plt.py | 14 | 1041 | ##!/usr/bin/python
import numpy as np
import pylab as plt
import seaborn as sns
sns.set_context('poster')

#with open("traj.dat") as f:
# data = f.read()
#
# data = data.split('\n')
#
# x = [row.split(' ')[0] for row in data]
# y = [row.split(' ')[1] for row in data]
#
# fig = plt.figure()
#
# ax1 = fig.add_subplot(111)
#
# ax1.set_title("Plot title...")
# ax1.set_xlabel('your x label..')
# ax1.set_ylabel('your y label...')
#
# ax1.plot(x,y, c='r', label='the data')
#
# leg = ax1.legend()

#fig = plt.figure()

# Top panel: trajectories from q.dat. Column 0 is time; every remaining
# column is plotted as one trajectory line.
plt.subplot(2,1,1)
data = np.genfromtxt(fname='q.dat')
#data = np.loadtxt('traj.dat')
for x in range(1,data.shape[-1]):
    plt.plot(data[:,0],data[:,x])

#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
#plt.xlabel('time')
plt.ylabel('$x_i$')
plt.title('traj')

# Bottom panel: same layout for the coefficient data in c.dat.
plt.subplot(2,1,2)
data = np.genfromtxt(fname='c.dat')
#data = np.loadtxt('traj.dat')
for x in range(1,data.shape[-1]):
    plt.plot(data[:,0],data[:,x])

plt.xlabel('$time$')
# Save the two-panel figure, then display it interactively.
plt.savefig('traj.pdf')
plt.show()
| gpl-3.0 |
MikeDMorgan/gwas_pipeline | scripts/snpPriority.py | 1 | 11661 | '''
snpPriority.py - score SNPs based on their LD score and SE weighted effect sizes
===============================================================================
:Author: Mike Morgan
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. Score SNPs based on their LD score and SE weighted effect sizes from
association analysis.
Usage
-----
.. Example use case
Example::
python snpPriority.py
Type::
python snpPriority.py --help
for command line help.
Command line options
--------------------
'''
import sys
import CGAT.Experiment as E
import PipelineGWAS as gwas
import re
import pandas as pd
import CGAT.IOTools as IOTools
def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.

    Dispatches to one of several SNP scoring/prioritisation methods
    (PICS, LDscore, ABF, R2_rank, get_eigen, credible_set, summarise)
    and writes the resulting scores as a tab-separated table to stdout.
    """

    if argv is None:
        argv = sys.argv

    # setup command line parser
    parser = E.OptionParser(version="%prog version: $Id$",
                            usage=globals()["__doc__"])

    parser.add_option("--score-method", dest="method", type="choice",
                      choices=["PICS", "LDscore", "ABF", "R2_rank",
                               "get_eigen", "calc_prior", "credible_set",
                               "summarise"],
                      help="SNP scoring/prioritisation method to apply.")

    parser.add_option("--database", dest="database", type="string",
                      help="SQL database containing LD information "
                      "in table format. Expects columns SNP_A, "
                      "SNP_B, R2, BP_A and BP_B (Plink --r2 output)")

    parser.add_option("--ld-directory", dest="ld_dir", type="string",
                      help="directory containing tabix-index BGZIP "
                      "LD files. Assumes Plink used to calculate LD")

    parser.add_option("--table-name", dest="table", type="string",
                      help="name of the SQL table containing the LD"
                      "values")

    parser.add_option("--chromosome", dest="chromosome", type="string",
                      help="chromosome to subset the association results "
                      "file on")

    parser.add_option("--ld-threshold", dest="ld_threshold", type="float",
                      help="the threshold of LD above which variants will "
                      "be taken forward.")

    parser.add_option("--rank-threshold", dest="rank_threshold", type="float",
                      help="the threshold in terms of the top n% SNPs to "
                      "output based on the ranking metric. e.g. "
                      "--rank-threshold=0.01 is the top 1% SNPs")

    parser.add_option("--credible-interval", dest="interval", type="float",
                      help="The credible set interval size to generate the "
                      "credible set of SNPs")

    parser.add_option("--prior-variance", dest="prior_var", type="float",
                      help="the prior variance used to weight the SNP "
                      "variance")

    parser.add_option("--fine-map-window", dest="map_window", type="int",
                      help="the region size to included around the index "
                      "SNP as the fine-mapping region.")

    parser.add_option("--eigen-score-directory", dest="eigen_dir", type="string",
                      help="PATH to directory containing tabix indexed "
                      "eigen score files")

    parser.add_option("--flat-prior", dest="flat_prior", action="store_true",
                      help="Ignore functional annotation information and "
                      "use an uninformative prior on each SNP")

    parser.add_option("--snp-set", dest="snp_set", type="string",
                      help="Pre-defined SNP set as a list of SNP IDs."
                      "If used to calculate priors contains column of scores.")

    parser.add_option("--distribution", dest="dist", type="choice",
                      choices=["normal", "t", "gamma", "lognormal",
                               "exponential"],
                      help="distribution from which to draw prior "
                      "probabilities")

    parser.add_option("--distribution-parameters", dest="dist_params", type="string",
                      help="distribution parameters as a comma-separated list")

    parser.add_option("--lead-snp-id", dest="lead_snp", type="int",
                      help="0-based item number in filename")

    parser.add_option("--filename-separator", dest="separator", type="string",
                      help="filename separator to extract information")

    parser.add_option("--snp-column", dest="snp_col", type="int",
                      help="0-based index of SNP ID column number")

    parser.add_option("--probability-column", dest="prob_col", type="int",
                      help="0-based index of posterior probabilities column"
                      " number")

    parser.set_defaults(ld_dir=None,
                        dist="normal",
                        dist_params=None,
                        snp_set=None,
                        prior_var=0.04,
                        interval=0.99,
                        eigen_dir=None,
                        map_window=100000,
                        ld_threshold=0.5,
                        database=None,
                        table=None,
                        flat_prior=False,
                        lead_snp=2,
                        separator="_",
                        snp_col=0,
                        prob_col=1,
                        )

    # add common options (-h/--help, ...) and parse command line
    (options, args) = E.Start(parser, argv=argv)

    # the final positional argument is the input file (or a
    # comma-separated list of files for the "summarise" method)
    infile = argv[-1]

    if len(infile.split(",")) > 1:
        # multiple input files: no per-file sniffing required
        pass
    else:
        # Peek at the first rows to decide whether the file still carries
        # raw Plink association output (TEST column) that needs cleaning.
        peek = pd.read_table(infile, nrows=5, sep="\s*", header=0)
        try:
            # NOTE(review): len() of the boolean series is always the row
            # count (> 0), so clean is False whenever a TEST column exists.
            # The intent may have been (peek["TEST"] != "ADD").any() -
            # confirm against the downstream gwas parsers before changing.
            if len(peek["TEST"] != "ADD"):
                clean = False
            else:
                clean = True
        except KeyError:
            # no TEST column: file is already cleaned
            clean = True

    if options.method == "LDscore":
        snpscores = gwas.snpPriorityScore(gwas_results=infile,
                                          database=options.database,
                                          table_name=options.table,
                                          chromosome=options.chromosome,
                                          ld_dir=options.ld_dir,
                                          clean=clean)
        # take top 1%, all SNPs doesn't achieve anything useful
        ranks = int(len(snpscores.index) * 0.01)
        snpscores = snpscores.iloc[:ranks]

    elif options.method == "PICS":
        snp_list = {}
        if options.snp_set and not options.flat_prior:
            # read SNP IDs with a trailing score column used to fit an
            # informative prior distribution
            with IOTools.openFile(options.snp_set, "r") as sfile:
                for line in sfile.readlines():
                    snp = line.split("\t")[0]
                    try:
                        score = float(line.split("\t")[-1].rstrip("\n"))
                    except ValueError:
                        score = 0
                    snp_list[snp] = float(score)

            # get the parameter estimates for the distribution
            # if they have not been provided
            if not options.dist_params:
                dist_params = gwas.estimateDistributionParameters(data=snp_list.values(),
                                                                  distribution=options.dist)
            else:
                dist_params = tuple([float(fx) for fx in options.dist_params.split(",")])

            E.info("Calculating priors on SNPs")
            priors = gwas.calcPriorsOnSnps(snp_list=snp_list,
                                           distribution=options.dist,
                                           params=dist_params)
        elif options.snp_set and options.flat_prior:
            # uninformative prior: every SNP in the set gets weight 1.0
            with IOTools.openFile(options.snp_set, "r") as sfile:
                for line in sfile.readlines():
                    snp = line.split("\t")[0]
                    snp_list[snp] = 1.0

            priors = snp_list
        else:
            # allow for no priors or scores to be set,
            # use of priors will be ignored,
            # i.e. when prior and likelihood are not from
            # conjugate distributions
            priors = None

        # PICS scores expects the gwas results file to
        # only contain the region of interest, which
        # represents an independent association signal
        snpscores = gwas.PICSscore(gwas_results=infile,
                                   database=options.database,
                                   table_name=options.table,
                                   chromosome=options.chromosome,
                                   priors=priors,
                                   clean=clean,
                                   ld_dir=options.ld_dir,
                                   ld_threshold=options.ld_threshold)

        snpscores.columns = ["SNP", "PICS"]
        posterior_sum = 0
        # NOTE(review): sort_values without by= assumes snpscores behaves
        # like a Series here despite the columns assignment above - verify
        # against gwas.PICSscore's return type before refactoring.
        snpscores.sort_values(ascending=False,
                              inplace=True)
        post_snps = []
        # accumulate top-ranked SNPs until 99% of the posterior mass is
        # captured; the remainder are discarded
        for snp in snpscores.index:
            if posterior_sum < 99.0:
                posterior_sum += snpscores.loc[snp]
                post_snps.append(snp)
            else:
                break

        snpscores = snpscores.loc[post_snps]

        snpscores.drop_duplicates(inplace=True)

    elif options.method == "R2_rank":
        # rank SNPs based on their LD with the lead
        # SNP, take the top n% SNPs
        snpscores = gwas.LdRank(gwas_results=infile,
                                database=options.database,
                                table_name=options.table,
                                ld_dir=options.ld_dir,
                                chromosome=options.chromosome,
                                ld_threshold=options.ld_threshold,
                                top_snps=options.rank_threshold,
                                clean=clean)

    elif options.method == "ABF":
        snpscores = gwas.ABFScore(gwas_results=infile,
                                  region_size=options.map_window,
                                  chromosome=options.chromosome,
                                  prior_variance=options.prior_var,
                                  clean=clean)

    elif options.method == "get_eigen":
        E.info("Fetching Eigen scores")
        snpscores = gwas.getEigenScores(eigen_dir=options.eigen_dir,
                                        bim_file=infile,
                                        snp_file=options.snp_set)
        snpscores = pd.DataFrame(snpscores).T

    elif options.method == "credible_set":
        E.info("Creating credible set")

        snpscores = gwas.makeCredibleSet(probs_file=infile,
                                         credible_set=options.interval,
                                         lead_snp_indx=options.lead_snp,
                                         filename_sep=options.separator,
                                         snp_column=options.snp_col,
                                         probs_column=options.prob_col)

    elif options.method == "summarise":
        # fixed typo in the log message ("resuslts" -> "results")
        E.info("Collating SNP prioritisation results")
        file_list = infile.split(",")
        snpscores = gwas.summariseResults(file_list=file_list)

    snpscores.to_csv(options.stdout, index_label="SNP",
                     sep="\t")

    # write footer and output benchmark information.
    E.Stop()


if __name__ == "__main__":
    sys.exit(main(sys.argv))
| mit |
stefanodoni/mtperf | main.py | 2 | 18296 | #!/usr/bin/python3
import os
import argparse
import csv
import sqlite3
import sqlalchemy as sqlal
import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from database import DBConstants
from datasets.BenchmarkDataset import BenchmarkDataset
from graph_plotters.HTModelPlotter import HTModelPlotter
from parsers.SarParser import SarParser
from parsers.PCMParser import PCMParser
from parsers.BenchmarkParser import BenchmarkParser
from parsers.PerfParser import PerfParser
from parsers.SysConfigParser import SysConfigParser
from statistics.HTLinearModel import HTLinearModel
from config.SUTConfig import SUTConfig
import config.BenchmarkAnalysisConfig as bac
# Command-line interface: input/output directories plus optional parser
# and charting switches.
parser = argparse.ArgumentParser(description='HTperf tool: parse, aggregate, select and plot data.')
parser.add_argument('benchmarkdirpath', metavar='benchmarkdirpath', help='path to directory containing n benchmark report directories, each one containing the csv report files')
parser.add_argument('reportdirpath', metavar='reportdirpath', help='path to directory in which the tool generates the reports')
parser.add_argument('-pcm', help='indicates if a pcm.csv file must be parsed', dest='pcm', action='store_true')
parser.add_argument('-sysconfig', help='indicates if a sysConfig.csv file must be parsed', dest='sysconfig', action='store_true')
parser.add_argument('--chart-no-legend', help='do not include legend in charts', action='store_true', default=False)
parser.add_argument('--chart-no-model', help='do not include regression model in charts', action='store_true')
parser.add_argument('--chart-xmax', help='max value of the throughput axis in charts', type=int, default=None)
parser.add_argument('--chart-umax', help='max value of the utilization axis in charts', type=int, default=None)
parser.add_argument('--chart-line-p-max', help='max value of the extrapolation line for productivity in charts', type=int, default=None)
parser.add_argument('--chart-line-u-max', help='max value of the extrapolation line for utilization in charts', type=int, default=None)
args = parser.parse_args()

# Settings
using_pcm = args.pcm
using_sysconfig = args.sysconfig

# Get the chosen output dir and create it if necessary
# (os.path.join with '' guarantees a trailing path separator)
OUTPUT_DIR = os.path.join(args.reportdirpath, '')
os.makedirs(os.path.dirname(OUTPUT_DIR), exist_ok=True)

# Set path and file names
path_to_tests = args.benchmarkdirpath

# Every sub-directory of the benchmark dir is treated as one test run.
test_names = [name for name in os.listdir(path_to_tests) if not os.path.isfile(path_to_tests + "/" + name)]
test_names.sort()
# 1-based run numbers parallel to test_names (not used further below)
test_numbers = [i + 1 for i in range(len(test_names))]

# Per-test report file names (relative to each test directory).
# benchmark_detailed_file = "/benchmark-detailed.csv"
benchmark_file = "/benchmark.csv"
sar_file = "/sar.csv"
pcm_file = "/pcm.csv"
perf_file = "/perf.csv"
sysconfig_file = "/sysConfig.csv"

# Create output directory
for test in test_names:
    os.makedirs(os.path.dirname(OUTPUT_DIR + test + '/'), exist_ok=True)

# Create DB file and empty it
open(DBConstants.DB_NAME, 'w').close()
# Data structures
system_config = {}
benchmark_dataframes = {}
benchmark_SUTconfigs = {}
sar_dataframes = {}
pcm_dataframes = {}
perf_dataframes = {}
benchmark_datasets = {}
ht_linear_models = {}
# ======================= DATA IMPORT =============================
if not using_sysconfig:
my_sut_config = SUTConfig()
my_sut_config.set_manual()
for test in test_names:
# benchmark_detailed_dataframe = BenchmarkParser().parse(benchmark_detailed_file, "detailed") # Only if using the detailed version of benchmark report file
benchmark_dataframes[test] = BenchmarkParser().parse(path_to_tests + '/' + test + benchmark_file)
sar_dataframes[test] = SarParser().parse(path_to_tests + '/' + test + sar_file)
perf_dataframes[test] = PerfParser().parse(path_to_tests + '/' + test + perf_file)
if using_sysconfig:
print("Setting SysConfig file of test: " + test)
system_config = SysConfigParser().parse(path_to_tests + '/' + test + sysconfig_file)
benchmark_SUTconfigs[test] = SUTConfig()
benchmark_SUTconfigs[test].set(system_config)
if using_pcm:
pcm_dataframes[test] = PCMParser().parse(path_to_tests + '/' + test + pcm_file)
# ======================= PERSIST DATA IN SQLITE ====================
conn = sqlite3.connect(DBConstants.DB_NAME)
c = conn.cursor()

# Append every test's dataframes into the shared tables.
for test in test_names:
    #benchmark_detailed_dataframe.to_sql(DBConstants.BENCHMARK_DETAILED_TABLE, conn)
    benchmark_dataframes[test].to_sql(DBConstants.BENCHMARK_TABLE, conn, if_exists='append')
    sar_dataframes[test].to_sql(DBConstants.SAR_TABLE, conn, if_exists='append')
    perf_dataframes[test].to_sql(DBConstants.PERF_TABLE, conn, if_exists='append')

    if using_pcm:
        pcm_dataframes[test].to_sql(DBConstants.PCM_TABLE, conn, if_exists='append')

conn.commit()

# c.execute("DROP TABLE IF EXISTS " + DBConstants.BENCHMARK_DETAILED_TABLE)
# Query to show table fields: PRAGMA table_info(tablename)
# for row in c.execute("PRAGMA table_info(perf)"):
#     print(row)
# for row in c.execute("SELECT * FROM " + DBConstants.PERF_TABLE):
#     print(row)
# c.execute("SELECT * FROM prova")
# print(c.fetchone())
#print(pd.read_sql_query("SELECT * FROM " + DBConstants.BENCHMARK_TABLE, conn))
#print(pd.read_sql_query("SELECT * FROM benchmark WHERE \"Timestamp Start\" < \"2015-10-11 08:14:18\"", conn))
# c.execute("DROP TABLE IF EXISTS prova")
# c.execute("CREATE TABLE prova (c1, c2, asd TEXT)")
# c.execute("INSERT INTO prova VALUES (5,3,4)")

# Build the aggregated per-test dataset from the persisted tables.
for test in test_names:
    benchmark_datasets[test] = BenchmarkDataset().create(benchmark_dataframes[test], conn, OUTPUT_DIR, test, using_pcm)

conn.close()

# Alternative to sqlite3: SQLAlchemy in order to use pd.read_sql_table
#engine = sqlal.create_engine('sqlite:///htperf.db')
#print(pd.read_sql_table('benchmark', engine))
#print(pd.read_sql_query("SELECT * FROM benchmark WHERE \"Timestamp Start\" <= \"2015-10-11 08:14:18\"", engine))
# ======================= STATISTICS =====================================
# Fit the hyper-threading linear model per test, using the per-test SUT
# configuration when available, otherwise the shared manual one.
for test in test_names:
    if using_sysconfig:
        ht_linear_models[test] = HTLinearModel().estimate(benchmark_datasets[test], OUTPUT_DIR, test, benchmark_SUTconfigs[test])
    else:
        ht_linear_models[test] = HTLinearModel().estimate(benchmark_datasets[test], OUTPUT_DIR, test, my_sut_config)

### Full Dump of benchmark, perf and models data to CSV
for test in test_names:
    benchmark_datasets[test]['perf-stats']['mean'].to_csv("mtperf-perf-dump-" + test + ".csv", sep=";")
    benchmark_datasets[test]['runs']['XavgTot'].to_csv("mtperf-bench-dump-" + test + ".csv", sep=";")
    ht_linear_models[test].Sys_mean_real_IPC.to_csv("mtperf-models-realIPC-dump-" + test + ".csv", sep=";")
# ======================= PLOT GRAPHS =====================================
# colors = ['#E12727', '#504FAF', '#088DA5', '#FE9900', '#12AD2A'] #281D46
# One color per test series; the second palette is for twin-axis plots.
colors = ['#0041CC', '#FF0000', '#E6C700', '#FF00BF', '#00CC22']
colors_second_ax = ['#f0f465', '#9cec5b', '#50c5b7', '#6184d8', '#533a71']

# --- U-vs-X: CPU utilization against throughput ---
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
# First plot scatter and standard points in order to determinate the maximum X value
for test, color in zip(test_names, colors):
    color = (colors[0] if len(test_names) == 1 else color)
    plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_utilization, 0, 0, color, (None if len(test_names) > 1 else "CPU Utilization \\%"), False, False, 0, 100)

if not args.chart_no_model:
    # Then use the x_max value to print the lr lines
    for test, color in zip(test_names, colors):
        color = (colors[1] if len(test_names) == 1 else color)
        plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_utilization, 0, 0, color, (test if len(test_names) > 1 else "Utilization Law"), False, False, 0, 100)

plotter.gen_graph("U-vs-X", "",
                  #"Stima dell'Utilizzo (sui primi " + str(bac.NUM_SAMPLES) + " campioni)" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
                  {0: 'Throughput (req/sec)'}, {0: 'Utilization \\%'}, X_axis_max=args.chart_xmax, include_legend=not args.chart_no_legend)

# --- P-vs-X: productivity against throughput ---
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
# First plot scatter and standard points in order to determinate the maximum X value
for test, color in zip(test_names, colors):
    color = (colors[0] if len(test_names) == 1 else color)
    plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_productivity, 1, 0, color, (None if len(test_names) > 1 else "Productivity"), False, True)#, 0, 100)

# Then use the x_max value to print the lr lines
for test, color in zip(test_names, colors):
    color = (colors[1] if len(test_names) == 1 else color)
    plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_productivity, 1, 0, color, (test if len(test_names) > 1 else "Linear Regression"), False, True)#, 0, 100)

plotter.gen_graph("P-vs-X", "",
                  #""Stima della Productivity (sui primi " + str(bac.NUM_SAMPLES) + " campioni)" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
                  {0: 'Throughput (req/sec)'}, {0: 'Productivity \\%'}, None, None, True)

# --- U,P-vs-X: utilization and productivity on one chart ---
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
# First plot scatter and standard points in order to determinate the maximum X value
for test, color in zip(test_names, colors):
    plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_utilization, 0, 0,
                         (colors[0] if len(test_names) == 1 else color), (None if len(test_names) > 1 else "Utilization"),
                         False, False)#, 0, 100)
    plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_productivity, 0, 0,
                         (colors[1] if len(test_names) == 1 else color), (None if len(test_names) > 1 else "Productivity"),
                         False, True)#, 0, 100)

if not args.chart_no_model:
    # Then use the x_max value to print the lr lines
    for test, color in zip(test_names, colors):
        plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_utilization, 0, 0,
                              (colors[0] if len(test_names) == 1 else color),
                              (test if len(test_names) > 1 else "Utilization Law"), False, False, x_line_max=args.chart_line_u_max)#, 0, 100)#, False)
        plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_productivity, 0, 0,
                              (colors[1] if len(test_names) == 1 else color),
                              (test if len(test_names) > 1 else "Extrapolated Prod."), False, True, x_line_max=args.chart_line_p_max)

plotter.gen_graph("U,P-vs-X", "",
                  #"Stima dell'Utilizzo (sui primi " + str(bac.NUM_SAMPLES) + " campioni)" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
                  {0: 'Throughput (req/sec)'}, {0: 'Utilization \\%, Productivity \\%'}, X_axis_max=args.chart_xmax, legend_inside_graph=True, include_legend=not args.chart_no_legend)

## plotter = HTModelPlotter().init(OUTPUT_DIR, 2)
# # First plot scatter and standard points in order to determinate the maximum X value
# for test, color in zip(test_names, colors):
#     plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], benchmark_datasets[test]['runs']['RavgTot'], 0, 0, color, test + '\nTot Avg Response Time (ms)')
#     plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_atd, 1, 0, color, test + '\nTot Avg Thread Concurrency', False, False, 1, 2)
#
# plotter.gen_graph("R,atc-vs-X", bac.TITLE, {0: 'Throughput', 1: 'Throughput'}, {0: 'Tot Avg Response Time (ms)', 1: 'Tot Avg Thread Concurrency'})

#plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
## First plot scatter and standard points in order to determinate the maximum X value
#for test, color in zip(test_names, colors):
#    color = (colors[0] if len(test_names) == 1 else color)
#    plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_atd, 0, 0, color, (test if len(test_names) > 1 else "ATC"), False, False, 1, 2)
#
#plotter.gen_graph("Atc-vs-X", "",
#    #"Andamento dell'Average Thread Concurrency" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
#    {0: 'Throughput'}, {0: 'Average Thread Concurrency'}, None, None, True)
#

# --- R-vs-U: response time against utilization ---
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
# First plot scatter and standard points in order to determinate the maximum X value
for test, color in zip(test_names, colors):
    color = (colors[0] if len(test_names) == 1 else color)
    plotter.plot_scatter(ht_linear_models[test].Sys_mean_utilization, benchmark_datasets[test]['runs']['RavgTot'], 0, 0, color, (test if len(test_names) > 1 else "Response Time (ms)"))

plotter.gen_graph("R-vs-U", "",
                  #"Andamento del Response Time rispetto all'Utilizzo" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
                  {0: 'Utilization \\%'}, {0: 'Response Time (ms)'}, X_axis_max=args.chart_umax, include_legend=not args.chart_no_legend)
#
#plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
## First plot scatter and standard points in order to determinate the maximum X value
#for test, color in zip(test_names, colors):
#    color = (colors[0] if len(test_names) == 1 else color)
#    plotter.plot_scatter(ht_linear_models[test].Sys_mean_productivity, benchmark_datasets[test]['runs']['RavgTot'], 0, 0, color, (test if len(test_names) > 1 else "Response Time (ms)"), True)
#
#plotter.gen_graph("R-vs-P", "",
#    #"Andamento del Response Time rispetto alla Productivity" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
#    {0: 'Productivity'}, {0: 'Response Time (ms)'}, None, 140, True)
#
#plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
## First plot scatter and standard points in order to determinate the maximum X value
#for test, color in zip(test_names, colors):
#    if using_sysconfig:
#        my_sut_config = benchmark_SUTconfigs[test]
#
#    color = (colors[0] if len(test_names) == 1 else color)
#    plotter.plot_scatter( ht_linear_models[test].Sys_mean_active_frequency, 0, 0,
#        color, (test if len(test_names) > 1 else "AFREQ (GHz)"),
#        False, False, 0, (my_sut_config.CPU_MAX_FREQUENCY_ALL_CORES_BUSY + 600000000))
#
#plotter.gen_graph("AFREQ-vs-X", "",
#    #"Andamento dell'Active Frequency" + "\n" + bac.BENCHMARK,# + "\n" + bac.SUT,
#    {0: 'Throughput'}, {0: 'Active Frequency (GHz)'}, None, None, True, "lower right")
#

# Hard-coded reference series used for the INSTR-vs-X chart below.
benchX = pd.Series([15633,30742,45689,60752,75282,90151,105483,120570,136335,148312])
#afreq = pd.Series([1.2863893771,1.7623052723,2.1674793625,2.4566290458,2.6498259159,2.7822519266,2.8569867656,2.896732531,2.9050008713,2.8996203862])
#core_busy_time = pd.Series([ 0.112894609, 0.2221528827, 0.3224394861, 0.4312730359, 0.539689001, 0.6395914782, 0.7470188007, 0.8404833952, 0.9391003009, 1])
instr = pd.Series([188.7400993175, 368.113962475, 542.7210293267, 718.3456908025, 892.9922278983, 1061.2639747475, 1246.3635704375, 1423.1804586467, 1610.9732021967, 1754.9474657242])

# --- AFREQ-vs-X: active frequency against throughput (all tests) ---
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
for test, color in zip(test_names, colors):
    color = (colors[0] if len(test_names) == 1 else color)
    plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_active_frequency, 0, 0, color, "test chart", False, False, 0, None)
    if not args.chart_no_model:
        plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_active_frequency, 0, 0, color, "AFREQ", False, False, 0, None)

plotter.gen_graph("AFREQ-vs-X", "", {0: 'Throughput'}, {0: 'Active Frequency (GHz)'}, X_axis_max=args.chart_xmax, include_legend=not args.chart_no_legend)

# --- Per-test charts: active frequency, core unhalted cycles and IPC ---
for test in test_names:
    plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
    plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_active_frequency, 0, 0, colors[0] , "test chart", False, False, 0, None)
    if not args.chart_no_model:
        plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_active_frequency, 0, 0, colors[1], "AFREQ", False, False, 0, None)
    plotter.gen_graph(test + "-AFREQ-vs-X", "", {0: 'Throughput'}, {0: 'Active Frequency (GHz)'}, X_axis_max=args.chart_xmax, include_legend=not args.chart_no_legend)

    plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
    plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], benchmark_datasets[test]['perf-stats']['mean']['CPU0_cpu_clk_unhalted_thread_any'] , 0, 0, colors[0] , "test chart", False, False, 0, None)
    plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], benchmark_datasets[test]['perf-stats']['mean']['CPU0_cpu_clk_unhalted_thread_any'], 0, 0, colors[1], "Core unhalted cycles", False, False, 0, None)
    plotter.gen_graph(test + "-CUC-vs-X", "", {0: 'Throughput'}, {0: 'Core Unhalted Cycles'}, None, None, True, "lower right", False)

    plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
    plotter.plot_scatter(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_real_IPC, 0, 0, colors[0], "chart", False, False, 0, None)
    plotter.plot_lin_regr(benchmark_datasets[test]['runs']['XavgTot'], ht_linear_models[test].Sys_mean_real_IPC, 0, 0, colors[1], "Instructions per cycle", False, False, 0, None)
    plotter.gen_graph(test + "-IPC-vs-X", "", {0: 'Throughput'}, {0: 'Instructions per cycle'}, None, None, True, "lower right", False)

# --- INSTR-vs-X: retired instructions from the hard-coded series above ---
plotter = HTModelPlotter().init(OUTPUT_DIR, 1)
plotter.plot_scatter(benchX, instr, 0, 0, colors[0] , "test chart", False, False, 0, None)
if not args.chart_no_model:
    plotter.plot_lin_regr(benchX, instr, 0, 0, colors[1], "Retired instructions (Millions/sec)", False, False, 0, None)
#plotter.gen_graph("INSTR-vs-X", "", {0: 'Throughput'}, {0: 'Retired instructions (Millions/sec)'}, None, None, True, "lower right", False)
plotter.gen_graph("INSTR-vs-X", "", {0: 'Throughput'}, {0: 'Retired instructions (Millions/sec)'}, X_axis_max=args.chart_xmax, include_legend=not args.chart_no_legend)
| gpl-2.0 |
Delphine-L/tools-iuc | tools/repmatch_gff3/repmatch_gff3_util.py | 22 | 17958 | import bisect
import csv
import os
import shutil
import sys
import tempfile
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot # noqa: I202,E402
# Graph settings
Y_LABEL = 'Counts'
X_LABEL = 'Number of matched replicates'
TICK_WIDTH = 3
# Amount to shift the graph to make labels fit, [left, right, top, bottom]
ADJUST = [0.180, 0.9, 0.9, 0.1]
# Length of tick marks, use TICK_WIDTH for width
pyplot.rc('xtick.major', size=10.00)
pyplot.rc('ytick.major', size=10.00)
pyplot.rc('lines', linewidth=4.00)
pyplot.rc('axes', linewidth=3.00)
pyplot.rc('font', family='Bitstream Vera Sans', size=32.0)
# One matplotlib color code per bar series (black, red, blue).
COLORS = 'krb'
# True when running under Python 2 (csv file open modes differ).
ISPY2 = sys.version_info[0] == 2
class Replicate(object):
    """Peak calls for one experimental replicate, read from a GFF-like
    tab-separated dataset and grouped per chromosome."""

    def __init__(self, id, dataset_path):
        self.id = id
        self.dataset_path = dataset_path
        # csv on Python 2 wants a binary handle; on Python 3 it wants a
        # text handle with newline handling disabled.
        if ISPY2:
            fh = open(dataset_path, 'rb')
        else:
            fh = open(dataset_path, 'r', newline='')
        try:
            self.parse(csv.reader(fh, delimiter='\t'))
        finally:
            # Close the handle explicitly; the original code leaked it.
            fh.close()

    def parse(self, reader):
        """Populate self.chromosomes with Peak objects from the rows."""
        self.chromosomes = {}
        for line in reader:
            # Skip comment and quoted header lines.
            if line[0].startswith("#") or line[0].startswith('"'):
                continue
            cname, junk, junk, mid, midplus, value, strand, junk, attrs = line
            attrs = parse_gff_attrs(attrs)
            distance = int(attrs['cw_distance'])
            mid = int(mid)
            midplus = int(midplus)
            value = float(value)
            if cname not in self.chromosomes:
                self.chromosomes[cname] = Chromosome(cname)
            chrom = self.chromosomes[cname]
            chrom.add_peak(Peak(cname, mid, value, distance, self))
        # Keep each chromosome's peaks sorted for bisect-based lookups.
        for chrom in self.chromosomes.values():
            chrom.sort_by_index()

    def filter(self, up_limit, low_limit):
        """Drop peaks whose distance lies outside [low_limit, up_limit]."""
        for chrom in self.chromosomes.values():
            chrom.filter(up_limit, low_limit)

    def size(self):
        """Total number of peaks across all chromosomes."""
        return sum([len(c.peaks) for c in self.chromosomes.values()])
class Chromosome(object):
    """A named chromosome holding peaks kept sorted by midpoint,
    with a parallel list of midpoints for bisect lookups."""

    def __init__(self, name):
        self.name = name
        self.peaks = []

    def add_peak(self, peak):
        """Append a peak; call sort_by_index() afterwards to restore order."""
        self.peaks.append(peak)

    def sort_by_index(self):
        """Sort peaks by midpoint and rebuild the midpoint key list."""
        self.peaks.sort(key=lambda p: p.midpoint)
        self.keys = make_keys(self.peaks)

    def remove_peak(self, peak):
        """Delete the stored peak whose midpoint matches, if any."""
        idx = bisect.bisect_left(self.keys, peak.midpoint)
        found = idx < len(self.peaks) and self.peaks[idx].midpoint == peak.midpoint
        if found:
            del self.keys[idx]
            del self.peaks[idx]

    def filter(self, up_limit, low_limit):
        """Keep only peaks whose distance lies within [low_limit, up_limit]."""
        kept = []
        for p in self.peaks:
            if low_limit <= p.distance <= up_limit:
                kept.append(p)
        self.peaks = kept
        self.keys = make_keys(self.peaks)
class Peak(object):
    """A single peak observation belonging to one replicate."""

    def __init__(self, chrom, midpoint, value, distance, replicate):
        self.chrom = chrom
        self.value = value
        self.midpoint = midpoint
        self.distance = distance
        self.replicate = replicate

    def normalized_value(self, med):
        """Scale this peak's value to *med* relative to its replicate's median."""
        scale = med / self.replicate.median
        return self.value * scale
class PeakGroup(object):
    """A group of matched peaks, one per replicate, keyed by replicate id."""

    def __init__(self):
        self.peaks = {}

    def add_peak(self, repid, peak):
        """Register *peak* as this group's member for replicate *repid*."""
        self.peaks[repid] = peak

    @property
    def chrom(self):
        """Chromosome name, taken from an arbitrary member peak."""
        any_peak = list(self.peaks.values())[0]
        return any_peak.chrom

    @property
    def midpoint(self):
        """Median midpoint across member peaks, truncated to int."""
        mids = [p.midpoint for p in self.peaks.values()]
        return int(median(mids))

    @property
    def num_replicates(self):
        """Number of replicates contributing a peak to this group."""
        return len(self.peaks)

    @property
    def median_distance(self):
        """Median of member peak distances, truncated to int."""
        dists = [p.distance for p in self.peaks.values()]
        return int(median(dists))

    @property
    def value_sum(self):
        """Sum of member peak values."""
        total = 0
        for p in self.peaks.values():
            total += p.value
        return total

    def normalized_value(self, med):
        """Median of member peaks' values normalized to *med*."""
        normalized = [p.normalized_value(med) for p in self.peaks.values()]
        return median(normalized)

    @property
    def peakpeak_distance(self):
        """Absolute midpoint distance between the (two) member peaks."""
        ids = list(self.peaks.keys())
        return abs(self.peaks[ids[0]].midpoint - self.peaks[ids[1]].midpoint)
class FrequencyDistribution(object):
    """Histogram mapping observed values to their occurrence counts."""

    def __init__(self, d=None):
        # Note: a dict passed in is used directly, not copied.
        self.dist = d or {}

    def add(self, x):
        """Record one more occurrence of *x*."""
        current = self.dist.get(x, 0)
        self.dist[x] = current + 1

    def graph_series(self):
        """Return parallel lists (values, counts) suitable for plotting."""
        xs = []
        ys = []
        for value, count in self.dist.items():
            xs.append(value)
            ys.append(count)
        return xs, ys

    def mode(self):
        """Return the value with the highest count."""
        counts = self.dist
        return max(counts, key=counts.get)

    def size(self):
        """Total number of recorded observations."""
        return sum(self.dist.values())
def stop_err(msg):
    """Write *msg* to stderr and abort the program with exit status 1."""
    sys.stderr.write(msg)
    raise SystemExit(1)
def median(data):
    """
    Return the median of *data*, or 0 for an empty collection.

    With an even number of items this is the mean of the two middle
    values, so the result may be fractional.
    """
    n = len(data)
    if n == 0:
        return 0
    ordered = sorted(data)
    mid = n // 2
    if n % 2:
        return ordered[mid]
    return (ordered[mid] + ordered[mid - 1]) / 2
def make_keys(peaks):
    """Return each peak's midpoint, preserving input order."""
    keys = []
    for peak in peaks:
        keys.append(peak.midpoint)
    return keys
def get_window(chromosome, target_peaks, distance):
    """
    Return (peaks, chromosome name) where peaks are all of *chromosome*'s
    peaks lying within *distance* of any peak in *target_peaks*.
    """
    targets = list(target_peaks)
    lo = targets[0].midpoint
    hi = targets[0].midpoint
    for t in targets:
        lo = min(lo, t.midpoint - distance)
        hi = max(hi, t.midpoint + distance)
    first = bisect.bisect_left(chromosome.keys, lo)
    last = bisect.bisect_right(chromosome.keys, hi)
    return (chromosome.peaks[first:last], chromosome.name)
def match_largest(window, peak, chrum):
    """Return the highest-value peak in *window*, or None when the window is
    empty or *peak* lies on a different chromosome than *chrum*."""
    if not window:
        return None
    if peak.chrom != chrum:
        return None
    best = window[0]
    for candidate in window[1:]:
        if candidate.value > best.value:
            best = candidate
    return best
def match_closest(window, peak, chrum):
    """Return the window peak whose midpoint is nearest *peak*'s midpoint, or
    None when the window is empty or the chromosome does not match."""
    if not window:
        return None
    if peak.chrom != chrum:
        return None

    def gap(candidate):
        return abs(candidate.midpoint - peak.midpoint)

    return min(window, key=gap)
def frequency_histogram(freqs, dataset_path, labels=[], title=''):
    """Render the FrequencyDistribution objects in *freqs* as a grouped bar
    chart and save the figure to *dataset_path*.

    Relies on module-level COLORS, X_LABEL, Y_LABEL, ADJUST and TICK_WIDTH.
    NOTE(review): *labels* and *title* are accepted but never used; the
    mutable default for *labels* is shared across calls (harmless here since
    it is never mutated, but worth cleaning up).
    """
    pyplot.clf()
    pyplot.figure(figsize=(10, 10))
    for i, freq in enumerate(freqs):
        xvals, yvals = freq.graph_series()
        # Go from high to low
        xvals.reverse()
        pyplot.bar([x - 0.4 + 0.8 / len(freqs) * i for x in xvals], yvals, width=0.8 / len(freqs), color=COLORS[i])
    # NOTE(review): xvals here is whatever the *last* distribution produced,
    # so the tick range is driven by that series alone — confirm intended.
    pyplot.xticks(range(min(xvals), max(xvals) + 1), map(str, reversed(range(min(xvals), max(xvals) + 1))))
    pyplot.xlabel(X_LABEL)
    pyplot.ylabel(Y_LABEL)
    pyplot.subplots_adjust(left=ADJUST[0], right=ADJUST[1], top=ADJUST[2], bottom=ADJUST[3])
    ax = pyplot.gca()
    for l in ax.get_xticklines() + ax.get_yticklines():
        l.set_markeredgewidth(TICK_WIDTH)
    pyplot.savefig(dataset_path)
# Dispatch table mapping the user-facing method name to its matcher function.
METHODS = {'closest': match_closest, 'largest': match_largest}
def gff_attrs(l):
    """Format (name, value) pairs as a GFF attribute string ('.' when empty)."""
    if not l:
        return '.'
    parts = ['%s=%s' % (pair[0], pair[1]) for pair in l]
    return ';'.join(parts)
def parse_gff_attrs(s):
    """Parse a GFF attribute column into a dict of tag -> value.

    The placeholder value '.' yields an empty dict.  Each ';'-separated item
    is split on its FIRST '=' only, so attribute values that themselves
    contain '=' are preserved intact (the previous code used an unbounded
    split and raised ValueError on such input).
    """
    d = {}
    if s == '.':
        return d
    for item in s.split(';'):
        key, val = item.split('=', 1)
        d[key] = val
    return d
def gff_row(cname, start, end, score, source, stype='.', strand='.', phase='.', attrs=None):
    """Assemble one nine-column GFF row as a tuple."""
    attr_field = gff_attrs(attrs or [])
    return (cname, source, stype, start, end, score, strand, phase, attr_field)
def get_temporary_plot_path():
    """
    Create an empty temporary file with a '.pdf' suffix (inside its own
    fresh temp directory) and return its path, for use with bioformats.
    """
    parent_dir = tempfile.mkdtemp(prefix='tmp-repmatch-')
    handle, path = tempfile.mkstemp(suffix='.pdf', dir=parent_dir)
    # Close the low-level descriptor; callers only need the path.
    os.close(handle)
    return path
def process_files(dataset_paths, galaxy_hids, method, distance, step, replicates, up_limit, low_limit, output_files,
                  output_matched_peaks, output_unmatched_peaks, output_detail, output_statistics_table, output_statistics_histogram):
    """Run peak matching for each requested method and, when all outputs and
    all methods are requested, render a combined statistics histogram.

    *method* may be 'all' (run every entry of METHODS) or a single method
    name; the remaining parameters mirror the tool's command-line options.
    """
    output_statistics_histogram_file = output_files in ["all"] and method in ["all"]
    # Matching requires at least two replicates.
    if len(dataset_paths) < 2:
        return
    if method == 'all':
        match_methods = METHODS.keys()
    else:
        match_methods = [method]
    # Keep one statistics dict per method so the combined histogram reflects
    # every method run (previously only the LAST method's distribution was
    # plotted even though all method names were passed as labels).
    statistics = []
    for match_method in match_methods:
        statistics.append(perform_process(dataset_paths,
                                          galaxy_hids,
                                          match_method,
                                          distance,
                                          step,
                                          replicates,
                                          up_limit,
                                          low_limit,
                                          output_files,
                                          output_matched_peaks,
                                          output_unmatched_peaks,
                                          output_detail,
                                          output_statistics_table,
                                          output_statistics_histogram))
    if output_statistics_histogram_file:
        tmp_statistics_histogram_path = get_temporary_plot_path()
        frequency_histogram([stat['distribution'] for stat in statistics],
                            tmp_statistics_histogram_path,
                            METHODS.keys())
        shutil.move(tmp_statistics_histogram_path, output_statistics_histogram)
def perform_process(dataset_paths, galaxy_hids, method, distance, step, num_required, up_limit, low_limit, output_files,
                    output_matched_peaks, output_unmatched_peaks, output_detail, output_statistics_table, output_statistics_histogram):
    """Match peaks across replicate datasets using one matching *method*.

    Loads each dataset into a Replicate, optionally filters peaks by the
    distance limits, groups matching peaks across replicates, writes the
    requested GFF/tabular outputs, and returns
    {'distribution': FrequencyDistribution} summarising how many replicates
    each peak group covered.
    """
    # Which optional outputs are enabled for this run.
    output_detail_file = output_files in ["all"] and output_detail is not None
    output_statistics_table_file = output_files in ["all"] and output_statistics_table is not None
    output_unmatched_peaks_file = output_files in ["all", "matched_peaks_unmatched_peaks"] and output_unmatched_peaks is not None
    output_statistics_histogram_file = output_files in ["all"] and output_statistics_histogram is not None
    replicates = []
    for i, dataset_path in enumerate(dataset_paths):
        try:
            galaxy_hid = galaxy_hids[i]
            r = Replicate(galaxy_hid, dataset_path)
            replicates.append(r)
        except Exception as e:
            stop_err('Unable to parse file "%s", exception: %s' % (dataset_path, str(e)))
    # Compact textual summary of the run parameters.
    # NOTE(review): attrs is built but never used below — confirm it is
    # intentionally vestigial.
    attrs = 'd%sr%s' % (distance, num_required)
    if up_limit != 1000:
        attrs += 'u%d' % up_limit
    if low_limit != -1000:
        attrs += 'l%d' % low_limit
    if step != 0:
        attrs += 's%d' % step

    def td_writer(file_path):
        # Returns a tab-delimited writer for a certain output
        # NOTE(review): the file handle is never closed explicitly; it stays
        # open for the life of the process.  ISPY2 is presumably a
        # module-level Python-2 flag — confirm.
        if ISPY2:
            fh = open(file_path, 'wb')
            return csv.writer(fh, delimiter='\t')
        else:
            fh = open(file_path, 'w', newline='')
            return csv.writer(fh, delimiter='\t', quoting=csv.QUOTE_NONE)

    # Header for the detail output: group-level columns followed by one set
    # of per-replicate columns for each replicate.
    labels = ('chrom',
              'median midpoint',
              'median midpoint+1',
              'median normalized reads',
              'replicates',
              'median c-w distance',
              'reads sum')
    for replicate in replicates:
        labels += ('chrom',
                   'median midpoint',
                   'median midpoint+1',
                   'c-w sum',
                   'c-w distance',
                   'replicate id')
    matched_peaks_output = td_writer(output_matched_peaks)
    if output_statistics_table_file:
        statistics_table_output = td_writer(output_statistics_table)
        statistics_table_output.writerow(('data', 'median read count'))
    if output_detail_file:
        detail_output = td_writer(output_detail)
        detail_output.writerow(labels)
    if output_unmatched_peaks_file:
        unmatched_peaks_output = td_writer(output_unmatched_peaks)
        unmatched_peaks_output.writerow(('chrom', 'midpoint', 'midpoint+1', 'c-w sum', 'c-w distance', 'replicate id'))
    # Perform filtering
    if up_limit < 1000 or low_limit > -1000:
        for replicate in replicates:
            replicate.filter(up_limit, low_limit)
    # Actually merge the peaks
    peak_groups = []
    unmatched_peaks = []
    freq = FrequencyDistribution()

    def do_match(reps, distance):
        # Copy list because we will mutate it, but keep replicate references.
        reps = reps[:]
        while len(reps) > 1:
            # Iterate over each replicate as "main"
            main = reps[0]
            reps.remove(main)
            for chromosome in list(main.chromosomes.values()):
                peaks_by_value = chromosome.peaks[:]
                # Sort main replicate by value
                peaks_by_value.sort(key=lambda peak: -peak.value)

                def search_for_matches(group):
                    # Here we use multiple passes, expanding the window to be
                    # +- distance from any previously matched peak.
                    while True:
                        new_match = False
                        for replicate in reps:
                            if replicate.id in group.peaks:
                                # Stop if match already found for this replicate
                                continue
                            try:
                                # Lines changed to remove a major bug by Rohit Reja.
                                window, chrum = get_window(replicate.chromosomes[chromosome.name], list(group.peaks.values()), distance)
                                # NOTE(review): `peak` is a closure over the
                                # enclosing loops; during the group-enlargement
                                # pass below it still refers to a peak from a
                                # previous iteration — verify this is intended.
                                match = METHODS[method](window, peak, chrum)
                            except KeyError:
                                continue
                            if match:
                                group.add_peak(replicate.id, match)
                                new_match = True
                        if not new_match:
                            break

                # Attempt to enlarge existing peak groups
                for group in peak_groups:
                    old_peaks = list(group.peaks.values())
                    search_for_matches(group)
                    for peak in list(group.peaks.values()):
                        if peak not in old_peaks:
                            peak.replicate.chromosomes[chromosome.name].remove_peak(peak)
                # Attempt to find new peaks groups. For each peak in the
                # main replicate, search for matches in the other replicates
                for peak in peaks_by_value:
                    matches = PeakGroup()
                    matches.add_peak(main.id, peak)
                    search_for_matches(matches)
                    # Were enough replicates matched?
                    if matches.num_replicates >= num_required:
                        for peak in list(matches.peaks.values()):
                            peak.replicate.chromosomes[chromosome.name].remove_peak(peak)
                        peak_groups.append(matches)

    # Zero or less = no stepping
    if step <= 0:
        do_match(replicates, distance)
    else:
        for d in range(0, distance, step):
            do_match(replicates, d)
    for group in peak_groups:
        freq.add(group.num_replicates)
    # Collect together the remaining unmatched_peaks
    for replicate in replicates:
        for chromosome in replicate.chromosomes.values():
            for peak in chromosome.peaks:
                freq.add(1)
                unmatched_peaks.append(peak)
    # Average the unmatched_peaks count in the graph by # replicates
    med = median([peak.value for group in peak_groups for peak in group.peaks.values()])
    for replicate in replicates:
        replicate.median = median([peak.value for group in peak_groups for peak in group.peaks.values() if peak.replicate == replicate])
        # NOTE(review): this writerow runs even when the statistics table
        # output is disabled, in which case statistics_table_output is unbound
        # (NameError) — confirm the valid output-option combinations.
        statistics_table_output.writerow((replicate.id, replicate.median))
    for group in peak_groups:
        # Output matched_peaks (matched pairs).
        matched_peaks_output.writerow(gff_row(cname=group.chrom,
                                              start=group.midpoint,
                                              end=group.midpoint + 1,
                                              score=group.normalized_value(med),
                                              source='repmatch',
                                              stype='.',
                                              strand='.',
                                              phase='.',
                                              attrs=[('median_distance', group.median_distance),
                                                     ('value_sum', group.value_sum),
                                                     ('replicates', group.num_replicates)]))
        if output_detail_file:
            matched_peaks = (group.chrom,
                             group.midpoint,
                             group.midpoint + 1,
                             group.normalized_value(med),
                             group.num_replicates,
                             group.median_distance,
                             group.value_sum)
            for peak in group.peaks.values():
                matched_peaks += (peak.chrom, peak.midpoint, peak.midpoint + 1, peak.value, peak.distance, peak.replicate.id)
            detail_output.writerow(matched_peaks)
    if output_unmatched_peaks_file:
        for unmatched_peak in unmatched_peaks:
            unmatched_peaks_output.writerow((unmatched_peak.chrom,
                                             unmatched_peak.midpoint,
                                             unmatched_peak.midpoint + 1,
                                             unmatched_peak.value,
                                             unmatched_peak.distance,
                                             unmatched_peak.replicate.id))
    if output_statistics_histogram_file:
        tmp_statistics_histogram_path = get_temporary_plot_path()
        frequency_histogram([freq], tmp_statistics_histogram_path)
        shutil.move(tmp_statistics_histogram_path, output_statistics_histogram)
    return {'distribution': freq}
| mit |
cloud-fan/spark | python/pyspark/pandas/tests/test_numpy_compat.py | 15 | 8672 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas import set_option, reset_option
from pyspark.pandas.numpy_compat import unary_np_spark_mappings, binary_np_spark_mappings
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class NumPyCompatTest(PandasOnSparkTestCase, SQLTestUtils):
    """Checks that NumPy ufuncs applied to pandas-on-Spark objects produce
    the same results as applying the same ufuncs to plain pandas objects."""

    # Ufuncs excluded from the generated compatibility checks: either
    # unsupported by Koalas/pandas-on-Spark or numerically flaky across
    # PyArrow/Spark versions (see inline notes).
    blacklist = [
        # Koalas does not currently support
        "conj",
        "conjugate",
        "isnat",
        "matmul",
        "frexp",
        # Values are close enough but tests failed.
        "arccos",
        "exp",
        "expm1",
        "log",  # flaky
        "log10",  # flaky
        "log1p",  # flaky
        "modf",
        "floor_divide",  # flaky
        # Results seem inconsistent in a different version of, I (Hyukjin) suspect, PyArrow.
        # From PyArrow 0.15, seems it returns the correct results via PySpark. Probably we
        # can enable it later when Koalas switches to PyArrow 0.15 completely.
        "left_shift",
    ]

    @property
    def pdf(self):
        """Fixed pandas DataFrame fixture with a non-monotonic index."""
        return pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
            index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
        )

    @property
    def psdf(self):
        """pandas-on-Spark counterpart of ``pdf``."""
        return ps.from_pandas(self.pdf)

    def test_np_add_series(self):
        """np.add on two Series and on Series + scalar matches pandas."""
        psdf = self.psdf
        pdf = self.pdf
        # pandas < 0.25 kept a name on the result; strip it before comparing.
        if LooseVersion(pd.__version__) < LooseVersion("0.25"):
            self.assert_eq(np.add(psdf.a, psdf.b), np.add(pdf.a, pdf.b).rename())
        else:
            self.assert_eq(np.add(psdf.a, psdf.b), np.add(pdf.a, pdf.b))

        psdf = self.psdf
        pdf = self.pdf
        self.assert_eq(np.add(psdf.a, 1), np.add(pdf.a, 1))

    def test_np_add_index(self):
        """np.add on Index objects matches pandas."""
        k_index = self.psdf.index
        p_index = self.pdf.index
        self.assert_eq(np.add(k_index, k_index), np.add(p_index, p_index))

    def test_np_unsupported_series(self):
        """Multi-output ufunc usage on a Series raises NotImplementedError."""
        psdf = self.psdf
        with self.assertRaisesRegex(NotImplementedError, "pandas.*not.*support.*sqrt.*"):
            np.sqrt(psdf.a, psdf.b)

    def test_np_unsupported_frame(self):
        """Multi-output ufunc usage on a DataFrame raises NotImplementedError."""
        psdf = self.psdf
        with self.assertRaisesRegex(NotImplementedError, "on-Spark.*not.*support.*sqrt.*"):
            np.sqrt(psdf, psdf)

    def test_np_spark_compat_series(self):
        """Every mapped unary/binary ufunc agrees with pandas on Series."""
        # Use randomly generated dataFrame
        pdf = pd.DataFrame(
            np.random.randint(-100, 100, size=(np.random.randint(100), 2)), columns=["a", "b"]
        )
        pdf2 = pd.DataFrame(
            np.random.randint(-100, 100, size=(len(pdf), len(pdf.columns))), columns=["a", "b"]
        )
        psdf = ps.from_pandas(pdf)
        psdf2 = ps.from_pandas(pdf2)

        for np_name, spark_func in unary_np_spark_mappings.items():
            np_func = getattr(np, np_name)
            if np_name not in self.blacklist:
                try:
                    # unary ufunc
                    self.assert_eq(np_func(pdf.a), np_func(psdf.a), almost=True)
                except Exception as e:
                    raise AssertionError("Test in '%s' function was failed." % np_name) from e

        for np_name, spark_func in binary_np_spark_mappings.items():
            np_func = getattr(np, np_name)
            if np_name not in self.blacklist:
                try:
                    # binary ufunc
                    if LooseVersion(pd.__version__) < LooseVersion("0.25"):
                        self.assert_eq(
                            np_func(pdf.a, pdf.b).rename(), np_func(psdf.a, psdf.b), almost=True
                        )
                    else:
                        self.assert_eq(np_func(pdf.a, pdf.b), np_func(psdf.a, psdf.b), almost=True)
                    self.assert_eq(np_func(pdf.a, 1), np_func(psdf.a, 1), almost=True)
                except Exception as e:
                    raise AssertionError("Test in '%s' function was failed." % np_name) from e

        # Test only top 5 for now. 'compute.ops_on_diff_frames' option increases too much time.
        try:
            set_option("compute.ops_on_diff_frames", True)
            for np_name, spark_func in list(binary_np_spark_mappings.items())[:5]:
                np_func = getattr(np, np_name)
                if np_name not in self.blacklist:
                    try:
                        # binary ufunc
                        if LooseVersion(pd.__version__) < LooseVersion("0.25"):
                            self.assert_eq(
                                np_func(pdf.a, pdf2.b).sort_index().rename(),
                                np_func(psdf.a, psdf2.b).sort_index(),
                                almost=True,
                            )
                        else:
                            self.assert_eq(
                                np_func(pdf.a, pdf2.b).sort_index(),
                                np_func(psdf.a, psdf2.b).sort_index(),
                                almost=True,
                            )
                    except Exception as e:
                        raise AssertionError("Test in '%s' function was failed." % np_name) from e
        finally:
            reset_option("compute.ops_on_diff_frames")

    def test_np_spark_compat_frame(self):
        """Every mapped unary/binary ufunc agrees with pandas on DataFrames."""
        # Use randomly generated dataFrame
        pdf = pd.DataFrame(
            np.random.randint(-100, 100, size=(np.random.randint(100), 2)), columns=["a", "b"]
        )
        pdf2 = pd.DataFrame(
            np.random.randint(-100, 100, size=(len(pdf), len(pdf.columns))), columns=["a", "b"]
        )
        psdf = ps.from_pandas(pdf)
        psdf2 = ps.from_pandas(pdf2)

        for np_name, spark_func in unary_np_spark_mappings.items():
            np_func = getattr(np, np_name)
            if np_name not in self.blacklist:
                try:
                    # unary ufunc
                    self.assert_eq(np_func(pdf), np_func(psdf), almost=True)
                except Exception as e:
                    raise AssertionError("Test in '%s' function was failed." % np_name) from e

        for np_name, spark_func in binary_np_spark_mappings.items():
            np_func = getattr(np, np_name)
            if np_name not in self.blacklist:
                try:
                    # binary ufunc
                    self.assert_eq(np_func(pdf, pdf), np_func(psdf, psdf), almost=True)
                    self.assert_eq(np_func(pdf, 1), np_func(psdf, 1), almost=True)
                except Exception as e:
                    raise AssertionError("Test in '%s' function was failed." % np_name) from e

        # Test only top 5 for now. 'compute.ops_on_diff_frames' option increases too much time.
        try:
            set_option("compute.ops_on_diff_frames", True)
            for np_name, spark_func in list(binary_np_spark_mappings.items())[:5]:
                np_func = getattr(np, np_name)
                if np_name not in self.blacklist:
                    try:
                        # binary ufunc
                        self.assert_eq(
                            np_func(pdf, pdf2).sort_index(),
                            np_func(psdf, psdf2).sort_index(),
                            almost=True,
                        )
                    except Exception as e:
                        raise AssertionError("Test in '%s' function was failed." % np_name) from e
        finally:
            reset_option("compute.ops_on_diff_frames")
if __name__ == "__main__":
    import unittest
    from pyspark.pandas.tests.test_numpy_compat import *  # noqa: F401

    # Prefer XML test reports (for CI) when xmlrunner is available; fall back
    # to the default text runner otherwise.
    try:
        import xmlrunner  # type: ignore[import]

        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
phdowling/scikit-learn | sklearn/tests/test_multiclass.py | 136 | 23649 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
# Load the iris dataset and shuffle it with a fixed seed so every test in
# this module sees the same deterministic permutation.
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
    """OvR raises on predict-before-fit and rejects multioutput targets."""
    ovr = OneVsRestClassifier(LinearSVC(random_state=0))
    assert_raises(ValueError, ovr.predict, [])

    with ignore_warnings():
        assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
                      LabelBinarizer(), [])

    # Fail on multioutput data
    assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
                  np.array([[1, 0], [0, 1]]),
                  np.array([[1, 2], [3, 1]]))
    assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
                  np.array([[1, 0], [0, 1]]),
                  np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
    """OvR fits one estimator per class, for both decision_function and
    predict_proba based classifiers."""
    # A classifier which implements decision_function.
    ovr = OneVsRestClassifier(LinearSVC(random_state=0))
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovr.estimators_), n_classes)

    clf = LinearSVC(random_state=0)
    pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))

    # A classifier which implements predict_proba.
    ovr = OneVsRestClassifier(MultinomialNB())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
    """OvR/OvO also work with regressors (no decision_function) as bases."""
    # test that ovr and ovo work on regressors which don't have a decision_function
    ovr = OneVsRestClassifier(DecisionTreeRegressor())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovr.estimators_), n_classes)
    assert_array_equal(np.unique(pred), [0, 1, 2])
    # we are doing something sensible
    assert_greater(np.mean(pred == iris.target), .9)

    ovr = OneVsOneClassifier(DecisionTreeRegressor())
    pred = ovr.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
    assert_array_equal(np.unique(pred), [0, 1, 2])
    # we are doing something sensible
    assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
    """Multilabel OvR yields identical predictions for dense and every
    scipy sparse target format."""
    for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
                   sp.lil_matrix]:
        base_clf = MultinomialNB(alpha=1)

        X, Y = datasets.make_multilabel_classification(n_samples=100,
                                                       n_features=20,
                                                       n_classes=5,
                                                       n_labels=3,
                                                       length=50,
                                                       allow_unlabeled=True,
                                                       random_state=0)

        X_train, Y_train = X[:80], Y[:80]
        X_test = X[80:]

        clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
        Y_pred = clf.predict(X_test)

        clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
        Y_pred_sprs = clf_sprs.predict(X_test)

        assert_true(clf.multilabel_)
        assert_true(sp.issparse(Y_pred_sprs))
        assert_array_equal(Y_pred_sprs.toarray(), Y_pred)

        # Test predict_proba
        Y_proba = clf_sprs.predict_proba(X_test)

        # predict assigns a label if the probability that the
        # sample has the label is greater than 0.5.
        pred = Y_proba > .5
        assert_array_equal(pred, Y_pred_sprs.toarray())

        # Test decision_function
        clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
        dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
        assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
    """OvR copes with labels that are constantly present or constantly absent."""
    # Test that ovr works with classes that are always present or absent.
    # Note: tests is the case where _ConstantPredictor is utilised
    X = np.ones((10, 2))
    X[:5, :] = 0

    # Build an indicator matrix where two features are always on.
    # As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
    y = np.zeros((10, 3))
    y[5:, 0] = 1
    y[:, 1] = 1
    y[:, 2] = 1

    ovr = OneVsRestClassifier(LogisticRegression())
    # Fitting a degenerate label must warn, not fail.
    assert_warns(UserWarning, ovr.fit, X, y)
    y_pred = ovr.predict(X)
    assert_array_equal(np.array(y_pred), np.array(y))
    y_pred = ovr.decision_function(X)
    assert_equal(np.unique(y_pred[:, -2:]), 1)
    y_pred = ovr.predict_proba(X)
    assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))

    # y has a constantly absent label
    y = np.zeros((10, 2))
    y[5:, 0] = 1  # variable label
    ovr = OneVsRestClassifier(LogisticRegression())
    assert_warns(UserWarning, ovr.fit, X, y)
    y_pred = ovr.predict_proba(X)
    assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
    """OvR handles string labels and label-indicator matrices alike."""
    # Toy dataset where features correspond directly to labels.
    X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
    y = ["eggs", "spam", "ham", "eggs", "ham"]
    Y = np.array([[0, 0, 1],
                  [0, 1, 0],
                  [1, 0, 0],
                  [0, 0, 1],
                  [1, 0, 0]])

    classes = set("ham eggs spam".split())

    for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
                     LinearRegression(), Ridge(),
                     ElasticNet()):
        clf = OneVsRestClassifier(base_clf).fit(X, y)
        assert_equal(set(clf.classes_), classes)
        y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
        # NOTE(review): set("eggs") == {'e', 'g', 's'}; this comparison relies
        # on y_pred being the string "eggs" — confirm this is intended.
        assert_equal(set(y_pred), set("eggs"))

        # test input as label indicator matrix
        clf = OneVsRestClassifier(base_clf).fit(X, Y)
        y_pred = clf.predict([[0, 0, 4]])[0]
        assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
    """OvR on a binary problem predicts correctly from string and
    label-indicator targets, for many base estimators."""
    # Toy dataset where features correspond directly to labels.
    X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
    y = ["eggs", "spam", "spam", "eggs", "spam"]
    Y = np.array([[0, 1, 1, 0, 1]]).T

    classes = set("eggs spam".split())

    def conduct_test(base_clf, test_predict_proba=False):
        # Fit on string labels and check classes and a held-out prediction.
        clf = OneVsRestClassifier(base_clf).fit(X, y)
        assert_equal(set(clf.classes_), classes)
        y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
        assert_equal(set(y_pred), set("eggs"))

        if test_predict_proba:
            X_test = np.array([[0, 0, 4]])
            probabilities = clf.predict_proba(X_test)
            assert_equal(2, len(probabilities[0]))
            # argmax of the probabilities must agree with predict().
            assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
                         clf.predict(X_test))

        # test input as label indicator matrix
        clf = OneVsRestClassifier(base_clf).fit(X, Y)
        y_pred = clf.predict([[3, 0, 0]])[0]
        assert_equal(y_pred, 1)

    for base_clf in (LinearSVC(random_state=0), LinearRegression(),
                     Ridge(), ElasticNet()):
        conduct_test(base_clf)

    for base_clf in (MultinomialNB(), SVC(probability=True),
                     LogisticRegression()):
        conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
    """OvR multilabel prediction works for a range of base estimators."""
    # Toy dataset where features correspond directly to labels.
    X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
    y = np.array([[0, 1, 1],
                  [0, 1, 0],
                  [1, 1, 1],
                  [1, 0, 1],
                  [1, 0, 0]])

    for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
                     LinearRegression(), Ridge(),
                     ElasticNet(), Lasso(alpha=0.5)):
        clf = OneVsRestClassifier(base_clf).fit(X, y)
        y_pred = clf.predict([[0, 4, 4]])[0]
        assert_array_equal(y_pred, [0, 1, 1])
        assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
    """OvR with SVC fits one estimator per class and scores well on iris."""
    clf = OneVsRestClassifier(svm.SVC())
    clf.fit(iris.data, iris.target)
    assert_equal(len(clf.estimators_), 3)
    assert_greater(clf.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
    """Micro-averaged precision/recall on generated multilabel data match
    known reference values, with and without unlabeled samples."""
    base_clf = MultinomialNB(alpha=1)
    for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
        X, Y = datasets.make_multilabel_classification(n_samples=100,
                                                       n_features=20,
                                                       n_classes=5,
                                                       n_labels=2,
                                                       length=50,
                                                       allow_unlabeled=au,
                                                       random_state=0)
        X_train, Y_train = X[:80], Y[:80]
        X_test, Y_test = X[80:], Y[80:]
        clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
        Y_pred = clf.predict(X_test)

        assert_true(clf.multilabel_)
        assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
                            prec,
                            decimal=2)
        assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
                            recall,
                            decimal=2)
def test_ovr_multilabel_predict_proba():
    """predict_proba is consistent with predict in multilabel OvR, and is
    unavailable for decision_function-only base estimators."""
    base_clf = MultinomialNB(alpha=1)
    for au in (False, True):
        X, Y = datasets.make_multilabel_classification(n_samples=100,
                                                       n_features=20,
                                                       n_classes=5,
                                                       n_labels=3,
                                                       length=50,
                                                       allow_unlabeled=au,
                                                       random_state=0)
        X_train, Y_train = X[:80], Y[:80]
        X_test = X[80:]
        clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)

        # decision function only estimator. Fails in current implementation.
        decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
        assert_raises(AttributeError, decision_only.predict_proba, X_test)

        # Estimator with predict_proba disabled, depending on parameters.
        decision_only = OneVsRestClassifier(svm.SVC(probability=False))
        decision_only.fit(X_train, Y_train)
        assert_raises(AttributeError, decision_only.predict_proba, X_test)

        Y_pred = clf.predict(X_test)
        Y_proba = clf.predict_proba(X_test)

        # predict assigns a label if the probability that the
        # sample has the label is greater than 0.5.
        pred = Y_proba > .5
        assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
    """In single-label OvR, probabilities sum to one and their argmax
    agrees with predict()."""
    base_clf = MultinomialNB(alpha=1)
    X, Y = iris.data, iris.target
    X_train, Y_train = X[:80], Y[:80]
    X_test = X[80:]
    clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)

    # decision function only estimator. Fails in current implementation.
    decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
    assert_raises(AttributeError, decision_only.predict_proba, X_test)

    Y_pred = clf.predict(X_test)
    Y_proba = clf.predict_proba(X_test)

    assert_almost_equal(Y_proba.sum(axis=1), 1.0)
    # predict assigns a label if the probability that the
    # sample has the label is greater than 0.5.
    pred = np.array([l.argmax() for l in Y_proba])
    assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
    """Thresholding decision_function at 0 reproduces predict() in
    multilabel OvR."""
    X, Y = datasets.make_multilabel_classification(n_samples=100,
                                                   n_features=20,
                                                   n_classes=5,
                                                   n_labels=3,
                                                   length=50,
                                                   allow_unlabeled=True,
                                                   random_state=0)
    X_train, Y_train = X[:80], Y[:80]
    X_test = X[80:]
    clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
    assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
                       clf.predict(X_test))
def test_ovr_single_label_decision_function():
    """Thresholding decision_function at 0 reproduces predict() in
    binary OvR."""
    X, Y = datasets.make_classification(n_samples=100,
                                        n_features=20,
                                        random_state=0)
    X_train, Y_train = X[:80], Y[:80]
    X_test = X[80:]
    clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
    assert_array_equal(clf.decision_function(X_test).ravel() > 0,
                       clf.predict(X_test))
def test_ovr_gridsearch():
    """Grid search over the wrapped estimator's C parameter works for OvR."""
    ovr = OneVsRestClassifier(LinearSVC(random_state=0))
    c_grid = [0.1, 0.5, 0.8]
    search = GridSearchCV(ovr, {'estimator__C': c_grid})
    search.fit(iris.data, iris.target)
    best_C = search.best_estimator_.estimators_[0].C
    assert_true(best_C in c_grid)
def test_ovr_pipeline():
    """A one-step Pipeline base estimator behaves like the bare estimator."""
    # Test with pipeline of length one
    # This test is needed because the multiclass estimators may fail to detect
    # the presence of predict_proba or decision_function.
    clf = Pipeline([("tree", DecisionTreeClassifier())])
    ovr_pipe = OneVsRestClassifier(clf)
    ovr_pipe.fit(iris.data, iris.target)
    ovr = OneVsRestClassifier(DecisionTreeClassifier())
    ovr.fit(iris.data, iris.target)
    assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
    """ovr.coef_ has shape (n_classes, n_features) and preserves sparsity."""
    for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
        # SVC has sparse coef with sparse input data
        ovr = OneVsRestClassifier(base_classifier)
        for X in [iris.data, sp.csr_matrix(iris.data)]:
            # test with dense and sparse coef
            ovr.fit(X, iris.target)
            shape = ovr.coef_.shape
            assert_equal(shape[0], n_classes)
            assert_equal(shape[1], iris.data.shape[1])
            # don't densify sparse coefficients
            assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
    """coef_ raises before fit and when the base estimator lacks coef_."""
    # Not fitted exception!
    ovr = OneVsRestClassifier(LinearSVC(random_state=0))
    # lambda is needed because we don't want coef_ to be evaluated right away
    assert_raises(ValueError, lambda x: ovr.coef_, None)

    # Doesn't have coef_ exception!
    ovr = OneVsRestClassifier(DecisionTreeClassifier())
    ovr.fit(iris.data, iris.target)
    assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
    """Predicting with an unfitted OneVsOneClassifier raises ValueError."""
    unfitted = OneVsOneClassifier(LinearSVC(random_state=0))
    assert_raises(ValueError, unfitted.predict, [])
def test_ovo_fit_on_list():
    """Fitting OvO on a Python list target matches fitting on an array."""
    # Test that OneVsOne fitting works with a list of targets and yields the
    # same output as predict from an array
    ovo = OneVsOneClassifier(LinearSVC(random_state=0))
    prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
    prediction_from_list = ovo.fit(iris.data,
                                   list(iris.target)).predict(iris.data)
    assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
    """OvO fits n_classes * (n_classes - 1) / 2 pairwise estimators."""
    # A classifier which implements decision_function.
    ovo = OneVsOneClassifier(LinearSVC(random_state=0))
    ovo.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)

    # A classifier which implements predict_proba.
    ovo = OneVsOneClassifier(MultinomialNB())
    ovo.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
    """OvO decision_function combines pairwise votes with confidences and
    resolves most vote ties."""
    n_samples = iris.data.shape[0]

    ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
    ovo_clf.fit(iris.data, iris.target)
    decisions = ovo_clf.decision_function(iris.data)

    assert_equal(decisions.shape, (n_samples, n_classes))
    assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))

    # Compute the votes
    votes = np.zeros((n_samples, n_classes))

    k = 0
    for i in range(n_classes):
        for j in range(i + 1, n_classes):
            pred = ovo_clf.estimators_[k].predict(iris.data)
            votes[pred == 0, i] += 1
            votes[pred == 1, j] += 1
            k += 1

    # Extract votes and verify
    assert_array_equal(votes, np.round(decisions))

    for class_idx in range(n_classes):
        # For each sample and each class, there only 3 possible vote levels
        # because they are only 3 distinct class pairs thus 3 distinct
        # binary classifiers.
        # Therefore, sorting predictions based on votes would yield
        # mostly tied predictions:
        assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))

        # The OVO decision function on the other hand is able to resolve
        # most of the ties on this data as it combines both the vote counts
        # and the aggregated confidence levels of the binary classifiers
        # to compute the aggregate decision function. The iris dataset
        # has 150 samples with a couple of duplicates. The OvO decisions
        # can resolve most of the ties:
        assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
    """Grid search over the wrapped estimator's C parameter works for OvO."""
    ovo = OneVsOneClassifier(LinearSVC(random_state=0))
    c_grid = [0.1, 0.5, 0.8]
    search = GridSearchCV(ovo, {'estimator__C': c_grid})
    search.fit(iris.data, iris.target)
    best_C = search.best_estimator_.estimators_[0].C
    assert_true(best_C in c_grid)
def test_ovo_ties():
    """Vote ties in OvO are broken by the aggregated decision values, not
    by defaulting to the smallest label."""
    # Test that ties are broken using the decision function,
    # not defaulting to the smallest label
    X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
    y = np.array([2, 0, 1, 2])
    multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
    ovo_prediction = multi_clf.fit(X, y).predict(X)
    ovo_decision = multi_clf.decision_function(X)

    # Classifiers are in order 0-1, 0-2, 1-2
    # Use decision_function to compute the votes and the normalized
    # sum_of_confidences, which is used to disambiguate when there is a tie in
    # votes.
    votes = np.round(ovo_decision)
    normalized_confidences = ovo_decision - votes

    # For the first point, there is one vote per class
    assert_array_equal(votes[0, :], 1)
    # For the rest, there is no tie and the prediction is the argmax
    assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
    # For the tie, the prediction is the class with the highest score
    assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
    """A voting tie must be winnable by any class, not just the first two."""
    X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
    base_labels = np.array([2, 0, 1, 2])
    # Rotate the labels so that each class in turn is the tie winner.
    for shift in range(3):
        rotated = (base_labels + shift) % 3
        classifier = OneVsOneClassifier(Perceptron(shuffle=False))
        predictions = classifier.fit(X, rotated).predict(X)
        assert_equal(predictions[0], shift % 3)
def test_ovo_string_y():
    """OneVsOne must round-trip string class labels unchanged."""
    features = np.eye(4)
    labels = np.array(['a', 'b', 'c', 'd'])
    classifier = OneVsOneClassifier(LinearSVC())
    classifier.fit(features, labels)
    assert_array_equal(labels, classifier.predict(features))
def test_ecoc_exceptions():
    """predict() on an unfitted OutputCodeClassifier must raise ValueError."""
    ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
    assert_raises(ValueError, ecoc.predict, [])


def test_ecoc_fit_predict():
    """ECOC works with decision_function- and predict_proba-based estimators
    and builds n_classes * code_size binary estimators."""
    # A classifier which implements decision_function.
    ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
                                code_size=2, random_state=0)
    ecoc.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ecoc.estimators_), n_classes * 2)

    # A classifier which implements predict_proba.
    ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
    ecoc.fit(iris.data, iris.target).predict(iris.data)
    assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
    """GridSearchCV must tune the base estimator inside OutputCodeClassifier."""
    candidate_Cs = [0.1, 0.5, 0.8]
    meta = OutputCodeClassifier(LinearSVC(random_state=0), random_state=0)
    search = GridSearchCV(meta, {'estimator__C': candidate_Cs})
    search.fit(iris.data, iris.target)
    # The winning C must come from the candidate grid.
    chosen_C = search.best_estimator_.estimators_[0].C
    assert_true(chosen_C in candidate_Cs)
@ignore_warnings
def test_deprecated():
    """Check the deprecated fit_ovr/fit_ovo/fit_ecoc helper functions agree
    with the corresponding meta-estimator classes (warnings suppressed)."""
    base_estimator = DecisionTreeClassifier(random_state=0)
    X, Y = iris.data, iris.target
    X_train, Y_train = X[:80], Y[:80]
    X_test = X[80:]

    all_metas = [
        (OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
        (OneVsOneClassifier, fit_ovo, predict_ovo, None),
        (OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
    ]

    for MetaEst, fit_func, predict_func, proba_func in all_metas:
        try:
            # Prefer the seeded variant when random_state is accepted.
            meta_est = MetaEst(base_estimator,
                               random_state=0).fit(X_train, Y_train)
            fitted_return = fit_func(base_estimator, X_train, Y_train,
                                     random_state=0)
        except TypeError:
            # Fall back for helpers/estimators without a random_state arg.
            meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
            fitted_return = fit_func(base_estimator, X_train, Y_train)

        if len(fitted_return) == 2:
            # OvR / OvO return (estimators, classes-or-label-binarizer).
            estimators_, classes_or_lb = fitted_return
            assert_almost_equal(predict_func(estimators_, classes_or_lb,
                                             X_test),
                                meta_est.predict(X_test))

            if proba_func is not None:
                assert_almost_equal(proba_func(estimators_, X_test,
                                               is_multilabel=False),
                                    meta_est.predict_proba(X_test))
        else:
            # ECOC additionally returns its codebook.
            estimators_, classes_or_lb, codebook = fitted_return
            assert_almost_equal(predict_func(estimators_, classes_or_lb,
                                             codebook, X_test),
                                meta_est.predict(X_test))
| bsd-3-clause |
btabibian/scikit-learn | sklearn/tests/test_base.py | 15 | 14534 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
import sklearn
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_dict_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
from sklearn.utils import deprecated
from sklearn.base import TransformerMixin
from sklearn.utils.mocking import MockDataFrame
import pickle
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
    """Minimal estimator with one scalar and one arbitrary init parameter;
    used by the clone tests."""

    def __init__(self, l1=0, empty=None):
        self.l1 = l1
        self.empty = empty


class K(BaseEstimator):
    """Leaf estimator used as a nested parameter in repr/get_params tests."""

    def __init__(self, c=None, d=None):
        self.c = c
        self.d = d


class T(BaseEstimator):
    """Estimator whose parameters are themselves estimators (see K)."""

    def __init__(self, a=None, b=None):
        self.a = a
        self.b = b
class ModifyInitParams(BaseEstimator):
    """Deprecated behavior.
    Equal parameters but with a type cast.
    Doesn't fulfill a is a
    """
    def __init__(self, a=np.array([0])):
        # Copying the argument breaks the sklearn contract that __init__
        # stores parameters untouched; clone() warns about this.
        self.a = a.copy()


class DeprecatedAttributeEstimator(BaseEstimator):
    """Estimator with parameter 'b' deprecated in favour of 'a'."""

    def __init__(self, a=None, b=None):
        self.a = a
        if b is not None:
            # NOTE(review): this instantiates a DeprecationWarning but never
            # warns/raises with it -- presumably intentional for the test;
            # confirm before relying on a warning being emitted here.
            DeprecationWarning("b is deprecated and renamed 'a'")
            self.a = b

    @property
    @deprecated("Parameter 'b' is deprecated and renamed to 'a'")
    def b(self):
        # NOTE(review): self._b is never assigned anywhere in this class, so
        # reading .b would warn and then raise AttributeError.
        return self._b
class Buggy(BaseEstimator):
    " A buggy estimator that does not set its parameters right. "

    def __init__(self, a=None):
        # Deliberately ignores the argument: clone() must detect the mismatch.
        self.a = 1
class NoEstimator(object):
    """Object exposing fit/predict but no get_params; clone() must reject it."""

    def __init__(self):
        pass

    def fit(self, X=None, y=None):
        # Follow the scikit-learn convention of returning self from fit.
        return self

    def predict(self, X=None):
        # Stub: this fixture never produces predictions.
        return None
class VargEstimator(BaseEstimator):
    """scikit-learn estimators shouldn't have vargs."""
    # *vargs makes the __init__ signature un-introspectable, so clone()
    # must raise for instances of this class.
    def __init__(self, *vargs):
        pass
#############################################################################
# The tests
def test_clone():
    # Tests that clone creates a correct deep copy.
    # We create an estimator, make a copy of its original state
    # (which, in this case, is the current state of the estimator),
    # and check that the obtained copy is a correct deep copy.

    from sklearn.feature_selection import SelectFpr, f_classif

    selector = SelectFpr(f_classif, alpha=0.1)
    new_selector = clone(selector)
    assert_true(selector is not new_selector)
    assert_equal(selector.get_params(), new_selector.get_params())

    # Cloning must also work when a parameter is a numpy array.
    selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
    new_selector = clone(selector)
    assert_true(selector is not new_selector)


def test_clone_2():
    # Tests that clone doesn't copy everything.
    # We first create an estimator, give it an own attribute, and
    # make a copy of its original state. Then we check that the copy doesn't
    # have the specific attribute we manually added to the initial estimator.

    from sklearn.feature_selection import SelectFpr, f_classif

    selector = SelectFpr(f_classif, alpha=0.1)
    selector.own_attribute = "test"
    new_selector = clone(selector)
    assert_false(hasattr(new_selector, "own_attribute"))


def test_clone_buggy():
    # Check that clone raises an error on buggy estimators.
    buggy = Buggy()
    buggy.a = 2
    assert_raises(RuntimeError, clone, buggy)

    # An object with no get_params is not clonable at all.
    no_estimator = NoEstimator()
    assert_raises(TypeError, clone, no_estimator)

    # Estimators taking *vargs cannot be introspected for cloning.
    varg_est = VargEstimator()
    assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
    # Regression test for cloning estimators with empty arrays
    clf = MyEstimator(empty=np.array([]))
    clf2 = clone(clf)
    assert_array_equal(clf.empty, clf2.empty)

    # Sparse-matrix parameters must survive cloning as well.
    clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
    clf2 = clone(clf)
    assert_array_equal(clf.empty.data, clf2.empty.data)


def test_clone_nan():
    # Regression test for cloning estimators with default parameter as np.nan
    clf = MyEstimator(empty=np.nan)
    clf2 = clone(clf)

    # nan != nan, so the clone check relies on object identity here.
    assert_true(clf.empty is clf2.empty)


def test_clone_copy_init_params():
    # test for deprecation warning when copying or casting an init parameter
    est = ModifyInitParams()
    message = ("Estimator ModifyInitParams modifies parameters in __init__. "
               "This behavior is deprecated as of 0.18 and support "
               "for this behavior will be removed in 0.20.")
    assert_warns_message(DeprecationWarning, message, clone, est)
def test_clone_sparse_matrices():
    """clone() must preserve both the class and the contents of every
    scipy.sparse matrix type used as a parameter."""
    sparse_matrix_classes = [
        getattr(sp, name)
        for name in dir(sp) if name.endswith('_matrix')]

    for cls in sparse_matrix_classes:
        sparse_matrix = cls(np.eye(5))
        clf = MyEstimator(empty=sparse_matrix)
        clf_cloned = clone(clf)
        assert_true(clf.empty.__class__ is clf_cloned.empty.__class__)
        assert_array_equal(clf.empty.toarray(), clf_cloned.empty.toarray())


def test_repr():
    # Smoke test the repr of the base estimator.
    my_estimator = MyEstimator()
    repr(my_estimator)
    test = T(K(), K())
    assert_equal(
        repr(test),
        "T(a=K(c=None, d=None), b=K(c=None, d=None))"
    )

    # Very long parameter lists are truncated to a fixed-length repr.
    some_est = T(a=["long_params"] * 1000)
    assert_equal(len(repr(some_est)), 415)
def test_str():
    # Smoke test the str of the base estimator
    my_estimator = MyEstimator()
    str(my_estimator)


def test_get_params():
    """get_params(deep=True) exposes nested params as 'a__d', and
    set_params must accept the same double-underscore addressing."""
    test = T(K(), K())

    assert_true('a__d' in test.get_params(deep=True))
    assert_true('a__d' not in test.get_params(deep=False))

    test.set_params(a__d=2)
    assert_true(test.a.d == 2)
    # Setting an unknown nested parameter must raise.
    assert_raises(ValueError, test.set_params, a__a=2)


def test_get_params_deprecated():
    # deprecated attribute should not show up as params
    est = DeprecatedAttributeEstimator(a=1)

    assert_true('a' in est.get_params())
    assert_true('a' in est.get_params(deep=True))
    assert_true('a' in est.get_params(deep=False))

    assert_true('b' not in est.get_params())
    assert_true('b' not in est.get_params(deep=True))
    assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
    """is_classifier must see through meta-estimators and pipelines."""
    svc = SVC()
    assert_true(is_classifier(svc))
    assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
    assert_true(is_classifier(Pipeline([('svc', svc)])))
    assert_true(is_classifier(Pipeline(
        [('svc_cv', GridSearchCV(svc, {'C': [0.1, 1]}))])))


def test_set_params():
    # test nested estimator parameter setting
    clf = Pipeline([("svc", SVC())])
    # non-existing parameter in svc
    assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
    # non-existing parameter of pipeline
    assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
    # we don't currently catch if the things in pipeline are estimators
    # bad_pipeline = Pipeline([("bad", NoEstimator())])
    # assert_raises(AttributeError, bad_pipeline.set_params,
    #               bad__stupid_param=True)
def test_score_sample_weight():
    """score() must honour sample_weight for both classifier and regressor
    mixins."""
    rng = np.random.RandomState(0)

    # test both ClassifierMixin and RegressorMixin
    estimators = [DecisionTreeClassifier(max_depth=2),
                  DecisionTreeRegressor(max_depth=2)]
    sets = [datasets.load_iris(),
            datasets.load_boston()]

    for est, ds in zip(estimators, sets):
        est.fit(ds.data, ds.target)
        # generate random sample weights
        sample_weight = rng.randint(1, 10, size=len(ds.target))
        # check that the score with and without sample weights are different
        assert_not_equal(est.score(ds.data, ds.target),
                         est.score(ds.data, ds.target,
                                   sample_weight=sample_weight),
                         msg="Unweighted and weighted scores "
                             "are unexpectedly equal")
def test_clone_pandas_dataframe():
    """clone() must handle a pandas-DataFrame-like object as an init
    parameter (uses MockDataFrame to avoid a hard pandas dependency)."""

    class DummyEstimator(BaseEstimator, TransformerMixin):
        """This is a dummy class for generating numerical features

        This feature extractor extracts numerical features from pandas data
        frame.

        Parameters
        ----------

        df: pandas data frame
            The pandas data frame parameter.

        Notes
        -----
        """
        def __init__(self, df=None, scalar_param=1):
            self.df = df
            self.scalar_param = scalar_param

        def fit(self, X, y=None):
            pass

        def transform(self, X, y=None):
            pass

    # build and clone estimator
    d = np.arange(10)
    df = MockDataFrame(d)
    e = DummyEstimator(df, scalar_param=1)
    cloned_e = clone(e)

    # the test
    assert_true((e.df == cloned_e.df).values.all())
    assert_equal(e.scalar_param, cloned_e.scalar_param)
def test_pickle_version_warning_is_not_raised_with_matching_version():
    """Pickling and unpickling under the same sklearn version is silent."""
    iris = datasets.load_iris()
    tree = DecisionTreeClassifier().fit(iris.data, iris.target)
    tree_pickle = pickle.dumps(tree)
    # BaseEstimator.__getstate__ embeds the sklearn version in the pickle.
    assert_true(b"version" in tree_pickle)
    tree_restored = assert_no_warnings(pickle.loads, tree_pickle)

    # test that we can predict with the restored decision tree classifier
    score_of_original = tree.score(iris.data, iris.target)
    score_of_restored = tree_restored.score(iris.data, iris.target)
    assert_equal(score_of_original, score_of_restored)
class TreeBadVersion(DecisionTreeClassifier):
    """Tree whose pickle state claims a bogus sklearn version string."""

    def __getstate__(self):
        return dict(self.__dict__.items(), _sklearn_version="something")


# Template for the warning emitted when unpickling across versions; the
# version-warning tests below fill in the placeholders.
pickle_error_message = (
    "Trying to unpickle estimator {estimator} from "
    "version {old_version} when using version "
    "{current_version}. This might "
    "lead to breaking code or invalid results. "
    "Use at your own risk.")
def test_pickle_version_warning_is_issued_upon_different_version():
    """Unpickling an estimator pickled under another sklearn version warns."""
    iris = datasets.load_iris()
    tree = TreeBadVersion().fit(iris.data, iris.target)
    tree_pickle_other = pickle.dumps(tree)
    message = pickle_error_message.format(estimator="TreeBadVersion",
                                          old_version="something",
                                          current_version=sklearn.__version__)
    assert_warns_message(UserWarning, message, pickle.loads, tree_pickle_other)


class TreeNoVersion(DecisionTreeClassifier):
    """Tree whose pickle state carries no version info (pre-0.18 style)."""

    def __getstate__(self):
        return self.__dict__
def test_pickle_version_warning_is_issued_when_no_version_info_in_pickle():
    """A pickle without version info is treated as pre-0.18 and warns."""
    iris = datasets.load_iris()
    # TreeNoVersion has no getstate, like pre-0.18
    tree = TreeNoVersion().fit(iris.data, iris.target)

    tree_pickle_noversion = pickle.dumps(tree)
    assert_false(b"version" in tree_pickle_noversion)
    message = pickle_error_message.format(estimator="TreeNoVersion",
                                          old_version="pre-0.18",
                                          current_version=sklearn.__version__)
    # check we got the warning about using pre-0.18 pickle
    assert_warns_message(UserWarning, message, pickle.loads,
                         tree_pickle_noversion)


def test_pickle_version_no_warning_is_issued_with_non_sklearn_estimator():
    """No version warning for estimators whose module is outside sklearn."""
    iris = datasets.load_iris()
    tree = TreeNoVersion().fit(iris.data, iris.target)
    tree_pickle_noversion = pickle.dumps(tree)
    try:
        module_backup = TreeNoVersion.__module__
        # Temporarily pretend the class comes from a third-party package.
        TreeNoVersion.__module__ = "notsklearn"
        assert_no_warnings(pickle.loads, tree_pickle_noversion)
    finally:
        TreeNoVersion.__module__ = module_backup
class DontPickleAttributeMixin(object):
    """Mixin whose pickle state always blanks ``_attribute_not_pickled`` and
    marks restored instances with ``_restored = True``."""

    def __getstate__(self):
        # Copy the instance dict so the live object keeps its value.
        state = dict(self.__dict__)
        state["_attribute_not_pickled"] = None
        return state

    def __setstate__(self, state):
        # Flag that this instance went through unpickling.
        state["_restored"] = True
        self.__dict__.update(state)
class MultiInheritanceEstimator(BaseEstimator, DontPickleAttributeMixin):
    """Estimator combining BaseEstimator with a custom-pickling mixin."""

    def __init__(self, attribute_pickled=5):
        self.attribute_pickled = attribute_pickled
        # Transient attribute; the mixin blanks it when pickling.
        self._attribute_not_pickled = None
def test_pickling_when_getstate_is_overwritten_by_mixin():
    """The mixin's __getstate__/__setstate__ must win over BaseEstimator's."""
    estimator = MultiInheritanceEstimator()
    estimator._attribute_not_pickled = "this attribute should not be pickled"

    serialized = pickle.dumps(estimator)
    estimator_restored = pickle.loads(serialized)
    assert_equal(estimator_restored.attribute_pickled, 5)
    assert_equal(estimator_restored._attribute_not_pickled, None)
    assert_true(estimator_restored._restored)


def test_pickling_when_getstate_is_overwritten_by_mixin_outside_of_sklearn():
    """Same as above, but with the class pretending to be non-sklearn so
    no version field is injected into the state."""
    try:
        estimator = MultiInheritanceEstimator()
        text = "this attribute should not be pickled"
        estimator._attribute_not_pickled = text
        old_mod = type(estimator).__module__
        type(estimator).__module__ = "notsklearn"

        serialized = estimator.__getstate__()
        assert_dict_equal(serialized, {'_attribute_not_pickled': None,
                                       'attribute_pickled': 5})

        serialized['attribute_pickled'] = 4
        estimator.__setstate__(serialized)
        assert_equal(estimator.attribute_pickled, 4)
        assert_true(estimator._restored)
    finally:
        # Restore the real module name even if an assertion failed.
        type(estimator).__module__ = old_mod
class SingleInheritanceEstimator(BaseEstimator):
    """Estimator that blanks ``_attribute_not_pickled`` in its own
    __getstate__ override (no mixin involved)."""

    def __init__(self, attribute_pickled=5):
        self.attribute_pickled = attribute_pickled
        self._attribute_not_pickled = None

    def __getstate__(self):
        # Copy the dict so the live instance keeps its attribute value.
        data = self.__dict__.copy()
        data["_attribute_not_pickled"] = None
        return data


@ignore_warnings(category=(UserWarning))
def test_pickling_works_when_getstate_is_overwritten_in_the_child_class():
    """Round-trip pickling with a child-class __getstate__ override."""
    estimator = SingleInheritanceEstimator()
    estimator._attribute_not_pickled = "this attribute should not be pickled"

    serialized = pickle.dumps(estimator)
    estimator_restored = pickle.loads(serialized)
    assert_equal(estimator_restored.attribute_pickled, 5)
    assert_equal(estimator_restored._attribute_not_pickled, None)
| bsd-3-clause |
microsoft/Azure-Kinect-Sensor-SDK | src/python/k4a/examples/image_transformations.py | 1 | 4449 | '''
image_transformations.py
A simple program that transforms images from one camera coordinate to another.
Requirements:
Users should install the following python packages before using this module:
matplotlib
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
Kinect For Azure SDK.
'''
# This package is used for displaying the images.
# It is not part of the k4a package and is not a hard requirement for k4a.
# Users need to install these packages in order to use this module.
import matplotlib.pyplot as plt
import numpy as np
# This will import all the public symbols into the k4a namespace.
import k4a
def plot_images(image1:k4a.Image, image2:k4a.Image, image3:k4a.Image, cmap:str=''):
    '''Display a color image, a depth image, and a transformed image
    side by side in one matplotlib figure.

    image1: color image, shown with matplotlib's default rendering.
    image2: depth image, always shown with the 'jet' colormap.
    image3: transformed image; *cmap* selects its colormap, and an empty
            string (the default) or None keeps matplotlib's default.
    '''
    # Create figure and one row of three subplots.
    fig = plt.figure()
    ax = [fig.add_subplot(1, 3, index + 1, label=label)
          for index, label in enumerate(("Color", "Depth", "IR"))]

    # Display images.
    im = [ax[0].imshow(image1.data),
          ax[1].imshow(image2.data, cmap='jet')]
    # Truthiness covers both '' and None, so a None cmap no longer raises.
    if cmap:
        im.append(ax[2].imshow(image3.data, cmap=cmap))
    else:
        im.append(ax[2].imshow(image3.data))

    # Create axes titles.
    for axis, title in zip(ax, ('Color', 'Depth', 'Transformed Image')):
        axis.title.set_text(title)

    plt.show()
def image_transformations():
    """Capture one frame from an Azure Kinect device and demonstrate the
    depth<->color coordinate transformations, plotting each result."""
    # Open a device using the "with" syntax.
    with k4a.Device.open() as device:

        # In order to start capturing frames, need to start the cameras.
        # The start_cameras() function requires a device configuration which
        # specifies the modes in which to put the color and depth cameras.
        # For convenience, the k4a package pre-defines some configurations
        # for common usage of the Azure Kinect device, but the user can
        # modify the values to set the device in their preferred modes.
        device_config = k4a.DEVICE_CONFIG_BGRA32_1080P_WFOV_2X2BINNED_FPS15
        status = device.start_cameras(device_config)
        if status != k4a.EStatus.SUCCEEDED:
            raise IOError("Failed to start cameras.")

        # In order to create a Transformation class, we first need to get
        # a Calibration instance. Getting a calibration object needs the
        # depth mode and color camera resolution. Thankfully, this is part
        # of the device configuration used in the start_cameras() function.
        calibration = device.get_calibration(
            depth_mode=device_config.depth_mode,
            color_resolution=device_config.color_resolution)

        # Create a Transformation object using the calibration object as param.
        transform = k4a.Transformation(calibration)

        # Get a capture using the "with" syntax.
        # (-1 is passed as the timeout; presumably this blocks until a
        # capture arrives -- confirm against the k4a API docs.)
        with device.get_capture(-1) as capture:

            color = capture.color
            depth = capture.depth
            ir = capture.ir

            # Get a color image but transformed in the depth space.
            color_transformed = transform.color_image_to_depth_camera(depth, color)
            plot_images(color, depth, color_transformed)

            # Get a depth image but transformed in the color space.
            depth_transformed = transform.depth_image_to_color_camera(depth)
            plot_images(color, depth, depth_transformed, cmap='jet')

            # Get a depth image and custom image but transformed in the color
            # space. Create a custom image. This must have EImageFormat.CUSTOM8
            # or EImageFormat.CUSTOM16 as the image_format, so create an
            # entirely new Image and copy the IR data to that image.
            ir_custom = k4a.Image.create(
                k4a.EImageFormat.CUSTOM16,
                ir.width_pixels,
                ir.height_pixels,
                ir.stride_bytes)
            np.copyto(ir_custom.data, ir.data)

            depth_transformed, ir_transformed = transform.depth_image_to_color_camera_custom(
                depth, ir_custom, k4a.ETransformInterpolationType.LINEAR, 0)
            plot_images(color, depth_transformed, ir_transformed, cmap='gray')

    # There is no need to delete resources since Python will take care
    # of releasing resources in the objects' deleters. To explicitly
    # delete the images, capture, and device objects, call del on them.
# Run the demo only when this file is executed as a script, not on import.
if __name__ == '__main__':
    image_transformations()
| mit |
k2kobayashi/sprocket | sprocket/model/tests/test_ms.py | 1 | 1786 | import unittest
import numpy as np
from sprocket.model import MS
from sprocket.util import low_pass_filter
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Module-level test settings.
saveflag = True  # when True, write the diagnostic plots to PNG files
dim = 4  # dimensionality of the synthetic feature vectors
class MSTest(unittest.TestCase):
    """Tests for the modulation-spectrum (MS) statistics and postfilter."""

    def test_MSstatistics(self):
        ms = MS()
        # Estimate MS statistics from three low-pass-filtered random
        # sequences of increasing length.
        datalist = []
        for i in range(1, 4):
            T = 200 * i
            data = low_pass_filter(np.random.rand(T * dim).reshape(T, dim), 50, fs=200, n_taps=63)
            datalist.append(data)
        msstats = ms.estimate(datalist)

        # Apply the MS postfilter to raw and to low-pass-filtered data,
        # using the estimated statistics as both source and target.
        data = np.random.rand(500 * dim).reshape(500, dim)
        data_lpf = low_pass_filter(data, 50, fs=200, n_taps=63)
        data_ms = ms.logpowerspec(data)
        data_lpf_ms = ms.logpowerspec(data_lpf)
        odata = ms.postfilter(data, msstats, msstats, startdim=0)
        odata_lpf = ms.postfilter(data_lpf, msstats, msstats, startdim=0)
        # Postfiltering must preserve the number of frames.
        assert data.shape[0] == odata.shape[0]

        if saveflag:
            # plot sequence
            plt.figure()
            plt.plot(data[:, 0], label='data')
            plt.plot(data_lpf[:, 0], label='data_lpf')
            plt.plot(odata[:, 0], label='odata')
            plt.plot(odata_lpf[:, 0], label='odata_lpf')
            plt.xlim(0, 100)
            plt.legend()
            plt.savefig('ms_seq.png')

            # plot MS
            plt.figure()
            plt.plot(msstats[:, 0], label='msstats')
            plt.plot(data_ms[:, 0], label='data')
            plt.plot(data_lpf_ms[:, 0], label='data_lpf')
            plt.plot(ms.logpowerspec(odata)[:, 0], label='mspf data')
            plt.plot(ms.logpowerspec(odata_lpf)[:, 0], label='mspf data_lpf')
            # Only the non-redundant half of the spectrum is informative.
            plt.xlim(0, msstats.shape[0] // 2 + 1)
            plt.legend()
            plt.savefig('ms.png')
| mit |
Djabbz/scikit-learn | examples/gaussian_process/plot_gpr_co2.py | 9 | 5718 | """
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214:
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata
# NOTE(review): fetch_mldata relies on the defunct mldata.org service and was
# removed in later scikit-learn versions; may need fetch_openml instead.
data = fetch_mldata('mauna-loa-atmospheric-co2').data
# Column 1 appears to be the (decimal) date and column 0 the CO2 level in
# ppm -- matches the axis labels used in the plot below; confirm.
X = data[:, [1]]
y = data[:, 0]

# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0)  # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
    * ExpSineSquared(length_scale=1.3, periodicity=1.0)  # seasonal component
# medium term irregularity
k3 = 0.66**2 \
    * RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
    + WhiteKernel(noise_level=0.19**2)  # noise terms
kernel_gpml = k1 + k2 + k3 + k4

# optimizer=None keeps the book's hyperparameters fixed for comparison.
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
                              optimizer=None, normalize_y=True)
gp.fit(X, y)

print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
      % gp.log_marginal_likelihood(gp.kernel_.theta))

# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0)  # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
    * ExpSineSquared(length_scale=1.0, periodicity=1.0,
                     periodicity_bounds="fixed")  # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
    + WhiteKernel(noise_level=0.1**2,
                  noise_level_bounds=(1e-3, np.inf))  # noise terms
kernel = k1 + k2 + k3 + k4

# This time the hyperparameters are fitted by maximizing the LML.
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
                              normalize_y=True)
gp.fit(X, y)

print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
      % gp.log_marginal_likelihood(gp.kernel_.theta))

# Predict 30 years beyond the last observation, with uncertainties.
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)

# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
                 alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
| bsd-3-clause |
CGATOxford/CGATPipelines | CGATPipelines/pipeline_rnaseqqc.py | 1 | 57789 |
"""
====================
RNASeqQC pipeline
====================
Overview
========
This pipeline should be run as the first step in your RNA seq analysis
work flow. It will help detect error and biases within your raw
data. The output of the pipeline can be used to filter out problematic
cells in a standard RNA seq experiment. For single cell RNA seq the
pipeline_rnaseqqc.py should be run instead.
Sailfish is used to perform rapid alignment-free transcript
quantification and hisat is used to align a subset of reads to the
reference genome.
From the sailfish and hisat output, a number of analyses are
performed, either within the pipeline or during the reporting:
- Proportion of reads aligned to annotated features
(rRNA, protein coding, lincRNA etc)
- Sequencing depth saturation curves Per Sample
- Per-sample expression distributions
- Strandedness assesment
- Assessment of sequence biases
- Expression of top genes and expression of genes of interest
Most of the above analysis will group samples by the sample factors
(see Important configuration options below for details on how factors
are identified)
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning`
on general information how to use CGAT pipelines.
Input
-----
Reads are imported by placing files or linking to files in the :term:
`working directory`.
The following suffixes/file types are possible:
sra
Short-Read Archive format. Reads will be extracted using the :file:
`fastq-dump` tool.
fastq.gz
Single-end reads in fastq format.
fastq.1.gz, fastq.2.gz
Paired-end reads in fastq format.
The two fastq files must be sorted by read-pair.
.. note::
Quality scores need to be of the same scale for all input files.
Thus it might be difficult to mix different formats.
Important configuration options
===============================
To determine the experimental factors in your experiment, name files
with factors separated by ``-``, for example::
sample1-mRNA-10k-R1-L01.fastq.1.gz
sample1-mRNA-10k-R1-L01.fastq.2.gz
sample1-mRNA-10k-R1-L02.fastq.1.gz
sample1-mRNA-10k-R1-L02.fastq.2.gz
sample1-mRNA-150k-R1-L01.fastq.1.gz
sample1-mRNA-150k-R1-L01.fastq.2.gz
sample1-mRNA-150k-R1-L02.fastq.1.gz
sample1-mRNA-150k-R1-L02.fastq.2.gz
and then set the ``factors`` variable in :file:`pipeline.ini` to::
factors=experiment-source-replicate-lane
If you want to include additional factors which are not identifiable
from the sample names you can specfify these in an optional file
"additional_factors.tsv". This file must contain the sample names in
the first columns and then an additional column for each factor (tab
separated). See below for an example to include the additional factors
"preparation_date" and "rna_quality":
sample preparation_date rna_quality
sample1-mRNA-10k-R1-L01 01-01-2016 poor
sample1-mRNA-10k-R1-L01 01-01-2016 poor
sample1-mRNA-10k-R1-L02 04-01-2016 good
sample1-mRNA-10k-R1-L02 04-01-2016 good
Pipeline output
===============
The major output is a set of HTML pages and plots reporting on the
apparent biases in transcript abudance within the sequence archive
The pipeline also produces output in the database file:`csvdb`.
Example
=======
Example data is available at
http://www.cgat.org/~andreas/sample_data/pipeline_rnaseqqc.tgz.
To run the example, simply unpack and untar::
wget http://www.cgat.org/~andreas/sample_data/pipeline_readqc.tgz
tar -xvzf pipeline_readqc.tgz
cd pipeline_readqc
python <srcdir>/pipeline_readqc.py make full
Requirements:
+---------+------------+------------------------------------------------+
|*Program*|*Version* |*Purpose* |
+---------+------------+------------------------------------------------+
|sailfish |>=0.9.0 |pseudo alignment |
+---------+------------+------------------------------------------------+
|hisat |>=0.1.6 |read mapping |
+---------+------------+------------------------------------------------+
|samtools |>=0.1.16 |bam/sam files
+---------+------------+------------------------------------------------+
|bedtools | |work with intervals
+---------+------------+------------------------------------------------+
|picard |>=1.42 |bam/sam files
+---------+------------+------------------------------------------------+
|bamstats |>=1.22 |from CGR, liverpool
+---------+------------+------------------------------------------------+
|sra-tools| |extracting sra files
+---------+------------+------------------------------------------------+
Glossary
========
.. glossary::
hisat
hisat_- a read mapper used in the pipeline because it is
relatively quick to run
sailfish
sailfish_-a pseudoaligner that is used for quantifying the
abundance transcripts
.._hisat: http://ccb.jhu.edu/software/hisat/manual.shtml
.. sailfish: https://github.com/kingsfordgroup/sailfish
Code
====
"""
###################################################
###################################################
###################################################
# load modules
###################################################
# import ruffus
from ruffus import transform, suffix, regex, merge, \
follows, mkdir, originate, add_inputs, jobs_limit, split
# import useful standard python modules
import sys
import os
import sqlite3
import re
import pandas as pd
import numpy as np
import itertools
from scipy.stats import linregress
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
from rpy2.robjects import r as R
from rpy2.robjects import pandas2ri
import CGAT.Experiment as E
import CGAT.GTF as GTF
import CGAT.IOTools as IOTools
import CGATPipelines.PipelineMapping as PipelineMapping
import CGATPipelines.PipelineWindows as PipelineWindows
import CGATPipelines.PipelineMappingQC as PipelineMappingQC
import CGATPipelines.Pipeline as P
import json
###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
# load options from the config file
# load options from the config file; later entries in the list override
# earlier ones (task-specific ini, parent-directory ini, local ini)
P.getParameters(
    ["%s/pipeline.ini" % os.path.splitext(__file__)[0],
     "../pipeline.ini",
     "pipeline.ini"])

PARAMS = P.PARAMS

# pull in the interface parameters exported by the geneset annotation
# pipeline (prefixed "annotations_") so its output files can be used here
PARAMS.update(P.peekParameters(
    PARAMS["annotations_dir"],
    "pipeline_genesets.py",
    prefix="annotations_",
    update_interface=True,
    restrict_interface=True))
# Helper functions mapping tracks to conditions, etc
# determine the location of the input files (reads):
#   unset or 0 -> current working directory
#   1          -> "data.dir"
#   other      -> taken verbatim as a directory path
try:
    PARAMS["input"]
except KeyError:
    DATADIR = "."
else:
    if PARAMS["input"] == 0:
        DATADIR = "."
    elif PARAMS["input"] == 1:
        DATADIR = "data.dir"
    else:
        DATADIR = PARAMS["input"]  # not recommended practise.
#########################################################################
#########################################################################
#########################################################################
# define input files
# glob patterns for every supported raw-read format
SEQUENCESUFFIXES = ("*.fastq.1.gz",
                    "*.fastq.gz",
                    "*.fa.gz",
                    "*.sra",
                    "*.export.txt.gz",
                    "*.csfasta.gz",
                    "*.csfasta.F3.gz",
                    )

# full glob patterns rooted at the input directory
SEQUENCEFILES = tuple([os.path.join(DATADIR, suffix_name)
                       for suffix_name in SEQUENCESUFFIXES])

# ruffus regex capturing (1) optional directory and (2) the track name.
# NOTE(review): the "." separating track and suffix is an unescaped regex
# dot (matches any character) - presumably harmless for the expected file
# names, but confirm before tightening.
SEQUENCEFILES_REGEX = regex(
    r"(.*\/)*(\S+).(fastq.1.gz|fastq.gz|fa.gz|sra|"
    "csfasta.gz|csfasta.F3.gz|export.txt.gz)")
###################################################################
# Pipeline Utility functions
###################################################################
def connect():
    '''Open the pipeline database and attach the annotations database.

    The annotations database is attached under the schema name
    ``annotations`` so queries can join across both databases.

    Returns
    -------
    sqlite3.Connection
        Handle to the pipeline database with annotations attached.

    Raises
    ------
    ValueError
        If the configured annotations database file does not exist.
    '''
    dbh = sqlite3.connect(PARAMS["database_name"])
    annotations_db = PARAMS["annotations_database"]
    if not os.path.exists(annotations_db):
        raise ValueError(
            "can't find database '%s'" % annotations_db)
    cursor = dbh.cursor()
    cursor.execute('''ATTACH DATABASE '%s' as annotations''' % annotations_db)
    cursor.close()
    return dbh
def findSuffixedFile(prefix, suffixes):
    '''Return the first existing file formed as ``prefix + suffix``.

    Parameters
    ----------
    prefix : str
        Path prefix to test.
    suffixes : sequence of str
        Candidate suffixes, tested in order.

    Returns
    -------
    tuple
        ``(filename, suffix)`` for the first existing candidate, or
        ``(None, None)`` if none exists.
    '''
    for check_suffix in suffixes:
        check_infile = prefix + check_suffix
        if os.path.exists(check_infile):
            return (check_infile, check_suffix)
    # explicit sentinel: the previous implicit `return None` broke callers
    # that tuple-unpack the result (TypeError instead of a clear assert)
    return (None, None)
###################################################################
# count number of reads
###################################################################
@follows(mkdir("nreads.dir"))
@transform(SEQUENCEFILES,
           SEQUENCEFILES_REGEX,
           r"nreads.dir/\2.nreads")
def countReads(infile, outfile):
    '''Count number of reads in input files.

    Parameters
    ----------
    infile : str
        Raw sequence file (fastq, sra, csfasta, ...).
    outfile : str
        Output file receiving the read count.
    '''
    m = PipelineMapping.Counter()
    # P.run() picks up `statement` from the caller's locals - keep the name
    statement = m.build((infile,), outfile)
    P.run()
###################################################################
# build geneset
###################################################################
@follows(mkdir("geneset.dir"))
@merge(PARAMS["annotations_interface_geneset_all_gtf"],
       "geneset.dir/reference.gtf.gz")
def buildReferenceGeneSet(infile, outfile):
    '''filter full gene set and add attributes to create the reference gene set.

    Performs merge and filter operations:
       * Merge exons separated by small introns (< 5bp).
       * Remove transcripts with very long introns (`max_intron_size`)
       * Remove transcripts located on contigs to be ignored
         (`remove_contigs`) (usually: chrM, _random, ...)
       * (Optional) Remove transcripts overlapping repetitive sequences
         (`rna_file`)

    This preserves all features in a gtf file (exon, CDS, ...).

    Runs cuffcompare with `infile` against itself to add
    attributes such as p_id and tss_id.

    Parameters
    ----------
    infile : str
        Input filename in :term:`gtf` format
    outfile : str
        Output filename in :term:`gtf` format
    annotations_interface_rna_gff : str
        :term:`PARAMS`. Filename of :term:`gtf` file containing
        repetitive rna annotations
    genome_dir : str
        :term:`PARAMS`. Directory of :term:`fasta` formatted files
    genome : str
        :term:`PARAMS`. Genome name (e.g hg38)
    '''
    tmp_mergedfiltered = P.getTempFilename(".")

    # only filter repetitive RNA when configured
    # NOTE(review): key is spelt "repetetive" - presumably matching the ini
    # file; confirm before correcting the spelling
    if "geneset_remove_repetetive_rna" in PARAMS:
        rna_file = PARAMS["annotations_interface_rna_gff"]
    else:
        rna_file = None

    # merge/filter; removed transcripts are logged to <outfile>.removed.gz
    gene_ids = PipelineMapping.mergeAndFilterGTF(
        infile,
        tmp_mergedfiltered,
        "%s.removed.gz" % outfile,
        genome=os.path.join(PARAMS["genome_dir"], PARAMS["genome"]),
        max_intron_size=PARAMS["max_intron_size"],
        remove_contigs=PARAMS["geneset_remove_contigs"],
        rna_file=rna_file)

    # Add tss_id and p_id
    PipelineMapping.resetGTFAttributes(
        infile=tmp_mergedfiltered,
        genome=os.path.join(PARAMS["genome_dir"], PARAMS["genome"]),
        gene_ids=gene_ids,
        outfile=outfile)

    os.unlink(tmp_mergedfiltered)
@follows(mkdir("geneset.dir"))
@originate("geneset.dir/protein_coding_gene_ids.tsv")
def identifyProteinCodingGenes(outfile):
    '''Output a list of protein coding gene identifiers.

    Identify protein coding genes from the annotation database table
    and output the gene identifiers.

    Parameters
    ----------
    outfile : str
        Output file in :term:`tsv` format
    annotations_interface_table_gene_info : str
        :term:`PARAMS`. Database table name for gene information
    '''
    dbh = connect()

    table = os.path.basename(PARAMS["annotations_interface_table_gene_info"])

    # table name cannot be bound as a SQL parameter, hence the %-substitution
    select = dbh.execute("""SELECT DISTINCT gene_id
                         FROM annotations.%(table)s
                         WHERE gene_biotype = 'protein_coding'""" % locals())

    with IOTools.openFile(outfile, "w") as outf:
        outf.write("gene_id\n")
        outf.write("\n".join((x[0] for x in select)) + "\n")
@transform(buildReferenceGeneSet,
           suffix("reference.gtf.gz"),
           add_inputs(identifyProteinCodingGenes),
           "refcoding.gtf.gz")
def buildCodingGeneSet(infiles, outfile):
    '''build a gene set with only protein coding transcripts.

    Retain the genes from the gene_tsv file in the outfile geneset.
    The gene set will contain all transcripts of protein coding genes,
    including processed transcripts. The gene set includes UTR and
    CDS.

    Parameters
    ----------
    infiles : list
        infile : str
            Input filename in :term:`gtf` format
        genes_tsv : str
            Input filename in :term:`tsv` format
    outfile : str
        Output filename in :term:`gtf` format
    '''
    infile, genes_tsv = infiles

    # keep only genes listed in genes_tsv
    statement = '''
    zcat %(infile)s
    | cgat gtf2gtf
    --method=filter
    --filter-method=gene
    --map-tsv-file=%(genes_tsv)s
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s
    '''
    P.run()
@follows(mkdir("geneset.dir"))
@merge(PARAMS["annotations_interface_geneset_all_gtf"],
       "geneset.dir/coding_exons.gtf.gz")
def buildCodingExons(infile, outfile):
    '''compile the set of protein coding exons.

    Filter protein coding transcripts.
    This set is used for splice-site validation.

    Parameters
    ----------
    infile : str
        Input filename in :term:`gtf` format
    outfile : str
        Output filename in :term:`gtf` format
    '''
    # CDS features are relabelled as exons, then merged per gene
    statement = '''
    zcat %(infile)s
    | awk '$3 == "CDS"'
    | cgat gtf2gtf
    --method=filter
    --filter-method=proteincoding
    --log=%(outfile)s.log
    | awk -v OFS="\\t" -v FS="\\t" '{$3="exon"; print}'
    | cgat gtf2gtf
    --method=merge-exons
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s
    '''
    P.run()
@transform(buildCodingGeneSet, suffix(".gtf.gz"), ".junctions")
def buildJunctions(infile, outfile):
    '''build file with splice junctions from gtf file.

    Identify the splice junctions from a gene set :term:`gtf`
    file. A junctions file is a better option than supplying a GTF
    file, as parsing the latter often fails. See:

    http://seqanswers.com/forums/showthread.php?t=7563

    Parameters
    ----------
    infile : str
        Input filename in :term:`gtf` format
    outfile : str
        Output filename (tab-separated: contig, end, start, strand)
    '''
    outf = IOTools.openFile(outfile, "w")
    njunctions = 0
    for gffs in GTF.transcript_iterator(
            GTF.iterator(IOTools.openFile(infile, "r"))):

        gffs.sort(key=lambda x: x.start)
        end = gffs[0].end
        for gff in gffs[1:]:
            # subtract one: these are not open/closed coordinates but
            # the 0-based coordinates
            # of first and last residue that are to be kept (i.e., within the
            # exon).
            outf.write("%s\t%i\t%i\t%s\n" %
                       (gff.contig, end - 1, gff.start, gff.strand))
            end = gff.end
            njunctions += 1

    outf.close()

    if njunctions == 0:
        E.warn('no junctions found in gene set')
        return
    else:
        E.info('found %i junctions before removing duplicates' % njunctions)

    # make unique
    statement = '''mv %(outfile)s %(outfile)s.tmp;
                   cat < %(outfile)s.tmp | sort | uniq > %(outfile)s;
                   rm -f %(outfile)s.tmp; '''
    P.run()
@transform(buildCodingGeneSet,
           suffix(".gtf.gz"),
           ".fasta")
def buildTranscriptFasta(infile, outfile):
    """build geneset where all exons within a gene
    are merged.

    Extracts transcript sequences from the genome and indexes them
    into a fasta database next to the output file.
    """
    dbname = outfile[:-len(".fasta")]

    statement = '''zcat %(infile)s
    | cgat gff2fasta
    --is-gtf
    --genome=%(genome_dir)s/%(genome)s
    --log=%(outfile)s.log
    | cgat index_fasta
    %(dbname)s --force-output -
    > %(dbname)s.log
    '''
    P.run()
@transform(buildCodingGeneSet,
           suffix(".gtf.gz"),
           ".tsv")
def buildTranscriptGeneMap(infile, outfile):
    """build a map of transcript ids to gene ids."""

    statement = """
    zcat %(infile)s
    |cgat gtf2tsv
    --attributes-as-columns
    --output-only-attributes
    | cgat csv_cut transcript_id gene_id
    > %(outfile)s"""
    P.run()
###################################################################
# subset fastqs
###################################################################
@follows(mkdir("fastq.dir"))
@transform(SEQUENCEFILES,
           SEQUENCEFILES_REGEX,
           r"fastq.dir/\2.subset")
def subsetSequenceData(infile, outfile):
    """subset fastq files.

    Takes the first `sample_size` reads from each raw input file.

    Parameters
    ----------
    infile : str
        Raw sequence file.
    outfile : str
        Sentinel/output file for the subset data.
    """
    # fixed typo: was `ignore_pipe_erors`, which P.run() would never see -
    # presumably intended to suppress pipe errors from head-truncation
    ignore_pipe_errors = True
    ignore_errors = True
    m = PipelineMapping.SubsetHead(limit=PARAMS["sample_size"])
    # P.run() picks up `statement` from the caller's locals
    statement = m.build((infile,), outfile)
    P.run()
    P.touch(outfile)
@follows(mkdir("fastq.dir"))
@merge(countReads,
       "fastq.dir/highest_depth_sample.sentinel")
def identifyHighestDepth(infiles, outfile):
    '''identify the sample with the highest depth.

    Symlinks the raw data of the deepest sample (and its paired-end
    mate, if any) next to the sentinel file, and links its read-count
    file into nreads.dir.

    Parameters
    ----------
    infiles : list of str
        .nreads count files, one per sample.
    outfile : str
        Sentinel file marking completion.
    '''
    highest_depth = 0
    # initialise so the assert below gives a clear message instead of a
    # NameError when no count file contains an "nreads" line
    highest_depth_sample = None
    for count_inf in infiles:
        for line in IOTools.openFile(count_inf, "r"):
            if not line.startswith("nreads"):
                continue
            nreads = int(line[:-1].split("\t")[1])
            if nreads > highest_depth:
                highest_depth = nreads
                highest_depth_sample = os.path.basename(
                    P.snip(count_inf, ".nreads"))

    assert highest_depth_sample, ("unable to identify the sample "
                                  "with the highest depth")

    infile, inf_suffix = findSuffixedFile(highest_depth_sample,
                                          [x[1:] for x in SEQUENCESUFFIXES])
    # assert before abspath: abspath(None) would raise a TypeError and
    # mask the intended error message
    assert infile, ("unable to find the raw data for the "
                    "sample with the highest depth")
    infile = os.path.abspath(infile)

    dst = os.path.abspath(P.snip(outfile, ".sentinel") + inf_suffix)

    def forcesymlink(src, dst):
        # re-create the link if dst already exists; only catch OS-level
        # failures (the previous bare `except:` hid real errors)
        try:
            os.symlink(src, dst)
        except OSError:
            os.remove(dst)
            os.symlink(src, dst)

    forcesymlink(infile, dst)

    # if paired end fastq, need to link the paired end too!
    if inf_suffix == ".fastq.1.gz":
        dst2 = P.snip(outfile, ".sentinel") + ".fastq.2.gz"
        forcesymlink(infile.replace(".fastq.1.gz", ".fastq.2.gz"), dst2)

    forcesymlink("%s.nreads" % highest_depth_sample,
                 "nreads.dir/highest_depth_sample.nreads")

    P.touch(outfile)
@split(identifyHighestDepth,
       "fastq.dir/highest_counts_subset_*")
def subsetRange(infile, outfiles):
    '''subset highest depth sample to 10%-100% depth.

    Parameters
    ----------
    infile : str
        Sentinel file from :func:`identifyHighestDepth`.
    outfiles : str
        Glob of subset outputs (one per decile).
    '''
    outfile = "fastq.dir/highest_counts_subset.sentinel"
    infile_prefix = P.snip(os.path.basename(infile), ".sentinel")
    nreads_inf = "nreads.dir/%s.nreads" % infile_prefix

    for line in IOTools.openFile(nreads_inf, "r"):
        if not line.startswith("nreads"):
            continue
        nreads = int(line[:-1].split("\t")[1])

    infile, inf_suffix = findSuffixedFile(P.snip(infile, ".sentinel"),
                                          [x[1:] for x in SEQUENCESUFFIXES])

    # PipelineMapping.Counter double counts for paired end
    # Note: this wont handle sra. Need to add a call to Sra.peak to check for
    # paired end files in SRA
    if inf_suffix == ".fastq.1.gz":
        # integer division: `/` would yield a float under python3
        nreads = nreads // 2

    subset_depths = list(range(10, 110, 10))
    limits = [int(nreads / (100.0 / int(depth)))
              for depth in subset_depths]

    # fixed typo: was `ignore_pipe_erors`, which P.run() would never see
    ignore_pipe_errors = True
    ignore_errors = True

    m = PipelineMapping.SubsetHeads(limits=limits)
    # P.run() picks up `statement` from the caller's locals
    statement = m.build((infile,), outfile)

    P.run()
    P.touch(outfile)
@follows(subsetSequenceData)
def subset():
    '''Dummy target grouping the subsetting tasks.'''
    pass
###################################################################
# map reads
###################################################################
@follows(mkdir("hisat.dir"))
@transform(subsetSequenceData,
           regex("fastq.dir/(.*).subset"),
           add_inputs(buildJunctions),
           r"hisat.dir/\1.hisat.bam")
def mapReadsWithHisat(infiles, outfile):
    '''
    Map reads using Hisat (spliced reads).

    Parameters
    ----------
    infiles : list
        contains two filenames -

    infiles[0] : str
        filename of reads file
        can be :term:`fastq`, :term:`sra`, csfasta

    infiles[1] : str
        filename with suffix .junctions containing a list of known
        splice junctions.

    hisat_threads : int
        :term:`PARAMS`
        number of threads with which to run hisat

    hisat_memory : str
        :term:`PARAMS`
        memory required for hisat job

    hisat_executable : str
        :term:`PARAMS`
        path to hisat executable

    hisat_library_type : str
        :term:`PARAMS`
        hisat rna-strandess parameter, see
        https://ccb.jhu.edu/software/hisat/manual.shtml#command-line

    hisat_options : str
        options string for hisat, see
        https://ccb.jhu.edu/software/hisat/manual.shtml#command-line

    hisat_index_dir : str
        path to directory containing hisat indices

    strip_sequence : bool
        :term:`PARAMS`
        if set, strip read sequence and quality information

    outfile : str
        :term:`bam` filename to write the mapped reads in bam format.

    .. note::
        If hisat fails with an error such as::

           Error: segment-based junction search failed with err =-6
           what():  std::bad_alloc

        it means that it ran out of memory.
    '''
    # job_threads/job_memory are read from the caller's locals by P.run()
    job_threads = PARAMS["hisat_threads"]
    job_memory = PARAMS["hisat_memory"]

    m = PipelineMapping.Hisat(
        executable=P.substituteParameters(
            **locals())["hisat_executable"],
        strip_sequence=PARAMS["strip_sequence"])

    infile, junctions = infiles
    # map the *subset* data: derive the fastq name from the sentinel,
    # falling back to the paired-end naming scheme
    infile = P.snip(infile, ".subset") + ".fastq.gz"
    if not os.path.exists(infile):
        infile = P.snip(infile, ".fastq.gz") + ".fastq.1.gz"

    statement = m.build((infile,), outfile)

    P.run()
###################################################################
# build mapping stats
###################################################################
@transform(mapReadsWithHisat,
           regex("(.*)/(.*)\.(.*).bam"),
           r"\1/\2.\3.readstats")
def buildBAMStats(infile, outfile):
    '''count number of reads mapped, duplicates, etc.

    Excludes regions overlapping repetitive RNA sequences.

    Parameters
    ----------
    infile : str
        Input filename in :term:`bam` format
    outfile : str
        Output filename with read stats
    annotations_interface_rna_gff : str
        :term:`PARAMS`. :term:`gff` format file with repetitive rna
    '''
    rna_file = PARAMS["annotations_interface_rna_gff"]

    job_memory = "16G"

    track = P.snip(os.path.basename(infile), ".hisat.bam")

    # if a fastq file exists, submit for counting
    if os.path.exists(track + ".fastq.gz"):
        fastqfile = track + ".fastq.gz"
    elif os.path.exists(track + ".fastq.1.gz"):
        fastqfile = track + ".fastq.1.gz"
    else:
        fastqfile = None

    if fastqfile is not None:
        fastq_option = "--fastq-file=%s" % fastqfile
    else:
        fastq_option = ""

    statement = '''
    cgat bam2stats
    %(fastq_option)s
    --force-output
    --mask-bed-file=%(rna_file)s
    --ignore-masked-reads
    --num-reads=%(sample_size)i
    --output-filename-pattern=%(outfile)s.%%s
    < %(infile)s
    > %(outfile)s
    '''
    P.run()
@P.add_doc(PipelineMappingQC.loadBAMStats)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@merge(buildBAMStats, "bam_stats.load")
def loadBAMStats(infiles, outfile):
    '''load bam statistics into the bam_stats table.'''
    PipelineMappingQC.loadBAMStats(infiles, outfile)
@P.add_doc(PipelineWindows.summarizeTagsWithinContext)
@transform(mapReadsWithHisat,
           suffix(".bam"),
           add_inputs(
               PARAMS["annotations_interface_genomic_context_bed"]),
           ".contextstats.tsv.gz")
def buildContextStats(infiles, outfile):
    '''build mapping context stats.

    Counts tags falling within each genomic-context interval.
    '''
    PipelineWindows.summarizeTagsWithinContext(
        infiles[0], infiles[1], outfile)
@P.add_doc(PipelineWindows.loadSummarizedContextStats)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@follows(loadBAMStats)
@merge(buildContextStats, "context_stats.load")
def loadContextStats(infiles, outfile):
    '''load context mapping statistics into the context_stats table.'''
    PipelineWindows.loadSummarizedContextStats(infiles, outfile)
@originate("geneset.dir/altcontext.bed.gz")
def buildBedContext(outfile):
    '''Generate a bed file that can be passed into buildAltContextStats.

    Extracts exon intervals for lincRNA, snoRNA, miRNA and protein
    coding genes from the annotations database, labels each interval
    with its biotype, and writes a sorted, bgzipped bed file.

    Parameters
    ----------
    outfile : str
        Output filename (bgzipped :term:`bed`).
    '''
    dbh = connect()

    tmp_bed_sorted_filename = P.getTempFilename(shared=True)

    # one query per biotype of interest; the literal in the SELECT list
    # becomes the bed "name" column
    sql_statements = [
        '''SELECT DISTINCT GTF.contig, GTF.start, GTF.end, "lincRNA"
        FROM gene_info GI
        JOIN geneset_lincrna_exons_gtf GTF
        ON GI.gene_id=GTF.gene_id
        WHERE GI.gene_biotype == "lincRNA"''',
        '''SELECT DISTINCT GTF.contig, GTF.start, GTF.end, "snoRNA"
        FROM gene_info GI
        JOIN geneset_noncoding_exons_gtf GTF
        ON GI.gene_id=GTF.gene_id
        WHERE GI.gene_biotype == "snoRNA"''',
        '''SELECT DISTINCT GTF.contig, GTF.start, GTF.end, "miRNA"
        FROM gene_info GI
        JOIN geneset_noncoding_exons_gtf GTF
        ON GI.gene_id=GTF.gene_id
        WHERE GI.gene_biotype == "miRNA"''',
        '''SELECT DISTINCT GTF.contig, GTF.start, GTF.end, "protein_coding"
        FROM gene_info GI
        JOIN geneset_coding_exons_gtf GTF
        ON GI.gene_id=GTF.gene_id
        WHERE GI.gene_biotype == "protein_coding"''']

    with IOTools.openFile(tmp_bed_sorted_filename, "w") as tmp_bed_sorted:
        for sql_statement in sql_statements:
            state = dbh.execute(sql_statement)
            for line in state:
                tmp_bed_sorted.write(("%s\n") % "\t".join(map(str, line)))

    # sort by contig, start, end and compress for downstream tools
    statement = '''
    sort -k1,1 -k2,2n -k3,3n
    < %(tmp_bed_sorted_filename)s
    | bgzip
    > %(outfile)s'''

    P.run()

    os.unlink(tmp_bed_sorted_filename)
@P.add_doc(PipelineWindows.summarizeTagsWithinContext)
@follows(buildBedContext)
@transform(mapReadsWithHisat,
           suffix(".bam"),
           add_inputs(buildBedContext),
           ".altcontextstats.tsv.gz")
def buildAltContextStats(infiles, outfile):
    '''build mapping context stats of snoRNA, miRNA,
    lincRNA, protein coding intervals.'''

    infile, bed = infiles

    PipelineWindows.summarizeTagsWithinContext(
        infile,  bed, outfile)
@P.add_doc(PipelineWindows.loadSummarizedContextStats)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@follows(loadContextStats)
@merge(buildAltContextStats, "altcontext_stats.load")
def loadAltContextStats(infiles, outfile):
    '''load alternative context mapping statistics into the
    altcontext_stats table.'''
    PipelineWindows.loadSummarizedContextStats(infiles,
                                               outfile,
                                               suffix=".altcontextstats.tsv.gz")
###################################################################
# alignment-free quantification
###################################################################
@follows(mkdir("sailfish.dir"))
@transform(buildTranscriptFasta,
           regex("(\S+)"),
           "sailfish.dir/transcripts.sailfish.index")
def indexForSailfish(infile, outfile):
    '''create a sailfish index from the transcript fasta.'''

    statement = '''
    sailfish index --transcripts=%(infile)s
    --out=%(outfile)s '''

    P.run()
@transform(SEQUENCEFILES,
           SEQUENCEFILES_REGEX,
           add_inputs(indexForSailfish,
                      buildCodingGeneSet,
                      buildTranscriptGeneMap),
           r"sailfish.dir/\2/quant.sf")
def runSailfish(infiles, outfile):
    '''quantify transcript abundance with sailfish.'''

    job_threads = PARAMS["sailfish_threads"]
    job_memory = PARAMS["sailfish_memory"]

    infile, index, geneset, transcript_map = infiles

    # sailfish_* locals are picked up by the PipelineMapping.Sailfish
    # statement builder / P.run() from the caller's namespace
    sailfish_bootstrap = 1
    sailfish_libtype = PARAMS["sailfish_libtype"]
    sailfish_options = PARAMS["sailfish_options"]
    sailfish_options += " --geneMap %s" % transcript_map

    m = PipelineMapping.Sailfish()

    statement = m.build((infile,), outfile)

    P.run()
@split(runSailfish,
       ["sailfish.dir/sailfish_transcripts.tsv.gz",
        "sailfish.dir/sailfish_genes.tsv.gz"])
def mergeSailfishResults(infiles, outfiles):
    '''concatenate sailfish expression estimates from each sample.

    A numeric sample_id column is added; the id increments each time a
    header line ("Name...") is seen, i.e. once per concatenated file.
    '''
    s_infiles = " " .join(sorted(infiles))
    outfile_transcripts, outfile_genes = outfiles

    # transcript-level estimates
    statement = """
    cat %(s_infiles)s
    | awk -v OFS="\\t"
    '/^Name/
    { sample_id+=1;
      if (sample_id == 1) {
         gsub(/Name/, "transcript_id");
         printf("sample_id\\t%%s\\n", $0)};
      next;}
    !/^#/
    {printf("%%i\\t%%s\\n", sample_id, $0)}'
    | gzip
    > %(outfile_transcripts)s
    """
    P.run()

    # gene-level estimates live in quant.genes.sf next to each quant.sf
    s_infiles = " ".join(
        [re.sub("quant.sf", "quant.genes.sf", x) for x in infiles])

    statement = """
    cat %(s_infiles)s
    | awk -v OFS="\\t"
    '/^Name/
    { sample_id+=1;
      if (sample_id == 1) {
         gsub(/Name/, "gene_id");
         printf("sample_id\\t%%s\\n", $0)};
      next;}
    !/^#/
    {printf("%%i\\t%%s\\n", sample_id, $0)}'
    | gzip
    > %(outfile_genes)s
    """
    P.run()
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@transform(mergeSailfishResults,
           suffix(".tsv.gz"),
           ".load")
def loadSailfishResults(infile, outfile):
    '''load the merged sailfish expression tables into the database.'''
    P.load(infile, outfile,
           options="--add-index=sample_id "
           "--add-index=gene_id "
           "--add-index=transcript_id "
           "--map=sample_id:int")
###################################################################
# strand bias
###################################################################
@follows(mkdir("geneset.dir"))
@merge(PARAMS["annotations_interface_geneset_all_gtf"],
       "geneset.dir/refflat.txt")
def buildRefFlat(infile, outfile):
    '''build flat geneset for Picard RnaSeqMetrics.'''

    tmpflat = P.getTempFilename(".")

    # genePredExt output is rearranged so the gene name is column 1,
    # as expected by Picard's REF_FLAT input
    statement = '''
    gtfToGenePred -genePredExt -geneNameAsName2 %(infile)s %(tmpflat)s;
    paste <(cut -f 12 %(tmpflat)s) <(cut -f 1-10 %(tmpflat)s)
    > %(outfile)s
    '''
    P.run()
    os.unlink(tmpflat)
@P.add_doc(PipelineMappingQC.buildPicardRnaSeqMetrics)
@transform(mapReadsWithHisat,
           suffix(".bam"),
           add_inputs(buildRefFlat),
           ".picard_rna_metrics")
def buildPicardRnaSeqMetrics(infiles, outfile):
    '''Get duplicate stats from picard RNASeqMetrics.

    Parameters
    ----------
    infiles : list
        bam file and refflat geneset.
    outfile : str
        Picard metrics output file.
    '''
    # convert strandness to tophat-style library type.
    # BUGFIX: the original compared against `("RF" or "R")`, which the
    # `or` operator collapses to just "RF", so the single-end codes
    # "R"/"F" were never recognised; use membership tests instead.
    if PARAMS["hisat_library_type"] in ("RF", "R"):
        strand = "SECOND_READ_TRANSCRIPTION_STRAND"
    elif PARAMS["hisat_library_type"] in ("FR", "F"):
        strand = "FIRST_READ_TRANSCRIPTION_STRAND"
    else:
        strand = "NONE"

    PipelineMappingQC.buildPicardRnaSeqMetrics(infiles, strand, outfile)
@P.add_doc(PipelineMappingQC.loadPicardRnaSeqMetrics)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@merge(buildPicardRnaSeqMetrics, ["picard_rna_metrics.load",
                                  "picard_rna_histogram.load"])
def loadPicardRnaSeqMetrics(infiles, outfiles):
    '''merge alignment stats into single tables.'''
    PipelineMappingQC.loadPicardRnaSeqMetrics(infiles, outfiles)
###################################################################
# saturation analysis
###################################################################
@transform(subsetRange,
           regex("fastq.dir/highest_counts_subset_(\d+)."
                 "(fastq.1.gz|fastq.gz|fa.gz|sra|"
                 "csfasta.gz|csfasta.F3.gz|export.txt.gz)"),
           add_inputs(indexForSailfish,
                      buildCodingGeneSet,
                      buildTranscriptGeneMap),
           r"sailfish.dir/highest_counts_subset_\1/quant.sf")
def runSailfishSaturation(infiles, outfile):
    '''quantify abundance of transcripts with increasing subsets of the data.

    Uses 20 bootstraps so downstream plots can estimate quantification
    variance per subset depth.
    '''

    job_threads = PARAMS["sailfish_threads"]
    job_memory = PARAMS["sailfish_memory"]

    infile, index, geneset, transcript_map = infiles

    # sailfish_* locals are read by the statement builder / P.run()
    sailfish_bootstrap = 20
    sailfish_libtype = PARAMS["sailfish_libtype"]
    sailfish_options = PARAMS["sailfish_options"]
    sailfish_options += " --geneMap %s" % transcript_map

    m = PipelineMapping.Sailfish()

    statement = m.build((infile,), outfile)

    P.run()
@jobs_limit(1, "R")
@mkdir("sailfish.dir/plots.dir")
@merge(runSailfishSaturation,
       "sailfish.dir/plots.dir/saturation_plots.sentinel")
def plotSailfishSaturation(infiles, outfile):
    '''Plot the relationship between sample sequencing depth and
    quantification accuracy.

    Produces two plots via rpy2/ggplot2:
      * bootstrap coefficient-of-variation vs. sampling depth
      * absolute difference to the full-depth estimate vs. sampling depth

    Parameters
    ----------
    infiles : list of str
        quant.sf files from :func:`runSailfishSaturation`.
    outfile : str
        Sentinel file; plot files are derived from its name.
    '''
    plotfile_base = P.snip(outfile, ".sentinel")
    bootstrap_sat_plotfile = plotfile_base + "_boostrap_cv.png"
    accuracy_sat_plotfile = plotfile_base + "_accuracy.png"

    quant_dir = os.path.dirname(os.path.dirname(infiles[0]))

    # This is currently hardcoded to expect 10 infiles named:
    # (quant.dir)/highest_counts_subset_(n)/quant.sf,
    # where (n) is the subset index (0-9)
    R('''
    library(reshape2)
    library(ggplot2)
    library(Hmisc)

    Path = "%(quant_dir)s"

    # following code to read Sailfish binary files borrows from Rob
    # Patro's Wasabi R package for making sailfish/salmon output
    # compatable with sleuth
    minfo <- rjson::fromJSON(file=file.path(
        Path, 'highest_counts_subset_9', "aux", "meta_info.json"))
    numBoot <- minfo$num_bootstraps

    point_df = read.table(file.path(Path, 'highest_counts_subset_9', "quant.sf"),
                          sep="\t", header=T, row.names=1)

    final_cols = NULL

    for (ix in seq(0,9,1)){
        bootCon <- gzcon(file(file.path(
            Path, paste0('highest_counts_subset_', ix), 'aux',
            'bootstrap', 'bootstraps.gz'), "rb"))

        # read in binary data
        boots <- readBin(bootCon, "double",
                         n = minfo$num_targets * minfo$num_bootstraps)
        close(bootCon)

        # turn data into dataframe
        boots_df = t(data.frame(matrix(unlist(boots),
                                nrow=minfo$num_bootstraps, byrow=T)))

        # add rownames
        rownames(boots_df) = rownames(point_df)

        final_cols[[paste0("sample_", ix)]] = apply(boots_df, 1,
                                                    function(x) sd(x)/mean(x))
    }

    # make final dataframe with boostrap CVs
    final_df = data.frame(do.call("cbind", final_cols))

    # add expression values, subset to transcripts with >1 read and bin exp
    final_df$max_exp = point_df$NumReads
    final_df = final_df[final_df$max_exp>1,]
    final_df$max_exp = as.numeric(cut2(final_df$max_exp, g=10))

    # melt and aggregate
    melted_df = melt(final_df, id="max_exp")
    melted_df = melted_df[is.finite(melted_df$value),]

    aggdata <-aggregate(melted_df$value,
                        by=list(melted_df$max_exp, melted_df$variable),
                        FUN=mean)
    aggdata$Group.1 = as.factor(aggdata$Group.1)

    m_txt = element_text(size=20)
    my_theme = theme(
        axis.text=m_txt,
        axis.title=m_txt,
        legend.text=m_txt,
        legend.title=m_txt,
        aspect.ratio=1)

    p = ggplot(aggdata, aes(10*as.numeric(Group.2), x,
               colour=Group.1, group=Group.1)) +
    geom_line() +
    theme_bw() +
    xlab("Sampling depth (%%)") +
    ylab("Average Coefficient of variance") +
    scale_colour_manual(name="Exp. Decile",
                        values=colorRampPalette(c("yellow","purple"))(10)) +
    scale_x_continuous(breaks=seq(10,100,10), limits=c(10,100)) +
    my_theme

    ggsave("%(bootstrap_sat_plotfile)s")

    # read in the point estimate data
    tpm_est = NULL

    ref_point_df = read.table(
        file.path(Path, 'highest_counts_subset_9', "quant.sf"),
        sep="\t", header=T, row.names=1)

    for (ix in seq(0,9,1)){
        point_df = read.table(
            file.path(Path, paste0('highest_counts_subset_', ix), "quant.sf"),
            sep="\t", header=T, row.names=1)
        tpm_est[[paste0("sample_", ix)]] = (
            abs(point_df$TPM - ref_point_df$TPM) / ref_point_df$TPM)
    }

    tpm_est_df = data.frame(do.call("cbind", tpm_est))

    # add expression values, subset to transcripts with >1 read and bin exp.
    tpm_est_df$max_exp = point_df$NumReads
    tpm_est_df = tpm_est_df[point_df$NumReads>1,]
    tpm_est_df$max_exp = as.numeric(cut2(tpm_est_df$max_exp, g=10))

    # melt and aggregate
    melted_df = melt(tpm_est_df, id="max_exp")
    melted_df = melted_df[is.finite(melted_df$value),]

    aggdata <-aggregate(melted_df$value,
                        by=list(melted_df$max_exp, melted_df$variable),
                        FUN=mean)
    aggdata$Group.1 = as.factor(aggdata$Group.1)

    p = ggplot(aggdata, aes(10*as.numeric(Group.2), x,
               colour=Group.1, group=Group.1)) +
    geom_line() +
    theme_bw() +
    xlab("Sampling depth (%%)") +
    ylab("Abs. difference in exp. estimate (normalised)") +
    scale_colour_manual(name="Exp. Decile",
                        values=colorRampPalette(c("yellow","purple"))(10)) +
    scale_x_continuous(breaks=seq(10,90,10), limits=c(10,90)) +
    my_theme

    ggsave("%(accuracy_sat_plotfile)s")
    ''' % locals())

    P.touch(outfile)
###################################################################
# gene coverage profiles
###################################################################
@follows(mkdir("transcriptprofiles.dir"))
@transform(mapReadsWithHisat,
           regex("hisat.dir/(\S+).hisat.bam"),
           add_inputs(buildCodingExons),
           r"transcriptprofiles.dir/\1.transcriptprofile.gz")
def buildTranscriptProfiles(infiles, outfile):
    '''build gene coverage profiles.

    PolyA-RNA-Seq is expected to show a bias towards the 3' end of
    transcripts. Here we generate a meta-profile for each sample for
    the read depth from the :term:`bam` file across the gene models
    defined in the :term:`gtf` gene set.

    In addition to the outfile specified by the task, plots will be
    saved with full and focus views of the meta-profile.

    Parameters
    ----------
    infiles : list of str
        infiles[0] : str
            Input filename in :term:`bam` format
        infiles[1] : str
            Input filename in :term:`gtf` format
    outfile : str
        Output filename in :term:`tsv` format
    '''

    bamfile, gtffile = infiles

    job_memory = "8G"

    statement = '''cgat bam2geneprofile
    --output-filename-pattern="%(outfile)s.%%s"
    --force-output
    --reporter=transcript
    --use-base-accuracy
    --method=geneprofile
    --method=geneprofileabsolutedistancefromthreeprimeend
    --normalize-profile=all
    %(bamfile)s %(gtffile)s
    | gzip
    > %(outfile)s
    '''

    P.run()
@merge(buildTranscriptProfiles,
       "transcriptprofiles.dir/threeprimebiasprofiles.load")
def loadTranscriptProfiles(infiles, outfile):
    '''concatenate and load the transcript profiles.

    Retain sample name as column = "track".
    '''
    regex = ("transcriptprofiles.dir/(\S+).transcriptprofile.gz."
             "geneprofileabsolutedistancefromthreeprimeend.matrix.tsv.gz")

    # the matrix files sit next to the task outputs with a long suffix
    infiles = [
        x + ".geneprofileabsolutedistancefromthreeprimeend.matrix.tsv.gz" for x in infiles]

    P.concatenateAndLoad(infiles, outfile, regex_filename=regex)
@merge(SEQUENCEFILES,
       "experiment.tsv")
def buildExperimentTable(infiles, outfile):
    '''write a one-row experiment table with project name, id,
    working directory and title.'''
    d = os.getcwd()
    # TODO(review): shouldn't this be the pipeline directory?
    try:
        project_id = P.getProjectId()
    except ValueError:
        # not inside a recognised project layout
        project_id = "unknown"
    with IOTools.openFile(outfile, "w") as outf:
        outf.write("id\tname\tproject_id\tdirectory\ttitle\n")
        outf.write("\t".join(
            ("1",
             P.getProjectName(),
             project_id,
             d,
             PARAMS.get("title", ""))) + "\n")
@merge(SEQUENCEFILES,
       "samples.tsv")
def buildSamplesTable(infiles, outfile):
    '''write samples.tsv mapping a numeric sample id (1-based, in
    sorted filename order) to each sample name.'''
    with IOTools.openFile(outfile, "w") as outf:
        outf.write("id\texperiment_id\tsample_name\n")
        for idx, path in enumerate(sorted(infiles), start=1):
            # sample name is everything before the first "."
            name, _suffix = os.path.basename(path).split(".", 1)
            outf.write("\t".join((str(idx), "1", name)) + "\n")
@merge(SEQUENCEFILES,
       "factors.tsv")
def buildFactorTable(infiles, outfile):
    '''write factors.tsv with one (sample_id, factor, factor_value) row
    per factor per sample.

    Factor names come from the "factors" config option (separated by
    "-") and factor values from the "-"-separated parts of each sample
    name; a factor named "_" is skipped. A "genome" factor is added for
    every sample. Additional factors may be supplied in an optional
    additional_factors.tsv file keyed by sample name.

    Raises
    ------
    ValueError
        If "factors" is not configured or a sample name does not have
        the expected number of "-"-separated parts.
    KeyError
        If a sample name in additional_factors.tsv does not match any
        input file.
    '''
    if "factors" not in PARAMS:
        raise ValueError("factors not defined in config file")

    factor_names = PARAMS.get("factors")
    if factor_names is None or factor_names == "!?":
        raise ValueError("factors not defined in config file")
    factor_names = factor_names.split("-")

    # map sample name -> numeric id, for the additional factors below
    sampleID2sampleName = {}

    with IOTools.openFile(outfile, "w") as outf:
        outf.write("sample_id\tfactor\tfactor_value\n")

        for sample_id, filename in enumerate(sorted(infiles)):
            sample_name, suffix = os.path.basename(filename).split(".", 1)
            sampleID2sampleName[sample_name] = sample_id + 1

            parts = sample_name.split("-")

            if len(parts) != len(factor_names):
                raise ValueError(
                    "unexpected number of factors in sample {}: "
                    "expected={}, got={}".format(
                        filename, factor_names, parts))

            for factor, factor_value in zip(factor_names, parts):
                if factor == "_":
                    continue
                outf.write("\t".join((str(sample_id + 1),
                                      factor, factor_value)) + "\n")
            outf.write("\t".join((str(sample_id + 1), "genome",
                                  PARAMS["genome"])) + "\n")

        if os.path.exists("additional_factors.tsv"):
            with IOTools.openFile("additional_factors.tsv", "r") as inf:
                header = next(inf)
                header = header.strip().split("\t")
                additional_factors = header[1:]
                for line in inf:
                    line = line.strip().split("\t")
                    sample_name = line[0]
                    factors_values = line[1:]
                    for factor_ix in range(0, len(additional_factors)):
                        try:
                            outf.write("\t".join((
                                str(sampleID2sampleName[sample_name]),
                                additional_factors[factor_ix],
                                factors_values[factor_ix])) + "\n")
                        except KeyError as ke:
                            sample_names = [os.path.basename(x).split(".")[0]
                                            for x in infiles]
                            raise KeyError(
                                "Sample name in additional_factors table does "
                                " not match up with sample names from raw "
                                "infiles: %s not in %s" % (
                                    ke, ",".join(sample_names)))
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@transform((buildExperimentTable, buildSamplesTable, buildFactorTable),
           suffix(".tsv"),
           ".load")
def loadMetaInformation(infile, outfile):
    '''load the experiment, sample and factor tables into the database.'''
    P.load(infile, outfile,
           options="--map=id:int "
           "--map=sample_id:int "
           "--map=experiment_id:int "
           "--add-index=id "
           "--add-index=experiment_id "
           "--add-index=sample_id ")
@transform(buildTranscriptFasta,
           suffix("refcoding.fasta"),
           "transcripts_attributes.tsv.gz")
def characteriseTranscripts(infile, outfile):
    '''obtain sequence attributes (nucleotide/dinucleotide composition,
    length) for each transcript.'''

    statement = '''
    cat %(infile)s | cgat fasta2table
    --split-fasta-identifier --section=na,dn,length -v 0
    | gzip > %(outfile)s'''

    P.run()
@transform(characteriseTranscripts,
           regex("transcripts_attributes.tsv.gz"),
           add_inputs(mergeSailfishResults),
           "bias_binned_means.tsv")
def summariseBias(infiles, outfile):
    '''Bin transcripts by sequence attribute (GC content, dinucleotide
    fraction, log2 length) and compute the mean log2(TPM) per bin and
    sample, writing a long-format table for bias plotting.

    ``infiles`` is (attributes table, gene-level results, transcript-level
    results); the gene-level table is not used here.
    '''

    def percentage(x):
        # x is a (count, length) pair -> fraction of the transcript
        return float(x[0]) / float(x[1])

    attributes, genes, transcripts = infiles
    atr = pd.read_table(attributes, sep='\t', index_col="id")
    atr = atr.rename(columns={'pGC': 'GC_Content'})
    # convert raw dinucleotide counts into fractions of transcript length
    for di in itertools.product("ATCG", repeat=2):
        di = di[0] + di[1]
        temp_df = atr.loc[:, [di, "length"]]
        atr[di] = temp_df.apply(percentage, axis=1)
    # drop raw counts/fractions that are redundant with the columns kept
    drop_cols = (["nAT", "nGC", "pAT", "pA", "pG", "pC", "pT", "nA",
                  "nG", "nC", "nT", "ncodons",
                  "mCountsOthers", "nUnk", "nN", "pN"])
    atr = atr.drop(drop_cols, axis=1)
    atr["length"] = np.log2(atr["length"])
    E.info("loading transcripts from {}".format(transcripts))
    exp = pd.read_csv(transcripts, sep='\t', index_col="transcript_id")
    # pseudocount of 0.1 avoids log2(0) for unexpressed transcripts
    exp['LogTPM'] = np.log2(exp['TPM'] + 0.1)
    merged = atr.join(exp[['sample_id', 'LogTPM']])

    def norm(array):
        # min-max scale to [0, 1]
        array_min = array.min()
        array_max = array.max()
        return pd.Series([(x - array_min) / (array_max - array_min) for x in array])

    def bin2floats(qcut_bin):
        # pd.qcut yields Interval objects; extract the two edges
        return [qcut_bin.left, qcut_bin.right]

    def aggregate_by_factor(df, attribute, sample_names, bins, function):
        # mean LogTPM per (sample, attribute-quantile-bin);
        # .loc replaces the deprecated-and-removed pandas .ix indexer
        means_df = df[["LogTPM", "sample_id"]].groupby(
            ["sample_id", pd.qcut(df.loc[:, attribute], bins)])
        means_df = pd.DataFrame(means_df.agg(function))
        means_df.reset_index(inplace=True)
        atr_values = means_df[attribute]
        means_df.drop(attribute, axis=1, inplace=True)
        means_df["LogTPM_norm"] = list(
            means_df.groupby("sample_id")["LogTPM"].apply(norm))
        # represent each bin by its midpoint for plotting
        means_df[attribute] = [np.mean(bin2floats(x)) for x in atr_values]
        means_df = pd.melt(means_df, id_vars=[attribute, "sample_id"])
        means_df.columns = ["bin", "sample_id", "variable", "value"]
        means_df["bias_factor"] = [attribute, ] * len(means_df)
        return means_df

    means_binned_df = pd.DataFrame()
    samples = set(exp.index)
    factors = atr.columns.tolist()
    for factor in factors:
        tmp_df = aggregate_by_factor(
            merged, factor, samples, PARAMS["bias_bin"], np.mean)
        means_binned_df = pd.concat([means_binned_df, tmp_df], axis=0)
    means_binned_df.to_csv(outfile, sep="\t",
                           index=False, float_format='%.6f')
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@transform(summariseBias,
           suffix(".tsv"),
           ".load")
def loadBias(infile, outfile):
    '''Load the binned bias-factor means into the database, indexed by
    sample_id for fast per-sample retrieval.'''
    P.load(infile, outfile, options="--add-index=sample_id")
###################################################################
# top genes
###################################################################
@mkdir("sailfish.dir/plots.dir")
@follows(loadSailfishResults, loadMetaInformation)
@originate("sailfish.dir/plots.dir/top_expressed.sentinel")
def plotTopGenesHeatmap(outfile):
'''extract the top 1000 genes (by expression) for each sample and
plot a heatmap of the intersection'''
# if someone can find a nice heatmap plotter from a dissimilarity
# matrix which is compatable with CGATReport, the sqlite and
# pandas code should be changed into a tracker
exp_select_cmd = '''
SELECT TPM, gene_id, sample_name
FROM sailfish_genes AS A
JOIN samples AS B
ON A.sample_id = B.id
'''
dbh = connect()
exp_df = pd.read_sql(exp_select_cmd, dbh)
factors_select_cmd = '''
SELECT factor, factor_value, sample_name
FROM samples AS A
JOIN factors AS B
ON A.id = B.sample_id
'''
top_n = 1000
factors_df = pd.read_sql(factors_select_cmd, dbh)
exp_df['TPM'] = exp_df['TPM'].astype(float)
exp_df_pivot = pd.pivot_table(exp_df, values=["TPM"],
index="gene_id",
columns="sample_name")
# extract the top genes per sample
top_genes = {}
for col in exp_df_pivot.columns:
top_genes[col] = exp_df_pivot[col].sort_values(
ascending=False)[0:top_n].index
# set up the empty df
intersection_df = pd.DataFrame(
index=range(0, len(exp_df_pivot.columns) **
2 - len(exp_df_pivot.columns)),
columns=["sample1", "sample2", "intersection", "fraction"])
# populate the df
n = 0
for col1, col2 in itertools.combinations_with_replacement(exp_df_pivot.columns, 2):
s1_genes = top_genes[col1]
s2_genes = top_genes[col2]
intersection = set(s1_genes).intersection(set(s2_genes))
fraction = len(intersection) / float(top_n)
intersection_df.ix[n] = [col1[1], col2[1], len(intersection), fraction]
n += 1
# if the samples are different, calculate the reverse intersection too
if col1 != col2:
intersection_df.ix[n] = [col2[1], col1[1],
len(intersection), fraction]
n += 1
# pivot to format for heatmap.3 plotting
intersection_df['fraction'] = intersection_df['fraction'].astype('float')
intersection_pivot = pd.pivot_table(
intersection_df, index="sample1", columns="sample2", values="fraction")
for factor in set(factors_df['factor'].tolist()):
print(factor)
print(factors_df)
# don't want to plot coloured by genome
if factor == "genome":
continue
plotfile = "%s_%s.png" % (P.snip(outfile, ".sentinel"), factor)
plotHeatmap = R('''
function(int_df, fact_df){
library(GMD)
library(RColorBrewer)
# subset fact_df to required factor and
# refactor to remove unwanted levels
fact_df = fact_df[fact_df$factor=="%(factor)s",]
rownames(fact_df) = fact_df$sample_name
fact_df$factor_value = factor(fact_df$factor_value)
# set up heatmap side colours
colours = colorRampPalette(
brewer.pal(length(levels(fact_df$factor_value)),"Dark2"))(
length(levels(fact_df$factor_value)))
side_colours = colours[as.numeric((fact_df$factor_value))]
print(side_colours)
# plot
png("%(plotfile)s", width=1000, heigh=1000)
heatmap.3(as.dist(1- as.matrix(int_df)),
Rowv=FALSE, Colv=FALSE,
ColIndividualColors = side_colours,
RowIndividualColors = side_colours,
breaks=100, main="%(factor)s")
dev.off()
}
''' % locals())
plotHeatmap(pandas2ri.py2ri(intersection_pivot),
pandas2ri.py2ri(factors_df))
P.touch(outfile)
###################################################################
# Plot expression distribution
###################################################################
@mkdir("sailfish.dir/plots.dir")
@follows(loadMetaInformation,
loadSailfishResults)
@originate("sailfish.dir/plots.dir/expression_distribution.sentinel")
def plotExpression(outfile):
"Plot the per sample expression distibutions coloured by factor levels"
# Note: this was being done within the pipeline but the size of
# the dataframe seemed to be causing errors:
# "Data values must be of type string or None."
# See RnaseqqcReport.ExpressionDistribution tracker
dbh = connect()
statement = """
SELECT sample_id, transcript_id, TPM
FROM sailfish_transcripts"""
df = pd.read_sql(statement, dbh)
df['logTPM'] = df['TPM'].apply(lambda x: np.log2(x + 0.1))
factors = dbh.execute("SELECT DISTINCT factor FROM factors")
factors = [x[0] for x in factors if x[0] != "genome"]
for factor in factors:
plotfile = P.snip(outfile, ".sentinel") + "_%s.png" % factor
factor_statement = '''
select *
FROM factors
JOIN samples
ON factors.sample_id = samples.id
WHERE factor = "%(factor)s"''' % locals()
factor_df = pd.read_sql(factor_statement, dbh)
full_df = pd.merge(df, factor_df, left_on="sample_id",
right_on="sample_id")
plotDistribution = R('''
function(df){
library(ggplot2)
p = ggplot(df, aes(x=logTPM, group=sample_name,
colour=as.factor(factor_value))) +
geom_density() +
xlab("Log2(TPM)") + ylab("Density") +
scale_colour_discrete(name="Factor Level") +
theme_bw() +
ggtitle("%(factor)s")
ggsave("%(plotfile)s")
}
''' % locals())
plotDistribution(pandas2ri.py2ri(full_df))
P.touch(outfile)
###################################################################
# Run Salmon To Autodetect Strandedness
###################################################################
@follows(mkdir("salmon.dir"))
@transform(buildTranscriptFasta,
regex("(\S+)"),
"salmon.dir/transcripts.salmon.index")
def indexForSalmon(infile, outfile):
'''create a salmon index'''
statement = '''
salmon index -t %(infile)s
-i %(outfile)s '''
P.run()
@transform(SEQUENCEFILES,
           SEQUENCEFILES_REGEX,
           add_inputs(indexForSalmon,
                      buildCodingGeneSet,
                      buildTranscriptGeneMap),
           r"salmon.dir/\2/lib_format_counts.json")
def runSalmon(infiles, outfile):
    '''quantify abundance using Salmon'''
    # job_threads/job_memory are read by the cluster submission layer
    job_threads = PARAMS["salmon_threads"]
    job_memory = PARAMS["salmon_memory"]
    infile, index, geneset, transcript_map = infiles
    # NOTE(review): the salmon_* locals look unused but are presumably
    # interpolated into the command via locals() by the mapper / P.run()
    # -- TODO confirm against PipelineMapping.Salmon
    salmon_bootstrap = 1
    # libtype 'A' asks salmon to auto-detect strandedness
    salmon_libtype = 'A'
    salmon_options = PARAMS["salmon_options"]
    m = PipelineMapping.Salmon()
    statement = m.build((infile,), outfile)
    P.run()
@merge(runSalmon, "strandedness.tsv")
def checkStrandednessSalmon(infiles, outfile):
    '''
    Read the output from salmon used to determine strandedness
    and write a table containing the number of alignments
    consistent with each type of library.
    The possible types are described here:
    http://salmon.readthedocs.io/en/latest/library_type.html
    '''
    frames = []
    for infile in infiles:
        # close the file handle deterministically (was leaked before)
        with open(infile, "r") as inf:
            j = json.load(inf)
        D = pd.DataFrame(list(j.values()), index=list(j.keys())).T
        # the sample name is the directory holding lib_format_counts.json
        D['sample'] = infile.split("/")[-2]
        frames.append(D)
    # DataFrame.append in a loop was deprecated (removed in pandas 2.0):
    # build the table with a single concat instead
    results = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
    # fixed column order expected by the loader and the plotting task
    results = results[["sample", "expected_format",
                       "compatible_fragment_ratio",
                       "num_compatible_fragments",
                       "num_assigned_fragments",
                       "num_consistent_mappings",
                       "num_inconsistent_mappings",
                       "MSF", "OSF", "ISF", "MSR",
                       "OSR", "ISR", "SF", "SR",
                       "MU", "OU", "IU", "U"]]
    results.to_csv(outfile, sep="\t", index=None)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@transform(checkStrandednessSalmon,
           suffix(".tsv"),
           ".load")
def loadStrandednessSalmon(infile, outfile):
    '''Load the salmon strandedness summary table into the database.'''
    P.load(infile, outfile)
@transform(checkStrandednessSalmon, suffix(".tsv"), ".png")
def plotStrandednessSalmon(infile, outfile):
    '''
    Plots a bar plot of the salmon strandness estimates
    as counts per sample.
    '''
    sns.set_style('ticks')
    tab = pd.read_csv(infile, sep="\t")
    # columns 7 onwards hold the per-library-type counts (MSF ... U)
    counttab = tab[tab.columns[7:]]
    f = plt.figure(figsize=(10, 7))
    a = f.add_axes([0.1, 0.1, 0.6, 0.75])
    x = 0
    colors = sns.color_palette("Dark2", 10)
    # NOTE(review): y-limit is derived from the first sample's counts
    # only; bars from other samples may be clipped if they are larger
    a.set_ylim(0, max(counttab.values[0]) + max(counttab.values[0]) * 0.1)
    # one group of bars (one bar per sample) per library type
    for item in counttab.columns:
        a.bar(range(x, x + len(tab)), tab[item], color=colors)
        x += len(tab)
    a.ticklabel_format(style='plain')
    # vertical separators between library-type groups
    a.vlines(np.arange(-0.4, a.get_xlim()[1], len(tab)),
             a.get_ylim()[0], a.get_ylim()[1], lw=0.5)
    a.set_xticks(np.arange(0 + len(tab) / 2, a.get_xlim()[1],
                           len(tab)))
    a.set_xticklabels(counttab.columns)
    sns.despine()
    # one legend patch per sample, matching the bar colours
    patches = []
    for c in colors[0:len(tab)]:
        patches.append(mpatches.Patch(color=c))
    l = f.legend(labels=tab['sample'], handles=patches, loc=1)
    f.suptitle('Strandedness Estimates')
    f.savefig(outfile)
###################################################################
# Main pipeline tasks
###################################################################
@follows(loadContextStats,
         loadBAMStats,
         loadTranscriptProfiles,
         loadSailfishResults,
         loadMetaInformation,
         loadBias,
         loadPicardRnaSeqMetrics,
         loadAltContextStats,
         plotSailfishSaturation,
         plotTopGenesHeatmap,
         plotExpression,
         plotStrandednessSalmon,
         loadStrandednessSalmon)
def full():
    '''Aggregate target: depends on all terminal tasks so that running
    "full" executes the entire pipeline.'''
    pass
@follows(mkdir("report"))
def build_report():
'''build report from scratch.'''
E.info("starting documentation build process from scratch")
P.run_report(clean=True)
@follows(mkdir("report"))
def update_report():
'''update report.'''
E.info("updating documentation")
P.run_report(clean=False)
def main(argv=None):
    """Command-line entry point: dispatch to the CGAT pipeline runner.

    argv defaults to sys.argv so the function works both as a
    console-script entry point and when called programmatically.
    Returns the runner's exit status so callers can sys.exit() on it.
    """
    if argv is None:
        argv = sys.argv
    return P.main(argv)
if __name__ == "__main__":
    # go through main() so script and entry-point behaviour stay identical
    sys.exit(main())
| mit |
kevin-intel/scikit-learn | sklearn/model_selection/tests/test_successive_halving.py | 3 | 24213 | from math import ceil
import pytest
from scipy.stats import norm, randint
import numpy as np
from sklearn.datasets import make_classification
from sklearn.dummy import DummyClassifier
from sklearn.experimental import enable_halving_search_cv # noqa
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import HalvingGridSearchCV
from sklearn.model_selection import HalvingRandomSearchCV
from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.svm import LinearSVC
from sklearn.model_selection._search_successive_halving import (
_SubsampleMetaSplitter, _top_k, _refit_callable)
class FastClassifier(DummyClassifier):
    """Dummy classifier that accepts parameters a, b, ... z.
    These parameters don't affect the predictions and are useful for fast
    grid searching."""
    def __init__(self, strategy='stratified', random_state=None,
                 constant=None, **kwargs):
        # **kwargs silently swallows the fake a..z parameters so that
        # set_params(a=..., b=...) works without affecting behaviour
        super().__init__(strategy=strategy, random_state=random_state,
                         constant=constant)
    def get_params(self, deep=False):
        # advertise the fake a..z parameters so grid search accepts them
        params = super().get_params(deep=deep)
        for char in range(ord('a'), ord('z') + 1):
            params[chr(char)] = 'whatever'
        return params
@pytest.mark.parametrize('Est', (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
('aggressive_elimination,'
'max_resources,'
'expected_n_iterations,'
'expected_n_required_iterations,'
'expected_n_possible_iterations,'
'expected_n_remaining_candidates,'
'expected_n_candidates,'
'expected_n_resources,'), [
# notice how it loops at the beginning
# also, the number of candidates evaluated at the last iteration is
# <= factor
(True, 'limited', 4, 4, 3, 1, [60, 20, 7, 3], [20, 20, 60, 180]),
# no aggressive elimination: we end up with less iterations, and
# the number of candidates at the last iter is > factor, which isn't
# ideal
(False, 'limited', 3, 4, 3, 3, [60, 20, 7], [20, 60, 180]),
# # When the amount of resource isn't limited, aggressive_elimination
# # has no effect. Here the default min_resources='exhaust' will take
# # over.
(True, 'unlimited', 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),
(False, 'unlimited', 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),
]
)
def test_aggressive_elimination(
Est, aggressive_elimination, max_resources, expected_n_iterations,
expected_n_required_iterations, expected_n_possible_iterations,
expected_n_remaining_candidates, expected_n_candidates,
expected_n_resources):
# Test the aggressive_elimination parameter.
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {'a': ('l1', 'l2'), 'b': list(range(30))}
base_estimator = FastClassifier()
if max_resources == 'limited':
max_resources = 180
else:
max_resources = n_samples
sh = Est(base_estimator, param_grid,
aggressive_elimination=aggressive_elimination,
max_resources=max_resources, factor=3)
sh.set_params(verbose=True) # just for test coverage
if Est is HalvingRandomSearchCV:
# same number of candidates as with the grid
sh.set_params(n_candidates=2 * 30, min_resources='exhaust')
sh.fit(X, y)
assert sh.n_iterations_ == expected_n_iterations
assert sh.n_required_iterations_ == expected_n_required_iterations
assert sh.n_possible_iterations_ == expected_n_possible_iterations
assert sh.n_resources_ == expected_n_resources
assert sh.n_candidates_ == expected_n_candidates
assert sh.n_remaining_candidates_ == expected_n_remaining_candidates
assert ceil(sh.n_candidates_[-1] / sh.factor) == sh.n_remaining_candidates_
@pytest.mark.parametrize('Est', (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
('min_resources,'
'max_resources,'
'expected_n_iterations,'
'expected_n_possible_iterations,'
'expected_n_resources,'), [
# with enough resources
('smallest', 'auto', 2, 4, [20, 60]),
# with enough resources but min_resources set manually
(50, 'auto', 2, 3, [50, 150]),
# without enough resources, only one iteration can be done
('smallest', 30, 1, 1, [20]),
# with exhaust: use as much resources as possible at the last iter
('exhaust', 'auto', 2, 2, [333, 999]),
('exhaust', 1000, 2, 2, [333, 999]),
('exhaust', 999, 2, 2, [333, 999]),
('exhaust', 600, 2, 2, [200, 600]),
('exhaust', 599, 2, 2, [199, 597]),
('exhaust', 300, 2, 2, [100, 300]),
('exhaust', 60, 2, 2, [20, 60]),
('exhaust', 50, 1, 1, [20]),
('exhaust', 20, 1, 1, [20]),
]
)
def test_min_max_resources(
Est, min_resources, max_resources, expected_n_iterations,
expected_n_possible_iterations,
expected_n_resources):
# Test the min_resources and max_resources parameters, and how they affect
# the number of resources used at each iteration
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {'a': [1, 2], 'b': [1, 2, 3]}
base_estimator = FastClassifier()
sh = Est(base_estimator, param_grid, factor=3, min_resources=min_resources,
max_resources=max_resources)
if Est is HalvingRandomSearchCV:
sh.set_params(n_candidates=6) # same number as with the grid
sh.fit(X, y)
expected_n_required_iterations = 2 # given 6 combinations and factor = 3
assert sh.n_iterations_ == expected_n_iterations
assert sh.n_required_iterations_ == expected_n_required_iterations
assert sh.n_possible_iterations_ == expected_n_possible_iterations
assert sh.n_resources_ == expected_n_resources
if min_resources == 'exhaust':
assert (sh.n_possible_iterations_ == sh.n_iterations_ ==
len(sh.n_resources_))
@pytest.mark.parametrize('Est', (HalvingRandomSearchCV, HalvingGridSearchCV))
@pytest.mark.parametrize(
'max_resources, n_iterations, n_possible_iterations', [
('auto', 5, 9), # all resources are used
(1024, 5, 9),
(700, 5, 8),
(512, 5, 8),
(511, 5, 7),
(32, 4, 4),
(31, 3, 3),
(16, 3, 3),
(4, 1, 1), # max_resources == min_resources, only one iteration is
# possible
])
def test_n_iterations(Est, max_resources, n_iterations, n_possible_iterations):
# test the number of actual iterations that were run depending on
# max_resources
n_samples = 1024
X, y = make_classification(n_samples=n_samples, random_state=1)
param_grid = {'a': [1, 2], 'b': list(range(10))}
base_estimator = FastClassifier()
factor = 2
sh = Est(base_estimator, param_grid, cv=2, factor=factor,
max_resources=max_resources, min_resources=4)
if Est is HalvingRandomSearchCV:
sh.set_params(n_candidates=20) # same as for HalvingGridSearchCV
sh.fit(X, y)
assert sh.n_required_iterations_ == 5
assert sh.n_iterations_ == n_iterations
assert sh.n_possible_iterations_ == n_possible_iterations
@pytest.mark.parametrize('Est', (HalvingRandomSearchCV, HalvingGridSearchCV))
def test_resource_parameter(Est):
# Test the resource parameter
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {'a': [1, 2], 'b': list(range(10))}
base_estimator = FastClassifier()
sh = Est(base_estimator, param_grid, cv=2, resource='c',
max_resources=10, factor=3)
sh.fit(X, y)
assert set(sh.n_resources_) == set([1, 3, 9])
for r_i, params, param_c in zip(sh.cv_results_['n_resources'],
sh.cv_results_['params'],
sh.cv_results_['param_c']):
assert r_i == params['c'] == param_c
with pytest.raises(
ValueError,
match='Cannot use resource=1234 which is not supported '):
sh = HalvingGridSearchCV(base_estimator, param_grid, cv=2,
resource='1234', max_resources=10)
sh.fit(X, y)
with pytest.raises(
ValueError,
match='Cannot use parameter c as the resource since it is part '
'of the searched parameters.'):
param_grid = {'a': [1, 2], 'b': [1, 2], 'c': [1, 3]}
sh = HalvingGridSearchCV(base_estimator, param_grid, cv=2,
resource='c', max_resources=10)
sh.fit(X, y)
@pytest.mark.parametrize(
'max_resources, n_candidates, expected_n_candidates', [
(512, 'exhaust', 128), # generate exactly as much as needed
(32, 'exhaust', 8),
(32, 8, 8),
(32, 7, 7), # ask for less than what we could
(32, 9, 9), # ask for more than 'reasonable'
])
def test_random_search(max_resources, n_candidates, expected_n_candidates):
# Test random search and make sure the number of generated candidates is
# as expected
n_samples = 1024
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {'a': norm, 'b': norm}
base_estimator = FastClassifier()
sh = HalvingRandomSearchCV(base_estimator, param_grid,
n_candidates=n_candidates, cv=2,
max_resources=max_resources, factor=2,
min_resources=4)
sh.fit(X, y)
assert sh.n_candidates_[0] == expected_n_candidates
if n_candidates == 'exhaust':
# Make sure 'exhaust' makes the last iteration use as much resources as
# we can
assert sh.n_resources_[-1] == max_resources
@pytest.mark.parametrize('param_distributions, expected_n_candidates', [
({'a': [1, 2]}, 2), # all lists, sample less than n_candidates
({'a': randint(1, 3)}, 10), # not all list, respect n_candidates
])
def test_random_search_discrete_distributions(param_distributions,
expected_n_candidates):
# Make sure random search samples the appropriate number of candidates when
# we ask for more than what's possible. How many parameters are sampled
# depends whether the distributions are 'all lists' or not (see
# ParameterSampler for details). This is somewhat redundant with the checks
# in ParameterSampler but interaction bugs were discovered during
# developement of SH
n_samples = 1024
X, y = make_classification(n_samples=n_samples, random_state=0)
base_estimator = FastClassifier()
sh = HalvingRandomSearchCV(base_estimator, param_distributions,
n_candidates=10)
sh.fit(X, y)
assert sh.n_candidates_[0] == expected_n_candidates
@pytest.mark.parametrize('Est', (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize('params, expected_error_message', [
({'scoring': {'accuracy', 'accuracy'}},
'Multimetric scoring is not supported'),
({'resource': 'not_a_parameter'},
'Cannot use resource=not_a_parameter which is not supported'),
({'resource': 'a', 'max_resources': 100},
'Cannot use parameter a as the resource since it is part of'),
({'max_resources': 'not_auto'},
'max_resources must be either'),
({'max_resources': 100.5},
'max_resources must be either'),
({'max_resources': -10},
'max_resources must be either'),
({'min_resources': 'bad str'},
'min_resources must be either'),
({'min_resources': 0.5},
'min_resources must be either'),
({'min_resources': -10},
'min_resources must be either'),
({'max_resources': 'auto', 'resource': 'b'},
"max_resources can only be 'auto' if resource='n_samples'"),
({'min_resources': 15, 'max_resources': 14},
"min_resources_=15 is greater than max_resources_=14"),
({'cv': KFold(shuffle=True)}, "must yield consistent folds"),
({'cv': ShuffleSplit()}, "must yield consistent folds"),
])
def test_input_errors(Est, params, expected_error_message):
base_estimator = FastClassifier()
param_grid = {'a': [1]}
X, y = make_classification(100)
sh = Est(base_estimator, param_grid, **params)
with pytest.raises(ValueError, match=expected_error_message):
sh.fit(X, y)
@pytest.mark.parametrize('params, expected_error_message', [
({'n_candidates': 'exhaust', 'min_resources': 'exhaust'},
"cannot be both set to 'exhaust'"),
({'n_candidates': 'bad'}, "either 'exhaust' or a positive integer"),
({'n_candidates': 0}, "either 'exhaust' or a positive integer"),
])
def test_input_errors_randomized(params, expected_error_message):
# tests specific to HalvingRandomSearchCV
base_estimator = FastClassifier()
param_grid = {'a': [1]}
X, y = make_classification(100)
sh = HalvingRandomSearchCV(base_estimator, param_grid, **params)
with pytest.raises(ValueError, match=expected_error_message):
sh.fit(X, y)
@pytest.mark.parametrize(
    'fraction, subsample_test, expected_train_size, expected_test_size', [
        (.5, True, 40, 10),
        (.5, False, 40, 20),
        (.2, True, 16, 4),
        (.2, False, 16, 20)])
def test_subsample_splitter_shapes(fraction, subsample_test,
                                   expected_train_size, expected_test_size):
    # Make sure splits returned by SubsampleMetaSplitter are of appropriate
    # size
    n_samples = 100
    X, y = make_classification(n_samples)
    cv = _SubsampleMetaSplitter(base_cv=KFold(5), fraction=fraction,
                                subsample_test=subsample_test,
                                random_state=None)
    for train, test in cv.split(X, y):
        assert train.shape[0] == expected_train_size
        assert test.shape[0] == expected_test_size
        # with subsample_test, train+test together are `fraction` of the
        # data; otherwise the test fold keeps its full base-cv size
        if subsample_test:
            assert train.shape[0] + test.shape[0] == int(n_samples * fraction)
        else:
            assert test.shape[0] == n_samples // cv.base_cv.get_n_splits()
@pytest.mark.parametrize('subsample_test', (True, False))
def test_subsample_splitter_determinism(subsample_test):
    # Make sure _SubsampleMetaSplitter is consistent across calls to split():
    # - we're OK having training sets differ (they're always sampled with a
    #   different fraction anyway)
    # - when we don't subsample the test set, we want it to be always the same.
    #   This check is the most important. This is ensured by the determinism
    #   of the base_cv.
    # Note: we could force both train and test splits to be always the same if
    # we drew an int seed in _SubsampleMetaSplitter.__init__
    n_samples = 100
    X, y = make_classification(n_samples)
    cv = _SubsampleMetaSplitter(base_cv=KFold(5), fraction=.5,
                                subsample_test=subsample_test,
                                random_state=None)
    # two independent passes over the same data
    folds_a = list(cv.split(X, y, groups=None))
    folds_b = list(cv.split(X, y, groups=None))
    for (train_a, test_a), (train_b, test_b) in zip(folds_a, folds_b):
        assert not np.all(train_a == train_b)
        if subsample_test:
            assert not np.all(test_a == test_b)
        else:
            assert np.all(test_a == test_b)
            assert np.all(X[test_a] == X[test_b])
@pytest.mark.parametrize('k, itr, expected', [
    (1, 0, ['c']),
    (2, 0, ['a', 'c']),
    (4, 0, ['d', 'b', 'a', 'c']),
    (10, 0, ['d', 'b', 'a', 'c']),
    (1, 1, ['e']),
    (2, 1, ['f', 'e']),
    (10, 1, ['f', 'e']),
    (1, 2, ['i']),
    (10, 2, ['g', 'h', 'i']),
])
def test_top_k(k, itr, expected):
    # _top_k must return the k best-scoring candidates restricted to
    # iteration `itr`, capped at the number of candidates in that iteration
    results = { # this isn't a 'real world' result dict
        'iter': [0, 0, 0, 0, 1, 1, 2, 2, 2],
        'mean_test_score': [4, 3, 5, 1, 11, 10, 5, 6, 9],
        'params': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'],
    }
    got = _top_k(results, k=k, itr=itr)
    assert np.all(got == expected)
def test_refit_callable():
    # _refit_callable must pick the best candidate of the LAST iteration
    # (index 8, params 'i'), even though iteration 1 holds higher scores
    results = { # this isn't a 'real world' result dict
        'iter': np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]),
        'mean_test_score': np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]),
        'params': np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']),
    }
    assert _refit_callable(results) == 8 # index of 'i'
@pytest.mark.parametrize('Est', (HalvingRandomSearchCV, HalvingGridSearchCV))
def test_cv_results(Est):
# test that the cv_results_ matches correctly the logic of the
# tournament: in particular that the candidates continued in each
# successive iteration are those that were best in the previous iteration
pd = pytest.importorskip('pandas')
rng = np.random.RandomState(0)
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {'a': ('l1', 'l2'), 'b': list(range(30))}
base_estimator = FastClassifier()
# generate random scores: we want to avoid ties, which would otherwise
# mess with the ordering and make testing harder
def scorer(est, X, y):
return rng.rand()
sh = Est(base_estimator, param_grid, factor=2, scoring=scorer)
if Est is HalvingRandomSearchCV:
# same number of candidates as with the grid
sh.set_params(n_candidates=2 * 30, min_resources='exhaust')
sh.fit(X, y)
# non-regression check for
# https://github.com/scikit-learn/scikit-learn/issues/19203
assert isinstance(sh.cv_results_['iter'], np.ndarray)
assert isinstance(sh.cv_results_['n_resources'], np.ndarray)
cv_results_df = pd.DataFrame(sh.cv_results_)
# just make sure we don't have ties
assert len(cv_results_df['mean_test_score'].unique()) == len(cv_results_df)
cv_results_df['params_str'] = cv_results_df['params'].apply(str)
table = cv_results_df.pivot(index='params_str', columns='iter',
values='mean_test_score')
# table looks like something like this:
# iter 0 1 2 3 4 5
# params_str
# {'a': 'l2', 'b': 23} 0.75 NaN NaN NaN NaN NaN
# {'a': 'l1', 'b': 30} 0.90 0.875 NaN NaN NaN NaN
# {'a': 'l1', 'b': 0} 0.75 NaN NaN NaN NaN NaN
# {'a': 'l2', 'b': 3} 0.85 0.925 0.9125 0.90625 NaN NaN
# {'a': 'l1', 'b': 5} 0.80 NaN NaN NaN NaN NaN
# ...
# where a NaN indicates that the candidate wasn't evaluated at a given
# iteration, because it wasn't part of the top-K at some previous
# iteration. We here make sure that candidates that aren't in the top-k at
# any given iteration are indeed not evaluated at the subsequent
# iterations.
nan_mask = pd.isna(table)
n_iter = sh.n_iterations_
for it in range(n_iter - 1):
already_discarded_mask = nan_mask[it]
# make sure that if a candidate is already discarded, we don't evaluate
# it later
assert (already_discarded_mask & nan_mask[it + 1] ==
already_discarded_mask).all()
# make sure that the number of discarded candidate is correct
discarded_now_mask = ~already_discarded_mask & nan_mask[it + 1]
kept_mask = ~already_discarded_mask & ~discarded_now_mask
assert kept_mask.sum() == sh.n_candidates_[it + 1]
# make sure that all discarded candidates have a lower score than the
# kept candidates
discarded_max_score = table[it].where(discarded_now_mask).max()
kept_min_score = table[it].where(kept_mask).min()
assert discarded_max_score < kept_min_score
# We now make sure that the best candidate is chosen only from the last
# iteration.
# We also make sure this is true even if there were higher scores in
# earlier rounds (this isn't generally the case, but worth ensuring it's
# possible).
last_iter = cv_results_df['iter'].max()
idx_best_last_iter = (
cv_results_df[cv_results_df['iter'] == last_iter]
['mean_test_score'].idxmax()
)
idx_best_all_iters = cv_results_df['mean_test_score'].idxmax()
assert sh.best_params_ == cv_results_df.iloc[idx_best_last_iter]['params']
assert (cv_results_df.iloc[idx_best_last_iter]['mean_test_score'] <
cv_results_df.iloc[idx_best_all_iters]['mean_test_score'])
assert (cv_results_df.iloc[idx_best_last_iter]['params'] !=
cv_results_df.iloc[idx_best_all_iters]['params'])
@pytest.mark.parametrize('Est', (HalvingGridSearchCV, HalvingRandomSearchCV))
def test_base_estimator_inputs(Est):
# make sure that the base estimators are passed the correct parameters and
# number of samples at each iteration.
pd = pytest.importorskip('pandas')
passed_n_samples_fit = []
passed_n_samples_predict = []
passed_params = []
class FastClassifierBookKeeping(FastClassifier):
def fit(self, X, y):
passed_n_samples_fit.append(X.shape[0])
return super().fit(X, y)
def predict(self, X):
passed_n_samples_predict.append(X.shape[0])
return super().predict(X)
def set_params(self, **params):
passed_params.append(params)
return super().set_params(**params)
n_samples = 1024
n_splits = 2
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {'a': ('l1', 'l2'), 'b': list(range(30))}
base_estimator = FastClassifierBookKeeping()
sh = Est(base_estimator, param_grid, factor=2, cv=n_splits,
return_train_score=False, refit=False)
if Est is HalvingRandomSearchCV:
# same number of candidates as with the grid
sh.set_params(n_candidates=2 * 30, min_resources='exhaust')
sh.fit(X, y)
assert len(passed_n_samples_fit) == len(passed_n_samples_predict)
passed_n_samples = [x + y for (x, y) in zip(passed_n_samples_fit,
passed_n_samples_predict)]
# Lists are of length n_splits * n_iter * n_candidates_at_i.
# Each chunk of size n_splits corresponds to the n_splits folds for the
# same candidate at the same iteration, so they contain equal values. We
# subsample such that the lists are of length n_iter * n_candidates_at_it
passed_n_samples = passed_n_samples[::n_splits]
passed_params = passed_params[::n_splits]
cv_results_df = pd.DataFrame(sh.cv_results_)
assert len(passed_params) == len(passed_n_samples) == len(cv_results_df)
uniques, counts = np.unique(passed_n_samples, return_counts=True)
assert (sh.n_resources_ == uniques).all()
assert (sh.n_candidates_ == counts).all()
assert (cv_results_df['params'] == passed_params).all()
assert (cv_results_df['n_resources'] == passed_n_samples).all()
@pytest.mark.parametrize('Est', (HalvingGridSearchCV, HalvingRandomSearchCV))
def test_groups_support(Est):
# Check if ValueError (when groups is None) propagates to
# HalvingGridSearchCV and HalvingRandomSearchCV
# And also check if groups is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=50, n_classes=2, random_state=0)
groups = rng.randint(0, 3, 50)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2),
GroupKFold(n_splits=3), GroupShuffleSplit(random_state=0)]
error_msg = "The 'groups' parameter should not be None."
for cv in group_cvs:
gs = Est(clf, grid, cv=cv)
with pytest.raises(ValueError, match=error_msg):
gs.fit(X, y)
gs.fit(X, y, groups=groups)
non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit(random_state=0)]
for cv in non_group_cvs:
gs = Est(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
| bsd-3-clause |
steinnymir/RegAscope2017 | test_scripts/GUI_test/qt_mpl_dataplot.py | 1 | 8289 | """
Series of data are loaded from a .csv file, and their names are
displayed in a checkable list view. The user can select the series
it wants from the list and plot them on a matplotlib canvas.
Use the sample .csv file that comes with the script for an example
of data series.
Eli Bendersky (eliben@gmail.com)
License: this code is in the public domain
Last modified: 18.05.2009
"""
import sys, os, csv
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class Form(QMainWindow):
    """Main window: loads CSV data series and plots checked series on a
    matplotlib canvas embedded in a PyQt4 widget.
    """

    def __init__(self, parent=None):
        super(Form, self).__init__(parent)
        self.setWindowTitle('PyQt & matplotlib demo: Data plotting')

        self.data = DataHolder()
        self.series_list_model = QStandardItemModel()

        # Build the UI before the first update/draw.
        self.create_menu()
        self.create_main_frame()
        self.create_status_bar()

        self.update_ui()
        self.on_show()

    def load_file(self, filename=None):
        """Ask the user for a CSV file, load it and refresh the series list.

        NOTE(review): the *filename* argument is immediately overwritten by
        the dialog result; it is kept only for slot-signature compatibility.
        """
        filename = QFileDialog.getOpenFileName(self,
            'Open a data file', '.', 'CSV files (*.csv);;All Files (*.*)')

        if filename:
            self.data.load_from_file(filename)
            self.fill_series_list(self.data.series_names())
            self.status_text.setText("Loaded " + filename)
            self.update_ui()

    def update_ui(self):
        """Enable and bound the X-range spin boxes when data is available,
        disable them otherwise.
        """
        if self.data.series_count() > 0 and self.data.series_len() > 0:
            self.from_spin.setValue(0)
            self.to_spin.setValue(self.data.series_len() - 1)

            for w in [self.from_spin, self.to_spin]:
                w.setRange(0, self.data.series_len() - 1)
                w.setEnabled(True)
        else:
            for w in [self.from_spin, self.to_spin]:
                w.setEnabled(False)

    def on_show(self):
        """Redraw the plot: one line per checked series, restricted to the
        X range selected in the spin boxes.
        """
        self.axes.clear()
        self.axes.grid(True)

        has_series = False

        for row in range(self.series_list_model.rowCount()):
            model_index = self.series_list_model.index(row, 0)
            # Only series whose list item is checked are plotted.
            checked = self.series_list_model.data(model_index,
                Qt.CheckStateRole) == QVariant(Qt.Checked)
            name = str(self.series_list_model.data(model_index).toString())

            if checked:
                has_series = True
                x_from = self.from_spin.value()
                x_to = self.to_spin.value()
                # Slice is inclusive of the "to" index, hence x_to + 1.
                series = self.data.get_series_data(name)[x_from:x_to + 1]
                self.axes.plot(range(len(series)), series, 'o-', label=name)

        if has_series and self.legend_cb.isChecked():
            self.axes.legend()
        self.canvas.draw()

    def on_about(self):
        """Show the module docstring in an About dialog."""
        msg = __doc__
        QMessageBox.about(self, "About the demo", msg.strip())

    def fill_series_list(self, names):
        """Replace the list-view contents with one checkable item per name."""
        self.series_list_model.clear()

        for name in names:
            item = QStandardItem(name)
            item.setCheckState(Qt.Unchecked)
            item.setCheckable(True)
            self.series_list_model.appendRow(item)

    def create_main_frame(self):
        """Build the central widget: canvas and toolbar on the left,
        series list and range/legend controls on the right.
        """
        self.main_frame = QWidget()

        plot_frame = QWidget()

        self.dpi = 100
        self.fig = Figure((6.0, 4.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)

        self.axes = self.fig.add_subplot(111)
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)

        log_label = QLabel("Data series:")
        self.series_list_view = QListView()
        self.series_list_view.setModel(self.series_list_model)

        spin_label1 = QLabel('X from')
        self.from_spin = QSpinBox()
        spin_label2 = QLabel('to')
        self.to_spin = QSpinBox()

        spins_hbox = QHBoxLayout()
        spins_hbox.addWidget(spin_label1)
        spins_hbox.addWidget(self.from_spin)
        spins_hbox.addWidget(spin_label2)
        spins_hbox.addWidget(self.to_spin)
        spins_hbox.addStretch(1)

        self.legend_cb = QCheckBox("Show L&egend")
        self.legend_cb.setChecked(False)

        self.show_button = QPushButton("&Show")
        # Old-style PyQt4 signal/slot connection.
        self.connect(self.show_button, SIGNAL('clicked()'), self.on_show)

        left_vbox = QVBoxLayout()
        left_vbox.addWidget(self.canvas)
        left_vbox.addWidget(self.mpl_toolbar)

        right_vbox = QVBoxLayout()
        right_vbox.addWidget(log_label)
        right_vbox.addWidget(self.series_list_view)
        right_vbox.addLayout(spins_hbox)
        right_vbox.addWidget(self.legend_cb)
        right_vbox.addWidget(self.show_button)
        right_vbox.addStretch(1)

        hbox = QHBoxLayout()
        hbox.addLayout(left_vbox)
        hbox.addLayout(right_vbox)
        self.main_frame.setLayout(hbox)

        self.setCentralWidget(self.main_frame)

    def create_status_bar(self):
        """Create the status bar with an initial hint message."""
        self.status_text = QLabel("Please load a data file")
        self.statusBar().addWidget(self.status_text, 1)

    def create_menu(self):
        """Create the File and Help menus with their actions."""
        self.file_menu = self.menuBar().addMenu("&File")

        load_action = self.create_action("&Load file",
            shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
        quit_action = self.create_action("&Quit", slot=self.close,
            shortcut="Ctrl+Q", tip="Close the application")

        # None inserts a separator between the two actions.
        self.add_actions(self.file_menu,
            (load_action, None, quit_action))

        self.help_menu = self.menuBar().addMenu("&Help")
        about_action = self.create_action("&About",
            shortcut='F1', slot=self.on_about,
            tip='About the demo')

        self.add_actions(self.help_menu, (about_action,))

    def add_actions(self, target, actions):
        """Append actions to a menu; a None entry becomes a separator."""
        for action in actions:
            if action is None:
                target.addSeparator()
            else:
                target.addAction(action)

    def create_action(  self, text, slot=None, shortcut=None,
                        icon=None, tip=None, checkable=False,
                        signal="triggered()"):
        """Convenience factory for a QAction with optional icon, shortcut,
        tooltip/status tip, slot connection and checkable state.
        """
        action = QAction(text, self)
        if icon is not None:
            action.setIcon(QIcon(":/%s.png" % icon))
        if shortcut is not None:
            action.setShortcut(shortcut)
        if tip is not None:
            action.setToolTip(tip)
            action.setStatusTip(tip)
        if slot is not None:
            self.connect(action, SIGNAL(signal), slot)
        if checkable:
            action.setCheckable(True)
        return action
class DataHolder(object):
    """ Just a thin wrapper over a dictionary that holds integer
        data series. Each series has a name and a list of numbers
        as its data. The length of all series is assumed to be
        the same.

        The series can be read from a CSV file, where each line
        is a separate series. In each series, the first item in
        the line is the name, and the rest are data numbers.
    """
    def __init__(self, filename=None):
        self.load_from_file(filename)

    def load_from_file(self, filename=None):
        """Load series from *filename*; with no filename, reset to empty.

        Fixes over the previous version:
        - ``datalen`` is initialized up front, so ``series_len()`` no
          longer raises AttributeError before any file has been loaded.
        - the file is opened via a context manager (it previously leaked),
          in text mode with ``newline=''`` as the csv module requires.
        - ``map()`` results are materialized into real lists.
        - blank lines are skipped instead of raising IndexError.
        """
        self.data = {}      # name -> list of ints
        self.names = []     # series names in file order
        self.datalen = 0    # length of the most recently read series

        if filename:
            with open(filename, 'r', newline='') as csvfile:
                for line in csv.reader(csvfile):
                    if not line:
                        continue  # tolerate empty lines in the file
                    self.names.append(line[0])
                    self.data[line[0]] = list(map(int, line[1:]))
                    self.datalen = len(line) - 1

    def series_names(self):
        """ Names of the data series
        """
        return self.names

    def series_len(self):
        """ Length of a data series
        """
        return self.datalen

    def series_count(self):
        """Number of loaded series."""
        return len(self.data)

    def get_series_data(self, name):
        """Return the list of integers for the series called *name*."""
        return self.data[name]
def main():
    """Create the Qt application, show the main window and enter the
    event loop (returns when the window is closed).
    """
    application = QApplication(sys.argv)
    window = Form()
    window.show()
    application.exec_()
if __name__ == "__main__":
main()
#~ dh = DataHolder('qt_mpl_data.csv')
#~ print dh.data
#~ print dh.get_series_data('1991 Sales')
#~ print dh.series_names()
#~ print dh.series_count() | mit |
Bismarrck/tensorflow | tensorflow/contrib/losses/python/metric_learning/metric_loss_ops_test.py | 24 | 20551 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for triplet_semihard_loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.losses.python import metric_learning as metric_loss_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance_np(feature, squared=False):
  """Computes the pairwise distance matrix in numpy.

  Args:
    feature: 2-D numpy array of size [number of data, feature dimension]
    squared: Boolean. If true, output is the pairwise squared euclidean
      distance matrix; else, output is the pairwise euclidean distance matrix.

  Returns:
    pairwise_distances: 2-D numpy array of size
      [number of data, number of data].
  """
  num_data = feature.shape[0]
  rows, cols = np.triu_indices(num_data, 1)
  # Only the strict upper triangle needs computing: the matrix is
  # symmetric with a zero diagonal.
  upper = np.linalg.norm(feature[cols] - feature[rows], axis=1)
  if squared:
    upper = upper ** 2.
  pairwise_distances = np.zeros((num_data, num_data))
  pairwise_distances[rows, cols] = upper
  # Mirror the upper triangle into the lower one (diagonal stays zero).
  pairwise_distances = pairwise_distances + pairwise_distances.T
  return pairwise_distances
class ContrastiveLossTest(test.TestCase):
  """Checks contrastive_loss against a NumPy reference implementation."""

  def testContrastive(self):
    with self.cached_session():
      num_data = 10
      feat_dim = 6
      margin = 1.0

      embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
      embeddings_positive = np.random.rand(num_data, feat_dim).astype(
          np.float32)
      # Binary labels: 1 = similar pair, 0 = dissimilar pair.
      labels = np.random.randint(0, 2, size=(num_data,)).astype(np.float32)

      # Compute the loss in NP: similar pairs pay the squared distance,
      # dissimilar pairs pay the squared hinge on (margin - distance).
      dist = np.sqrt(
          np.sum(np.square(embeddings_anchor - embeddings_positive), axis=1))
      loss_np = np.mean(
          labels * np.square(dist) +
          (1.0 - labels) * np.square(np.maximum(margin - dist, 0.0)))
      # Compute the loss with TF
      loss_tf = metric_loss_ops.contrastive_loss(
          labels=ops.convert_to_tensor(labels),
          embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
          embeddings_positive=ops.convert_to_tensor(embeddings_positive),
          margin=margin)
      loss_tf = loss_tf.eval()
      self.assertAllClose(loss_np, loss_tf)
class TripletSemiHardLossTest(test.TestCase):
  """Checks triplet_semihard_loss against a NumPy reference implementation."""

  def testTripletSemiHard(self):
    with self.cached_session():
      num_data = 10
      feat_dim = 6
      margin = 1.0
      num_classes = 4

      embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
      labels = np.random.randint(
          0, num_classes, size=(num_data)).astype(np.float32)

      # Reshape labels to compute adjacency matrix.
      labels_reshaped = np.reshape(labels, (labels.shape[0], 1))

      # Compute the loss in NP.
      adjacency = np.equal(labels_reshaped, labels_reshaped.T)
      pdist_matrix = pairwise_distance_np(embedding, squared=True)
      loss_np = 0.0
      num_positives = 0.0
      for i in range(num_data):
        for j in range(num_data):
          # Consider every positive (same-label, distinct) pair (i, j).
          if adjacency[i][j] > 0.0 and i != j:
            num_positives += 1.0

            pos_distance = pdist_matrix[i][j]
            neg_distances = []

            for k in range(num_data):
              if adjacency[i][k] == 0:
                neg_distances.append(pdist_matrix[i][k])

            # Sort by distance.
            neg_distances.sort()
            chosen_neg_distance = neg_distances[0]

            # Semi-hard negative: the closest negative that is still
            # farther than the positive; falls back to the farthest
            # negative if none qualifies.
            for l in range(len(neg_distances)):
              chosen_neg_distance = neg_distances[l]
              if chosen_neg_distance > pos_distance:
                break

            loss_np += np.maximum(
                0.0, margin - chosen_neg_distance + pos_distance)

      loss_np /= num_positives

      # Compute the loss in TF.
      loss_tf = metric_loss_ops.triplet_semihard_loss(
          labels=ops.convert_to_tensor(labels),
          embeddings=ops.convert_to_tensor(embedding),
          margin=margin)
      loss_tf = loss_tf.eval()
      self.assertAllClose(loss_np, loss_tf)
class LiftedStructLossTest(test.TestCase):
  """Checks lifted_struct_loss against a NumPy reference implementation."""

  def testLiftedStruct(self):
    with self.cached_session():
      num_data = 10
      feat_dim = 6
      margin = 1.0
      num_classes = 4

      embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
      labels = np.random.randint(
          0, num_classes, size=(num_data)).astype(np.float32)
      # Reshape labels to compute adjacency matrix.
      labels_reshaped = np.reshape(labels, (labels.shape[0], 1))

      # Compute the loss in NP
      adjacency = np.equal(labels_reshaped, labels_reshaped.T)
      pdist_matrix = pairwise_distance_np(embedding)
      loss_np = 0.0
      num_constraints = 0.0
      for i in range(num_data):
        for j in range(num_data):
          # One constraint per positive (same-label, distinct) pair.
          if adjacency[i][j] > 0.0 and i != j:
            d_pos = pdist_matrix[i][j]
            negs = []
            # Collect margin-shifted distances to all negatives of both
            # endpoints of the positive pair.
            for k in range(num_data):
              if not adjacency[i][k]:
                negs.append(margin - pdist_matrix[i][k])
            for l in range(num_data):
              if not adjacency[j][l]:
                negs.append(margin - pdist_matrix[j][l])

            # Numerically stable log-sum-exp (soft maximum) over negs.
            negs = np.array(negs)
            max_elem = np.max(negs)
            negs -= max_elem
            negs = np.exp(negs)
            soft_maximum = np.log(np.sum(negs)) + max_elem

            num_constraints += 1.0
            this_loss = max(soft_maximum + d_pos, 0)
            loss_np += this_loss * this_loss
      loss_np = loss_np / num_constraints / 2.0

      # Compute the loss in TF
      loss_tf = metric_loss_ops.lifted_struct_loss(
          labels=ops.convert_to_tensor(labels),
          embeddings=ops.convert_to_tensor(embedding),
          margin=margin)
      loss_tf = loss_tf.eval()
      self.assertAllClose(loss_np, loss_tf)
def convert_to_list_of_sparse_tensor(np_matrix):
  """Convert each row of a dense 0/1 matrix into a 1-D SparseTensor.

  Args:
    np_matrix: 2-D numpy array whose entries are 0 or 1.

  Returns:
    A list with one SparseTensor per row; each tensor has dense shape
    [ncols], a value of 1.0 at every column where the row equals 1, and
    indices in ascending column order.
  """
  nrows, ncols = np_matrix.shape
  tensors = []
  for row in range(nrows):
    hot_cols = [[col] for col in range(ncols) if np_matrix[row][col] == 1]
    tensors.append(sparse_tensor.SparseTensor(
        indices=np.array(hot_cols),
        values=np.ones((len(hot_cols),)),
        dense_shape=np.array([ncols,])))
  return tensors
class NpairsLossTest(test.TestCase):
  """Checks npairs_loss against a NumPy reference implementation."""

  def testNpairs(self):
    with self.cached_session():
      num_data = 15
      feat_dim = 6
      num_classes = 5
      reg_lambda = 0.02

      embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
      embeddings_positive = np.random.rand(num_data, feat_dim).astype(
          np.float32)

      labels = np.random.randint(
          0, num_classes, size=(num_data)).astype(np.float32)
      # Reshape labels to compute adjacency matrix.
      labels_reshaped = np.reshape(labels, (labels.shape[0], 1))

      # Compute the loss in NP: L2 regularization term on both embeddings
      # plus a softmax cross-entropy over the anchor/positive similarities.
      reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
      reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
      reg_term *= 0.25 * reg_lambda

      similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)

      # Target distribution: uniform over the same-label columns per row.
      labels_remapped = np.equal(
          labels_reshaped, labels_reshaped.T).astype(np.float32)
      labels_remapped /= np.sum(labels_remapped, axis=1, keepdims=True)

      xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
          logits=ops.convert_to_tensor(similarity_matrix),
          labels=ops.convert_to_tensor(labels_remapped))).eval()
      loss_np = xent_loss + reg_term

      # Compute the loss in TF
      loss_tf = metric_loss_ops.npairs_loss(
          labels=ops.convert_to_tensor(labels),
          embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
          embeddings_positive=ops.convert_to_tensor(embeddings_positive),
          reg_lambda=reg_lambda)
      loss_tf = loss_tf.eval()
      self.assertAllClose(loss_np, loss_tf)
class NpairsLossMultiLabelTest(test.TestCase):
  """Tests for npairs_loss_multilabel.

  Verifies (a) equivalence with the single-label npairs loss when every
  sample carries a unique one-hot label, and (b) agreement with a NumPy
  reference implementation for random multi-hot labels.
  """

  def testNpairsMultiLabelLossWithSingleLabelEqualsNpairsLoss(self):
    with self.cached_session():
      num_data = 15
      feat_dim = 6
      reg_lambda = 0.02

      embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
      embeddings_positive = np.random.rand(num_data, feat_dim).astype(
          np.float32)

      labels = np.arange(num_data)
      labels = np.reshape(labels, -1)

      # Compute vanilla npairs loss.
      loss_npairs = metric_loss_ops.npairs_loss(
          labels=ops.convert_to_tensor(labels),
          embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
          embeddings_positive=ops.convert_to_tensor(embeddings_positive),
          reg_lambda=reg_lambda).eval()

      # Compute npairs multilabel loss: an identity matrix of one-hot rows
      # encodes exactly the same (unique) labeling, so the two must agree.
      labels_one_hot = np.identity(num_data)
      loss_npairs_multilabel = metric_loss_ops.npairs_loss_multilabel(
          sparse_labels=convert_to_list_of_sparse_tensor(labels_one_hot),
          embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
          embeddings_positive=ops.convert_to_tensor(embeddings_positive),
          reg_lambda=reg_lambda).eval()

      self.assertAllClose(loss_npairs, loss_npairs_multilabel)

  def testNpairsMultiLabel(self):
    with self.cached_session():
      num_data = 15
      feat_dim = 6
      num_classes = 10
      reg_lambda = 0.02

      embeddings_anchor = np.random.rand(num_data, feat_dim).astype(np.float32)
      embeddings_positive = np.random.rand(num_data, feat_dim).astype(
          np.float32)

      labels = np.random.randint(0, 2, (num_data, num_classes))
      # set entire column to one so that each row has at least one bit set.
      labels[:, -1] = 1

      # Compute the loss in NP: L2 regularization term plus softmax
      # cross-entropy over the anchor/positive similarity matrix.
      reg_term = np.mean(np.sum(np.square(embeddings_anchor), 1))
      reg_term += np.mean(np.sum(np.square(embeddings_positive), 1))
      reg_term *= 0.25 * reg_lambda

      similarity_matrix = np.matmul(embeddings_anchor, embeddings_positive.T)

      # Target distribution is proportional to shared label count per pair.
      # NOTE: `np.float` was removed in NumPy 1.20; the builtin `float` is
      # the documented replacement and is exactly what the alias meant.
      labels_remapped = np.dot(labels, labels.T).astype(float)
      labels_remapped /= np.sum(labels_remapped, 1, keepdims=True)

      xent_loss = math_ops.reduce_mean(nn.softmax_cross_entropy_with_logits(
          logits=ops.convert_to_tensor(similarity_matrix),
          labels=ops.convert_to_tensor(labels_remapped))).eval()
      loss_np = xent_loss + reg_term

      # Compute the loss in TF
      loss_tf = metric_loss_ops.npairs_loss_multilabel(
          sparse_labels=convert_to_list_of_sparse_tensor(labels),
          embeddings_anchor=ops.convert_to_tensor(embeddings_anchor),
          embeddings_positive=ops.convert_to_tensor(embeddings_positive),
          reg_lambda=reg_lambda)
      loss_tf = loss_tf.eval()
      self.assertAllClose(loss_np, loss_tf)
def compute_ground_truth_cluster_score(feat, y):
  """Compute the ground-truth facility-location score of a labeling.

  For each cluster label in ``y``, the best medoid is the member that
  minimizes the summed distance to all members of that cluster; the score
  is the negated sum of these minima over all clusters.

  Args:
    feat: 2-D array of shape [num_data, feature_dim].
    y: 1-D array of cluster labels aligned with the rows of ``feat``.

  Returns:
    np.float32 scalar score (0.0 when ``y`` is empty).
  """
  y_unique = np.unique(y)
  score_gt_np = 0.0
  for c in y_unique:
    feat_subset = feat[y == c, :]
    pdist_subset = pairwise_distance_np(feat_subset)
    score_gt_np += -1.0 * np.min(np.sum(pdist_subset, axis=0))
  # np.float32(...) also handles the empty-``y`` case, where the loop never
  # runs and score_gt_np is still a plain Python float (which has no
  # .astype method, so the previous code crashed here).
  return np.float32(score_gt_np)
def compute_cluster_loss_numpy(feat,
                               y,
                               margin_multiplier=1.0,
                               enable_pam_finetuning=True):
  """NumPy reference for the structured clustering loss.

  Fits a loss-augmented facility-location clustering (optionally refined
  with PAM swaps) and returns the structured hinge
  max(0, augmented score - ground-truth score).

  Args:
    feat: 2-D array of shape [num_data, feature_dim].
    y: 1-D array of ground-truth cluster labels.
    margin_multiplier: weight of the (1 - NMI) structured margin.
    enable_pam_finetuning: if True, refine the greedy solution with PAM.

  Returns:
    np.float32 scalar loss.
  """
  if enable_pam_finetuning:
    facility = ForwardGreedyFacility(
        n_clusters=np.unique(y).size).pam_augmented_fit(feat, y,
                                                        margin_multiplier)
  else:
    facility = ForwardGreedyFacility(
        n_clusters=np.unique(y).size).loss_augmented_fit(feat, y,
                                                         margin_multiplier)

  score_augmented = facility.score_aug_
  score_gt = compute_ground_truth_cluster_score(feat, y)
  return np.maximum(np.float32(0.0), score_augmented - score_gt)
class ForwardGreedyFacility(object):
  """Greedy forward-selection K-Medoids with loss augmentation.

  Medoids are chosen one at a time to maximize the loss-augmented
  facility-location objective; `pam_augmented_fit` additionally refines
  the solution with PAM-style medoid swaps.  Requires scikit-learn for
  `metrics.normalized_mutual_info_score`.
  """

  def __init__(self, n_clusters=8):
    self.n_clusters = n_clusters
    self.center_ics_ = None  # indices of the chosen medoids after fitting

  def _check_init_args(self):
    # Check n_clusters.
    if (self.n_clusters is None or self.n_clusters <= 0 or
        not isinstance(self.n_clusters, int)):
      raise ValueError('n_clusters has to be nonnegative integer.')

  def loss_augmented_fit(self, feat, y, loss_mult):
    """Fit K-Medoids to the provided data."""
    self._check_init_args()

    # Check that the array is good and attempt to convert it to
    # Numpy array if possible.
    feat = self._check_array(feat)

    # Apply distance metric to get the distance matrix.
    pdists = pairwise_distance_np(feat)

    num_data = feat.shape[0]
    candidate_ids = list(range(num_data))
    candidate_scores = np.zeros(num_data,)
    subset = []

    k = 0
    while k < self.n_clusters:
      candidate_scores = []
      for i in candidate_ids:
        # push i to subset.
        subset.append(i)
        # Facility cost with i included, plus the structured loss term
        # (1 - NMI against the ground-truth labeling).
        marginal_cost = -1.0 * np.sum(np.min(pdists[:, subset], axis=1))
        loss = 1.0 - metrics.normalized_mutual_info_score(
            y, self._get_cluster_ics(pdists, subset))
        candidate_scores.append(marginal_cost + loss_mult * loss)
        # remove i from subset.
        subset.pop()

      # push i_star to subset.
      i_star = candidate_ids[np.argmax(candidate_scores)]
      subset.append(i_star)
      # remove i_star from candidate indices.
      candidate_ids.remove(i_star)

      k += 1

    # Expose labels_ which are the assignments of
    # the training data to clusters.
    self.labels_ = self._get_cluster_ics(pdists, subset)

    # Expose cluster centers, i.e. medoids.
    self.cluster_centers_ = feat.take(subset, axis=0)

    # Expose indices of chosen cluster centers.
    self.center_ics_ = subset

    # Expose the score = -\sum_{i \in V} min_{j \in S} || x_i - x_j ||
    self.score_ = np.float32(-1.0) * self._get_facility_distance(pdists, subset)
    self.score_aug_ = self.score_ + loss_mult * (
        1.0 - metrics.normalized_mutual_info_score(
            y, self._get_cluster_ics(pdists, subset)))
    self.score_aug_ = self.score_aug_.astype(np.float32)

    # Expose the chosen cluster indices.
    self.subset_ = subset
    return self

  def _augmented_update_medoid_ics_in_place(self, pdists, y_gt, cluster_ics,
                                            medoid_ics, loss_mult):
    """One PAM pass: for each cluster, swap in the member that maximizes
    the loss-augmented score, updating `medoid_ics` in place."""
    for cluster_idx in range(self.n_clusters):
      # y_pred = self._get_cluster_ics(D, medoid_ics)
      # Don't prematurely do the assignment step.
      # Do this after we've updated all cluster medoids.
      y_pred = cluster_ics

      if sum(y_pred == cluster_idx) == 0:
        # Cluster is empty.
        continue

      # Score of keeping the current medoid, to compare candidates against.
      curr_score = (
          -1.0 * np.sum(
              pdists[medoid_ics[cluster_idx], y_pred == cluster_idx]) +
          loss_mult * (1.0 - metrics.normalized_mutual_info_score(
              y_gt, y_pred)))

      pdist_in = pdists[y_pred == cluster_idx, :]
      pdist_in = pdist_in[:, y_pred == cluster_idx]

      # Facility part of the score for every in-cluster candidate medoid.
      all_scores_fac = np.sum(-1.0 * pdist_in, axis=1)
      all_scores_loss = []
      for i in range(y_pred.size):
        if y_pred[i] != cluster_idx:
          continue
        # remove this cluster's current centroid
        medoid_ics_i = medoid_ics[:cluster_idx] + medoid_ics[cluster_idx + 1:]
        # add this new candidate to the centroid list
        medoid_ics_i += [i]
        y_pred_i = self._get_cluster_ics(pdists, medoid_ics_i)
        all_scores_loss.append(loss_mult * (
            1.0 - metrics.normalized_mutual_info_score(y_gt, y_pred_i)))

      all_scores = all_scores_fac + all_scores_loss
      max_score_idx = np.argmax(all_scores)
      max_score = all_scores[max_score_idx]

      if max_score > curr_score:
        medoid_ics[cluster_idx] = np.where(
            y_pred == cluster_idx)[0][max_score_idx]

  def pam_augmented_fit(self, feat, y, loss_mult):
    """Greedy initialization followed by up to `pam_max_iter` PAM passes."""
    pam_max_iter = 5
    self._check_init_args()
    feat = self._check_array(feat)
    pdists = pairwise_distance_np(feat)
    self.loss_augmented_fit(feat, y, loss_mult)
    print('PAM -1 (before PAM): score: %f, score_aug: %f' % (
        self.score_, self.score_aug_))
    # Initialize from loss augmented facility location
    subset = self.center_ics_
    for iter_ in range(pam_max_iter):
      # update the cluster assignment
      cluster_ics = self._get_cluster_ics(pdists, subset)
      # update the medoid for each clusters
      self._augmented_update_medoid_ics_in_place(pdists, y, cluster_ics, subset,
                                                 loss_mult)
      self.score_ = np.float32(-1.0) * self._get_facility_distance(
          pdists, subset)
      self.score_aug_ = self.score_ + loss_mult * (
          1.0 - metrics.normalized_mutual_info_score(
              y, self._get_cluster_ics(pdists, subset)))
      self.score_aug_ = self.score_aug_.astype(np.float32)
      print('PAM iter: %d, score: %f, score_aug: %f' % (iter_, self.score_,
                                                        self.score_aug_))

    self.center_ics_ = subset
    self.labels_ = cluster_ics
    return self

  def _check_array(self, feat):
    # Check that the number of clusters is less than or equal to
    # the number of samples
    # NOTE(review): the message wording is inverted — the condition actually
    # requires n_clusters <= number of samples; text kept as-is.
    if self.n_clusters > feat.shape[0]:
      raise ValueError('The number of medoids ' + '({}) '.format(
          self.n_clusters) + 'must be larger than the number ' +
                       'of samples ({})'.format(feat.shape[0]))
    return feat

  def _get_cluster_ics(self, pdists, subset):
    """Returns cluster indices for pdist and current medoid indices."""
    # Assign data points to clusters based on
    # which cluster assignment yields
    # the smallest distance`
    cluster_ics = np.argmin(pdists[subset, :], axis=0)
    return cluster_ics

  def _get_facility_distance(self, pdists, subset):
    # Total distance of every point to its nearest medoid in `subset`.
    return np.sum(np.min(pdists[subset, :], axis=0))
class ClusterLossTest(test.TestCase):
  """Compares cluster_loss with the NumPy reference, with and without PAM."""

  def _genClusters(self, n_samples, n_clusters):
    # Generate standardized Gaussian blobs as an easy clustering problem.
    blobs = datasets.make_blobs(
        n_samples=n_samples, centers=n_clusters)
    embedding, labels = blobs
    embedding = (embedding - embedding.mean(axis=0)) / embedding.std(axis=0)
    embedding = embedding.astype(np.float32)
    return embedding, labels

  def testClusteringLossPAMOff(self):
    if not HAS_SKLEARN:
      return  # the NumPy reference implementation requires scikit-learn

    with self.cached_session():
      margin_multiplier = 10.0
      embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)

      loss_np = compute_cluster_loss_numpy(
          embeddings, labels, margin_multiplier, enable_pam_finetuning=False)
      loss_tf = metric_loss_ops.cluster_loss(
          labels=ops.convert_to_tensor(labels),
          embeddings=ops.convert_to_tensor(embeddings),
          margin_multiplier=margin_multiplier,
          enable_pam_finetuning=False)
      loss_tf = loss_tf.eval()
      self.assertAllClose(loss_np, loss_tf)

  def testClusteringLossPAMOn(self):
    if not HAS_SKLEARN:
      return  # the NumPy reference implementation requires scikit-learn

    with self.cached_session():
      margin_multiplier = 10.0
      embeddings, labels = self._genClusters(n_samples=128, n_clusters=64)

      loss_np = compute_cluster_loss_numpy(
          embeddings, labels, margin_multiplier, enable_pam_finetuning=True)
      loss_tf = metric_loss_ops.cluster_loss(
          labels=ops.convert_to_tensor(labels),
          embeddings=ops.convert_to_tensor(embeddings),
          margin_multiplier=margin_multiplier,
          enable_pam_finetuning=True)
      loss_tf = loss_tf.eval()
      self.assertAllClose(loss_np, loss_tf)
if __name__ == '__main__':
test.main()
| apache-2.0 |
bitemyapp/ggplot | ggplot/geoms/geom_boxplot.py | 12 | 1218 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
from .geom import geom
from ggplot.utils import is_string
from ggplot.utils import is_categorical
class geom_boxplot(geom):
    """Horizontal box-and-whisker plot.

    Requires the ``x`` aesthetic; when ``y`` is also mapped, one box is
    drawn per distinct ``y`` value (the x values are grouped by y),
    otherwise a single box is drawn for all x values.
    """
    DEFAULT_AES = {'y': None, 'color': 'black', 'flier_marker': '+'}
    REQUIRED_AES = {'x'}
    DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity'}

    def __group(self, x, y):
        # Bucket the x values by their paired y value: {y: [x, ...]}.
        out = {}
        for xx, yy in zip(x,y):
            if yy not in out: out[yy] = []
            out[yy].append(xx)
        return out

    def _plot_unit(self, pinfo, ax):
        x = pinfo.pop('x')
        y = pinfo.pop('y')
        color = pinfo.pop('color')
        fliermarker = pinfo.pop('flier_marker')

        if y is not None:
            # Group x by y and plot the groups in sorted y order so the
            # tick labels set below line up with the boxes.
            g = self.__group(x,y)
            l = sorted(g.keys())
            x = [g[k] for k in l]

        q = ax.boxplot(x, vert=False)
        # Apply the color aesthetic to all box components.
        plt.setp(q['boxes'], color=color)
        plt.setp(q['whiskers'], color=color)
        plt.setp(q['fliers'], color=color, marker=fliermarker)

        if l:
            plt.setp(ax, yticklabels=l)
| bsd-2-clause |
QJonny/CyNest | extras/ConnPlotter/ConnPlotter.py | 4 | 80722 | # ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
#
# This file is part of ConnPlotter.
#
# Copyright (C) 2009 Hans Ekkehard Plesser/UMB
#
# ConnPlotter is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# ConnPlotter is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ConnPlotter. If not, see <http://www.gnu.org/licenses/>.
"""
ConnPlotter is a tool to create connectivity pattern tables.
For background on ConnPlotter, please see
Eilen Nordlie and Hans Ekkehard Plesser.
Connection Pattern Tables: A new way to visualize connectivity
in neuronal network models.
Frontiers in Neuroinformatics 3:39 (2010)
doi: 10.3389/neuro.11.039.2009
Example:
# code creating population and connection lists
from ConnPlotter import ConnectionPattern, SynType
# Case A: All connections have the same "synapse_model".
#
# Connections with weight < 0 are classified as excitatory,
# weight > 0 are classified as inhibitory.
# Each sender must make either excitatory or inhibitory connection,
# not both. When computing totals, excit/inhib connections are
# weighted with +-1.
pattern = ConnectionPattern(layerList, connList)
# Case B: All connections have the same "synapse_model", but violate Dale's law
#
# Connections with weight < 0 are classified as excitatory,
# weight > 0 are classified as inhibitory.
# A single sender may have excitatory and inhibitory connections.
# When computing totals, excit/inhib connections are
# weighted with +-1.
pattern = ConnectionPattern(layerList, connList,
synTypes=(((SynType('exc', 1.0, 'b'),
SynType('inh', -1.0, 'r')),)))
# Case C: Synapse models are "AMPA", "NMDA", "GABA_A", "GABA_B".
#
# Connections are plotted by synapse model, with AMPA and NMDA
# on the top row, GABA_A and GABA_B in the bottom row when
# combining by layer. Senders must either have AMPA and NMDA or
# GABA_A and GABA_B synapses, but not both. When computing totals,
# AMPA and NMDA connections are weighted with +1, GABA_A and GABA_B
# with -1.
pattern = ConnectionPattern(layerList, connList)
# Case D: Explicit synapse types.
#
# If your network model uses other synapse types, or you want to use
# other weighting factors when computing totals, or you want different
# colormaps, you must specify synapse type information explicitly for
# ALL synapse models in your network. For each synapse model, you create
# a
#
# SynType(name, tweight, cmap)
#
# object, where "name" is the synapse model name, "tweight" the weight
# to be given to the type when computing totals (usually >0 for excit,
# <0 for inhib synapses), and "cmap" the "colormap": if may be a
# matplotlib.colors.Colormap instance or any valid matplotlib color
# specification; in the latter case, as colormap will be generated
# ranging from white to the given color.
# Synapse types are passed as a tuple of tuples. Synapses in a tuple form
# a group. ConnPlotter assumes that a sender may make synapses with all
# types in a single group, but never synapses with types from different
# groups (If you group by transmitter, this simply reflects Dale's law).
# When connections are aggregated by layer, each group is printed on one
# row.
pattern = ConnectionPattern(layerList, connList, synTypes = \
((SynType('Asyn', 1.0, 'orange'),
SynType('Bsyn', 2.5, 'r'),
SynType('Csyn', 0.5, (1.0, 0.5, 0.0))), # end first group
(SynType('Dsyn', -1.5, matplotlib.pylab.cm.jet),
SynType('Esyn', -3.2, '0.95'))))
# See documentation of class ConnectionPattern for more options.
# plotting the pattern
# show connection kernels for all sender-target pairs and all synapse models
pattern.plot()
# combine synapses of all types for each sender-target pair
# always used red-blue (inhib-excit) color scale
pattern.plot(aggrSyns=True)
# for each pair of sender-target layer pair, show sums for each synapse type
pattern.plot(aggrGroups=True)
# As mode layer, but combine synapse types.
# always used red-blue (inhib-excit) color scale
pattern.plot(aggrSyns=True, aggrGroups=True)
# Show only synases of the selected type(s)
pattern.plot(mode=('AMPA',))
pattern.plot(mode=('AMPA', 'GABA_A'))
# use same color scales for all patches
pattern.plot(globalColors=True)
# manually specify limits for global color scale
pattern.plot(globalColors=True, colorLimits=[0, 2.5])
# save to file(s)
# NB: do not write to PDF directly, this seems to cause artifacts
pattern.plot(file='net.png')
pattern.plot(file=('net.eps','net.png'))
# You can adjust some properties of the figure by changing the
# default values in plotParams.
# Experimentally, you can dump the connection pattern into a LaTeX table
pattern.toLaTeX('pattern.tex', standalone=True)
# Figure layout can be modified by changing the global variable plotParams.
# Please see the documentation for class PlotParams for details.
# Changes 30 June 2010:
# - Singular layers (extent 0x0) are ignored as target layers.
# The reason for this is so that single-generator "layers" can be
# displayed as input.
# Problems:
# - singularity is not made clear visually
# - This messes up the diagonal shading
# - makes no sense to aggregate any longer
"""
__version__ = '$Revision: 546 $'
__date__ = '$Date: 2010-06-30 16:36:33 +0200 (Wed, 30 Jun 2010) $'
__author__ = 'Hans Ekkehard Plesser'
__all__ = ['ConnectionPattern', 'SynType', 'plotParams', 'PlotParams']
# ----------------------------------------------------------------------------
# To do:
# - proper testsuite
# - layers of different sizes not handled properly
# (find biggest layer extent in each direction, then center;
# may run into problems with population label placement)
# - clean up main
# - color bars
# - "bad color" should be configurable
# - fix hack for colormaps import
# - use generators where possible (eg kernels?)
# ----------------------------------------------------------------------------
# The next is a hack that helps me during development (allows run ConnPlotter),
# should find a better solution.
if __name__ == "__main__":
import colormaps as cm
else:
from . import colormaps as cm
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import warnings
# ----------------------------------------------------------------------------
class SynType(object):
    """
    Provide information about how synapse types should be rendered.

    A singly nested list of SynType objects can be passed to the
    ConnectionPattern constructor to specify layout and rendering info.
    """

    def __init__(self, name, relweight, cmap):
        """
        Arguments:
        name       Name of synapse type (string, must be unique)
        relweight  Relative weight of synapse type when aggregating
                   across synapse types. Should be negative for inhibitory
                   connections.
        cmap       Either a matplotlib.colors.Colormap instance or a
                   color specification. In the latter case, the colormap
                   will be built from white to the color given. Thus,
                   the color should be fully saturated. Colormaps should
                   have "set_bad(color='white')".
        """
        self.name = name
        self.relweight = relweight
        # A ready-made colormap is stored as-is; anything else is treated
        # as a matplotlib color spec and expanded into a white-to-color map.
        if isinstance(cmap, mpl.colors.Colormap):
            self.cmap = cmap
        else:
            self.cmap = cm.make_colormap(cmap)
# ----------------------------------------------------------------------------
class PlotParams(object):
    """
    Collects parameters governing plotting.

    Implemented using properties so that assignments are validated;
    derived quantities (z-values) are read-only.
    """

    class Margins(object):
        """Width of outer margins, in mm."""

        def __init__(self):
            """Set default values."""
            self._left = 15.0
            self._right = 10.0
            self._top = 10.0
            self._bottom = 10.0
            self._colbar = 10.0

        @property
        def left(self):
            """Left margin, in mm."""
            return self._left

        @left.setter
        def left(self, l):
            self._left = float(l)

        @property
        def right(self):
            """Right margin, in mm."""
            return self._right

        @right.setter
        def right(self, r):
            self._right = float(r)

        @property
        def top(self):
            """Top margin, in mm."""
            return self._top

        @top.setter
        def top(self, t):
            self._top = float(t)

        @property
        def bottom(self):
            """Bottom margin, in mm."""
            return self._bottom

        @bottom.setter
        def bottom(self, b):
            self._bottom = float(b)

        @property
        def colbar(self):
            """Space reserved for the colorbar legend, in mm."""
            return self._colbar

        @colbar.setter
        def colbar(self, b):
            self._colbar = float(b)

    def __init__(self):
        """Set default values."""
        self._n_kern = 100
        self._patch_size = 20.0  # 20 mm
        self._layer_bg = {'super': '0.9', 'diag': '0.8', 'sub': '0.9'}
        self._layer_font = mpl.font_manager.FontProperties(size='large')
        self._layer_orient = {'sender': 'horizontal', 'target': 'horizontal'}
        self._pop_font = mpl.font_manager.FontProperties(size='small')
        self._pop_orient = {'sender': 'horizontal', 'target': 'horizontal'}
        self._lgd_tick_font = mpl.font_manager.FontProperties(size='x-small')
        self._lgd_title_font = mpl.font_manager.FontProperties(size='xx-small')
        self._lgd_ticks = None
        self._lgd_tick_fmt = None
        self._lgd_location = None
        self._cbwidth = None
        self._cbspace = None
        self._cbheight = None
        self._cboffset = None
        # z-values controlling stacking order of the different axes types
        self._z_layer = 25
        self._z_pop = 50
        self._z_conn = 100
        self.margins = self.Margins()

    def reset(self):
        """
        Reset to default values.
        """
        self.__init__()

    @property
    def n_kern(self):
        """Sample long kernel dimension at N_kernel points."""
        return self._n_kern

    @n_kern.setter
    def n_kern(self, n):
        if n <= 0:
            raise ValueError('n_kern > 0 required')
        self._n_kern = n

    @property
    def patch_size(self):
        """Length of the longest edge of the largest patch, in mm."""
        return self._patch_size

    @patch_size.setter
    def patch_size(self, sz):
        if sz <= 0:
            raise ValueError('patch_size > 0 required')
        self._patch_size = sz

    @property
    def layer_bg(self):
        """
        Dictionary of colors for layer background.

        Entries "super", "diag", "sub". Each entry can be set to any
        valid color specification. If just a single color is given, the
        dict is created by brightening/dimming that color.
        """
        return self._layer_bg

    @layer_bg.setter
    def layer_bg(self, bg):
        if isinstance(bg, dict):
            if set(bg.keys()) != set(('super', 'diag', 'sub')):
                raise ValueError('Background dict must have keys "super", "diag", "sub"')
            for bgc in bg.values():
                if not mpl.colors.is_color_like(bgc):
                    raise ValueError('Entries in background dict must be valid color specifications.')
            self._layer_bg = bg
        elif not mpl.colors.is_color_like(bg):
            raise ValueError('layer_bg must be dict or valid color specification.')
        else:  # is color like
            rgb = mpl.colors.colorConverter.to_rgb(bg)
            # clamp the brightened variant at 1.0 so it remains a valid
            # RGB color even for bright input colors (1.1*c may exceed 1)
            self._layer_bg = {'super': [min(1.0, 1.1 * c) for c in rgb],
                              'diag': rgb,
                              'sub': [0.9 * c for c in rgb]}

    @property
    def layer_font(self):
        """
        Font to use for layer labels.

        Can be set to a matplotlib.font_manager.FontProperties instance.
        """
        return self._layer_font

    @layer_font.setter
    def layer_font(self, font):
        if not isinstance(font, mpl.font_manager.FontProperties):
            raise ValueError('layer_font must be a matplotlib.font_manager.FontProperties instance.')
        self._layer_font = font

    @property
    def layer_orientation(self):
        """
        Orientation of layer labels.

        Dictionary with orientation of sender and target labels. Orientation
        is either 'horizontal', 'vertical', or a value in degrees. When set
        to a single string or number, this value is used for both sender and
        target labels.
        """
        return self._layer_orient

    @layer_orientation.setter
    def layer_orientation(self, orient):
        if isinstance(orient, (str, float, int)):
            tmp = {'sender': orient, 'target': orient}
        elif isinstance(orient, dict):
            # update a copy so an invalid argument cannot leave the stored
            # orientation dict partially modified (the previous code
            # mutated self._layer_orient in place before validating)
            tmp = dict(self._layer_orient)
            tmp.update(orient)
        else:
            raise ValueError('Orientation must be set to dict, string or number.')
        if len(tmp) > 2:
            raise ValueError('Orientation dictionary can only contain keys "sender" and "target".')
        self._layer_orient = tmp

    @property
    def pop_font(self):
        """
        Font to use for population labels.

        Can be set to a matplotlib.font_manager.FontProperties instance.
        """
        return self._pop_font

    @pop_font.setter
    def pop_font(self, font):
        if not isinstance(font, mpl.font_manager.FontProperties):
            raise ValueError('pop_font must be a matplotlib.font_manager.FontProperties instance.')
        self._pop_font = font

    @property
    def pop_orientation(self):
        """
        Orientation of population labels.

        Dictionary with orientation of sender and target labels. Orientation
        is either 'horizontal', 'vertical', or a value in degrees. When set
        to a single string or number, this value is used for both sender and
        target labels.
        """
        return self._pop_orient

    @pop_orientation.setter
    def pop_orientation(self, orient):
        if isinstance(orient, (str, float, int)):
            tmp = {'sender': orient, 'target': orient}
        elif isinstance(orient, dict):
            # update a copy so an invalid argument cannot leave the stored
            # orientation dict partially modified
            tmp = dict(self._pop_orient)
            tmp.update(orient)
        else:
            raise ValueError('Orientation must be set to dict, string or number.')
        if len(tmp) > 2:
            raise ValueError('Orientation dictionary can only contain keys "sender" and "target".')
        self._pop_orient = tmp

    @property
    def legend_tick_font(self):
        """
        FontProperties for legend (colorbar) ticks.
        """
        return self._lgd_tick_font

    @legend_tick_font.setter
    def legend_tick_font(self, font):
        if not isinstance(font, mpl.font_manager.FontProperties):
            raise ValueError('legend_tick_font must be a matplotlib.font_manager.FontProperties instance.')
        self._lgd_tick_font = font

    @property
    def legend_title_font(self):
        """
        FontProperties for legend (colorbar) titles.
        """
        return self._lgd_title_font

    @legend_title_font.setter
    def legend_title_font(self, font):
        if not isinstance(font, mpl.font_manager.FontProperties):
            raise ValueError('legend_title_font must be a matplotlib.font_manager.FontProperties instance.')
        self._lgd_title_font = font

    @property
    def legend_ticks(self):
        """
        Ordered list of values at which legend (colorbar) ticks shall be set.
        """
        return self._lgd_ticks

    @legend_ticks.setter
    def legend_ticks(self, ticks):
        self._lgd_ticks = ticks

    @property
    def legend_tick_format(self):
        """
        C-style format string for legend (colorbar) tick marks.
        """
        return self._lgd_tick_fmt

    @legend_tick_format.setter
    def legend_tick_format(self, tickfmt):
        self._lgd_tick_fmt = tickfmt

    @property
    def legend_location(self):
        """
        If set to 'top', place legend label above colorbar,
        if None, to the left.
        """
        return self._lgd_location

    @legend_location.setter
    def legend_location(self, loc):
        self._lgd_location = loc

    @property
    def cbwidth(self):
        """
        Width of single colorbar, relative to figure width.
        """
        return self._cbwidth

    @cbwidth.setter
    def cbwidth(self, cbw):
        self._cbwidth = cbw

    @property
    def cbheight(self):
        """
        Height of colorbar, relative to margins.colbar
        """
        return self._cbheight

    @cbheight.setter
    def cbheight(self, cbh):
        self._cbheight = cbh

    @property
    def cbspace(self):
        """
        Spacing between colorbars, relative to figure width.
        """
        return self._cbspace

    @cbspace.setter
    def cbspace(self, cbs):
        self._cbspace = cbs

    @property
    def cboffset(self):
        """
        Left offset of colorbar, relative to figure width.
        """
        return self._cboffset

    @cboffset.setter
    def cboffset(self, cbo):
        self._cboffset = cbo

    @property
    def z_layer(self):
        """Z-value for layer label axes."""
        return self._z_layer

    @property
    def z_pop(self):
        """Z-value for population label axes."""
        return self._z_pop

    @property
    def z_conn(self):
        """Z-value for connection kernel axes."""
        return self._z_conn
# ----------------------------------------------------------------------------
# plotting settings, default values
# Module-level singleton; mutate its attributes (e.g. plotParams.n_kern)
# before plotting to customize figures.
plotParams = PlotParams()
# ----------------------------------------------------------------------------
class ConnectionPattern(object):
"""
Connection pattern representation for plotting.
When a ConnectionPattern is instantiated, all connection kernels
are pre-computed. They can later be plotted in various forms by
calling the plot() method.
The constructor requires layer and connection lists:
ConnectionPattern(layerList, connList, synTypes, **kwargs)
The layerList is used to:
- determine the size of patches
- determine the block structure
All other information is taken from the connList. Information
about synapses is inferred from the connList.
The following keyword arguments can also be given:
poporder : Population order. A dictionary mapping population names
to numbers; populations will be sorted in diagram in order
of increasing numbers. Otherwise, they are sorted
alphabetically.
intensity: 'wp' - use weight * probability (default)
'p' - use probability alone
'tcd' - use total charge deposited * probability
requires mList and Vmem; per v 0.7 only supported
for ht_neuron.
mList : model list; required for 'tcd'
Vmem : membrane potential; required for 'tcd'
"""
# ------------------------------------------------------------------------
class _LayerProps(object):
"""
Information about layer.
"""
def __init__(self, name, extent):
"""
name : name of layer
extent: spatial extent of the layer
"""
self.name = name
self.ext = extent
self.singular = extent[0] == 0.0 and extent[1] == 0.0
# ------------------------------------------------------------------------
class _SynProps(object):
"""
Information on how to plot patches for a synapse type.
"""
def __init__(self, row, col, tweight, cmap, idx):
"""
row, col: Position of synapse in grid of synapse patches, begins at 0,0
tweight : weight to apply when adding kernels for different synapses
cmap : colormap for synapse type (matplotlib.colors.Colormap instance)
idx : linear index, used to order colorbars in figure
"""
self.r, self.c = row, col
self.tw = tweight
self.cmap = cmap
self.index = idx
# --------------------------------------------------------------------
class _PlotKern(object):
"""
Representing object ready for plotting.
"""
def __init__(self, sl, sn, tl, tn, syn, kern):
"""
sl : sender layer
sn : sender neuron/population
tl : target layer
tn : target neuron/population
syn : synapse model
kern: kernel values (numpy masked array)
All arguments but kern are strings.
"""
self.sl = sl
self.sn = sn
self.tl = tl
self.tn = tn
self.syn = syn
self.kern = kern
# ------------------------------------------------------------------------
class _Connection(object):
def __init__(self, conninfo, layers, synapses, intensity, tcd, Vmem):
"""
Arguments:
conninfo: list of connection info entries: (sender, target, conn_dict)
layers : list of _LayerProps objects
synapses: list of _SynProps objects
intensity: 'wp', 'p', 'tcd'
tcd : tcd object
Vmem : reference membrane potential for tcd calculations
"""
self._intensity = intensity
# get source and target layer
self.slayer, self.tlayer = conninfo[:2]
lnames = [l.name for l in layers]
if not self.slayer in lnames:
raise Exception('Unknown source layer "%s".' % self.slayer)
if not self.tlayer in lnames:
raise Exception('Unknown target layer "%s".' % self.tlayer)
# if target layer is singular (extent==(0,0)) we do not create a full object
self.singular = False
for l in layers:
if l.name == self.tlayer and l.singular:
self.singular = True
return
# see if we connect to/from specific neuron types
cdict = conninfo[2]
if 'sources' in cdict:
if cdict['sources'].keys() == ['model']:
self.snrn = cdict['sources']['model']
else:
raise ValueError('Can only handle sources in form {"model": ...}')
else:
self.snrn = None
if 'targets' in cdict:
if cdict['targets'].keys() == ['model']:
self.tnrn = cdict['targets']['model']
else:
raise ValueError('Can only handle targets in form {"model": ...}')
else:
self.tnrn = None
# now get (mean) weight, we need this if we classify
# connections by sign of weight only
try:
self._mean_wght = _weighteval(cdict['weights'])
except:
raise ValueError('No or corrupt weight information.')
# synapse model
if sorted(synapses.keys()) == ['exc', 'inh']:
# implicit synapse type, we ignore value of
# 'synapse_model', it is for use by NEST only
if self._mean_wght >= 0:
self.synmodel = 'exc'
else:
self.synmodel = 'inh'
else:
try:
self.synmodel = cdict['synapse_model']
if not self.synmodel in synapses:
raise Exception('Unknown synapse model "%s".'
% self.synmodel)
except:
raise Exception('Explicit synapse model info required.')
# store information about connection
try:
self._mask = cdict['mask']
self._kern = cdict['kernel']
self._wght = cdict['weights']
# next line presumes only one layer name will match
self._textent = [tl.ext for tl in layers
if tl.name==self.tlayer][0]
if intensity == 'tcd':
self._tcd = tcd(self.synmodel, self.tnrn, Vmem)
else:
self._tcd = None
except:
raise Exception('Corrupt connection dictionary')
# prepare for lazy evaluation
self._kernel = None
# --------------------------------------------------------------------
@property
def keyval(self):
"""
Return key and _Connection as tuple.
Useful to create dictionary via list comprehension.
"""
if self.singular:
return (None, self)
else:
return ((self.slayer,self.snrn,self.tlayer,self.tnrn,self.synmodel),
self)
# --------------------------------------------------------------------
@property
def kernval(self):
"""Kernel value, as masked array."""
if self._kernel is None:
self._kernel = _evalkernel(self._mask, self._kern, self._mean_wght,
self._textent, self._intensity,
self._tcd)
return self._kernel
# --------------------------------------------------------------------
@property
def mask(self):
"""Dictionary describing the mask."""
return self._mask
# --------------------------------------------------------------------
@property
def kernel(self):
"""Dictionary describing the kernel."""
return self._kern
# --------------------------------------------------------------------
@property
def weight(self):
"""Dictionary describing weight distribution."""
return self._wght
# --------------------------------------------------------------------
def matches(self, sl=None, sn=None, tl=None, tn=None, syn=None):
"""
Return True if all non-None arguments match.
Arguments:
sl : sender layer
sn : sender neuron type
tl : target layer
tn : target neuron type
syn: synapse type
"""
return (sl is None or sl == self.slayer) \
and (sn is None or sn == self.snrn) \
and (tl is None or tl == self.tlayer) \
and (tn is None or tn == self.tnrn) \
and (syn is None or syn == self.synmodel)
# ----------------------------------------------------------------------------
class _Patch(object):
"""
Represents a patch, i.e., an axes that will actually contain an
imshow graphic of a connection kernel.
The patch object contains the physical coordinates of the patch,
as well as a reference to the actual Axes object once it is created.
Also contains strings to be used as sender/target labels.
Everything is based on a coordinate system looking from the top left
corner down.
"""
# --------------------------------------------------------------------
def __init__(self, left, top, row, col, width, height,
slabel=None, tlabel=None, parent=None):
"""
Arguments:
left, top : Location of top-left corner
row, col : row, column location in parent block
width, height : Width and height of patch
slabel, tlabel: Values for sender/target label
parent : _Block to which _Patch/_Block belongs
"""
self.l, self.t, self.r, self.c, self.w, self.h = left, top, row, col, width, height
self.slbl, self.tlbl = slabel, tlabel
self.ax = None
self._parent = parent
# --------------------------------------------------------------------
def _update_size(self, new_lr):
"""Update patch size by inspecting all children."""
if new_lr[0] < self.l:
raise ValueError("new_lr[0] = %f < l = %f" % (new_lr[0], self.l))
if new_lr[1] < self.t:
raise ValueError("new_lr[1] = %f < t = %f" % (new_lr[1], self.t))
self.w, self.h = new_lr[0]-self.l, new_lr[1]-self.t
if self._parent:
self._parent._update_size(new_lr)
# --------------------------------------------------------------------
@property
def tl(self):
"""Top left corner of the patch."""
return (self.l, self.t)
# --------------------------------------------------------------------
@property
def lr(self):
"""Lower right corner of the patch."""
return (self.l+self.w, self.t+self.h)
# --------------------------------------------------------------------
@property
def l_patches(self):
"""Left edge of leftmost _Patch in _Block."""
if isinstance(self, ConnectionPattern._Block):
return min([e.l_patches for e in _flattened(self.elements)])
else:
return self.l
# --------------------------------------------------------------------
@property
def t_patches(self):
"""Top edge of topmost _Patch in _Block."""
if isinstance(self, ConnectionPattern._Block):
return min([e.t_patches for e in _flattened(self.elements)])
else:
return self.t
# --------------------------------------------------------------------
@property
def r_patches(self):
"""Right edge of rightmost _Patch in _Block."""
if isinstance(self, ConnectionPattern._Block):
return max([e.r_patches for e in _flattened(self.elements)])
else:
return self.l + self.w
# --------------------------------------------------------------------
@property
def b_patches(self):
"""Bottom edge of lowest _Patch in _Block."""
if isinstance(self, ConnectionPattern._Block):
return max([e.b_patches for e in _flattened(self.elements)])
else:
return self.t + self.h
# --------------------------------------------------------------------
@property
def location(self):
if self.r < self.c:
return 'super'
elif self.r == self.c:
return 'diag'
else:
return 'sub'
# ----------------------------------------------------------------------------
    class _Block(_Patch):
        """
        Represents a block of patches.

        A block is initialized with its top left corner and is then built
        row-wise downward and column-wise to the right. Rows are added by

            block.newRow(2.0, 1.5)

        where 2.0 is the space between rows, 1.5 the space between the
        first row. Elements are added to a row by

            el = block.newElement(1.0, 0.6, 's', 't')
            el = block.newElement(1.0, 0.6, 's', 't', size=[2.0, 3.0])

        The first example adds a new _Block to the row. 1.0 is the space between
        blocks, 0.6 the space before the first block in a row. 's' and 't' are
        stored as slbl and tlbl (optional). If size is given, a _Patch with
        the given size is created. _Patch is atomic. newElement() returns the
        _Block or _Patch created.
        """
        # ------------------------------------------------------------------------
        def __init__(self, left, top, row, col, slabel=None, tlabel=None, parent=None):
            # a _Block starts out as a zero-size _Patch and grows as
            # elements are added via _update_size()
            ConnectionPattern._Patch.__init__(self, left, top, row, col, 0, 0, slabel, tlabel, parent)
            self.elements = []
            self._row_top = None   # top of current row
            self._row = 0   # index of row currently being filled
            self._col = 0   # index of last column added in current row
        # ------------------------------------------------------------------------
        def newRow(self, dy=0.0, dynew=0.0):
            """
            Open new row of elements.

            Arguments:
            dy   : vertical skip before new row
            dynew: vertical skip if new row is first row
            """
            if self.elements:
                # top of row is bottom of block so far + dy
                self._row_top = self.lr[1] + dy
            else:
                # place relative to top edge of parent
                self._row_top = self.tl[1] + dynew
            self._row += 1
            self._col = 0
            self.elements.append([])
        # ------------------------------------------------------------------------
        def newElement(self, dx=0.0, dxnew=0.0, slabel=None, tlabel=None,
                       size=None):
            """
            Append new element to last row.

            Creates _Block instance if size is not given, otherwise _Patch.

            Arguments:
            dx    : horizontal skip before new element
            dxnew : horizontal skip if new element is first
            slabel: sender label (on y-axis)
            tlabel: target label (on x-axis)
            size  : size of _Patch to create

            Returns:
            Created _Block or _Patch.
            """
            # a row must have been opened via newRow() first
            assert(self.elements)
            if self.elements[-1]:
                # left edge is right edge of block so far + dx
                col_left = self.lr[0] + dx
            else:
                # place relative to left edge of parent
                col_left = self.tl[0] + dxnew
            self._col += 1
            if not size is None:
                elem = ConnectionPattern._Patch(col_left, self._row_top, self._row, self._col,
                                                size[0], size[1], slabel, tlabel, self)
            else:
                elem = ConnectionPattern._Block(col_left, self._row_top, self._row, self._col,
                                                slabel, tlabel, self)
            self.elements[-1].append(elem)
            # adding an element may enlarge this block (and its parents)
            self._update_size(elem.lr)
            return elem
        # ------------------------------------------------------------------------
        def addMargin(self, rmarg=0.0, bmarg=0.0):
            """Extend block by margin to right and bottom."""
            if rmarg < 0.0: raise ValueError('rmarg must not be negative!')
            if bmarg < 0.0: raise ValueError('bmarg must not be negative!')
            lr = self.lr
            self._update_size((lr[0]+rmarg, lr[1]+bmarg))
# ----------------------------------------------------------------------------
def _prepareAxes(self, mode, showLegend):
"""
Prepare information for all axes, but do not create the actual axes yet.
mode: one of 'detailed', 'by layer', 'totals'
"""
# parameters for figure, all quantities are in mm
patchmax = plotParams.patch_size # length of largest patch patch dimension
# actual parameters scaled from default patchmax = 20mm
lmargin = plotParams.margins.left
tmargin = plotParams.margins.top
rmargin = plotParams.margins.right
bmargin = plotParams.margins.bottom
cbmargin= plotParams.margins.colbar
blksep = 3./20. * patchmax # distance between blocks
popsep = 2./20. * patchmax # distance between populations
synsep = 0.5/20.* patchmax # distance between synapse types
# find maximal extents of individual patches, horizontal and vertical
maxext = max(_flattened([l.ext for l in self._layers]))
patchscale = patchmax / float(maxext) # determines patch size
# obtain number of synaptic patches per population pair
# maximum column across all synapse types, same for rows
nsyncols = max([s.c for s in self._synAttr.values()]) + 1
nsynrows = max([s.r for s in self._synAttr.values()]) + 1
# dictionary mapping into patch-axes, to they can be found later
self._patchTable = {}
# set to store all created patches to avoid multiple
# creation of patches at same location
axset = set()
# create entire setup, top-down
self._axes = self._Block(lmargin, tmargin, 1, 1)
for sl in self._layers:
# get sorted list of populations for sender layer
spops = sorted([p[1] for p in self._pops if p[0] == sl.name],
key=lambda pn: self._poporder[pn])
self._axes.newRow(blksep, 0.0)
for tl in self._layers:
# ignore singular target layers
if tl.singular:
continue
# get sorted list of populations for target layer
tpops = sorted([p[1] for p in self._pops if p[0] == tl.name],
key=lambda pn: self._poporder[pn])
# compute size for patches
patchsize = patchscale * np.array(tl.ext)
block = self._axes.newElement(blksep, 0.0, sl.name, tl.name)
if mode == 'totals':
# single patch
block.newRow(popsep, popsep/2.)
p = block.newElement(popsep, popsep/2., size=patchsize)
self._patchTable[(sl.name, None, tl.name, None, None)] = p
elif mode == 'layer':
# We loop over all rows and columns in the synapse patch grid.
# For each (r,c), we find the pertaining synapse name by reverse
# lookup in the _synAttr dictionary. This is inefficient, but
# should not be too costly overall. But we must create the
# patches in the order they are placed.
# NB: We must create also those block.newElement() that are not
# registered later, since block would otherwise not skip
# over the unused location.
for r in xrange(nsynrows):
block.newRow(synsep, popsep/2.)
for c in xrange(nsyncols):
p = block.newElement(synsep, popsep/2., size=patchsize)
smod = [k for k,s in self._synAttr.iteritems()
if s.r == r and s.c == c]
if smod:
assert(len(smod)==1)
self._patchTable[(sl.name,None,tl.name,None,smod[0])] = p
elif mode == 'population':
# one patch per population pair
for sp in spops:
block.newRow(popsep, popsep/2.)
for tp in tpops:
pblk = block.newElement(popsep, popsep/2., sp, tp)
pblk.newRow(synsep, synsep/2.)
self._patchTable[(sl.name,sp,tl.name,tp,None)] = \
pblk.newElement(synsep, blksep/2., size=patchsize)
else:
# detailed presentation of all pops
for sp in spops:
block.newRow(popsep, popsep/2.)
for tp in tpops:
pblk = block.newElement(popsep, popsep/2., sp, tp)
pblk.newRow(synsep, synsep/2.)
# Find all connections with matching properties
# all information we need here is synapse model.
# We store this in a dictionary mapping synapse
# patch column to synapse model, for use below.
syns = dict([(self._synAttr[c.synmodel].c, c.synmodel)
for c in _flattened(self._cTable.values())
if c.matches(sl.name, sp, tl.name, tp)])
# create all synapse patches
for n in xrange(nsyncols):
# Do not duplicate existing axes.
if (sl.name,sp,tl.name,tp,n) in axset:
continue
# Create patch. We must create also such patches
# that do not have synapses, since spacing would
# go wrong otherwise.
p = pblk.newElement(synsep, 0.0, size=patchsize)
# if patch represents existing synapse, register
if n in syns:
self._patchTable[(sl.name,sp,tl.name,tp,syns[n])] = p
block.addMargin(popsep/2., popsep/2.)
self._axes.addMargin(rmargin, bmargin)
if showLegend:
self._axes.addMargin(0, cbmargin) # add color bar at bottom
figwidth = self._axes.lr[0] - self._axes.tl[0] - rmargin # keep right marg out of calc
if mode == 'totals' or mode == 'population':
# single patch at right edge, 20% of figure
if plotParams.cbwidth:
lwidth = plotParams.cbwidth * figwidth
else:
lwidth = 0.2 * figwidth
if lwidth > 100.0: # colorbar should not be wider than 10cm
lwidth = 100.0
lheight = plotParams.cbheight*cbmargin if plotParams.cbheight else 0.3*cbmargin
if plotParams.legend_location is None:
cblift = 0.9 * cbmargin
else:
cblift = 0.7 * cbmargin
self._cbPatches = self._Patch(self._axes.tl[0],
self._axes.lr[1]- cblift,
None, None,
lwidth,
lheight)
else:
# one patch per synapse type, 20% of figure or less
# we need to get the synapse names in ascending order of synapse indices
snames = [s[0] for s in
sorted([(k,v) for k,v in self._synAttr.iteritems()],
key=lambda kv: kv[1].index)
]
snum = len(snames)
if plotParams.cbwidth:
lwidth = plotParams.cbwidth * figwidth
if plotParams.cbspace:
lstep = plotParams.cbspace * figwidth
else:
lstep = 0.5 * lwidth
else:
if snum < 5:
lwidth = 0.15 * figwidth
lstep = 0.1 * figwidth
else:
lwidth = figwidth / (snum + 1.0)
lstep = (figwidth - snum*lwidth) / (snum - 1.0)
if lwidth > 100.0: # colorbar should not be wider than 10cm
lwidth = 100.0
lstep = 30.0
lheight = plotParams.cbheight*cbmargin if plotParams.cbheight else 0.3*cbmargin
if plotParams.cboffset is not None:
offset = plotParams.cboffset
else:
offset = lstep
if plotParams.legend_location is None:
cblift = 0.9 * cbmargin
else:
cblift = 0.7 * cbmargin
self._cbPatches = {}
for j in xrange(snum):
self._cbPatches[snames[j]] = \
self._Patch(self._axes.tl[0] + offset + j * (lstep + lwidth),
self._axes.lr[1] - cblift,
None, None,
lwidth,
lheight)
# ----------------------------------------------------------------------------
def _scaledBox(self, p):
"""Scaled axes rectangle for patch, reverses y-direction."""
xsc, ysc = self._axes.lr
return self._figscale * np.array([p.l/xsc, 1-(p.t+p.h)/ysc, p.w/xsc, p.h/ysc])
# ----------------------------------------------------------------------------
def _scaledBoxNR(self, p):
"""Scaled axes rectangle for patch, does not reverse y-direction."""
xsc, ysc = self._axes.lr
return self._figscale * np.array([p.l/xsc, p.t/ysc, p.w/xsc, p.h/ysc])
# ----------------------------------------------------------------------------
    def _configSynapses(self, cList, synTypes):
        """
        Configure synapse information based on connections and user info.

        Builds self._synAttr, mapping synapse-type name to _SynProps, either
        from the user-supplied synTypes or from defaults inferred from the
        synapse models and weight signs found in cList.
        """
        # compile information on synapse types and weights
        synnames = set(c[2]['synapse_model'] for c in cList)
        synweights = set(_weighteval(c[2]['weights']) for c in cList)
        # set up synTypes for all pre-defined cases
        if synTypes:
            # check if there is info for all synapse types
            stnames = _flattened([[s.name for s in r] for r in synTypes])
            if len(stnames) != len(set(stnames)):
                raise ValueError('Names of synapse types in synTypes must be unique!')
            # NOTE(review): with exactly one synapse model not covered by
            # synTypes this check does not fire — presumably intentional
            # (single-type case is classified by weight sign); confirm.
            if len(synnames) > 1 and not synnames.issubset(set(stnames)):
                raise ValueError('synTypes must provide information about all synapse types.')
        elif len(synnames) == 1:
            # only one synapse type used
            if min(synweights) >= 0:
                # all weights positive
                synTypes = ((SynType('exc', 1.0, 'red'),),)
            elif max(synweights) <= 0:
                # all weights negative
                synTypes = ((SynType('inh', -1.0, 'blue'),),)
            else:
                # positive and negative weights, assume Dale holds
                synTypes = ((SynType('exc', 1.0, 'red'),),
                            (SynType('inh', -1.0, 'blue' ),))
        elif synnames == set(['AMPA', 'GABA_A']):
            # only AMPA and GABA_A
            synTypes = ((SynType('AMPA' , 1.0, 'red'),),
                        (SynType('GABA_A', -1.0, 'blue' ),))
        elif synnames.issubset(set(['AMPA','NMDA','GABA_A','GABA_B'])):
            synTypes = ((SynType('AMPA' , 1.0, 'red' ),
                         SynType('NMDA' , 1.0, 'orange' ),),
                        (SynType('GABA_A', -1.0, 'blue' ),
                         SynType('GABA_B', -1.0, 'purple' ),))
        else:
            raise ValueError('Connection list contains unknown synapse models; synTypes required.')
        # now build _synAttr by assigning blocks to rows:
        # each inner group of synTypes becomes one row of patches,
        # ctr provides the linear index used to order colorbars
        self._synAttr = {}
        row = 0
        ctr = 0
        for sgroup in synTypes:
            col = 0
            for stype in sgroup:
                self._synAttr[stype.name] = self._SynProps(row, col, stype.relweight,
                                                           stype.cmap, ctr)
                col += 1
                ctr += 1
            row += 1
# ----------------------------------------------------------------------------
def __init__(self, lList, cList, synTypes=None, intensity='wp',
mList=None, Vmem=None, poporder=None):
"""
lList : layer list
cList : connection list
synTypes : nested list of synapse types
intensity: 'wp' - weight * probability
'p' - probability
'tcd' - |total charge deposited| * probability
requires mList; currently only for ht_model
proper results only if Vmem within reversal potentials
mList : model list; only needed with 'tcd'
Vmem : reference membrane potential for 'tcd'
poporder : dictionary mapping population names to numbers; populations
will be sorted in diagram in order of increasing numbers.
"""
# extract layers to dict mapping name to extent
self._layers = [self._LayerProps(l[0], l[1]['extent']) for l in lList]
# ensure layer names are unique
lnames = [l.name for l in self._layers]
if len(lnames) != len(set(lnames)):
raise ValueError('Layer names must be unique.')
# set up synapse attributes
self._configSynapses(cList, synTypes)
# if tcd mode, build tcd representation
if intensity != 'tcd':
tcd = None
else:
assert(mList)
import tcd_nest
tcd = tcd_nest.TCD(mList)
# Build internal representation of connections.
# This representation contains one entry for each sender pop, target pop,
# synapse type tuple. Creating the connection object implies computation
# of the kernel.
# Several connection may agree in all properties, these need to be
# added here. Therefore, we need to build iteratively and store
# everything in a dictionary, so we can find early instances.
self._cTable = {}
for conn in cList:
key, val = self._Connection(conn, self._layers, self._synAttr,
intensity, tcd, Vmem).keyval
if key:
if key in self._cTable:
self._cTable[key].append(val)
else:
self._cTable[key] = [val]
# number of layers
self._nlyr = len(self._layers)
# compile list of populations, list(set()) makes list unique
self._pops = list(set(_flattened([[(c.slayer, c.snrn), (c.tlayer, c.tnrn)] \
for c in _flattened(self._cTable.values())])))
self._npop = len(self._pops)
# store population ordering; if not given, use alphabetical ordering
# also add any missing populations alphabetically at end
# layers are ignored
# create alphabetically sorted list of unique population names
popnames = sorted(list(set([p[1] for p in self._pops])))
if poporder:
self._poporder = poporder
next = max(self._poporder.values()) + 1 # next free sorting index
else:
self._poporder = {}
next = 0
for pname in popnames:
if pname not in self._poporder:
self._poporder[pname] = next
next += 1
# compile list of synapse types
self._synTypes = list(set([c.synmodel for c in _flattened(self._cTable.values())]))
# ----------------------------------------------------------------------------
def plot(self, aggrGroups=False, aggrSyns=False, globalColors=False,
         colorLimits=None, showLegend=True,
         selectSyns=None, file=None, fixedWidth=None):
    """
    Plot connection pattern.

    By default, connections between any pair of populations
    are plotted on the screen, with separate color scales for
    all patches.

    Arguments:
    aggrGroups   If True, aggregate projections with the same synapse type
                 and the same source and target groups (default: False)
    aggrSyns     If True, aggregate projections with the same synapse model
                 (default: False)
    globalColors If True, use global color scale, otherwise local (default: False)
    colorLimits  If given, must be two element vector for lower and upper limits
                 of color scale. Implies globalColors (default: None)
    showLegend   If True, show legend below CPT (default: True).
    selectSyns   If tuple of synapse models, show only connections of the
                 given types. Cannot be combined with aggregation.
    file         If given, save plot to given file name; file may also be a
                 tuple of file names, the figure will then be saved to all
                 files. This may be useful if you want to save the same figure
                 in several formats. You should not save to PDF directly, this
                 may lead to artefacts; rather save to PS or EPS, then convert.
    fixedWidth   Figure will be scaled to this width in mm by changing patch
                 size.

    Returns:
    kern_min, kern_max   Minimal and maximal values of kernels, with
                         kern_min <= 0, kern_max >= 0.

    Output:
    figure created
    """
    # translate new to old parameter names (per v 0.5)
    normalize = globalColors
    if colorLimits:
        normalize = True
    if selectSyns:
        # BUG FIX: original code tested the undefined name 'aggrPops'
        # (NameError); the parameter is called 'aggrGroups'.
        if aggrGroups or aggrSyns:
            raise ValueError('selectSyns cannot be combined with aggregation.')
        selected = selectSyns
        mode = 'select'
    elif aggrGroups and aggrSyns:
        mode = 'totals'
    elif aggrGroups and not aggrSyns:
        mode = 'layer'
    elif aggrSyns and not aggrGroups:
        mode = 'population'
    else:
        mode = None

    if mode == 'layer':
        # reduce to dimensions sender layer, target layer, synapse type
        # add all kernels agreeing on these three attributes
        plotKerns = []
        for slayer in self._layers:
            for tlayer in self._layers:
                for synmodel in self._synTypes:
                    kerns = [c.kernval for c in _flattened(self._cTable.values())
                             if c.matches(sl=slayer.name, tl=tlayer.name, syn=synmodel)]
                    if len(kerns) > 0:
                        plotKerns.append(self._PlotKern(slayer.name, None, tlayer.name, None,
                                                        synmodel, _addKernels(kerns)))
    elif mode == 'population':
        # reduce to dimensions sender pop, target pop
        # add all kernels, weighting according to synapse type
        plotKerns = []
        for spop in self._pops:
            for tpop in self._pops:
                kerns = [self._synAttr[c.synmodel].tw * c.kernval
                         for c in _flattened(self._cTable.values())
                         if c.matches(sl=spop[0], sn=spop[1], tl=tpop[0], tn=tpop[1])]
                if len(kerns) > 0:
                    plotKerns.append(self._PlotKern(spop[0], spop[1], tpop[0], tpop[1],
                                                    None, _addKernels(kerns)))
    elif mode == 'totals':
        # reduce to dimensions sender layer, target layer
        # add all kernels, weighting according to synapse type
        plotKerns = []
        for slayer in self._layers:
            for tlayer in self._layers:
                kerns = [self._synAttr[c.synmodel].tw * c.kernval
                         for c in _flattened(self._cTable.values())
                         if c.matches(sl=slayer.name, tl=tlayer.name)]
                if len(kerns) > 0:
                    plotKerns.append(self._PlotKern(slayer.name, None, tlayer.name, None,
                                                    None, _addKernels(kerns)))
    elif mode == 'select':
        # copy only those kernels that have the requested synapse type,
        # no dimension reduction
        # nb: we need to sum all kernels in the list for a set of attributes
        plotKerns = [self._PlotKern(clist[0].slayer, clist[0].snrn, clist[0].tlayer,
                                    clist[0].tnrn, clist[0].synmodel,
                                    _addKernels([c.kernval for c in clist]))
                     for clist in self._cTable.values() if clist[0].synmodel in selected]
    else:
        # copy all
        # nb: we need to sum all kernels in the list for a set of attributes
        plotKerns = [self._PlotKern(clist[0].slayer, clist[0].snrn, clist[0].tlayer,
                                    clist[0].tnrn, clist[0].synmodel,
                                    _addKernels([c.kernval for c in clist]))
                     for clist in self._cTable.values()]

    self._prepareAxes(mode, showLegend)

    if fixedWidth:
        margs = plotParams.margins.left + plotParams.margins.right
        if fixedWidth <= margs:
            raise ValueError('Requested width must be less than width of margins (%g mm)'
                             % margs)
        currWidth = self._axes.lr[0]
        currPatchMax = plotParams.patch_size  # store
        # compute required patch size
        plotParams.patch_size = (fixedWidth - margs) / (currWidth - margs) * currPatchMax
        # build new axes
        del self._axes
        self._prepareAxes(mode, showLegend)
        # restore patch size
        plotParams.patch_size = currPatchMax

    # create figure with desired size
    fsize = np.array(self._axes.lr) / 25.4  # convert mm to inches
    f = plt.figure(figsize=fsize, facecolor='w')

    # size will be rounded according to DPI setting, adjust fsize
    dpi = f.get_dpi()
    fsize = np.floor(fsize * dpi) / dpi

    # check that we got the correct size
    actsize = np.array([f.get_figwidth(), f.get_figheight()], dtype=float)
    if all(actsize == fsize):
        self._figscale = 1.0  # no scaling
    else:
        warnings.warn("""
WARNING: Figure shrunk on screen!
    The figure is shrunk to fit onto the screen.
    Please specify a different backend using the -d
    option to obtain full-size figures. Your current
    backend is: %s
""" % mpl.get_backend())
        plt.close(f)
        # determine scale: most shrunk dimension
        self._figscale = np.min(actsize / fsize)
        # create shrunk on-screen figure
        f = plt.figure(figsize=self._figscale * fsize, facecolor='w')
        # just ensure all is well now
        actsize = np.array([f.get_figwidth(), f.get_figheight()], dtype=float)

    # add decoration: one background axes per layer block
    for block in _flattened(self._axes.elements):
        # NOTE(review): 'axisbg' was removed in matplotlib 2.2 in favor of
        # 'facecolor' — confirm against the matplotlib version in use.
        ax = f.add_axes(self._scaledBox(block),
                        axisbg=plotParams.layer_bg[block.location], xticks=[], yticks=[],
                        zorder=plotParams.z_layer)
        if hasattr(ax, 'frame'):
            ax.frame.set_visible(False)
        else:
            for sp in ax.spines.values():
                sp.set_color('none')  # turn off axis lines, make room for frame edge
        if block.l <= self._axes.l_patches and block.slbl:
            ax.set_ylabel(block.slbl,
                          rotation=plotParams.layer_orientation['sender'],
                          fontproperties=plotParams.layer_font)
        if block.t <= self._axes.t_patches and block.tlbl:
            ax.set_xlabel(block.tlbl,
                          rotation=plotParams.layer_orientation['target'],
                          fontproperties=plotParams.layer_font)
            ax.xaxis.set_label_position('top')

        # inner blocks for population labels
        if not mode in ('totals', 'layer'):
            for pb in _flattened(block.elements):
                if not isinstance(pb, self._Block):
                    continue  # should not happen
                ax = f.add_axes(self._scaledBox(pb),
                                axisbg='none', xticks=[], yticks=[],
                                zorder=plotParams.z_pop)
                if hasattr(ax, 'frame'):
                    ax.frame.set_visible(False)
                else:
                    for sp in ax.spines.values():
                        sp.set_color('none')  # turn off axis lines, make room for frame edge
                if pb.l + pb.w >= self._axes.r_patches and pb.slbl:
                    ax.set_ylabel(pb.slbl,
                                  rotation=plotParams.pop_orientation['sender'],
                                  fontproperties=plotParams.pop_font)
                    ax.yaxis.set_label_position('right')
                if pb.t + pb.h >= self._axes.b_patches and pb.tlbl:
                    ax.set_xlabel(pb.tlbl,
                                  rotation=plotParams.pop_orientation['target'],
                                  fontproperties=plotParams.pop_font)

    # determine minimum and maximum values across all kernels,
    # but set min <= 0, max >= 0
    kern_max = max(0.0, max([np.max(kern.kern) for kern in plotKerns]))
    kern_min = min(0.0, min([np.min(kern.kern) for kern in plotKerns]))

    # determine color limits for plots
    if colorLimits:
        c_min, c_max = colorLimits  # explicit values
    else:
        # default values for color limits
        # always 0 as lower limit so anything > 0 is non-white,
        # except when totals or populations
        c_min = None if mode in ('totals', 'population') else 0.0
        c_max = None  # use patch maximum as upper limit
        if normalize:
            # use overall maximum, at least 0
            c_max = kern_max
            if aggrSyns:
                # use overall minimum, if negative, otherwise 0
                c_min = kern_min
                # for c_max, use the larger of the two absolute values
                c_max = kern_max
                # if c_min is non-zero, use same color scale for neg values
                if c_min < 0:
                    c_min = -c_max

    # Initialize dict storing sample patches for each synapse type for use
    # in creating color bars. We will store the last patch of any given
    # synapse type for reference. When aggrSyns, we have only one patch type
    # and store that.
    if not aggrSyns:
        samplePatches = dict([(sname, None) for sname in self._synAttr.keys()])
    else:
        # only single type of patches
        samplePatches = None

    for kern in plotKerns:
        p = self._patchTable[(kern.sl, kern.sn, kern.tl, kern.tn, kern.syn)]
        p.ax = f.add_axes(self._scaledBox(p), aspect='equal',
                          xticks=[], yticks=[], zorder=plotParams.z_conn)
        p.ax.patch.set_edgecolor('none')
        if hasattr(p.ax, 'frame'):
            p.ax.frame.set_visible(False)
        else:
            for sp in p.ax.spines.values():
                sp.set_color('none')  # turn off axis lines, make room for frame edge
        if not aggrSyns:
            # we have synapse information -> not totals, all vals positive
            assert (kern.syn)
            assert (np.min(kern.kern) >= 0.0)
            # we may overwrite here, but this does not matter, we only need
            # some reference patch
            samplePatches[kern.syn] = p.ax.imshow(kern.kern,
                                                  vmin=c_min, vmax=c_max,
                                                  cmap=self._synAttr[kern.syn].cmap)
        else:
            # we have totals, special color table and normalization
            # we may overwrite here, but this does not matter, we only need
            # some reference patch
            samplePatches = p.ax.imshow(kern.kern,
                                        vmin=c_min, vmax=c_max,
                                        cmap=cm.bluered,
                                        norm=cm.ZeroCenterNorm())  # must be instance

    # Create colorbars at bottom of figure
    if showLegend:
        # Do we have kernel values exceeding the color limits?
        # BUG FIX: c_min/c_max may be None ("no explicit limit"); comparing
        # None with floats raises TypeError on Python 3. Treat a missing
        # limit as "nothing clipped" by substituting the kernel extremum.
        lo = c_min if c_min is not None else kern_min
        hi = c_max if c_max is not None else kern_max
        if lo <= kern_min and kern_max <= hi:
            extmode = 'neither'
        elif lo > kern_min and kern_max <= hi:
            extmode = 'min'
        elif lo <= kern_min and kern_max > hi:
            extmode = 'max'
        else:
            extmode = 'both'

        if aggrSyns:
            cbax = f.add_axes(self._scaledBox(self._cbPatches))
            # by default, use 4 ticks to avoid clogging
            # according to docu, we need a separate Locator object
            # for each axis.
            if plotParams.legend_ticks:
                tcks = plotParams.legend_ticks
            else:
                tcks = mpl.ticker.MaxNLocator(nbins=4)
            if normalize:
                # colorbar with freely settable ticks
                cb = f.colorbar(samplePatches, cax=cbax,
                                orientation='horizontal',
                                ticks=tcks,
                                format=plotParams.legend_tick_format, extend=extmode)
            else:
                # colorbar with tick labels 'Exc', 'Inh'
                # we add the color bar here explicitly, so we get no problems
                # if the sample patch includes only pos or only neg values
                cb = mpl.colorbar.ColorbarBase(cbax, cmap=cm.bluered,
                                               orientation='horizontal')
                cbax.set_xticks([0, 1])
                cbax.set_xticklabels(['Inh', 'Exc'])
            cb.outline.set_linewidth(0.5)  # narrower line around colorbar
            # fix font for ticks
            plt.setp(cbax.get_xticklabels(), fontproperties=plotParams.legend_tick_font)
            # no title in this case
        else:
            # loop over synapse types
            for syn in self._synAttr.keys():
                cbax = f.add_axes(self._scaledBox(self._cbPatches[syn]))
                if plotParams.legend_location is None:
                    cbax.set_ylabel(syn, fontproperties=plotParams.legend_title_font,
                                    rotation='horizontal')
                else:
                    cbax.set_title(syn, fontproperties=plotParams.legend_title_font,
                                   rotation='horizontal')
                if normalize:
                    # by default, use 4 ticks to avoid clogging
                    # according to docu, we need a separate Locator object
                    # for each axis.
                    if plotParams.legend_ticks:
                        tcks = plotParams.legend_ticks
                    else:
                        tcks = mpl.ticker.MaxNLocator(nbins=4)
                    # proper colorbar
                    cb = f.colorbar(samplePatches[syn], cax=cbax,
                                    orientation='horizontal',
                                    ticks=tcks,
                                    format=plotParams.legend_tick_format,
                                    extend=extmode)
                    cb.outline.set_linewidth(0.5)  # narrower line around colorbar
                    # fix font for ticks
                    plt.setp(cbax.get_xticklabels(),
                             fontproperties=plotParams.legend_tick_font)
                else:
                    # just a solid color bar with no ticks
                    cbax.set_xticks([])
                    cbax.set_yticks([])
                    # full-intensity color from color map
                    cbax.set_axis_bgcolor(self._synAttr[syn].cmap(1.0))
                    # narrower border
                    if hasattr(cbax, 'frame'):
                        cbax.frame.set_linewidth(0.5)
                    else:
                        for sp in cbax.spines.values():
                            sp.set_linewidth(0.5)

    # save to file(s), use full size
    f.set_size_inches(fsize)
    if isinstance(file, (list, tuple)):
        for fn in file:
            f.savefig(fn)
    elif isinstance(file, str):
        f.savefig(file)
    f.set_size_inches(actsize)  # reset size for further interactive work

    return kern_min, kern_max
# ----------------------------------------------------------------------------
def toLaTeX(self, file, standalone=False, enumerate=False, legend=True):
    """
    Write connection table to file as a LaTeX tabularx table.

    Arguments:
    file        output file name
    standalone  create complete LaTeX file (default: False)
    enumerate   enumerate connections (default: False)
    legend      add explanation of functions used (default: True)

    Note: 'file' and 'enumerate' shadow builtins, but the names are kept
    for backward compatibility with existing callers.
    """
    # 'with' guarantees the file is closed even if formatting fails;
    # open() raises on failure, so no explicit check is needed.
    with open(file, 'w') as lfile:
        if standalone:
            lfile.write(
                r"""
\documentclass[a4paper,american]{article}
\usepackage[pdftex,margin=1in,centering,noheadfoot,a4paper]{geometry}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{color}
\usepackage{calc}
\usepackage{tabularx}   % automatically adjusts column width in tables
\usepackage{multirow}   % allows entries spanning several rows
\usepackage{colortbl}   % allows coloring tables
\usepackage[fleqn]{amsmath}
\setlength{\mathindent}{0em}
\usepackage{mathpazo}
\usepackage[scaled=.95]{helvet}
\renewcommand\familydefault{\sfdefault}
\renewcommand\arraystretch{1.2}
\pagestyle{empty}

% \hdr{ncols}{label}{title}
%
% Typeset header bar across table with ncols columns
% with label at left margin and centered title
%
\newcommand{\hdr}[3]{%
  \multicolumn{#1}{|l|}{%
    \color{white}\cellcolor[gray]{0.0}%
    \textbf{\makebox[0pt]{#2}\hspace{0.5\linewidth}\makebox[0pt][c]{#3}}%
  }%
}

\begin{document}
""")
        # optional leading enumeration column
        lfile.write(
            r"""
\noindent\begin{tabularx}{\linewidth}{%s|l|l|l|c|c|X|}\hline
\hdr{%d}{}{Connectivity}\\\hline
%s \textbf{Src} & \textbf{Tgt} & \textbf{Syn} &
\textbf{Wght} & \textbf{Mask} & \textbf{Kernel} \\\hline
""" % (('|r', 7, '&') if enumerate else ('', 6, '')))

        # ensure sorting according to keys, gives some alphabetic sorting
        haveU, haveG = False, False  # whether uniform/gaussian appear (for legend)
        cctr = 0  # connection counter
        for ckey in sorted(self._cTable.keys()):
            for conn in self._cTable[ckey]:
                cctr += 1
                if enumerate:
                    lfile.write('%d &' % cctr)
                # take care to escape _ in names such as GABA_A
                # also remove any pending '/None'
                lfile.write((r'%s/%s & %s/%s & %s' %
                             (conn.slayer, conn.snrn, conn.tlayer, conn.tnrn,
                              conn.synmodel)).replace('_', r'\_').replace('/None', ''))
                lfile.write(' & \n')

                if isinstance(conn.weight, (int, float)):
                    lfile.write(r'%g' % conn.weight)
                elif 'uniform' in conn.weight:
                    cw = conn.weight['uniform']
                    lfile.write(r'$\mathcal{U}[%g, %g)$' % (cw['min'], cw['max']))
                    haveU = True
                else:
                    # BUG FIX: the message used 'conn.weight.__str__', which
                    # formats the bound method object, not the value; %s
                    # already calls str(). Also fixed the 'Unkown' typo.
                    raise ValueError('Unknown weight type "%s"' % conn.weight)
                lfile.write(' & \n')

                if 'circular' in conn.mask:
                    lfile.write(r'$\leq %g$' % conn.mask['circular']['radius'])
                elif 'rectangular' in conn.mask:
                    cmr = conn.mask['rectangular']
                    lfile.write(
                        r"""$[(%+g, %+g), (%+g, %+g)]$"""
                        % (cmr['lower_left'][0], cmr['lower_left'][1],
                           cmr['upper_right'][0], cmr['upper_right'][1]))
                else:
                    raise ValueError('Unknown mask type "%s"' % conn.mask)
                lfile.write(' & \n')

                if isinstance(conn.kernel, (int, float)):
                    lfile.write(r'$%g$' % conn.kernel)
                elif 'gaussian' in conn.kernel:
                    ckg = conn.kernel['gaussian']
                    lfile.write(r'$\mathcal{G}(p_0 = %g, \sigma = %g)$' %
                                (ckg['p_center'], ckg['sigma']))
                    haveG = True
                else:
                    raise ValueError('Unknown kernel type "%s"' % conn.kernel)
                lfile.write('\n')
                lfile.write(r'\\\hline' '\n')

        if legend and (haveU or haveG):
            # add bottom line with legend
            lfile.write(r'\hline' '\n')
            lfile.write(r'\multicolumn{%d}{|l|}{\footnotesize ' % (7 if enumerate else 6))
            if haveG:
                lfile.write(r'$\mathcal{G}(p_0, \sigma)$: $p(\mathbf{x})=p_0 e^{-\mathbf{x}^2/2\sigma^2}$')
            if haveG and haveU:
                lfile.write(r', ')
            if haveU:
                lfile.write(r'$\mathcal{U}[a, b)$: uniform distribution on $[a, b)$')
            lfile.write(r'}\\\hline' '\n')

        lfile.write(r'\end{tabularx}' '\n\n')
        if standalone:
            lfile.write(r'\end{document}' '\n')
# ----------------------------------------------------------------------------
def _evalkernel(mask, kernel, weight, extent, intensity, tcd):
    """
    Evaluate the kernel on a grid covering extent.

    Kernel values are multiplied with abs(weight). If weight is a
    distribution, the mean value is used.

    Result is a masked array, in which the values outside the mask are
    masked.

    Arguments:
    mask       topology-style mask dict (see _maskeval)
    kernel     topology-style kernel dict or scalar (see _kerneval)
    weight     scalar weight (sign ignored here)
    extent     (width, height) of the area to sample
    intensity  one of 'wp' (weight * probability), 'p' (probability only),
               or 'tcd' (tcd * weight * probability)
    tcd        total charge deposited factor; only used for intensity 'tcd'
    """
    # determine resolution, number of data points
    dx = max(extent) / plotParams.n_kern
    # BUG FIX: np.ceil returns a float, but np.linspace requires an integer
    # sample count (TypeError on modern numpy) -- cast explicitly.
    nx = int(np.ceil(extent[0] / dx))
    ny = int(np.ceil(extent[1] / dx))
    x = np.linspace(-0.5 * extent[0], 0.5 * extent[0], nx)
    y = np.linspace(-0.5 * extent[1], 0.5 * extent[1], ny)
    X, Y = np.meshgrid(x, y)
    if intensity == 'wp':
        return np.ma.masked_array(abs(weight) * _kerneval(X, Y, kernel),
                                  np.logical_not(_maskeval(X, Y, mask)))
    elif intensity == 'p':
        return np.ma.masked_array(_kerneval(X, Y, kernel),
                                  np.logical_not(_maskeval(X, Y, mask)))
    elif intensity == 'tcd':
        return np.ma.masked_array(abs(tcd) * abs(weight) * _kerneval(X, Y, kernel),
                                  np.logical_not(_maskeval(X, Y, mask)))
    else:
        # ROBUSTNESS FIX: previously fell through and silently returned None
        raise ValueError('Unknown intensity "%s"' % intensity)
# ----------------------------------------------------------------------------
def _weighteval(weight):
"""Returns weight, or mean of distribution, signed."""
w = None
if isinstance(weight, (float, int)):
w = weight
elif isinstance(weight, dict):
assert(len(weight) == 1)
if 'uniform' in weight:
w = 0.5 * (weight['uniform']['min']
+ weight['uniform']['max'])
elif 'gaussian' in weight:
w = weight['gaussian']['mean']
else:
raise Exception('Unknown weight type "%s"' % weight.keys()[0])
if not w:
raise Exception('Cannot handle weight.')
return float(w)
# ----------------------------------------------------------------------------
def _maskeval(x, y, mask):
"""
Evaluate mask given as topology style dict at
(x,y). Assume x,y are 2d numpy matrices.
"""
assert(len(mask)==1)
if 'circular' in mask:
r = mask['circular']['radius']
m = x**2+y**2 <= r**2
elif 'doughnut' in mask:
ri = mask['doughnut']['inner_radius']
ro = mask['doughnut']['outer_radius']
d = x**2 + y**2
m = np.logical_and(ri <= d, d <= ro)
elif 'rectangular' in mask:
ll = mask['rectangular']['lower_left']
ur = mask['rectangular']['upper_right']
m = np.logical_and(np.logical_and(ll[0] <= x, x <= ur[0]),
np.logical_and(ll[1] <= y, y <= ur[1]))
else:
raise Exception('Unknown mask type "%s"' % mask.keys()[0])
return m
# ----------------------------------------------------------------------------
def _kerneval(x, y, fun):
"""
Evaluate function given as topology style dict at
(x,y). Assume x,y are 2d numpy matrices
"""
if isinstance(fun, (float, int)):
return float(fun) * np.ones(np.shape(x))
elif isinstance(fun, dict):
assert(len(fun) == 1)
if 'gaussian' in fun:
g = fun['gaussian']
p0 = g['p_center']
sig = g['sigma']
return p0 * np.exp(-0.5*(x**2+y**2)/sig**2)
else:
raise Exception('Unknown kernel "%s"', fun.keys()[0])
# something very wrong
raise Exception('Cannot handle kernel.')
# ----------------------------------------------------------------------------
def _addKernels(kList):
"""
Add a list of kernels.
Arguments:
kList: List of masked arrays of equal size.
Returns:
Masked array of same size as input. All values are added,
setting masked values to 0. The mask for the sum is the
logical AND of all individual masks, so that only such
values are masked that are masked in all kernels.
_addKernels always returns a new array object, even if
kList has only a single element.
"""
assert(len(kList) > 0)
if len(kList) < 2:
return kList[0].copy()
d = np.ma.filled(kList[0], fill_value = 0).copy()
m = kList[0].mask.copy()
for k in kList[1:]:
d += np.ma.filled(k, fill_value = 0)
m = np.logical_and(m, k.mask)
return np.ma.masked_array(d, m)
# ----------------------------------------------------------------------------
def _flattened(lst):
"""Returned list flattend at first level."""
return sum(lst, [])
# ----------------------------------------------------------------------------
"""
if __name__ == "__main__":
import sys
sys.path += ['./examples']
# import simple
# reload(simple)
cp = ConnectionPattern(simple.layerList, simple.connectList)
import simple2
reload(simple2)
cp2 = ConnectionPattern(simple2.layerList, simple2.connectList)
st3 = ((SynType('GABA_B', -5.0, 'orange'),
SynType('GABA_A', -1.0, 'm')),
(SynType('NMDA', 5.0, 'b'),
SynType('FOO', 1.0, 'aqua'),
SynType('AMPA', 3.0, 'g')))
cp3s = ConnectionPattern(simple2.layerList, simple2.connectList,
synTypes=st3)
import simple3
reload(simple3)
cp3 = ConnectionPattern(simple3.layerList, simple3.connectList)
# cp._prepareAxes('by layer')
# cp2._prepareAxes('by layer')
# cp3._prepareAxes('detailed')
cp2.plot()
cp2.plot(mode='layer')
cp2.plot(mode='population')
cp2.plot(mode='totals')
cp2.plot(mode=('AMPA',))
cp2.plot(mode=('AMPA','GABA_B'))
# cp3.plot()
# cp3.plot(mode='population')
# cp3.plot(mode='layer')
# cp3.plot(mode='totals')
# cp.plot(normalize=True)
# cp.plot(totals=True, normalize=True)
# cp2.plot()
# cp2.plot(file=('cp3.eps'))
# cp2.plot(byLayer=True)
# cp2.plot(totals=True)
"""
| gpl-2.0 |
sandiegodata/age-friendly-communities | users/david/RCFE_Capacity.py | 1 | 2036 | """
@author: David Albrecht
anaconda 4.2.13
python 3.5.2
pandas 0.19.1
Dataset Name:
CDSS RCFE List - https://secure.dss.ca.gov/CareFacilitySearch/Home/DownloadData
Purpose:
Answer the first two bullet points within the first question block, "RCFE Capacity":
1) What is the number of RCFEs in a given community as defined above?
Answer stored in count_RCFEs dictionary
2) What is the capacity (by licensed bed) in a given community?
Answer stored in capacity_RCFEs dictionary
Notes:
1) Manually deleted columns V and onward since they created errors when reading into a df and are not relevant
2) Filtered the df to exclude any facility that is in closed, pending, or unlicensed status
3) INFO: Zillow lists 132 different zip codes in San Diego County
"""
#import necessary libraries
import pandas as pd
import os
#change working directory
# NOTE(review): '' is a placeholder -- os.chdir('') raises FileNotFoundError;
# fill in the data directory before running.
os.chdir('')
#create dataframe and filter for RCFEs only in San Diego County
q1_df = pd.read_csv('ResidentialElderCareFacility01012017.csv')
q1_df = q1_df[q1_df['County Name'] == 'SAN DIEGO']
# exclude facilities that are closed, pending, or unlicensed
# (single isin() filter replaces three chained boolean filters)
q1_df = q1_df[~q1_df['Facility Status'].isin(['CLOSED', 'PENDING', 'UNLICENSED'])]
#create a list of unique zipcodes within San Diego County
unique_zips = list(q1_df['Facility Zip'].unique())
#find the number of RCFEs in each zipcode: count_RCFEs
#find the capacity within each zipcode: capacity_RCFEs
# PERFORMANCE FIX: the original nested loops scanned the whole frame once per
# unique zipcode (O(zips * rows)); value_counts/groupby compute the same
# dictionaries in a single pass.
count_RCFEs = q1_df['Facility Zip'].value_counts().to_dict()
capacity_RCFEs = q1_df.groupby('Facility Zip')['Facility Capacity'].sum().to_dict()
| mit |
IndraVikas/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
# Parsed versions of the installed numpy/scipy; the shims below compare
# against these tuples to decide whether a backport is needed.
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
# Prefer SciPy's numerically stable expit; fall back to a tanh-based
# implementation when SciPy is too old (< 0.10 lacks it; < 0.14 overflows
# for large inputs, detected by the NaN probe below).
try:
    from scipy.special import expit     # SciPy >= 0.10
    with np.errstate(invalid='ignore', over='ignore'):
        if np.isnan(expit(1000)):       # SciPy < 0.14
            raise ImportError("no stable expit in scipy.special")
except ImportError:
    def expit(x, out=None):
        """Logistic sigmoid function, ``1 / (1 + exp(-x))``.

        See sklearn.utils.extmath.log_logistic for the log of this function.

        Parameters
        ----------
        x : array-like
            Input values.
        out : ndarray, optional
            Preallocated output array; a new float64 array matching the
            shape of ``x`` is created when None.
        """
        if out is None:
            out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
        out[:] = x

        # 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
        # This way of computing the logistic is both fast and stable.
        out *= .5
        np.tanh(out, out)
        out += 1
        out *= .5

        return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument
# ('K' preserves the memory layout of the source array; the keyword was
# added in numpy 1.7).
if 'order' in inspect.getargspec(np.copy)[0]:
    def safe_copy(X):
        # Copy, but keep the order
        return np.copy(X, order='K')
else:
    # Before an 'order' argument was introduced, numpy wouldn't muck with
    # the ordering
    safe_copy = np.copy
# Probe whether np.divide honours the 'dtype' argument; very old numpy
# versions ignore or reject it (numpy issue #3484), in which case we
# provide a compatible wrapper.
try:
    if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
                        np.divide(.4, 1, casting="unsafe", dtype=np.float))
            or not np.allclose(np.divide(.4, 1), .4)):
        raise TypeError('Divide not working with dtype: '
                        'https://github.com/numpy/numpy/issues/3484')
    divide = np.divide

except TypeError:
    # Compat for old versions of np.divide that do not provide support for
    # the dtype args
    def divide(x1, x2, out=None, dtype=None):
        # Emulate np.divide(x1, x2, out=out, dtype=dtype): divide in place
        # into 'out' (or a fresh array), honouring the requested dtype.
        out_orig = out
        if out is None:
            out = np.asarray(x1, dtype=dtype)
            if out is x1:
                # asarray returned x1 itself; copy so we don't mutate input
                out = x1.copy()
        else:
            if out is not x1:
                out[:] = x1
        if dtype is not None and out.dtype != dtype:
            out = out.astype(dtype)
        out /= x2
        if out_orig is None and np.isscalar(x1):
            # scalar in, scalar out (np.asscalar mirrors np.divide here)
            out = np.asscalar(out)
        return out
# Probe whether ndarray.astype accepts the 'copy' keyword (added in
# numpy 1.7); otherwise emulate copy=False by returning the input when
# the dtype already matches.
try:
    np.array(5).astype(float, copy=False)
except TypeError:
    # Compat where astype accepted no copy argument
    def astype(array, dtype, copy=True):
        if not copy and array.dtype == dtype:
            return array
        return array.astype(dtype)
else:
    astype = np.ndarray.astype
# Probe whether sparse matrices support min/max with an 'axis' argument
# (scipy >= 0.14); otherwise use the backported scipy 0.14 implementation.
try:
    with warnings.catch_warnings(record=True):
        # Don't raise the numpy deprecation warnings that appear in
        # 1.9, but avoid Python bug due to simplefilter('ignore')
        warnings.simplefilter('always')
        sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 14.0, sparse matrix min/max doesn't accept an `axis` argument
    # the following code is taken from the scipy 0.14 codebase

    def _minor_reduce(X, ufunc):
        # Reduce over the minor axis of a compressed matrix; returns the
        # indices of non-empty major slices and their reduced values.
        major_index = np.flatnonzero(np.diff(X.indptr))
        if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 don't handle empty arrays in reduceat
            value = np.zeros_like(X.data)
        else:
            value = ufunc.reduceat(X.data, X.indptr[major_index])
        return major_index, value

    def _min_or_max_axis(X, axis, min_or_max):
        # min/max along one axis; implicit zeros participate in the
        # reduction for any slice that is not completely full.
        N = X.shape[axis]
        if N == 0:
            raise ValueError("zero-size array to reduction operation")
        M = X.shape[1 - axis]
        mat = X.tocsc() if axis == 0 else X.tocsr()
        mat.sum_duplicates()
        major_index, value = _minor_reduce(mat, min_or_max)
        not_full = np.diff(mat.indptr)[major_index] < N
        value[not_full] = min_or_max(value[not_full], 0)
        mask = value != 0
        major_index = np.compress(mask, major_index)
        value = np.compress(mask, value)

        from scipy.sparse import coo_matrix
        if axis == 0:
            res = coo_matrix((value, (np.zeros(len(value)), major_index)),
                             dtype=X.dtype, shape=(1, M))
        else:
            res = coo_matrix((value, (major_index, np.zeros(len(value)))),
                             dtype=X.dtype, shape=(M, 1))
        return res.A.ravel()

    def _sparse_min_or_max(X, axis, min_or_max):
        if axis is None:
            # reduction over the whole matrix; a stored-zero-free matrix
            # with unstored entries still contains implicit zeros
            if 0 in X.shape:
                raise ValueError("zero-size array to reduction operation")
            zero = X.dtype.type(0)
            if X.nnz == 0:
                return zero
            m = min_or_max.reduce(X.data.ravel())
            # NOTE(review): np.product is deprecated in modern numpy
            # (alias of np.prod); kept as in the scipy 0.14 original.
            if X.nnz != np.product(X.shape):
                m = min_or_max(zero, m)
            return m
        if axis < 0:
            axis += 2
        if (axis == 0) or (axis == 1):
            return _min_or_max_axis(X, axis, min_or_max)
        else:
            raise ValueError("invalid axis, use 0 for rows, or 1 for columns")

    def sparse_min_max(X, axis):
        # Returns (per-axis minima, per-axis maxima) as dense 1d arrays.
        return (_sparse_min_or_max(X, axis, np.minimum),
                _sparse_min_or_max(X, axis, np.maximum))

else:
    def sparse_min_max(X, axis):
        # Modern scipy: delegate to the sparse matrix itself.
        return (X.min(axis=axis).toarray().ravel(),
                X.max(axis=axis).toarray().ravel())
# Use numpy's argpartition when available; otherwise fall back to a full
# argsort, which satisfies the same partial-ordering contract (the 'kth'
# element ends up in its sorted position) at O(n log n) cost.
try:
    from numpy import argpartition
except ImportError:
    # numpy.argpartition was introduced in v 1.8.0
    def argpartition(a, kth, axis=-1, kind='introselect', order=None):
        return np.argsort(a, axis=axis, order=order)
# itertools.combinations_with_replacement exists from Python 2.7/3.1;
# provide the documented pure-Python backport for Python 2.6.
try:
    from itertools import combinations_with_replacement
except ImportError:
    # Backport of itertools.combinations_with_replacement for Python 2.6,
    # from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
    # Python Software Foundation (https://docs.python.org/3/license.html)
    def combinations_with_replacement(iterable, r):
        # combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
        pool = tuple(iterable)
        n = len(pool)
        if not n and r:
            # empty pool but r > 0: no combinations at all
            return
        indices = [0] * r
        yield tuple(pool[i] for i in indices)
        while True:
            # find the rightmost index that can still be incremented
            for i in reversed(range(r)):
                if indices[i] != n - 1:
                    break
            else:
                return
            # increment it and reset everything to its right to the same value
            indices[i:] = [indices[i] + 1] * (r - i)
            yield tuple(pool[i] for i in indices)
# numpy.isclose was added in numpy 1.7; backport it (from numpy 1.8.1)
# for older installations.
try:
    from numpy import isclose
except ImportError:
    def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
        """
        Returns a boolean array where two arrays are element-wise equal within
        a tolerance.

        This function was added to numpy v1.7.0, and the version you are
        running has been backported from numpy v1.8.1. See its documentation
        for more details.
        """
        def within_tol(x, y, atol, rtol):
            # |x - y| <= atol + rtol * |y| (asymmetric, as in numpy)
            with np.errstate(invalid='ignore'):
                result = np.less_equal(abs(x - y), atol + rtol * abs(y))
            if np.isscalar(a) and np.isscalar(b):
                result = bool(result)
            return result

        x = np.array(a, copy=False, subok=True, ndmin=1)
        y = np.array(b, copy=False, subok=True, ndmin=1)
        xfin = np.isfinite(x)
        yfin = np.isfinite(y)
        if all(xfin) and all(yfin):
            # fast path: no inf/nan anywhere
            return within_tol(x, y, atol, rtol)
        else:
            finite = xfin & yfin
            cond = np.zeros_like(finite, subok=True)
            # Since we're using boolean indexing, x & y must be the same shape.
            # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
            # lib.stride_tricks, though, so we can't import it here.
            x = x * np.ones_like(cond)
            y = y * np.ones_like(cond)
            # Avoid subtraction with infinite/nan values...
            cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
            # Check for equality of infinite values...
            cond[~finite] = (x[~finite] == y[~finite])
            if equal_nan:
                # Make NaN == NaN
                cond[np.isnan(x) & np.isnan(y)] = True
            return cond
# np.frombuffer rejects an empty buffer before numpy 1.7; special-case it.
if np_version < (1, 7):
    # Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
    def frombuffer_empty(buf, dtype):
        if len(buf) == 0:
            return np.empty(0, dtype=dtype)
        else:
            return np.frombuffer(buf, dtype=dtype)
else:
    frombuffer_empty = np.frombuffer
# numpy.in1d gained the 'invert' argument in 1.8; backport the 1.8.1
# implementation for older numpy.
if np_version < (1, 8):
    def in1d(ar1, ar2, assume_unique=False, invert=False):
        # Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
        # Ravel both arrays, behavior for the first array could be different
        ar1 = np.asarray(ar1).ravel()
        ar2 = np.asarray(ar2).ravel()

        # This code is significantly faster when the condition is satisfied.
        # (brute-force comparison beats the sorting approach when ar2 is
        # small relative to ar1; the 0.145 exponent is numpy's heuristic)
        if len(ar2) < 10 * len(ar1) ** 0.145:
            if invert:
                # NOTE(review): np.bool was removed in numpy 1.24; kept as
                # in the numpy 1.8.1 original, which this branch targets.
                mask = np.ones(len(ar1), dtype=np.bool)
                for a in ar2:
                    mask &= (ar1 != a)
            else:
                mask = np.zeros(len(ar1), dtype=np.bool)
                for a in ar2:
                    mask |= (ar1 == a)
            return mask

        # Otherwise use sorting
        if not assume_unique:
            ar1, rev_idx = np.unique(ar1, return_inverse=True)
            ar2 = np.unique(ar2)

        ar = np.concatenate((ar1, ar2))
        # We need this to be a stable sort, so always use 'mergesort'
        # here. The values from the first array should always come before
        # the values from the second array.
        order = ar.argsort(kind='mergesort')
        sar = ar[order]
        if invert:
            bool_ar = (sar[1:] != sar[:-1])
        else:
            bool_ar = (sar[1:] == sar[:-1])
        flag = np.concatenate((bool_ar, [invert]))
        indx = order.argsort(kind='mergesort')[:len(ar1)]

        if assume_unique:
            return flag[indx]
        else:
            return flag[indx][rev_idx]
else:
    from numpy import in1d
# scipy.sparse.linalg.lsqr had a bug fixed in scipy 0.15; use the bundled
# backport for older scipy.
if sp_version < (0, 15):
    # Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
    from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
    from scipy.sparse.linalg import lsqr as sparse_lsqr
# functools.partial objects are not picklable on Python 2.6; provide a
# picklable class-based replacement there.
if sys.version_info < (2, 7, 0):
    # partial cannot be pickled in Python 2.6
    # http://bugs.python.org/issue1398
    class partial(object):
        """Picklable stand-in for functools.partial."""

        def __init__(self, func, *args, **keywords):
            functools.update_wrapper(self, func)
            self.func = func
            self.args = args
            self.keywords = keywords

        def __call__(self, *args, **keywords):
            # positional args are appended; keyword args override stored ones
            args = self.args + args
            kwargs = self.keywords.copy()
            kwargs.update(keywords)
            return self.func(*args, **kwargs)
else:
    from functools import partial
# np.bincount rejects empty input arrays before numpy 1.6.2; wrap it so an
# empty input yields a zero array of length 'minlength'.
if np_version < (1, 6, 2):
    # Allow bincount to accept empty arrays
    # https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
    def bincount(x, weights=None, minlength=None):
        if len(x) > 0:
            return np.bincount(x, weights, minlength)
        else:
            if minlength is None:
                minlength = 0
            minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
            return np.zeros(minlength, dtype=np.intp)
else:
    from numpy import bincount
# os.makedirs grew the exist_ok parameter in Python 3.2; emulate it otherwise.
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
    makedirs = os.makedirs
else:
    def makedirs(name, mode=0o777, exist_ok=False):
        """makedirs(name [, mode=0o777][, exist_ok=False])

        Super-mkdir; create a leaf directory and all intermediate ones.  Works
        like mkdir, except that any intermediate path segment (not just the
        rightmost) will be created if it does not exist. If the target
        directory already exists, raise an OSError if exist_ok is False.
        Otherwise no exception is raised. This is recursive.
        """
        try:
            os.makedirs(name, mode=mode)
        except OSError as e:
            # Re-raise unless the failure is exactly "already exists as a
            # directory" and the caller opted into exist_ok.
            if (not exist_ok or e.errno != errno.EEXIST
                    or not os.path.isdir(name)):
                raise
| bsd-3-clause |
davidam/python-examples | scikit/plot_voting_regressor.py | 6 | 1745 | """
=================================================
Plot individual and voting regression predictions
=================================================
.. currentmodule:: sklearn
Plot individual and averaged regression predictions for Boston dataset.
First, three exemplary regressors are initialized
(:class:`~ensemble.GradientBoostingRegressor`,
:class:`~ensemble.RandomForestRegressor`, and
:class:`~linear_model.LinearRegression`) and used to initialize a
:class:`~ensemble.VotingRegressor`.
The red starred dots are the averaged predictions.
"""
print(__doc__)

import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import VotingRegressor

# Loading some example data
X, y = datasets.load_boston(return_X_y=True)

# Training classifiers (fixed random_state for reproducibility)
reg1 = GradientBoostingRegressor(random_state=1, n_estimators=10)
reg2 = RandomForestRegressor(random_state=1, n_estimators=10)
reg3 = LinearRegression()
ereg = VotingRegressor([('gb', reg1), ('rf', reg2), ('lr', reg3)])
reg1.fit(X, y)
reg2.fit(X, y)
reg3.fit(X, y)
ereg.fit(X, y)

# Predict on the first 20 training samples and overlay each regressor's
# predictions with the VotingRegressor average (red stars).
xt = X[:20]

plt.figure()
plt.plot(reg1.predict(xt), 'gd', label='GradientBoostingRegressor')
plt.plot(reg2.predict(xt), 'b^', label='RandomForestRegressor')
plt.plot(reg3.predict(xt), 'ys', label='LinearRegression')
plt.plot(ereg.predict(xt), 'r*', label='VotingRegressor')
# Hide x ticks: the x axis is just the sample index, not meaningful values.
plt.tick_params(axis='x', which='both', bottom=False, top=False,
                labelbottom=False)
plt.ylabel('predicted')
plt.xlabel('training samples')
plt.legend(loc="best")
plt.title('Comparison of individual predictions with averaged')
plt.show()
| gpl-3.0 |
Windy-Ground/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)

import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt

from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering

###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4 (sum of each 2x2 pixel block)
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))

###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)

###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
        linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)

###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
    # NOTE(review): 'contours' is not a documented matplotlib contour()
    # keyword (the level count is passed positionally or via 'levels');
    # it appears to be ignored here -- confirm before upgrading matplotlib.
    plt.contour(label == l, contours=1,
                colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
bptripp/grasp-convnet | py/perspective.py | 1 | 30382 | __author__ = 'bptripp'
from os import listdir
from os.path import isfile, join
import time
import numpy as np
import matplotlib.pyplot as plt
import cPickle
from PIL import Image
from scipy.optimize import bisect
from quaternion import angle_between_quaterions, to_quaternion
def get_random_points(n, radius, surface=False):
    """Sample ``n`` random 3D points on (surface=True) or inside a sphere.

    Directions are drawn isotropically via normalized Gaussian vectors.
    Interior points are kept at least a small 'palm' offset from the
    origin.

    :param n: number of points
    :param radius: sphere radius
    :param surface: if True, points lie exactly on the sphere surface
    :return: (3, n) array of points
    """
    directions = np.random.randn(3, n)
    lengths = np.sum(directions**2, axis=0)**.5
    result = radius * directions / lengths
    if surface:
        return result
    # original TODO noted the palm offset is in metres, not a radius fraction
    palm = .035
    return result * (palm + (1-palm)*np.random.rand(n))
def get_random_angles(n, std=np.pi/8.):
    """
    :param n: Number of angles needed
    :param std: standard deviation of the azimuth/elevation deviations
    :return: Random angles in restricted ranges, meant as deviations in
        perspective around looking straight at something.
    """
    deviations = std*np.random.randn(3, n)
    # the third (roll) angle is uniform over the full circle
    deviations[2,:] = 2*np.pi*np.random.rand(1, n)
    return deviations
def get_rotation_matrix(point, angle):
    """
    :param point: Location of camera
    :param angle: Not what you expect: this is a list of angles relative to
        looking at (0,0,0), about world-z (azimuth), camera-y (elevation),
        and camera-z (roll). Random samples come from get_random_angles().
    :return: 3x3 rotation matrix (world-from-camera)
    """
    def _rot_about_z(theta):
        # standard rotation about the z axis by theta
        return np.array([[np.cos(theta), -np.sin(theta), 0],
                         [np.sin(theta), np.cos(theta), 0],
                         [0, 0, 1]])

    to_origin = -point  # location of (0,0,0) relative to the camera
    # azimuth of the origin as seen from the camera, mapped into [0, 2pi)
    azimuth = np.arctan(to_origin[1]/to_origin[0])
    if to_origin[0] < 0:
        azimuth = azimuth + np.pi
    if azimuth < 0:
        azimuth = azimuth + 2.*np.pi
    azimuth = azimuth + angle[0]
    Rz = _rot_about_z(azimuth)
    # elevation in the azimuth-rotated frame, mapped into [0, 2pi)
    elevation = -np.arctan(np.sqrt(to_origin[0]**2+to_origin[1]**2)/to_origin[2])
    if to_origin[2] < 0:
        elevation = elevation + np.pi
    if elevation < 0:
        elevation = elevation + 2.*np.pi
    elevation = elevation + angle[1]
    Ry = np.array([[np.cos(elevation), 0, -np.sin(elevation)],
                   [0, 1, 0],
                   [np.sin(elevation), 0, np.cos(elevation)]])
    # roll about the final camera z axis (negative sense)
    Rz2 = _rot_about_z(-angle[2])
    return np.dot(Rz, np.dot(Ry, Rz2))
def check_rotation_matrix(scatter=False):
    """Visual sanity check for get_rotation_matrix: plots camera frames and
    confirms the camera z axis points at the origin (exactly when
    scatter=False, approximately otherwise)."""
    from mpl_toolkits.mplot3d import axes3d, Axes3D
    n = 6
    points = get_random_points(n, 2)
    angles = get_random_angles(n)
    # point = np.array([1,1e-6,1e-6])
    # point = np.array([1e-6,1,1e-6])
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1,projection='3d')
    for i in range(points.shape[1]):
        point = points[:,i]
        angle = angles[:,i]
        if not scatter:
            # zero azimuth/elevation deviation so the camera looks exactly at origin
            angle[0] = 0
            angle[1] = 0
        R = get_rotation_matrix(point, angle)
        ax.scatter(0, 0, 0, color='b')
        ax.scatter(point[0], point[1], point[2], color='r')
        # columns of R are the camera-frame axes expressed in world coordinates
        x = np.dot(R, np.array([1,0,0]))
        y = np.dot(R, np.array([0,1,0]))
        z = np.dot(R, np.array([0,0,1]))
        ax.plot([point[0],point[0]+x[0]], [point[1],point[1]+x[1]], [point[2],point[2]+x[2]], color='r')
        ax.plot([point[0],point[0]+y[0]], [point[1],point[1]+y[1]], [point[2],point[2]+y[2]], color='g')
        ax.plot([point[0],point[0]+z[0]], [point[1],point[1]+z[1]], [point[2],point[2]+z[2]], color='b')
    plt.xlabel('x')
    plt.ylabel('y')
    # NOTE(review): ylabel is called twice; the second was probably meant to
    # label the z axis -- confirm.
    plt.ylabel('z')
    if not scatter:
        plt.title('blue axes should point AT blue dot (zero)')
    else:
        plt.title('blue axes should point NEAR blue dot (zero)')
    plt.show()
def check_depth_from_random_perspective():
    """Render one depth map of a sample OBJ mesh from a random camera pose
    and show it as a wireframe surface (manual sanity check)."""
    from depthmap import loadOBJ, Display
    filename = '../data/obj_files/24_bowl-02-Mar-2016-07-03-29.obj'
    verts, faces = loadOBJ(filename)
    # put vertical centre at zero
    verts = np.array(verts)
    minz = np.min(verts, axis=0)[2]
    maxz = np.max(verts, axis=0)[2]
    verts[:,2] = verts[:,2] - (minz+maxz)/2
    n = 6
    points = get_random_points(n, .25)
    angles = get_random_angles(n)
    # only the first sampled pose is rendered
    point = points[:,0]
    angle = angles[:,0]
    rot = get_rotation_matrix(point, angle)
    im_width = 80
    d = Display(imsize=(im_width,im_width))
    d.set_camera_position(point, rot, .5)
    d.set_mesh(verts, faces)
    depth = d.read_depth()
    d.close()
    X = np.arange(0, im_width)
    Y = np.arange(0, im_width)
    X, Y = np.meshgrid(X, Y)
    from mpl_toolkits.mplot3d import axes3d, Axes3D
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1,projection='3d')
    ax.plot_wireframe(X, Y, depth)
    ax.set_xlabel('x')
    plt.show()
def find_vertical(point):
    """
    Find new angle[2] so that camera-up points up. In terms of rotation matrix,
    R[2,0] should be 0 (x-axis horizontal) and R[2,1] should be positive (pointing
    up rather than down).
    """
    def f(gamma):
        # R[2][0] crosses zero when the camera x axis becomes horizontal
        return get_rotation_matrix(point, np.array([0, 0, gamma]))[2][0]
    gamma = bisect(f, 0, np.pi)
    # if get_rotation_matrix(point, np.array([0, 0, gamma]))[2][1] < 0:
    # NOTE(review): the sign test was flipped (< 0 became > 0) relative to
    # the docstring, which asks for R[2,1] positive; check_find_vertical is
    # flipped the same way so the two agree -- confirm intended convention.
    if get_rotation_matrix(point, np.array([0, 0, gamma]))[2][1] > 0:
        gamma = gamma + np.pi
    return gamma
def check_find_vertical():
    """Spot-check find_vertical on random surface points; prints the rotation
    matrix and an error line when its constraints are violated."""
    n = 3
    points = get_random_points(n, .35, surface=True)
    for i in range(n):
        point = points[:,i]
        gamma = find_vertical(point)
        rot = get_rotation_matrix(point, np.array([0, 0, gamma]))
        print(rot)
        # if np.abs(rot[2,0] > 1e-6) or rot[2,1] < 0:
        # NOTE(review): np.abs(rot[2,0] > 1e-6) takes the abs of a boolean;
        # the paren was probably meant as np.abs(rot[2,0]) > 1e-6 -- confirm.
        if np.abs(rot[2,0] > 1e-6) or rot[2,1] > 0:
            print('error with gamma: ' + str(gamma) + ' should be 0: ' + str(rot[2,0]) + ' should be +ve: ' + str(rot[2,1]))
def plot_random_samples():
    """Scatter-plot 1000 random camera points and angles side by side
    (visual check of the sampling distributions)."""
    n = 1000
    points = get_random_points(n, .25)
    angles = get_random_angles(n)
    from mpl_toolkits.mplot3d import axes3d, Axes3D
    fig = plt.figure(figsize=(10,5))
    ax = fig.add_subplot(1,2,1,projection='3d')
    ax.scatter(points[0,:], points[1,:], points[2,:])
    ax = fig.add_subplot(1,2,2,projection='3d')
    ax.scatter(angles[0,:], angles[1,:], angles[2,:])
    plt.show()
def get_perspectives(obj_filename, points, angles, im_width=80, near_clip=.25, far_clip=0.8, fov=45, camera_offset=.45, target_point=None):
    """Render depth images of a mesh from multiple camera poses.

    :param obj_filename: path to an OBJ mesh file
    :param points: (3, n) camera positions
    :param angles: (3, n) perspective deviations (see get_random_angles)
    :param target_point: optional point to centre at the origin instead of
        the bounding-box centre
    :return: (n, im_width, im_width) float32 array of metric distances
    """
    from depthmap import loadOBJ, Display, get_distance
    verts, faces = loadOBJ(obj_filename)
    # put vertical centre at zero
    verts = np.array(verts)
    min_bounding_box = np.min(verts, axis=0)
    max_bounding_box = np.max(verts, axis=0)
    # set bounding box centre to 0,0,0
    verts[:,0] = verts[:,0] - (min_bounding_box[0]+max_bounding_box[0])/2.
    verts[:,1] = verts[:,1] - (min_bounding_box[1]+max_bounding_box[1])/2.
    verts[:,2] = verts[:,2] - (min_bounding_box[2]+max_bounding_box[2])/2.
    if target_point is not None:
        # shift again so the target point sits at the origin
        verts[:,0] = verts[:,0] - target_point[0]
        verts[:,1] = verts[:,1] - target_point[1]
        verts[:,2] = verts[:,2] - target_point[2]
    d = Display(imsize=(im_width,im_width))
    d.set_perspective(fov=fov, near_clip=near_clip, far_clip=far_clip)
    perspectives = np.zeros((points.shape[1],im_width,im_width), dtype='float32')
    for i in range(points.shape[1]):
        point = points[:,i]
        angle = angles[:,i]
        rot = get_rotation_matrix(point, angle)
        d.set_camera_position(point, rot, camera_offset)
        d.set_mesh(verts, faces)
        depth = d.read_depth()
        # convert raw depth-buffer values to metric distance
        distance = get_distance(depth, near_clip, far_clip)
        perspectives[i,:,:] = distance
    d.close()
    return perspectives
def process_directory(obj_dir, data_dir, n):
    """For each OBJ in obj_dir, render n random grip perspectives and pickle
    (points, angles, perspectives) into data_dir; existing pkls are skipped."""
    from os import listdir
    from os.path import isfile, join
    import time
    for f in listdir(obj_dir):
        obj_filename = join(obj_dir, f)
        if isfile(obj_filename) and f.endswith('.obj'):
            data_filename = join(data_dir, f[:-4] + '.pkl')
            if isfile(data_filename):
                print('Skipping ' + f)
            else:
                print('Processing ' + f)
                start_time = time.time()
                points = get_random_points(n, .15)
                angles = get_random_angles(n, std=0)
                print(angles)
                perspectives = get_perspectives(obj_filename, points, angles)
                # NOTE: 'f' is rebound from directory entry to file handle here
                f = open(data_filename, 'wb')
                cPickle.dump((points, angles, perspectives), f)
                f.close()
                print(' ' + str(time.time()-start_time) + 's')
def process_eye_directory(obj_dir, data_dir, n):
    """For each OBJ in obj_dir, render n eye-view depth maps (cameras on a
    sphere surface with camera-up vertical) and pickle into data_dir."""
    #TODO: save image files here to allow random ordering during training
    from os import listdir
    from os.path import isfile, join
    import time
    for f in listdir(obj_dir):
        obj_filename = join(obj_dir, f)
        if isfile(obj_filename) and f.endswith('.obj'):
            data_filename = join(data_dir, f[:-4] + '.pkl')
            if isfile(data_filename):
                print('Skipping ' + f)
            else:
                print('Processing ' + f)
                start_time = time.time()
                points = get_random_points(n, .35, surface=True) #.75m with offset
                angles = np.zeros_like(points)
                # Set camera-up to vertical via third angle (angle needed is always
                # 3pi/4, but we'll find it numerically in case other parts of code
                # change while we're not looking).
                for i in range(n):
                    angles[2,i] = find_vertical(points[:,i])
                perspectives = get_perspectives(obj_filename, points, angles, near_clip=.4, fov=30)
                # NOTE: 'f' is rebound from directory entry to file handle here
                f = open(data_filename, 'wb')
                cPickle.dump((points, angles, perspectives), f)
                f.close()
                print(' ' + str(time.time()-start_time) + 's')
def check_maps(data_dir):
    """
    Checks pkl files in given directory to see if any of the depth maps they contain
    are empty (a near-zero standard deviation indicates a blank render).
    """
    from os import listdir
    from os.path import isfile, join
    for f in listdir(data_dir):
        data_filename = join(data_dir, f)
        if isfile(data_filename) and f.endswith('.pkl'):
            print('Checking ' + f)
            # NOTE: 'f' is rebound from directory entry to file handle here
            f = open(data_filename, 'rb')
            (points, angles, perspectives) = cPickle.load(f)
            f.close()
            for i in range(perspectives.shape[0]):
                sd = np.std(perspectives[i,:,:].flatten())
                if sd < 1e-3:
                    print(' map ' + str(i) + ' is empty')
def calculate_metrics(perspectives, im_width=80, fov=45.0, camera_offset=.45):
    """
    :param perspectives: numpy array of depth images of object from gripper perspective
    :param im_width: image width/height in pixels
    :param fov: camera field of view in degrees
    :param camera_offset: camera distance behind the gripper origin
    :return: (metrics, collisions) -- per-perspective grasp-quality scores
        and boolean collision flags
    """
    asymmetry_scale = 13.0 #TODO: calculate from camera params (13 pixels is ~5cm with default params)
    from heuristic import finger_path_template, calculate_grip_metrics
    finger_path = finger_path_template(fov*np.pi/180., im_width, camera_offset)
    collision_template = np.zeros_like(finger_path)
    # surfaces closer than camera_offset + 33mm along the finger path collide
    collision_template[finger_path > 0] = camera_offset + 0.033
    # print(np.max(collision_template))
    # print(np.max(finger_path))
    # plt.imshow(collision_template)
    # plt.show()
    metrics = []
    collisions = []
    for perspective in perspectives:
        intersections, qualities = calculate_grip_metrics(perspective, finger_path)
        q1 = qualities[0]
        q2 = qualities[1]
        q3 = qualities[2]
        # asymmetry penalties between finger contact depths (None = no contact)
        if intersections[0] is None or intersections[2] is None:
            a1 = 1
        else:
            a1 = ((intersections[0]-intersections[2])/asymmetry_scale)**2
        if intersections[1] is None or intersections[2] is None:
            a2 = 1
        else:
            a2 = ((intersections[1]-intersections[2])/asymmetry_scale)**2
        # combined score: limited by weakest side, discounted by asymmetry
        m = np.minimum((q1+q2)/1.5, q3) / (1 + (q1*a1+q2*a2) / (q1+q2+1e-6))
        collision = np.max(collision_template - perspective) > 0
        collisions.append(collision)
        # if collision:
        # m = 0
        metrics.append(m)
        # plt.subplot(1,2,1)
        # plt.imshow(perspective)
        # plt.subplot(1,2,2)
        # plt.imshow(np.maximum(0, finger_path-perspective))
        # print(collision)
        # print((a1,a2))
        # print(intersections)
        # print(qualities)
        # print('metric: ' + str(m))
        # plt.show()
    # print((intersections, qualities))
    return metrics, collisions
def get_quaternion_distance(points, angles):
    """
    Get new representation of camera/gripper configurations as rotation quaternions and
    distances from origin, rather than 3D points and rotations about axis pointing to origin.

    :return: (n,4)-shaped array of quaternions and (n,) array of distances
    """
    # print(points)
    # print(angles)
    quaternions = []
    distances = []
    for point, angle in zip(points.T, angles.T):
        distances.append(np.linalg.norm(point))
        quaternions.append(to_quaternion(get_rotation_matrix(point, angle)))
    return np.array(quaternions), np.array(distances)
def smooth_metrics(quaternions, distances, metrics):
    """Smooth a metric sampled at gripper configurations by interpolating each
    sample against all samples over orientation and distance (see the
    interpolate module for the kernel semantics)."""
    from interpolate import interpolate
    smoothed = []
    for i in range(len(metrics)):
        # print(i)
        interpolated = interpolate(quaternions[i], distances[i], quaternions, distances, metrics,
                                   sigma_d=.02, sigma_a=(16*np.pi/180))
        smoothed.append(interpolated)
        # print(interpolated - metrics[one])
    return smoothed
def load_target_points(filename):
    """Load grasp target points from a CSV file.

    Each line has the form ``"object",index,x,y,z`` (the object name may be
    double-quoted).

    :param filename: path to the CSV file
    :return: (objects, indices, points) -- parallel lists of object name
        strings, integer indices, and [x, y, z] float triples
    """
    objects = []
    indices = []
    points = []
    for line in open(filename, "r"):
        # str.translate(None, '"\n') is Python-2-only; these replace() calls
        # strip the same characters and also work on Python 3
        vals = line.replace('"', '').replace('\n', '').split(',')
        assert len(vals) == 5
        objects.append(vals[0])
        indices.append(int(vals[1]))
        points.append([float(vals[2]), float(vals[3]), float(vals[4])])
    return objects, indices, points
def get_target_points_for_object(objects, indices, points, object):
    """Select the indices and points whose object name matches ``object``.

    :param objects: list of object names (parallel to indices and points)
    :param indices: list of target-point indices
    :param points: list of [x, y, z] target points
    :param object: object name to match
    :return: (indices, points) as numpy arrays, original order preserved
    """
    matches = [(i, p) for o, i, p in zip(objects, indices, points) if o == object]
    kept_indices = [i for i, _ in matches]
    kept_points = [p for _, p in matches]
    return np.array(kept_indices), np.array(kept_points)
def check_target_points():
    """Print target-point CSV contents plus one per-object selection
    (manual check)."""
    objects, indices, points = load_target_points('../../grasp-conv/data/obj-points.csv')
    print(objects)
    print(indices)
    print(points)
    indices, points = get_target_points_for_object(objects, indices, points, '28_Spatula_final-11-Nov-2015-14-22-01.obj')
    print(indices)
    print(points)
def check_metrics():
    """Load cached spatula metrics, smooth them, cache the smoothed version,
    and show a histogram (manual check)."""
    # points, angles, metrics, collisions = calculate_metrics('../../grasp-conv/data/perspectives/28_Spatula_final-11-Nov-2015-14-22-01.pkl')
    # with open('spatula-perspectives.pkl', 'wb') as f:
    #     cPickle.dump((points, angles, metrics, collisions), f)
    with open('spatula-perspectives.pkl', 'rb') as f:
        (points, angles, metrics, collisions) = cPickle.load(f)
    metrics = np.array(metrics)
    smoothed = smooth_metrics(points, angles, metrics)
    with open('spatula-perspectives-smoothed.pkl', 'wb') as f:
        cPickle.dump((points, angles, metrics, collisions, smoothed), f)
    plt.hist(metrics, bins=50)
    plt.show()
def make_grip_perspective_depths(obj_dir, data_dir, target_points_file, n=1000):
    """Render n random gripper-perspective depth maps per target point for
    every OBJ in obj_dir, pickling one file per object into data_dir;
    existing pkls are skipped."""
    objects, indices, points = load_target_points(target_points_file)
    for f in listdir(obj_dir):
        obj_filename = join(obj_dir, f)
        if isfile(obj_filename) and f.endswith('.obj'):
            data_filename = join(data_dir, f[:-4] + '.pkl')
            if isfile(data_filename):
                print('Skipping ' + f)
            else:
                print('Processing ' + f)
                target_indices, target_points = get_target_points_for_object(objects, indices, points, f)
                start_time = time.time()
                #TODO: is there any reason to make points & angles these the same or different across targets?
                gripper_points = get_random_points(n, .15)
                gripper_angles = get_random_angles(n, std=0)
                perspectives = []
                for target_point in target_points:
                    print(' ' + str(target_point))
                    p = get_perspectives(obj_filename, gripper_points, gripper_angles, target_point=target_point)
                    perspectives.append(p)
                # NOTE: 'f' is rebound from directory entry to file handle here
                f = open(data_filename, 'wb')
                cPickle.dump((gripper_points, gripper_angles, target_indices, target_points, perspectives), f)
                f.close()
                print(' ' + str(time.time()-start_time) + 's')
def make_metrics(perspective_dir, metric_dir):
    """
    We'll store in separate pkl files per object to allow incremental processing, even through results
    won't take much memory.

    For each perspectives pkl, computes collision flags and smoothed grasp
    metrics both with and without collision masking, and pickles them.
    """
    for f in listdir(perspective_dir):
        perspective_filename = join(perspective_dir, f)
        if isfile(perspective_filename) and f.endswith('.pkl'):
            metric_filename = join(metric_dir, f[:-4] + '-metrics.pkl')
            if isfile(metric_filename):
                print('Skipping ' + f)
            else:
                print('Processing ' + f)
                start_time = time.time()
                with open(perspective_filename) as perspective_file:
                    gripper_points, gripper_angles, target_indices, target_points, perspectives = cPickle.load(perspective_file)
                print('unpickle: ' + str(time.time() - start_time))
                quaternions, distances = get_quaternion_distance(gripper_points, gripper_angles)
                collisions = []
                free_smoothed = []
                coll_smoothed = []
                for p in perspectives: # one per target point
                    fm, c = calculate_metrics(p)
                    fm = np.array(fm)
                    c = np.array(c)
                    # smooth the raw metrics, then zero out colliding
                    # configurations and smooth again
                    fs = smooth_metrics(quaternions, distances, fm)
                    cm = fm * (1-c)
                    cs = smooth_metrics(quaternions, distances, cm)
                    collisions.append(c)
                    free_smoothed.append(fs)
                    coll_smoothed.append(cs)
                # NOTE: 'f' is rebound from directory entry to file handle here
                f = open(metric_filename, 'wb')
                cPickle.dump((gripper_points, gripper_angles, target_indices, target_points,
                              collisions, free_smoothed, coll_smoothed), f)
                f.close()
def make_eye_perspective_depths(obj_dir, data_dir, target_points_file, n=20):
    """Render n eye-perspective depth images per target point for each OBJ in
    obj_dir, save them as 8-bit PNGs in data_dir, and pickle the camera
    metadata for all objects at the end."""
    all_objects, all_target_indices, all_target_points = load_target_points(target_points_file)
    camera_offset=.45
    near_clip=.6
    far_clip=1.0
    eye_points = []
    eye_angles = []
    objects = []
    target_indices = []
    target_points = []
    for f in listdir(obj_dir):
        obj_filename = join(obj_dir, f)
        if isfile(obj_filename) and f.endswith('.obj'): #and int(f[0]) >= 0: #TODO: set f[0] range here
            print('Processing ' + f)
            ti, tp= get_target_points_for_object(all_objects, all_target_indices, all_target_points, f)
            objects.append(f)
            target_indices.append(ti)
            target_points.append(tp)
            start_time = time.time()
            points = get_random_points(n, .35, surface=True) #.8m with offset
            # points = np.array([[ 0.00001], [0.35], [0.00001]]) # TODO: bug with x=0 and with z=0
            print(points)
            angles = np.zeros_like(points)
            eye_points.append(points)
            eye_angles.append(angles)
            # Set camera-up to vertical via third angle (angle needed is always
            # 3pi/4, but we'll find it numerically in case other parts of code
            # change while we're not looking).
            for i in range(n):
                angles[2,i] = find_vertical(points[:,i])
            # NOTE(review): this list is immediately overwritten inside the
            # loop below (get_perspectives returns an array) -- confirm the
            # per-target overwrite is intended.
            perspectives = []
            for target_index, target_point in zip(ti, tp):
                print(' ' + str(target_point))
                perspectives = get_perspectives(obj_filename, points, angles,
                                                near_clip=near_clip, far_clip=far_clip, camera_offset=camera_offset,
                                                fov=30, target_point=target_point)
                for i in range(len(perspectives)):
                    distance = perspectives[i]
                    # rescale metric distance into [0,1] for 8-bit PNG output
                    rescaled_distance = np.maximum(0, (distance-camera_offset)/(far_clip-camera_offset))
                    imfile = data_dir + f[:-4] + '-' + str(target_index) + '-' + str(i) + '.png'
                    Image.fromarray((255.0*rescaled_distance).astype('uint8')).save(imfile)
            print(' ' + str(time.time()-start_time) + 's')
    data_filename = join(data_dir, 'eye-perspectives-murata.pkl')
    f = open(data_filename, 'wb')
    cPickle.dump((objects, target_indices, target_points, eye_points, eye_angles), f)
    f.close()
def merge_eye_perspectives(data_dir):
    """Concatenate two partial eye-perspective metadata pickles into one."""
    # make_eye_perspective_depths mysteriously does not run all the way through, so I've
    # done it in two parts which are merged here:
    files = ['eye-perspectives1.pkl', 'eye-perspectives2.pkl']
    objects = []
    target_indices = []
    target_points = []
    eye_points = []
    eye_angles = []
    for file in files:
        with open(join(data_dir, file), 'rb') as f:
            o, ti, tp, ep, ea = cPickle.load(f)
            objects.extend(o)
            target_indices.extend(ti)
            target_points.extend(tp)
            eye_points.extend(ep)
            eye_angles.extend(ea)
    with open(join(data_dir, 'eye-perspectives.pkl'),'wb') as f:
        cPickle.dump((objects, target_indices, target_points, eye_points, eye_angles), f)
def export_neuron_perspectives():
    """Export neuron (gripper) camera poses to CSV, one row per pose:
    position (3 values) followed by the flattened rotation matrix (9)."""
    import csv
    with open('../data/neuron-points.pkl', 'rb') as f:
        neuron_points, neuron_angles = cPickle.load(f)
    with open('neuron-perspectives.csv', 'wb') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        for point, angle in zip(neuron_points.T, neuron_angles.T):
            R = get_rotation_matrix(point, angle)
            row = list(point)
            row.extend(R.flatten())
            writer.writerow(row)
def export_eye_perspectives(eye_perspectives_file):
    """Export eye camera poses per (object, target point) to CSV, one row per
    pose: target point (3), eye position (3), flattened rotation matrix (9)."""
    import csv
    with open(eye_perspectives_file) as f:
        objects, target_indices, target_points, eye_points, eye_angles = cPickle.load(f)
    with open('eye-perspectives.csv', 'wb') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        for object, ep, ea, tp in zip(objects, eye_points, eye_angles, target_points):
            print('Processing ' + object)
            for target_point in tp:
                for eye_point, eye_angle in zip(ep.T, ea.T):
                    eye_R = get_rotation_matrix(eye_point, eye_angle)
                    # row = [object]
                    row = []
                    row.extend(target_point)
                    row.extend(eye_point)
                    row.extend(eye_R.flatten())
                    writer.writerow(row)
def make_relative_metrics(eye_perspectives_file, metrics_dir, result_dir, n=500, neuron_points=None, neuron_angles=None):
    """For each object and eye viewpoint, re-express smoothed grasp metrics in
    the eye's reference frame by interpolating them at n fixed 'neuron'
    gripper configurations; pickles one result file per object."""
    from quaternion import difference_between_quaternions
    from interpolate import interpolate
    # each of these points/angles will correspond to an output neuron ...
    if neuron_points is None or neuron_angles is None:
        neuron_points = get_random_points(n, .15)
        neuron_angles = get_random_angles(n, std=0)
    with open(join(result_dir, 'neuron-points.pkl'), 'wb') as f:
        cPickle.dump((neuron_points, neuron_angles), f)
    neuron_quaternions, neuron_distances = get_quaternion_distance(neuron_points, neuron_angles)
    with open(eye_perspectives_file) as f:
        objects, target_indices, target_points, eye_points, eye_angles = cPickle.load(f)
    for object, object_eye_points, object_eye_angles in zip(objects, eye_points, eye_angles):
        print('Processing ' + object)
        start_time = time.time()
        eye_quaternions, eye_distances = get_quaternion_distance(object_eye_points, object_eye_angles)
        metrics_file = join(metrics_dir, object[:-4] + '-metrics.pkl')
        with open(metrics_file) as f:
            gripper_points, gripper_angles, target_indices, target_points, collisions, free_smoothed, coll_smoothed = cPickle.load(f)
        gripper_quaternions, gripper_distances = get_quaternion_distance(gripper_points, gripper_angles)
        # note that for each object, gripper configs are the same relative to each target point
        #TODO: do we want coll_smoothed instead / as well?
        metrics = free_smoothed
        # interpolate relative to each eye point
        neuron_metrics_for_object = []
        for target_index, target_metrics in zip(target_indices, metrics):
            print(' target ' + str(target_index))
            neuron_metrics_for_target = []
            for eye_quaternion in eye_quaternions:
                # gripper orientations expressed relative to this eye orientation
                rel_quaternions = []
                for gripper_quaternion in gripper_quaternions:
                    rel_quaternions.append(difference_between_quaternions(eye_quaternion, gripper_quaternion))
                rel_quaternions = np.array(rel_quaternions)
                #interpolate ...
                neuron_metrics = []
                for neuron_quaternion, neuron_distance in zip(neuron_quaternions, neuron_distances):
                    interpolated = interpolate(neuron_quaternion, neuron_distance, rel_quaternions, gripper_distances, target_metrics,
                                               sigma_d=.02, sigma_a=(16*np.pi/180))
                    neuron_metrics.append(interpolated)
                neuron_metrics_for_target.append(neuron_metrics)
            neuron_metrics_for_object.append(neuron_metrics_for_target)
        neuron_metrics_for_object = np.array(neuron_metrics_for_object)
        result_file = join(result_dir, object[:-4] + '-neuron.pkl')
        with open(result_file, 'wb') as f:
            cPickle.dump((target_indices, target_points, object_eye_points, object_eye_angles, neuron_metrics_for_object), f)
        print(' ' + str(time.time() - start_time) + 's')
def make_XY():
    """Placeholder for assembling training data.

    :return: (eye_image_files, metrics) -- currently two fresh empty lists
    """
    return [], []
if __name__ == '__main__':
    # Entry point. The commented lines below are kept as a record of past
    # experiment invocations; only the Murata eye-perspective rendering is
    # currently active.
    # check_rotation_matrix(scatter=True)
    # check_depth_from_random_perspective()
    # plot_random_samples()
    # check_find_vertical()
    # check_target_points()
    # check_metrics()
    # make_grip_perspective_depths('../../grasp-conv/data/obj_tmp/',
    #                              '../../grasp-conv/data/perspectives/',
    #                              '../../grasp-conv/data/obj-points.csv')
    # make_grip_perspective_depths('../../grasp-conv/data/obj_files/',
    #                              '/Volumes/TrainingData/grasp-conv/data/perspectives/',
    #                              '../../grasp-conv/data/obj-points.csv')
    # make_eye_perspective_depths('../../grasp-conv/data/obj_tmp/',
    #                             '../../grasp-conv/data/eye-tmp/',
    #                             '../../grasp-conv/data/obj-points.csv')
    make_eye_perspective_depths('../../grasp-conv/data/obj_files_murata/',
                                '../../grasp-conv/data/eye-perspectives-murata/',
                                '../../grasp-conv/data/obj-points-murata.csv',
                                n=1)
    # make_eye_perspective_depths('../../grasp-conv/data/obj_files/',
    #                             '/Volumes/TrainingData/grasp-conv/data/eye-perspectives/',
    #                             '../../grasp-conv/data/obj-points.csv')
    # merge_eye_perspectives('/Volumes/TrainingData/grasp-conv/data/eye-perspectives/')
    # with open('/Volumes/TrainingData/grasp-conv/data/eye-perspectives/eye-perspectives.pkl','rb') as f:
    #     objects, target_indices, target_points, eye_points, eye_angles = cPickle.load(f)
    # print(objects)
    # print(target_indices)
    # print(target_points)
    # print(eye_angles)
    # make_relative_metrics('/Volumes/TrainingData/grasp-conv/data/eye-perspectives/eye-perspectives.pkl',
    #                       '/Volumes/TrainingData/grasp-conv/data/metrics/',
    #                       '/Volumes/TrainingData/grasp-conv/data/relative/')
    # export_neuron_perspectives()
    # export_eye_perspectives('/Volumes/TrainingData/grasp-conv/data/eye-perspectives/eye-perspectives.pkl')
    # import scipy
    # image = scipy.misc.imread('../../grasp-conv/data/eye-tmp/1_Coffeecup_final-03-Mar-2016-18-50-40-0-7.png')
    # plt.imshow(image)
    # plt.show()
    # with open('../../grasp-conv/data/eye-tmp/eye-perspectives.pkl') as f:
    #     objects, target_indices, target_points, eye_points, eye_angles = cPickle.load(f)
    # print(objects)
    # print(target_indices)
    # print(target_points)
    # print(np.array(eye_points))
    # print(np.array(eye_angles))
    # with open('spatula-perspectives.pkl', 'rb') as f:
    #     gripper_points, gripper_angles, target_indices, target_points, perspectives = cPickle.load(f)
    # make_metrics('../../grasp-conv/data/perspectives/', '../../grasp-conv/data/metrics/')
    # make_relative_metrics('../../grasp-conv/data/eye-tmp/eye-perspectives.pkl',
    #                       '../../grasp-conv/data/metrics/',
    #                       '../../grasp-conv/data/relative/')
    # checking files look OK ...
    # with open('../../grasp-conv/data/relative/neuron-points.pkl', 'rb') as f:
    #     neuron_points, neuron_angles = cPickle.load(f)
    # print(neuron_angles.shape)
    # with open('../../grasp-conv/data/relative/1_Coffeecup_final-03-Mar-2016-18-50-40-neuron.pkl', 'rb') as f:
    #     target_indices, target_points, object_eye_points, object_eye_angles, neuron_metrics_for_object = cPickle.load(f)
    # print(neuron_metrics_for_object.shape)
    # print(np.min(neuron_metrics_for_object))
    # print(np.max(neuron_metrics_for_object))
    # print(np.std(neuron_metrics_for_object))
    # make_metrics('/Volumes/TrainingData/grasp-conv/data/perspectives/',
    #              '/Volumes/TrainingData/grasp-conv/data/metrics/')
    # process_eye_directory('../../grasp-conv/data/obj_tmp/', '../../grasp-conv/data/eye-perspectives-tmp/', 100)
    # process_directory('../data/obj_files/', '../data/perspectives/', 10)
    # process_directory('../../grasp-conv/data/obj_tmp/', '../../grasp-conv/data/perspectives/', 5000)
    # check_maps('../../grasp-conv/data/perspectives/')
| mit |
rosspalmer/DataTools | depr/0.2.5/dtools/holder.py | 1 | 2726 |
from .formatting import format
from .source import data_source
import pandas as pd
class data_holder(object):
    """Manage multiple named data_source objects plus a current working
    (x, y) pair, with partitioning, formatting, and named-subset support."""

    def __init__(self):
        # name -> data_source
        self.ds = {}
        # current working feature/label frames
        self.x = None
        self.y = None
        self.current_ds = None
        self.current_index = []
        # row indices of the current source not yet consumed by partition()
        self.remain_index = []
        # name -> subset specification dict (see create_sub)
        self.subs = {}
        self.default_sub = None

    def load_csv(self, name, filepath, y_col=None, x_col=None,
                 id_col=None, sep=','):
        """Load a CSV into a new data_source called `name`; y_col selects the
        label column(s), x_col optionally restricts feature columns, and
        id_col becomes the index."""
        df = pd.read_csv(filepath, sep=sep)
        if id_col is not None:
            df = df.set_index(id_col)
        if y_col is not None:
            x = df.drop(y_col, axis=1)
            y = df[y_col]
        else:
            x = df
            y = None
        if x_col is not None:
            x = x[x_col]
        self.load_ds(name, x, y)

    def load_ds(self, name, x, y=None):
        """Register x (and optional y) as data_source `name`; a missing y is
        stored as an empty DataFrame."""
        ds = data_source()
        ds.x = x
        if y is not None:
            ds.y = y
        else:
            ds.y = pd.DataFrame()
        self.ds[name] = ds

    def partition(self, ratio=1.0):
        """Draw a partition of the remaining rows of the current source into
        self.x/self.y, then apply the default subset if one is set."""
        self.x, self.y, self.remain_index = \
            self.ds[self.current_ds].partition(ratio, self.remain_index)
        self.current_index = self.x.index
        if self.default_sub is not None:
            self.use_sub(self.default_sub)

    def reset_ds(self):
        # restore the working frames to the full current source
        self.x = self.ds[self.current_ds].x
        self.y = self.ds[self.current_ds].y

    def update_ds(self):
        # write the working frames back into the current source
        self.ds[self.current_ds].x = self.x
        self.ds[self.current_ds].y = self.y

    def use_ds(self, name, default_sub=None, new=False):
        """Make `name` the active data_source; new=True resets the
        remaining-row pool to the source's full index."""
        self.current_ds = name
        if new:
            self.remain_index = self.ds[name].x.index.tolist()
        self.current_index = self.ds[name].x.index.tolist()
        self.x = self.ds[name].x.loc[self.remain_index]
        # NOTE(review): load_ds stores an empty DataFrame rather than None,
        # so this branch may never trigger -- confirm.
        if self.ds[name].y is None:
            self.y = None
        else:
            self.y = self.ds[name].y.loc[self.remain_index]
        self.default_sub = default_sub

    def format(self, mode):
        # delegate to formatting.format (shadows the builtin 'format')
        self.x, self.y = format(mode, self.x, self.y)

    def create_sub(self, sub_name, col_filter=None, row_filter=None,
                   col_dummy=None, col_normalize=None):
        """Define a named subset specification to be applied by use_sub."""
        self.subs[sub_name] = {'col_filter':col_filter, 'row_filter':row_filter,
                               'col_dummy':col_dummy, 'col_normalize':col_normalize}

    def use_sub(self, sub_name, output_only=False):
        """Apply a stored subset spec to the current source, restricted to the
        current partition; returns (x, y) when output_only is True, otherwise
        installs the result as the working frames."""
        x, y = self.ds[self.current_ds].subset(self.subs[sub_name])
        x = x.loc[self.current_index]
        y = y.loc[self.current_index]
        if output_only:
            return x, y
        if not output_only:
            self.x = x
            self.y = y
| mit |
rbalda/neural_ocr | env/lib/python2.7/site-packages/scipy/stats/_discrete_distns.py | 6 | 21463 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from scipy.misc import logsumexp
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
    """A binomial discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `binom` is::
        binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
    for ``k`` in ``{0, 1,..., n}``.
    `binom` takes ``n`` and ``p`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, n, p):
        return self._random_state.binomial(n, p, self._size)
    def _argcheck(self, n, p):
        # side effect: truncate the support's upper bound to n for the
        # generic rv_discrete machinery
        self.b = n
        return (n >= 0) & (p >= 0) & (p <= 1)
    def _logpmf(self, x, n, p):
        k = floor(x)
        # log of the binomial coefficient via gammaln
        combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
        # xlogy/xlog1py return 0 (not nan) at the k == 0 / k == n edges
        return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
    def _pmf(self, x, n, p):
        return exp(self._logpmf(x, n, p))
    def _cdf(self, x, n, p):
        k = floor(x)
        vals = special.bdtr(k, n, p)
        return vals
    def _sf(self, x, n, p):
        k = floor(x)
        return special.bdtrc(k, n, p)
    def _ppf(self, q, n, p):
        # invert the cdf with bdtrik, then step back one where it overshoots
        vals = ceil(special.bdtrik(q, n, p))
        vals1 = np.maximum(vals - 1, 0)
        temp = special.bdtr(vals1, n, p)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, n, p, moments='mv'):
        q = 1.0 - p
        mu = n * p
        var = n * p * q
        # skew/kurtosis are only computed when requested via `moments`
        g1, g2 = None, None
        if 's' in moments:
            g1 = (q - p) / sqrt(var)
        if 'k' in moments:
            g2 = (1.0 - 6*p*q) / var
        return mu, var, g1, g2
    def _entropy(self, n, p):
        # exact entropy by summing entr(pmf) over the full support 0..n
        k = np.r_[0:n + 1]
        vals = self._pmf(k, n, p)
        return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
class bernoulli_gen(binom_gen):
    """A Bernoulli discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `bernoulli` is::
        bernoulli.pmf(k) = 1-p if k = 0
                         = p if k = 1
    for ``k`` in ``{0, 1}``.
    `bernoulli` takes ``p`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    # Every method delegates to the binomial distribution with n fixed to 1.
    def _rvs(self, p):
        return binom_gen._rvs(self, 1, p)
    def _argcheck(self, p):
        return (p >= 0) & (p <= 1)
    def _logpmf(self, x, p):
        return binom._logpmf(x, 1, p)
    def _pmf(self, x, p):
        return binom._pmf(x, 1, p)
    def _cdf(self, x, p):
        return binom._cdf(x, 1, p)
    def _sf(self, x, p):
        return binom._sf(x, 1, p)
    def _ppf(self, q, p):
        return binom._ppf(q, 1, p)
    def _stats(self, p):
        return binom._stats(1, p)
    def _entropy(self, p):
        # closed form: H = -p log p - (1-p) log(1-p)
        return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
    """A negative binomial discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `nbinom` is::
        nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
    for ``k >= 0``.
    `nbinom` takes ``n`` and ``p`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, n, p):
        return self._random_state.negative_binomial(n, p, self._size)
    def _argcheck(self, n, p):
        # n may be any positive real (not only an integer)
        return (n > 0) & (p >= 0) & (p <= 1)
    def _pmf(self, x, n, p):
        return exp(self._logpmf(x, n, p))
    def _logpmf(self, x, n, p):
        # log of the generalized binomial coefficient via gammaln;
        # xlog1py keeps the p == 1 edge finite
        coeff = gamln(n+x) - gamln(x+1) - gamln(n)
        return coeff + n*log(p) + special.xlog1py(x, -p)
    def _cdf(self, x, n, p):
        # cdf expressed through the regularized incomplete beta function
        k = floor(x)
        return special.betainc(n, k+1, p)
    def _sf_skip(self, x, n, p):
        # skip because special.nbdtrc doesn't work for 0<n<1
        k = floor(x)
        return special.nbdtrc(k, n, p)
    def _ppf(self, q, n, p):
        # invert the cdf with nbdtrik, then step back one where it overshoots
        vals = ceil(special.nbdtrik(q, n, p))
        vals1 = (vals-1).clip(0.0, np.inf)
        temp = self._cdf(vals1, n, p)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, n, p):
        Q = 1.0 / p
        P = Q - 1.0
        mu = n*P
        var = n*P*Q
        g1 = (Q+P)/sqrt(n*P*Q)
        g2 = (1.0 + 6*P*Q) / (n*P*Q)
        return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
    """A geometric discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `geom` is::
        geom.pmf(k) = (1-p)**(k-1)*p
    for ``k >= 1``.
    `geom` takes ``p`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, p):
        return self._random_state.geometric(p, size=self._size)
    def _argcheck(self, p):
        return (p <= 1) & (p >= 0)
    def _pmf(self, k, p):
        return np.power(1-p, k-1) * p
    def _logpmf(self, k, p):
        # xlog1py returns 0 for k == 1 even when p == 1
        return special.xlog1py(k - 1, -p) + log(p)
    def _cdf(self, x, p):
        # cdf(k) = 1 - (1-p)**k, written with log1p/expm1 for accuracy
        k = floor(x)
        return -expm1(log1p(-p)*k)
    def _sf(self, x, p):
        return np.exp(self._logsf(x, p))
    def _logsf(self, x, p):
        k = floor(x)
        return k*log1p(-p)
    def _ppf(self, q, p):
        # BUG FIX: previously computed as log(1.0-q)/log(1-p), which loses
        # precision when q or p is close to 0; log1p(-q)/log1p(-p) is the
        # numerically accurate equivalent.
        vals = ceil(log1p(-q) / log1p(-p))
        temp = self._cdf(vals-1, p)
        return np.where((temp >= q) & (vals > 0), vals-1, vals)
    def _stats(self, p):
        mu = 1.0/p
        qr = 1.0-p
        var = qr / p / p
        g1 = (2.0-p) / sqrt(qr)
        g2 = np.polyval([1, -6, 6], p)/(1.0-p)
        return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
    """A hypergeometric discrete random variable.
    The hypergeometric distribution models drawing objects from a bin.
    M is the total number of objects, n is total number of Type I objects.
    The random variate represents the number of Type I objects in N drawn
    without replacement from the total population.
    %(before_notes)s
    Notes
    -----
    The probability mass function is defined as::
        pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
        for max(0, N - (M-n)) <= k <= min(n, N)
    %(after_notes)s
    Examples
    --------
    >>> from scipy.stats import hypergeom
    >>> import matplotlib.pyplot as plt
    Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
    we want to know the probability of finding a given number of dogs if we
    choose at random 12 of the 20 animals, we can initialize a frozen
    distribution and plot the probability mass function:
    >>> [M, n, N] = [20, 7, 12]
    >>> rv = hypergeom(M, n, N)
    >>> x = np.arange(0, n+1)
    >>> pmf_dogs = rv.pmf(x)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, pmf_dogs, 'bo')
    >>> ax.vlines(x, 0, pmf_dogs, lw=2)
    >>> ax.set_xlabel('# of dogs in our group of chosen animals')
    >>> ax.set_ylabel('hypergeom PMF')
    >>> plt.show()
    Instead of using a frozen distribution we can also use `hypergeom`
    methods directly. To for example obtain the cumulative distribution
    function, use:
    >>> prb = hypergeom.cdf(x, M, n, N)
    And to generate random numbers:
    >>> R = hypergeom.rvs(M, n, N, size=10)
    """
    def _rvs(self, M, n, N):
        return self._random_state.hypergeometric(n, M-n, N, size=self._size)
    def _argcheck(self, M, n, N):
        cond = (M > 0) & (n >= 0) & (N >= 0)
        cond &= (n <= M) & (N <= M)
        # side effect: set the support bounds for the generic machinery
        self.a = max(N-(M-n), 0)
        self.b = min(n, N)
        return cond
    def _logpmf(self, k, M, n, N):
        # log of choose(good, k)*choose(bad, N-k)/choose(tot, N), expanded
        # into gammaln terms
        tot, good = M, n
        bad = tot - good
        return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
            - gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
            + gamln(N+1)
    def _pmf(self, k, M, n, N):
        # same as the following but numerically more precise
        # return comb(good, k) * comb(bad, N-k) / comb(tot, N)
        return exp(self._logpmf(k, M, n, N))
    def _stats(self, M, n, N):
        # tot, good, sample_size = M, n, N
        # "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
        M, n, N = 1.*M, 1.*n, 1.*N
        m = M - n
        p = n/M
        mu = N*p
        var = m*n*N*(M - N)*1.0/(M*M*(M-1))
        g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
        g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
        g2 *= (M-1)*M*M
        g2 += 6.*n*N*(M-N)*m*(5.*M-6)
        g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
        return mu, var, g1, g2
    def _entropy(self, M, n, N):
        # exact entropy by summing over the whole support
        k = np.r_[N - (M - n):min(n, N) + 1]
        vals = self.pmf(k, M, n, N)
        return np.sum(entr(vals), axis=0)
    def _sf(self, k, M, n, N):
        """More precise calculation, 1 - cdf doesn't cut it."""
        # This for loop is needed because `k` can be an array. If that's the
        # case, the sf() method makes M, n and N arrays of the same shape. We
        # therefore unpack all inputs args, so we can do the manual
        # integration.
        res = []
        for quant, tot, good, draw in zip(k, M, n, N):
            # Manual integration over probability mass function. More accurate
            # than integrate.quad.
            k2 = np.arange(quant + 1, draw + 1)
            res.append(np.sum(self._pmf(k2, tot, good, draw)))
        return np.asarray(res)
    def _logsf(self, k, M, n, N):
        """
        More precise calculation than log(sf)
        """
        res = []
        for quant, tot, good, draw in zip(k, M, n, N):
            # Integration over probability mass function using logsumexp
            k2 = np.arange(quant + 1, draw + 1)
            res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
        return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
    """A Logarithmic (Log-Series, Series) discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `logser` is::
        logser.pmf(k) = - p**k / (k*log(1-p))
    for ``k >= 1``.
    `logser` takes ``p`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, p):
        # looks wrong for p>0.5, too few k=1
        # trying to use generic is worse, no k=1 at all
        return self._random_state.logseries(p, size=self._size)
    def _argcheck(self, p):
        return (p > 0) & (p < 1)
    def _pmf(self, k, p):
        return -np.power(p, k) * 1.0 / k / log(1 - p)
    def _stats(self, p):
        # central moments (var, mu3, mu4) built from the raw moments mukp
        r = log(1 - p)
        mu = p / (p - 1.0) / r
        mu2p = -p / r / (p - 1.0)**2
        var = mu2p - mu*mu
        mu3p = -p / r * (1.0+p) / (1.0 - p)**3
        mu3 = mu3p - 3*mu*mu2p + 2*mu**3
        g1 = mu3 / np.power(var, 1.5)
        mu4p = -p / r * (
            1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
        mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
        g2 = mu4 / var**2 - 3.0
        return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
    """A Poisson discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `poisson` is::
        poisson.pmf(k) = exp(-mu) * mu**k / k!
    for ``k >= 0``.
    `poisson` takes ``mu`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    # Override rv_discrete._argcheck to allow mu=0.
    def _argcheck(self, mu):
        return mu >= 0
    def _rvs(self, mu):
        return self._random_state.poisson(mu, self._size)
    def _logpmf(self, k, mu):
        # xlogy returns 0 (not nan) for k == 0 with mu == 0
        Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
        return Pk
    def _pmf(self, k, mu):
        return exp(self._logpmf(k, mu))
    def _cdf(self, x, mu):
        k = floor(x)
        return special.pdtr(k, mu)
    def _sf(self, x, mu):
        k = floor(x)
        return special.pdtrc(k, mu)
    def _ppf(self, q, mu):
        # invert the cdf with pdtrik, then step back one where it overshoots
        vals = ceil(special.pdtrik(q, mu))
        vals1 = np.maximum(vals - 1, 0)
        temp = special.pdtr(vals1, mu)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, mu):
        var = mu
        # guard the mu == 0 case: skew/kurtosis diverge there
        tmp = np.asarray(mu)
        mu_nonzero = tmp > 0
        g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
        g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
        return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
class planck_gen(rv_discrete):
    """A Planck discrete exponential random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `planck` is::
        planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
    for ``k*lambda_ >= 0``.
    `planck` takes ``lambda_`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, lambda_):
        # side effect: the support is [0, inf) for positive lambda_ and
        # (-inf, 0] for negative lambda_; lambda_ == 0 is invalid
        if (lambda_ > 0):
            self.a = 0
            self.b = np.inf
            return 1
        elif (lambda_ < 0):
            self.a = -np.inf
            self.b = 0
            return 1
        else:
            return 0
    def _pmf(self, k, lambda_):
        fact = (1-exp(-lambda_))
        return fact*exp(-lambda_*k)
    def _cdf(self, x, lambda_):
        k = floor(x)
        return 1-exp(-lambda_*(k+1))
    def _ppf(self, q, lambda_):
        # closed-form inverse cdf, then step back one where it overshoots
        vals = ceil(-1.0/lambda_ * log1p(-q)-1)
        vals1 = (vals-1).clip(self.a, np.inf)
        temp = self._cdf(vals1, lambda_)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, lambda_):
        mu = 1/(exp(lambda_)-1)
        var = exp(-lambda_)/(expm1(-lambda_))**2
        g1 = 2*cosh(lambda_/2.0)
        g2 = 4+2*cosh(lambda_)
        return mu, var, g1, g2
    def _entropy(self, lambda_):
        l = lambda_
        C = (1-exp(-l))
        return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
    """A Boltzmann (Truncated Discrete Exponential) random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `boltzmann` is::
        boltzmann.pmf(k) = (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N))
    for ``k = 0,..., N-1``.
    `boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _pmf(self, k, lambda_, N):
        # planck pmf renormalized over the truncated support 0..N-1
        fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
        return fact*exp(-lambda_*k)
    def _cdf(self, x, lambda_, N):
        k = floor(x)
        return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
    def _ppf(self, q, lambda_, N):
        # map q back to the untruncated scale, invert, then step back one
        # where it overshoots
        qnew = q*(1-exp(-lambda_*N))
        vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
        vals1 = (vals-1).clip(0.0, np.inf)
        temp = self._cdf(vals1, lambda_, N)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, lambda_, N):
        z = exp(-lambda_)
        zN = exp(-lambda_*N)
        mu = z/(1.0-z)-N*zN/(1-zN)
        var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
        trm = (1-zN)/(1-z)
        trm2 = (z*trm**2 - N*N*zN)
        g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
        g1 = g1 / trm2**(1.5)
        g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
        g2 = g2 / trm2 / trm2
        return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
        longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
    """A uniform discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `randint` is::
        randint.pmf(k) = 1./(high - low)
    for ``k = low, ..., high - 1``.
    `randint` takes ``low`` and ``high`` as shape parameters.
    Note the difference to the numpy ``random_integers`` which
    returns integers on a *closed* interval ``[low, high]``.
    %(after_notes)s
    %(example)s
    """
    def _argcheck(self, low, high):
        # side effect: set the support bounds; high is exclusive
        self.a = low
        self.b = high - 1
        return (high > low)
    def _pmf(self, k, low, high):
        p = np.ones_like(k) / (high - low)
        return np.where((k >= low) & (k < high), p, 0.)
    def _cdf(self, x, low, high):
        k = floor(x)
        return (k - low + 1.) / (high - low)
    def _ppf(self, q, low, high):
        # closed-form inverse cdf, then step back one where it overshoots
        vals = ceil(q * (high - low) + low) - 1
        vals1 = (vals - 1).clip(low, high)
        temp = self._cdf(vals1, low, high)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, low, high):
        m2, m1 = np.asarray(high), np.asarray(low)
        mu = (m2 + m1 - 1.0) / 2
        d = m2 - m1
        var = (d*d - 1) / 12.0
        g1 = 0.0
        g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
        return mu, var, g1, g2
    def _rvs(self, low, high=None):
        """An array of *size* random integers >= ``low`` and < ``high``.
        If ``high`` is ``None``, then range is >=0 and < low
        """
        return self._random_state.randint(low, high, self._size)
    def _entropy(self, low, high):
        return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
                                               '(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
    """A Zipf discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `zipf` is::
        zipf.pmf(k, a) = 1/(zeta(a) * k**a)
    for ``k >= 1``.
    `zipf` takes ``a`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, a):
        return self._random_state.zipf(a, size=self._size)
    def _argcheck(self, a):
        return a > 1
    def _pmf(self, k, a):
        # Hurwitz zeta with q=1 is the Riemann zeta normalizer
        Pk = 1.0 / special.zeta(a, 1) / k**a
        return Pk
    def _munp(self, n, a):
        # the n-th moment only converges for a > n + 1; otherwise inf
        return _lazywhere(
            a > n + 1, (a, n),
            lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
            np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
    """A Laplacian discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `dlaplace` is::
        dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
    for ``a > 0``.
    `dlaplace` takes ``a`` as shape parameter.
    %(after_notes)s
    %(example)s
    """
    def _pmf(self, k, a):
        return tanh(a/2.0) * exp(-a * abs(k))
    def _cdf(self, x, a):
        # piecewise closed form: f for k >= 0, f2 for k < 0
        k = floor(x)
        f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
        f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
        return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
    def _ppf(self, q, a):
        # invert each cdf branch; the np.where condition selects the branch
        const = 1 + exp(a)
        vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
                             -log((1-q) * const) / a))
        vals1 = vals - 1
        return np.where(self._cdf(vals1, a) >= q, vals1, vals)
    def _stats(self, a):
        # symmetric around 0, so mean and skew are exactly 0
        ea = exp(a)
        mu2 = 2.*ea/(ea-1.)**2
        mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
        return 0., mu2, 0., mu4/mu2**2 - 3.
    def _entropy(self, a):
        return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
                        name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
    """A Skellam discrete random variable.
    %(before_notes)s
    Notes
    -----
    Probability distribution of the difference of two correlated or
    uncorrelated Poisson random variables.
    Let k1 and k2 be two Poisson-distributed r.v. with expected values
    lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
    parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
    ``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
    coefficient between k1 and k2. If the two Poisson-distributed r.v.
    are independent then ``rho = 0``.
    Parameters mu1 and mu2 must be strictly positive.
    For details see: http://en.wikipedia.org/wiki/Skellam_distribution
    `skellam` takes ``mu1`` and ``mu2`` as shape parameters.
    %(after_notes)s
    %(example)s
    """
    def _rvs(self, mu1, mu2):
        # difference of two independent Poisson draws
        n = self._size
        return (self._random_state.poisson(mu1, n) -
                self._random_state.poisson(mu2, n))
    def _pmf(self, x, mu1, mu2):
        # pmf/cdf are expressed through the non-central chi-square
        # (_ncx2_pdf/_ncx2_cdf), with separate branches for x < 0 and x >= 0
        px = np.where(x < 0,
                      _ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
                      _ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
        # ncx2.pdf() returns nan's for extremely low probabilities
        return px
    def _cdf(self, x, mu1, mu2):
        x = floor(x)
        px = np.where(x < 0,
                      _ncx2_cdf(2*mu2, -2*x, 2*mu1),
                      1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
        return px
    def _stats(self, mu1, mu2):
        mean = mu1 - mu2
        var = mu1 + mu2
        g1 = mean / sqrt((var)**3)
        g2 = 1 / var
        return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
# Collect names of classes and objects in this module.
pairs = list(globals().items())
# get_distribution_names splits them into instance names (e.g. 'binom') and
# generator-class names (e.g. 'binom_gen'); both lists form the public API.
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| mit |
saiphcita/crowdsource-platform | fixtures/createJson.py | 16 | 2463 | __author__ = 'Megha'
# Script to transfer csv containing data about various models to json
# Input csv file constituting of the model data
# Output json file representing the csv data as json object
# Assumes model name to be first line
# Field names of the model on the second line
# Data seperated by __DELIM__
# Example:
# L01 ModelName: registrationmodel
# L02 FieldNames: user,activation_key,created_timestamp,last_updated
# L03 Data: 1,qwer,2015-05-01T00:17:40.085Z,2015-05-01T00:17:40.085Z
# L04 Data: 2,assd,2015-05-01T00:17:40.085Z,2015-05-01T00:17:40.085Z
import numpy as np
import pandas as pd
import json as json
__MODULE_NAME__ = 7  # number of lines per model record in the input CSV
__INPUT_FILE__ = 'meghaWorkerData.csv'
__OUTPUT_FILE__ = 'meghaWorkerData.json'
__NEWLINE__ = '\n'
__KEY1__ = 0  # NOTE: unused
__KEY2__ = 0  # NOTE: unused
__DELIM__ = ','
__APPEND__ = 'crowdsourcing.'
__KEY_MODEL__ = 'model'
__KEY_FIELDS__ = 'fields'
__KEY_PK__ = 'pk'
def create_dict(input_dict, module, data_collection):
    """Convert a ``{pk: fields}`` mapping into Django-fixture records.

    Each record is appended to *data_collection*, which is also returned.
    """
    for pk, fields in input_dict.items():
        data_collection.append({
            __KEY_FIELDS__: fields,
            __KEY_PK__: pk,
            __KEY_MODEL__: __APPEND__ + module,
        })
    return data_collection
def create_data_json(__FILE__):
    """Parse the model-record CSV named by ``__FILE__`` and write it out
    as a Django-style JSON fixture (``__OUTPUT_FILE__``).

    Each record spans ``__MODULE_NAME__`` lines: the model name, the
    field names, then the data rows.
    """
    # BUG FIX: previously the ``__FILE__`` argument was ignored and the
    # module-level __INPUT_FILE__ was always opened.
    in_fp = open(__FILE__, 'rb')
    file_lines = in_fp.readlines()
    in_fp.close()
    data_collection = []
    for line_no in range(0, len(file_lines)):
        if line_no % __MODULE_NAME__ == 0:
            # line 0 of each record: model name; line 1: column names;
            # lines 2..__MODULE_NAME__-1: data rows (previously hard-coded
            # to exactly five instances)
            columns = file_lines[line_no + 1].strip(__NEWLINE__).split(__DELIM__)
            instances = [
                file_lines[line_no + offset].strip(__NEWLINE__).split(__DELIM__)
                for offset in range(2, __MODULE_NAME__)
            ]
            df = pd.DataFrame(np.array(instances), columns=columns)
            create_dict(df.transpose().to_dict(),
                        file_lines[line_no].strip(__NEWLINE__),
                        data_collection)
            del df
    print(data_collection)
    out_fp = open(__OUTPUT_FILE__, 'wb')
    out_fp.write(json.dumps(data_collection, indent=2))
    out_fp.close()
if __name__ == '__main__':
    create_data_json(__INPUT_FILE__)
xuewei4d/scikit-learn | sklearn/linear_model/tests/test_sag.py | 8 | 32096 | # Authors: Danny Sullivan <dbsullivan23@gmail.com>
# Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import math
import pytest
import numpy as np
import scipy.sparse as sp
from scipy.special import logsumexp
from sklearn.linear_model._sag import get_auto_step_size
from sklearn.linear_model._sag_fast import _multinomial_grad_loss_all_samples
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.linear_model._base import make_dataset
from sklearn.linear_model._logistic import _multinomial_loss_grad
from sklearn.utils.extmath import row_norms
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_raise_message
from sklearn.utils import compute_class_weight
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.datasets import make_blobs, load_iris, make_classification
from sklearn.base import clone
iris = load_iris()
# used by the SAG classification tests: derivative of the logistic loss
def log_dloss(p, y):
    """Derivative of the logistic loss at margin z = p * y."""
    z = p * y
    # clip large |z| to avoid overflow; the approximations are exact to
    # double precision
    if z > 18.0:
        return -y * math.exp(-z)
    if z < -18.0:
        return -y
    return -y / (1.0 + math.exp(z))
def log_loss(p, y):
    """Mean logistic loss, log(1 + exp(-y * p)), averaged over samples."""
    return (np.log(1. + np.exp(-y * p))).mean()
# used by the SAG regression tests: derivative of the squared loss
def squared_dloss(p, y):
    """Derivative of 0.5 * (p - y)**2 with respect to p."""
    return -(y - p)
def squared_loss(p, y):
    """Mean of the squared-error loss 0.5 * (p - y)**2."""
    diff = p - y
    return np.mean(0.5 * diff * diff)
# measures the penalized objective for a fitted weight vector
def get_pobj(w, alpha, myX, myy, loss):
    """Penalized objective: loss(X @ w, y) + 0.5 * alpha * ||w||^2."""
    coef = np.ravel(w)
    objective = loss(myX.dot(coef), myy)
    return objective + 0.5 * alpha * coef.dot(coef)
def sag(X, y, step_size, alpha, n_iter=1, dloss=None, sparse=False,
        sample_weight=None, fit_intercept=True, saga=False):
    """Reference (dense-memory) implementation of SAG/SAGA.

    Keeps the full per-sample gradient table in memory and is used as
    ground truth for sklearn's Cython implementation in the tests below.
    Returns the fitted ``(weights, intercept)``.
    """
    n_samples, n_features = X.shape[0], X.shape[1]
    weights = np.zeros(X.shape[1])
    # running sum of the stored gradients, and one stored gradient per sample
    sum_gradient = np.zeros(X.shape[1])
    gradient_memory = np.zeros((n_samples, n_features))
    intercept = 0.0
    intercept_sum_gradient = 0.0
    intercept_gradient_memory = np.zeros(n_samples)
    rng = np.random.RandomState(77)
    decay = 1.0
    seen = set()
    # sparse data has a fixed decay of .01
    if sparse:
        decay = .01
    for epoch in range(n_iter):
        for k in range(n_samples):
            # sample an index uniformly at random (with replacement)
            idx = int(rng.rand(1) * n_samples)
            # idx = k
            entry = X[idx]
            seen.add(idx)
            p = np.dot(entry, weights) + intercept
            gradient = dloss(p, y[idx])
            if sample_weight is not None:
                gradient *= sample_weight[idx]
            update = entry * gradient + alpha * weights
            gradient_correction = update - gradient_memory[idx]
            sum_gradient += gradient_correction
            gradient_memory[idx] = update
            if saga:
                # SAGA additionally applies the (unbiased) correction step
                weights -= (gradient_correction *
                            step_size * (1 - 1. / len(seen)))
            if fit_intercept:
                gradient_correction = (gradient -
                                       intercept_gradient_memory[idx])
                intercept_gradient_memory[idx] = gradient
                intercept_sum_gradient += gradient_correction
                gradient_correction *= step_size * (1. - 1. / len(seen))
                if saga:
                    intercept -= (step_size * intercept_sum_gradient /
                                  len(seen) * decay) + gradient_correction
                else:
                    intercept -= (step_size * intercept_sum_gradient /
                                  len(seen) * decay)
            # average-gradient step, normalized by the samples seen so far
            weights -= step_size * sum_gradient / len(seen)
    return weights, intercept
def sag_sparse(X, y, step_size, alpha, n_iter=1,
               dloss=None, sample_weight=None, sparse=False,
               fit_intercept=True, saga=False, random_state=0):
    """Reference implementation of SAG/SAGA with lazy (just-in-time) updates.

    Mirrors the sparse-friendly variant: instead of updating every weight
    each step, it records cumulative step coefficients in ``c_sum`` and the
    last step each feature was touched in ``last_updated``, then applies
    the missed updates lazily. ``wscale`` folds the L2 shrinkage
    ``(1 - alpha * step_size)`` into a scalar so the weight vector itself
    stays untouched by regularization until rescaling.
    Returns the fitted ``(weights, intercept)``.
    """
    if step_size * alpha == 1.:
        raise ZeroDivisionError("Sparse sag does not handle the case "
                                "step_size * alpha == 1")
    n_samples, n_features = X.shape[0], X.shape[1]
    weights = np.zeros(n_features)
    sum_gradient = np.zeros(n_features)
    # per-feature counter of the last iteration whose update was applied
    last_updated = np.zeros(n_features, dtype=int)
    gradient_memory = np.zeros(n_samples)
    rng = check_random_state(random_state)
    intercept = 0.0
    intercept_sum_gradient = 0.0
    wscale = 1.0
    decay = 1.0
    seen = set()
    # cumulative step-size coefficients used to catch features up lazily
    c_sum = np.zeros(n_iter * n_samples)
    # sparse data has a fixed decay of .01
    if sparse:
        decay = .01
    counter = 0
    for epoch in range(n_iter):
        for k in range(n_samples):
            # idx = k
            idx = int(rng.rand(1) * n_samples)
            entry = X[idx]
            seen.add(idx)
            if counter >= 1:
                # catch up every feature on the updates it missed
                for j in range(n_features):
                    if last_updated[j] == 0:
                        weights[j] -= c_sum[counter - 1] * sum_gradient[j]
                    else:
                        weights[j] -= ((c_sum[counter - 1] -
                                        c_sum[last_updated[j] - 1]) *
                                       sum_gradient[j])
                    last_updated[j] = counter
            p = (wscale * np.dot(entry, weights)) + intercept
            gradient = dloss(p, y[idx])
            if sample_weight is not None:
                gradient *= sample_weight[idx]
            update = entry * gradient
            gradient_correction = update - (gradient_memory[idx] * entry)
            sum_gradient += gradient_correction
            if saga:
                # SAGA applies the correction immediately (de-scaled by wscale)
                for j in range(n_features):
                    weights[j] -= (gradient_correction[j] * step_size *
                                   (1 - 1. / len(seen)) / wscale)
            if fit_intercept:
                gradient_correction = gradient - gradient_memory[idx]
                intercept_sum_gradient += gradient_correction
                gradient_correction *= step_size * (1. - 1. / len(seen))
                if saga:
                    intercept -= ((step_size * intercept_sum_gradient /
                                   len(seen) * decay) +
                                  gradient_correction)
                else:
                    intercept -= (step_size * intercept_sum_gradient /
                                  len(seen) * decay)
            gradient_memory[idx] = gradient
            # fold the L2 shrinkage into the running weight scale
            wscale *= (1.0 - alpha * step_size)
            if counter == 0:
                c_sum[0] = step_size / (wscale * len(seen))
            else:
                c_sum[counter] = (c_sum[counter - 1] +
                                  step_size / (wscale * len(seen)))
            if counter >= 1 and wscale < 1e-9:
                # rescale before wscale underflows: apply all pending
                # updates, materialize the scale, and reset the ledger
                for j in range(n_features):
                    if last_updated[j] == 0:
                        weights[j] -= c_sum[counter] * sum_gradient[j]
                    else:
                        weights[j] -= ((c_sum[counter] -
                                        c_sum[last_updated[j] - 1]) *
                                       sum_gradient[j])
                    last_updated[j] = counter + 1
                c_sum[counter] = 0
                weights *= wscale
                wscale = 1.0
            counter += 1
    # final catch-up pass, then undo the deferred scaling
    for j in range(n_features):
        if last_updated[j] == 0:
            weights[j] -= c_sum[counter - 1] * sum_gradient[j]
        else:
            weights[j] -= ((c_sum[counter - 1] -
                            c_sum[last_updated[j] - 1]) *
                           sum_gradient[j])
    weights *= wscale
    return weights, intercept
def get_step_size(X, alpha, fit_intercept, classification=True):
    """Closed-form SAG step size: 4/L for the log loss, 1/L for squared loss,
    where L bounds the per-sample gradient Lipschitz constant."""
    squared_row_max = np.max(np.sum(X * X, axis=1)) + fit_intercept
    if classification:
        return 4.0 / (squared_row_max + 4.0 * alpha)
    return 1.0 / (squared_row_max + alpha)
def test_classifier_matching():
    """Check that LogisticRegression(solver='sag'/'saga') matches the
    pure-python reference implementations above to 9 decimals."""
    n_samples = 20
    X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
                      cluster_std=0.1)
    # the reference implementations expect labels in {-1, +1}
    y[y == 0] = -1
    alpha = 1.1
    fit_intercept = True
    step_size = get_step_size(X, alpha, fit_intercept)
    for solver in ['sag', 'saga']:
        if solver == 'sag':
            n_iter = 80
        else:
            # SAGA variance w.r.t. stream order is higher
            n_iter = 300
        clf = LogisticRegression(solver=solver, fit_intercept=fit_intercept,
                                 tol=1e-11, C=1. / alpha / n_samples,
                                 max_iter=n_iter, random_state=10,
                                 multi_class='ovr')
        clf.fit(X, y)
        weights, intercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
                                        dloss=log_dloss,
                                        fit_intercept=fit_intercept,
                                        saga=solver == 'saga')
        weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
                                   dloss=log_dloss,
                                   fit_intercept=fit_intercept,
                                   saga=solver == 'saga')
        weights = np.atleast_2d(weights)
        intercept = np.atleast_1d(intercept)
        weights2 = np.atleast_2d(weights2)
        intercept2 = np.atleast_1d(intercept2)
        assert_array_almost_equal(weights, clf.coef_, decimal=9)
        assert_array_almost_equal(intercept, clf.intercept_, decimal=9)
        assert_array_almost_equal(weights2, clf.coef_, decimal=9)
        assert_array_almost_equal(intercept2, clf.intercept_, decimal=9)
def test_regressor_matching():
    """Check that Ridge(solver='sag') matches both reference SAG
    implementations on a small noiseless regression problem."""
    n_samples = 10
    n_features = 5
    rng = np.random.RandomState(10)
    X = rng.normal(size=(n_samples, n_features))
    true_w = rng.normal(size=n_features)
    y = X.dot(true_w)
    alpha = 1.
    n_iter = 100
    fit_intercept = True
    step_size = get_step_size(X, alpha, fit_intercept, classification=False)
    # Ridge's alpha is not scaled by n_samples, hence alpha * n_samples here
    clf = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
                alpha=alpha * n_samples, max_iter=n_iter)
    clf.fit(X, y)
    weights1, intercept1 = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
                                      dloss=squared_dloss,
                                      fit_intercept=fit_intercept)
    weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
                               dloss=squared_dloss,
                               fit_intercept=fit_intercept)
    assert_allclose(weights1, clf.coef_)
    assert_allclose(intercept1, clf.intercept_)
    assert_allclose(weights2, clf.coef_)
    assert_allclose(intercept2, clf.intercept_)
@pytest.mark.filterwarnings('ignore:The max_iter was reached')
def test_sag_pobj_matches_logistic_regression():
    """tests if the sag pobj matches log reg"""
    n_samples = 100
    alpha = 1.0
    max_iter = 20
    X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
                      cluster_std=0.1)
    clf1 = LogisticRegression(solver='sag', fit_intercept=False, tol=.0000001,
                              C=1. / alpha / n_samples, max_iter=max_iter,
                              random_state=10, multi_class='ovr')
    clf2 = clone(clf1)
    # clf3 uses the default (lbfgs) solver as an independent reference
    clf3 = LogisticRegression(fit_intercept=False, tol=.0000001,
                              C=1. / alpha / n_samples, max_iter=max_iter,
                              random_state=10, multi_class='ovr')
    clf1.fit(X, y)
    # clf2 is fit on the sparse representation of the same data
    clf2.fit(sp.csr_matrix(X), y)
    clf3.fit(X, y)
    pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
    pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
    pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
    assert_array_almost_equal(pobj1, pobj2, decimal=4)
    assert_array_almost_equal(pobj2, pobj3, decimal=4)
    assert_array_almost_equal(pobj3, pobj1, decimal=4)
@pytest.mark.filterwarnings('ignore:The max_iter was reached')
def test_sag_pobj_matches_ridge_regression():
    """tests if the sag pobj matches ridge reg"""
    n_samples = 100
    n_features = 10
    alpha = 1.0
    n_iter = 100
    fit_intercept = False
    rng = np.random.RandomState(10)
    X = rng.normal(size=(n_samples, n_features))
    true_w = rng.normal(size=n_features)
    y = X.dot(true_w)
    clf1 = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
                 alpha=alpha, max_iter=n_iter, random_state=42)
    clf2 = clone(clf1)
    # clf3 uses the direct lsqr solver as an independent reference
    clf3 = Ridge(fit_intercept=fit_intercept, tol=.00001, solver='lsqr',
                 alpha=alpha, max_iter=n_iter, random_state=42)
    clf1.fit(X, y)
    # clf2 is fit on the sparse representation of the same data
    clf2.fit(sp.csr_matrix(X), y)
    clf3.fit(X, y)
    pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
    pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
    pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
    assert_array_almost_equal(pobj1, pobj2, decimal=4)
    assert_array_almost_equal(pobj1, pobj3, decimal=4)
    assert_array_almost_equal(pobj3, pobj2, decimal=4)
@pytest.mark.filterwarnings('ignore:The max_iter was reached')
def test_sag_regressor_computed_correctly():
    """tests if the sag regressor is computed correctly"""
    alpha = .1
    n_features = 10
    n_samples = 40
    max_iter = 100
    tol = .000001
    fit_intercept = True
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)
    y = np.dot(X, w) + 2.
    step_size = get_step_size(X, alpha, fit_intercept, classification=False)
    clf1 = Ridge(fit_intercept=fit_intercept, tol=tol, solver='sag',
                 alpha=alpha * n_samples, max_iter=max_iter,
                 random_state=rng)
    clf2 = clone(clf1)
    clf1.fit(X, y)
    # clf2 is fit on the sparse representation of the same data
    clf2.fit(sp.csr_matrix(X), y)
    spweights1, spintercept1 = sag_sparse(X, y, step_size, alpha,
                                          n_iter=max_iter,
                                          dloss=squared_dloss,
                                          fit_intercept=fit_intercept,
                                          random_state=rng)
    spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
                                          n_iter=max_iter,
                                          dloss=squared_dloss, sparse=True,
                                          fit_intercept=fit_intercept,
                                          random_state=rng)
    assert_array_almost_equal(clf1.coef_.ravel(),
                              spweights1.ravel(),
                              decimal=3)
    assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
    # TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
    # assert_array_almost_equal(clf2.coef_.ravel(),
    #                           spweights2.ravel(),
    #                           decimal=3)
    # assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
def test_get_auto_step_size():
    """Check get_auto_step_size against the hand-computed SAG/SAGA step
    sizes for both the squared and log losses, and that it rejects an
    unknown loss name."""
    X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
    alpha = 1.2
    fit_intercept = False
    # sum the squares of the second sample because that's the largest
    max_squared_sum = 4 + 9 + 16
    max_squared_sum_ = row_norms(X, squared=True).max()
    n_samples = X.shape[0]
    assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)
    for saga in [True, False]:
        for fit_intercept in (True, False):
            if saga:
                # SAGA: 1 / (2L + min(2 n alpha, L)), with L the
                # per-loss Lipschitz bound
                L_sqr = (max_squared_sum + alpha + int(fit_intercept))
                L_log = (max_squared_sum + 4.0 * alpha +
                         int(fit_intercept)) / 4.0
                mun_sqr = min(2 * n_samples * alpha, L_sqr)
                mun_log = min(2 * n_samples * alpha, L_log)
                step_size_sqr = 1 / (2 * L_sqr + mun_sqr)
                step_size_log = 1 / (2 * L_log + mun_log)
            else:
                # plain SAG: 1/L for squared loss, 4/L for log loss
                step_size_sqr = 1.0 / (max_squared_sum +
                                       alpha + int(fit_intercept))
                step_size_log = 4.0 / (max_squared_sum + 4.0 * alpha +
                                       int(fit_intercept))
            step_size_sqr_ = get_auto_step_size(max_squared_sum_, alpha,
                                                "squared",
                                                fit_intercept,
                                                n_samples=n_samples,
                                                is_saga=saga)
            step_size_log_ = get_auto_step_size(max_squared_sum_, alpha, "log",
                                                fit_intercept,
                                                n_samples=n_samples,
                                                is_saga=saga)
            assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
            assert_almost_equal(step_size_log, step_size_log_, decimal=4)
    msg = 'Unknown loss function for SAG solver, got wrong instead of'
    assert_raise_message(ValueError, msg, get_auto_step_size,
                         max_squared_sum_, alpha, "wrong", fit_intercept)
@pytest.mark.parametrize("seed", range(3))  # locally tested with 1000 seeds
def test_sag_regressor(seed):
    """Check that the SAG solver reaches a good fit on a 1-d linear target,
    both noise-free and with Gaussian noise, for dense and sparse input."""
    n_points = 300
    tolerance = .001
    iterations = 100
    penalty = 0.1
    rng = np.random.RandomState(seed)
    X = np.linspace(-5, 5, n_points).reshape(n_points, 1)

    # Noise-free linear target: the fit should be essentially perfect.
    target = 0.5 * X.ravel()
    dense_model = Ridge(tol=tolerance, solver='sag', max_iter=iterations,
                        alpha=penalty * n_points, random_state=rng)
    sparse_model = clone(dense_model)
    dense_model.fit(X, target)
    sparse_model.fit(sp.csr_matrix(X), target)
    assert dense_model.score(X, target) > 0.98
    assert sparse_model.score(X, target) > 0.98

    # Same target with unit-variance noise added: expect a weaker but
    # still clearly informative fit.
    target = 0.5 * X.ravel() + rng.randn(n_points, 1).ravel()
    dense_model = Ridge(tol=tolerance, solver='sag', max_iter=iterations,
                        alpha=penalty * n_points)
    sparse_model = clone(dense_model)
    dense_model.fit(X, target)
    sparse_model.fit(sp.csr_matrix(X), target)
    assert dense_model.score(X, target) > 0.45
    assert sparse_model.score(X, target) > 0.45
@pytest.mark.filterwarnings('ignore:The max_iter was reached')
def test_sag_classifier_computed_correctly():
    """Binary SAG LogisticRegression (dense and sparse) must match the
    pure-Python reference implementation ``sag_sparse``."""
    alpha = .1
    n_samples = 50
    n_iter = 50
    tol = .00001
    fit_intercept = True
    X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
                      cluster_std=0.1)
    step_size = get_step_size(X, alpha, fit_intercept, classification=True)
    classes = np.unique(y)
    # Re-encode the labels as -1/+1 for the reference implementation.
    y_tmp = np.ones(n_samples)
    y_tmp[y != classes[1]] = -1
    y = y_tmp
    clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
                              max_iter=n_iter, tol=tol, random_state=77,
                              fit_intercept=fit_intercept, multi_class='ovr')
    clf2 = clone(clf1)
    clf1.fit(X, y)
    clf2.fit(sp.csr_matrix(X), y)
    # Reference SAG solutions, dense and sparse variants.
    spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
                                        dloss=log_dloss,
                                        fit_intercept=fit_intercept)
    spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
                                          n_iter=n_iter,
                                          dloss=log_dloss, sparse=True,
                                          fit_intercept=fit_intercept)
    # Coefficients agree to 2 decimals, intercepts to 1.
    assert_array_almost_equal(clf1.coef_.ravel(),
                              spweights.ravel(),
                              decimal=2)
    assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
    assert_array_almost_equal(clf2.coef_.ravel(),
                              spweights2.ravel(),
                              decimal=2)
    assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@pytest.mark.filterwarnings('ignore:The max_iter was reached')
def test_sag_multiclass_computed_correctly():
    """Multiclass (one-vs-rest) SAG LogisticRegression must match the
    per-class reference ``sag_sparse`` solutions."""
    alpha = .1
    n_samples = 20
    tol = .00001
    max_iter = 40
    fit_intercept = True
    X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
                      cluster_std=0.1)
    step_size = get_step_size(X, alpha, fit_intercept, classification=True)
    classes = np.unique(y)
    clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
                              max_iter=max_iter, tol=tol, random_state=77,
                              fit_intercept=fit_intercept, multi_class='ovr')
    clf2 = clone(clf1)
    clf1.fit(X, y)
    clf2.fit(sp.csr_matrix(X), y)
    coef1 = []
    intercept1 = []
    coef2 = []
    intercept2 = []
    # Rebuild the one-vs-rest solution per class with the reference SAG,
    # encoding the current class as +1 and the rest as -1.
    for cl in classes:
        y_encoded = np.ones(n_samples)
        y_encoded[y != cl] = -1
        spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
                                              dloss=log_dloss, n_iter=max_iter,
                                              fit_intercept=fit_intercept)
        spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
                                              dloss=log_dloss, n_iter=max_iter,
                                              sparse=True,
                                              fit_intercept=fit_intercept)
        coef1.append(spweights1)
        intercept1.append(spintercept1)
        coef2.append(spweights2)
        intercept2.append(spintercept2)
    coef1 = np.vstack(coef1)
    intercept1 = np.array(intercept1)
    coef2 = np.vstack(coef2)
    intercept2 = np.array(intercept2)
    # Compare each class's coefficients and intercept.
    for i, cl in enumerate(classes):
        assert_array_almost_equal(clf1.coef_[i].ravel(),
                                  coef1[i].ravel(),
                                  decimal=2)
        assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
        assert_array_almost_equal(clf2.coef_[i].ravel(),
                                  coef2[i].ravel(),
                                  decimal=2)
        assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_results():
    """On linearly separable random data, SAG predictions (dense and
    sparse input) must reproduce the targets exactly."""
    penalty = .1
    n_features = 20
    n_samples = 10
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    true_w = rng.normal(size=n_features)
    # Labels are the signs of a noise-free linear model -> separable.
    y = np.sign(np.dot(X, true_w))
    dense_clf = LogisticRegression(solver='sag', C=1. / penalty / n_samples,
                                   max_iter=200, tol=.01, random_state=77)
    sparse_clf = clone(dense_clf)
    dense_clf.fit(X, y)
    sparse_clf.fit(sp.csr_matrix(X), y)
    assert_almost_equal(dense_clf.predict(X), y, decimal=12)
    assert_almost_equal(sparse_clf.predict(X), y, decimal=12)
@pytest.mark.filterwarnings('ignore:The max_iter was reached')
def test_binary_classifier_class_weight():
    """Binary SAG with class_weight must match the reference ``sag_sparse``
    run with the equivalent per-sample weights."""
    alpha = .1
    n_samples = 50
    n_iter = 20
    tol = .00001
    fit_intercept = True
    X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10,
                      cluster_std=0.1)
    step_size = get_step_size(X, alpha, fit_intercept, classification=True)
    classes = np.unique(y)
    # Re-encode the labels as -1/+1.
    y_tmp = np.ones(n_samples)
    y_tmp[y != classes[1]] = -1
    y = y_tmp
    class_weight = {1: .45, -1: .55}
    clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
                              max_iter=n_iter, tol=tol, random_state=77,
                              fit_intercept=fit_intercept, multi_class='ovr',
                              class_weight=class_weight)
    clf2 = clone(clf1)
    clf1.fit(X, y)
    clf2.fit(sp.csr_matrix(X), y)
    # Expand the class weights into one weight per sample for the
    # reference implementation.
    le = LabelEncoder()
    class_weight_ = compute_class_weight(class_weight, classes=np.unique(y),
                                         y=y)
    sample_weight = class_weight_[le.fit_transform(y)]
    spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
                                        dloss=log_dloss,
                                        sample_weight=sample_weight,
                                        fit_intercept=fit_intercept)
    spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
                                          n_iter=n_iter,
                                          dloss=log_dloss, sparse=True,
                                          sample_weight=sample_weight,
                                          fit_intercept=fit_intercept)
    assert_array_almost_equal(clf1.coef_.ravel(),
                              spweights.ravel(),
                              decimal=2)
    assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
    assert_array_almost_equal(clf2.coef_.ravel(),
                              spweights2.ravel(),
                              decimal=2)
    assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@pytest.mark.filterwarnings('ignore:The max_iter was reached')
def test_multiclass_classifier_class_weight():
    """Multiclass SAG with class_weight must match per-class reference
    ``sag_sparse`` runs using the equivalent per-sample weights."""
    alpha = .1
    n_samples = 20
    tol = .00001
    max_iter = 50
    class_weight = {0: .45, 1: .55, 2: .75}
    fit_intercept = True
    X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
                      cluster_std=0.1)
    step_size = get_step_size(X, alpha, fit_intercept, classification=True)
    classes = np.unique(y)
    clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
                              max_iter=max_iter, tol=tol, random_state=77,
                              fit_intercept=fit_intercept, multi_class='ovr',
                              class_weight=class_weight)
    clf2 = clone(clf1)
    clf1.fit(X, y)
    clf2.fit(sp.csr_matrix(X), y)
    # Expand class weights into one weight per sample.
    le = LabelEncoder()
    class_weight_ = compute_class_weight(class_weight, classes=np.unique(y),
                                         y=y)
    sample_weight = class_weight_[le.fit_transform(y)]
    coef1 = []
    intercept1 = []
    coef2 = []
    intercept2 = []
    # One-vs-rest reference solution per class (+1 current class, -1 rest).
    for cl in classes:
        y_encoded = np.ones(n_samples)
        y_encoded[y != cl] = -1
        spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
                                              n_iter=max_iter, dloss=log_dloss,
                                              sample_weight=sample_weight)
        spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
                                              n_iter=max_iter, dloss=log_dloss,
                                              sample_weight=sample_weight,
                                              sparse=True)
        coef1.append(spweights1)
        intercept1.append(spintercept1)
        coef2.append(spweights2)
        intercept2.append(spintercept2)
    coef1 = np.vstack(coef1)
    intercept1 = np.array(intercept1)
    coef2 = np.vstack(coef2)
    intercept2 = np.array(intercept2)
    for i, cl in enumerate(classes):
        assert_array_almost_equal(clf1.coef_[i].ravel(),
                                  coef1[i].ravel(),
                                  decimal=2)
        assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
        assert_array_almost_equal(clf2.coef_[i].ravel(),
                                  coef2[i].ravel(),
                                  decimal=2)
        assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_single_class():
    """Fitting on a single-class target must raise an informative ValueError."""
    features = [[1, 2], [3, 4]]
    labels = [1, 1]
    expected = ("This solver needs samples of at least 2 classes "
                "in the data")
    assert_raise_message(ValueError, expected,
                         LogisticRegression(solver='sag').fit,
                         features, labels)
def test_step_size_alpha_error():
    """The degenerate case step_size * alpha_scaled == 1 must raise a
    ZeroDivisionError for both classifiers and regressors."""
    # All-zero features make the automatic step size collapse into the
    # unsupported regime.
    X = [[0, 0], [0, 0]]
    y = [1, -1]
    msg = ("Current sag implementation does not handle the case"
           " step_size * alpha_scaled == 1")
    log_reg = LogisticRegression(solver='sag', C=1. / 1., fit_intercept=False)
    assert_raise_message(ZeroDivisionError, msg, log_reg.fit, X, y)
    ridge = Ridge(fit_intercept=False, solver='sag', alpha=1.)
    assert_raise_message(ZeroDivisionError, msg, ridge.fit, X, y)
def test_multinomial_loss():
    # test if the multinomial loss and gradient computations are consistent
    # between the SAG-style path and LogisticRegression's path.
    X, y = iris.data, iris.target.astype(np.float64)
    n_samples, n_features = X.shape
    n_classes = len(np.unique(y))
    rng = check_random_state(42)
    weights = rng.randn(n_features, n_classes)
    intercept = rng.randn(n_classes)
    sample_weights = rng.randn(n_samples)
    # In-place abs: sample weights must be non-negative.
    np.abs(sample_weights, sample_weights)
    # compute loss and gradient like in multinomial SAG
    dataset, _ = make_dataset(X, y, sample_weights, random_state=42)
    loss_1, grad_1 = _multinomial_grad_loss_all_samples(dataset, weights,
                                                        intercept, n_samples,
                                                        n_features, n_classes)
    # compute loss and gradient like in multinomial LogisticRegression
    lbin = LabelBinarizer()
    Y_bin = lbin.fit_transform(y)
    weights_intercept = np.vstack((weights, intercept)).T.ravel()
    loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
                                               0.0, sample_weights)
    # Drop the intercept column and restore (n_features, n_classes) layout.
    grad_2 = grad_2.reshape(n_classes, -1)
    grad_2 = grad_2[:, :-1].T
    # comparison
    assert_array_almost_equal(grad_1, grad_2)
    assert_almost_equal(loss_1, loss_2)
def test_multinomial_loss_ground_truth():
    # Compare _multinomial_loss_grad against a hand-computed weighted
    # multinomial log-loss/gradient and against hard-coded ground truth.
    # n_samples, n_features, n_classes = 4, 2, 3
    n_classes = 3
    X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]])
    y = np.array([0, 1, 2, 0])
    lbin = LabelBinarizer()
    Y_bin = lbin.fit_transform(y)
    weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]])
    intercept = np.array([1., 0, -.2])
    sample_weights = np.array([0.8, 1, 1, 0.8])
    # Manual computation: log-softmax, weighted negative log-likelihood,
    # and its gradient w.r.t. the weights.
    prediction = np.dot(X, weights) + intercept
    logsumexp_prediction = logsumexp(prediction, axis=1)
    p = prediction - logsumexp_prediction[:, np.newaxis]
    loss_1 = -(sample_weights[:, np.newaxis] * p * Y_bin).sum()
    diff = sample_weights[:, np.newaxis] * (np.exp(p) - Y_bin)
    grad_1 = np.dot(X.T, diff)
    weights_intercept = np.vstack((weights, intercept)).T.ravel()
    loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
                                               0.0, sample_weights)
    # Drop the intercept column and restore (n_features, n_classes) layout.
    grad_2 = grad_2.reshape(n_classes, -1)
    grad_2 = grad_2[:, :-1].T
    assert_almost_equal(loss_1, loss_2)
    assert_array_almost_equal(grad_1, grad_2)
    # ground truth
    loss_gt = 11.680360354325961
    grad_gt = np.array([[-0.557487, -1.619151, +2.176638],
                        [-0.903942, +5.258745, -4.354803]])
    assert_almost_equal(loss_1, loss_gt)
    assert_array_almost_equal(grad_1, grad_gt)
@pytest.mark.parametrize("solver", ["sag", "saga"])
def test_sag_classifier_raises_error(solver):
    # Following #13316, the error handling behavior changed in cython sag.
    # Non-regression test: numerical errors must surface as ValueError.
    rng = np.random.RandomState(42)
    X, y = make_classification(random_state=rng)
    model = LogisticRegression(solver=solver, random_state=rng,
                               warm_start=True)
    model.fit(X, y)

    # Corrupt the fitted coefficients, then refit from that state via
    # warm_start to trigger a floating point error inside the solver.
    model.coef_[:] = np.nan
    with pytest.raises(ValueError, match="Floating-point under-/overflow"):
        model.fit(X, y)
| bsd-3-clause |
HarryRybacki/SensorDataResearchReproduction | workbook.py | 1 | 3412 | import matplotlib.pyplot as pyplot
import helpers
"""
Begin phase one - Read Input
Note: This phase will need slight tweaking for each data source as they do not follow a truly standard data format.
As a result, interfaces will likely need to be written for each source akin to read_ibrl_data()
Note: While I had originally intended to use Pandas DataFrame objects to store / manipulate the data I opted out as
it expects each column (sensor) to be of the same length (number of measurements) but that isn't the case with the IBRL
dataset. I could cast each column to a Pandas Series (1 dimensional vector object) and add each Series to the DataFrame
but that might be overkill. For know I am using a simple dictionary mapping each sensor to a list of tuples containing
temperature and humidity data.
"""
# NOTE(review): this script uses Python 2 print statements — it will not run
# under Python 3 without porting.

# location of IBRL sensor measurements dataset
ibrl_sensor_measurements_file = "./datasets/Reduced2530K.csv"
# Create dictionary of original sensors mapping to their measurements
# x1, x2, ..., xn where xi = (ti', hi') and X = (T, H)
raw_measurements = helpers.read_ibrl_data(ibrl_sensor_measurements_file)
"""
Begin phase two - Process Data
"""
# 1A - Transformation
# Shuffle measurements
shuffled_measurements = helpers.randomize_readings(raw_measurements)
# Calculate successive differences
differences, lookup_table = helpers.generate_differences(shuffled_measurements)
# Sensor One Test Data
# NOTE(review): these three locals are never used below — presumably left
# over from debugging.
sensor_one_data = raw_measurements['2']
sensor_one_shuffle = shuffled_measurements['2']
sensor_one_differences = differences['2']
pyplot.figure(1)
# Plot of Original Data
pyplot.subplot(311)  # Num rows, num cols, figure num
for sensor in raw_measurements:
    pyplot.plot([reading[0] for reading in raw_measurements[sensor]],
                [reading[1] for reading in raw_measurements[sensor]],
                'ro')
pyplot.axis([0, 100, 0, 100])  # [xmin, xmax, ymin, ymax]
pyplot.xlabel('Temperature')
pyplot.ylabel('Humidity')
pyplot.title('Original Data')
# Plot of Shuffled Sensor One Data
pyplot.subplot(312)
for sensor in shuffled_measurements:
    pyplot.plot([reading[0] for reading in shuffled_measurements[sensor]],
                [reading[1] for reading in shuffled_measurements[sensor]],
                'ro')
pyplot.axis([0, 100, 0, 100])  # [xmin, xmax, ymin, ymax]
pyplot.xlabel('Temperature')
pyplot.ylabel('Humidity')
pyplot.title('Randomized Data')
# Plot of Original Sensor One Data
pyplot.subplot(313)
for sensor in differences:
    pyplot.plot([reading[0] for reading in differences[sensor]],
                [reading[1] for reading in differences[sensor]],
                'ro')
# Differences are centered around zero, hence the symmetric axis limits.
pyplot.axis([-50, 50, -50, 50])  # [xmin, xmax, ymin, ymax]
pyplot.xlabel('Temperature')
pyplot.ylabel('Humidity')
pyplot.title('Successive Differences')
pyplot.tight_layout()
# Save the plot
pyplot.savefig('wsn_data_transformation.png')
# 1B - Ellipsoid Boundary Modeling
print len(differences['11'])
print len(shuffled_measurements['11'])
pyplot.figure(2)
# NOTE(review): min/max are computed for sensor '3' but sensor '5' is
# plotted below, and min_temp/max_temp are never used — confirm intent.
min_temp, max_temp = helpers.get_min_max_temp(differences['3'])
pyplot.subplot(311)
pyplot.plot([reading[0] for reading in differences['5']],
            [reading[1] for reading in differences['5']])
pyplot.xlabel('Temperature')
pyplot.ylabel('Humidity')
pyplot.title('Successive Differences from Sensor 5')
pyplot.show()
# 1C - Inverse Transformation
"""
Begin phase four - Visually model data
"""
print "Completed..." | apache-2.0 |
rseubert/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets

diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target

X /= X.std(axis=0)  # Standardize data (easier to set the l1_ratio parameter)

# Compute paths

eps = 5e-3  # the smaller it is the longer is the path

print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)

print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
    X, y, eps, positive=True, fit_intercept=False)

print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
    X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)

# Fixed typo in the progress message ("positve" -> "positive").
print("Computing regularization path using the positive elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
    X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)

# Display results.  Axes.set_color_cycle was deprecated and removed from
# matplotlib; set_prop_cycle is the supported replacement.

plt.figure(1)
ax = plt.gca()
ax.set_prop_cycle(color=2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')

plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')

plt.figure(2)
ax = plt.gca()
ax.set_prop_cycle(color=2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
              linestyle='--')

plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')

plt.figure(3)
ax = plt.gca()
ax.set_prop_cycle(color=2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
              linestyle='--')

plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
           loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
logston/plottags | setup.py | 1 | 1160 | from setuptools import setup
import plottags

# Read the license and long description shipped with the repo so they can be
# embedded in the distribution metadata.
with open("LICENSE") as fd:
    LICENSE = fd.read()

with open("README.rst") as fd:
    README = fd.read()

setup(
    name='plottags',
    # Version is single-sourced from the package itself.
    version=plottags.__version__,
    description='A package for plotting the tag history of repositories',
    license=LICENSE,
    long_description=README,
    author=plottags.__author__,
    author_email=plottags.__email__,
    url='https://github.com/logston/plottags',
    packages=['plottags'],
    include_package_data=True,
    test_suite='tests',
    keywords=['repository', 'git', 'hg', 'mercurial', 'plot', 'tag', 'tags'],
    entry_points={
        # Installs the `plottags` command line tool.
        'console_scripts': ['plottags=plottags.controller:main'],
    },
    install_requires=[
        'matplotlib>=1.4.2',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Utilities',
    ],
)
| bsd-3-clause |
monkeybutter/AeroMetTree | datasets/dataset_generator.py | 1 | 2702 | import numpy as np
import pandas as pd
from random import random, randint
from datetime import datetime
def randomizer(lo, hi, prob):
    """With probability *prob*, return a random sub-interval ``(lo', hi')``
    of ``(lo, hi)``; otherwise (or when the interval is too small) return
    ``None``.
    """
    # Draw the acceptance sample first so the RNG stream matches the
    # original call order, then reject degenerate intervals.
    if random() >= prob or lo >= hi - 1:
        return None
    new_lo = randint(lo, hi - 1)
    new_hi = randint(new_lo + 1, hi)
    return (new_lo, new_hi)
def splitter(i, j):
    """Return a random cut point strictly inside ``(i, j)``, or ``None``
    when the interval is too small to split (``j - i <= 2``)."""
    if j - i <= 2:
        return None
    return randint(i + 1, j - 1)
def create_simple_data(size):
    """Build a DataFrame with a sorted uniform ``input1`` column and a
    ``class_var`` column whose level rises on randomly nested sub-intervals.
    """
    feature = np.sort(np.random.uniform(low=0.0, high=1000.0, size=size))
    level = 0
    labels = np.random.normal(level, .1, size=size)
    lo, hi = 0, size - 1
    while True:
        level += 1
        # Ask for a random sub-interval; stop once the randomizer declines.
        sub = randomizer(lo, hi, .8)
        if sub is None:
            break
        # Overwrite the current interval with noise around the new level,
        # then descend into the chosen sub-interval.
        labels[lo:hi] = np.random.normal(level, .1, size=hi - lo)
        lo, hi = sub
    stacked = np.vstack((feature, labels)).T
    return pd.DataFrame(stacked, columns=['input1', 'class_var'])
def create_simple_data2(size, min_size):
    """Build a DataFrame with a sorted uniform ``input1`` column and a
    ``class_var`` column filled by recursive random splitting: each chunk is
    noise around a level that increases with recursion depth, and chunks are
    split while they stay larger than *min_size*.
    """
    input_a = np.sort(np.random.uniform(low=0.0, high=1000.0, size=size))
    class_var = np.zeros(size)

    def write_chunk(i, j, min_size, level):
        # Fill [i, j) with noise around the current level, then recurse
        # into the two halves produced by a random split point.
        class_var[i:j] = np.random.normal(level, .1, size=j - i)
        k = splitter(i, j)
        if k is None:
            # Bug fix: splitter returns None for intervals with j - i <= 2;
            # the original code then crashed on `k - i` with a TypeError
            # whenever min_size < 2.
            return
        if k - i > min_size:
            write_chunk(i, k, min_size, level + 1)
        if j - k > min_size:
            write_chunk(k, j, min_size, level + 1)

    write_chunk(0, size - 1, min_size, 0)
    return pd.DataFrame(np.vstack((input_a, class_var)).T,
                        columns=['input1', 'class_var'])
def transform(name):
    """Read ``./<name>.csv``, quantize its columns and write the result to
    ``./<name>_clean.csv``.

    - ``time``: clock time mapped onto [0, 360) degrees of a day
    - ``date``: day-of-year mapped onto [0, 360) degrees, rounded to 10
    - ``gfs_press`` / ``gfs_rh`` / ``gfs_temp``: rounded to integers
    - ``gfs_wind_dir``: rounded to the nearest 10 degrees
    - ``gfs_wind_spd``: rounded to the nearest 0.5
    - ``metar_wind_dir``: dropped
    """
    df = pd.read_csv("./{}.csv".format(name))
    df['time'] = df['time'].map(lambda x: datetime.strptime(x, '%H:%M'))
    df['time'] = df['time'].map(
        lambda x: ((x.minute / 60.0 + x.hour) / 24.0) * 360)
    df['date'] = df['date'].map(lambda x: datetime.strptime(x, '%Y-%m-%d'))
    df['date'] = df['date'].map(
        lambda x: (x.timetuple().tm_yday / 365.0) * 360.0)
    df['date'] = df['date'].map(lambda x: int(10 * round(float(x) / 10)))
    df['gfs_press'] = df['gfs_press'].map(lambda x: int(round(float(x))))
    df['gfs_rh'] = df['gfs_rh'].map(lambda x: int(round(float(x))))
    df['gfs_temp'] = df['gfs_temp'].map(lambda x: int(round(float(x))))
    df['gfs_wind_dir'] = df['gfs_wind_dir'].map(
        lambda x: int(10 * round(float(x) / 10)))
    df['gfs_wind_spd'] = df['gfs_wind_spd'].map(
        lambda x: int(.5 * round(float(x) / .5)))
    # Bug fix: the positional `axis` argument of DataFrame.drop was removed
    # in pandas 2.0; use the explicit `columns` keyword instead.
    df = df.drop(columns=['metar_wind_dir'])
    df.index = range(0, len(df.index))
    df.to_csv("./{}_clean.csv".format(name))
if __name__ == "__main__":
airports = ["eddt", "egll", "lebl", "lfpg", "limc", "yssy", "zbaa"]
for airport in airports:
transform(airport) | mit |
MLWave/kepler-mapper | setup.py | 1 | 2366 | #!/usr/bin/env python
from setuptools import setup
import re

# Parse the version string out of kmapper/_version.py without importing the
# package (importing could fail before its dependencies are installed).
VERSIONFILE = "kmapper/_version.py"
# Bug fix: the version file was read via open(...).read() without ever
# closing the handle; use a context manager instead.
with open(VERSIONFILE, "rt") as fh:
    verstrline = fh.read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
    verstr = mo.group(1)
else:
    raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))

# Explicit encoding so the build does not depend on the locale's default.
with open('README.md', encoding='utf-8') as f:
    long_description = f.read()

setup(name='kmapper',
      version=verstr,
      description='Python implementation of Mapper algorithm for Topological Data Analysis.',
      long_description=long_description,
      long_description_content_type="text/markdown",
      author='HJ van Veen, Nathaniel Saul',
      author_email='info@mlwave.com, nat@saulgill.com',
      url='http://kepler-mapper.scikit-tda.org',
      license='MIT',
      packages=['kmapper'],
      include_package_data=True,
      extras_require={
        'testing': [  # `pip install -e ".[testing]"``
            'pytest',
            'networkx',
            'matplotlib',
            'python-igraph',
            'plotly',
            'ipywidgets'
        ],
        'docs': [  # `pip install -e ".[docs]"``
            'sktda_docs_config',
            'sphinx-gallery',
            'pandas',
            # for building docs for plotlyviz stuff
            'networkx',
            'matplotlib',
            'python-igraph',
            'plotly',
            'ipywidgets'
        ]
      },
      install_requires=[
        'scikit-learn',
        'numpy',
        'scipy',
        'Jinja2'
      ],
      python_requires='>=2.7,!=3.1,!=3.2,!=3.3',
      classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Education',
        'Intended Audience :: Financial and Insurance Industry',
        'Intended Audience :: Healthcare Industry',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Scientific/Engineering :: Mathematics',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
      ],
      keywords='mapper, topology data analysis, algebraic topology, unsupervised learning'
      )
| mit |
masasin/spirit | src/analysis/ttest_analysis.py | 1 | 2526 | from collections import namedtuple
from pathlib import Path
import numpy as np
import pandas as pd
from scipy import stats
from .csv_analysis import analyze_data, load_surveys
from ..data.survey_utils import ExperimentType
# LaTeX t-test tables are written next to the trained models directory.
TTEST_DIR = Path(__file__).parent.joinpath("../../models")

# One row of the summary table produced by do_ttest: column name, per-group
# mean/std, mean difference, t statistic, p-value, and Hedges' g.
ColResult = namedtuple("ColResult", "col mu1 std1 mu2 std2 delta_mu t p g")
def hedges_g(sample1, sample2):
n_samples = len(sample1) + len(sample2)
return ((sample2.mean() - sample1.mean())
/ _std_weighted_pooled(sample1, sample2)
* (n_samples - 3) / (n_samples - 2.25)
* np.sqrt((n_samples - 2) / n_samples))
def _std_weighted_pooled(*samples):
return np.sqrt(
sum((len(sample) - 1) * sample.std()**2 for sample in samples)
/ (sum(len(sample) for sample in samples) - 2))
def do_ttest(df, columns, save_name=None,
             type1=ExperimentType.Onboard, type2=ExperimentType.Spirit):
    """Run paired t-tests between two experiment conditions for each column.

    For every column in *columns*, rows of *df* belonging to *type1* and
    *type2* are compared with a paired t-test; means, stds, the mean
    difference, t, p and Hedges' g are collected.  When *save_name* is
    given, the table is written to ``TTEST_DIR/ttest_<save_name>.tex``.

    NOTE(review): stats.ttest_rel pairs the two samples by row order —
    verify *df* is sorted consistently per subject in both conditions.
    """
    results = []
    for column in columns:
        sample1 = df[df.experiment_type == type1][column]
        sample2 = df[df.experiment_type == type2][column]
        ttest_result = stats.ttest_rel(sample2, sample1)
        results.append(
            ColResult(
                column,
                sample1.mean(), sample1.std(),
                sample2.mean(), sample2.std(),
                sample2.mean() - sample1.mean(),
                ttest_result.statistic, ttest_result.pvalue,
                hedges_g(sample1, sample2)
            )
        )
    if save_name is not None:
        # Rebinding df shadows the input frame; from here on it only holds
        # the summary table used for the LaTeX export.
        df = pd.DataFrame(results).set_index("col")
        with open(TTEST_DIR.joinpath(f"ttest_{save_name}.tex"), "w") as fout:
            fout.write(df.to_latex())
if __name__ == "__main__":
results, analyses = analyze_data()
# analyses_columns = ["duration", "dist_err", "x_err", "y_err", "rms_x",
# "rms_y", "path_length", "move_l", "move_r", "move_x",
# "move_b", "move_f", "move_y"]
# do_ttest(analyses, analyses_columns, "analyses")
users, tlx, surveys = load_surveys()
tlx_columns = ["mental", "physical", "temporal", "performance", "effort",
"frustration", "tlx"]
survey_columns = ["orientation_understanding", "orientation_control",
"position_understanding", "position_control",
"spacial_understanding", "spacial_control", "total"]
do_ttest(tlx, tlx_columns, save_name="tlx")
do_ttest(surveys, survey_columns, save_name="survey")
| mit |
dp7-PU/QCLAS_public | src/qclasGUI.py | 1 | 30542 | """
Make a GUI for qclas. This program uses HAPI to generate absorption profiles.
The program comes in without HITRAN data files. User can use the program to
download lines they need.
GUI of the program is based on PyQt4.
Da Pan, v-alpha, started on 02/13/2016
"""
import hapi
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import os
import gasPropertyWidget
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import specCal
import dasRead
import six
import matplotlib
class mplCanvas(QtWidgets.QWidget):
    """Qt widget embedding a matplotlib figure with a navigation toolbar and
    Plot/Export buttons.  The buttons are not connected here — presumably the
    owning window wires them up (TODO confirm)."""

    def __init__(self, parent=None, width=5, height=4, dpi=100, bgcolor='#ffffff'):
        super(mplCanvas, self).__init__(parent)
        # a figure instance to plot on
        self.figure = plt.figure(figsize=(width, height), dpi=dpi, facecolor=bgcolor)
        self.axes = self.figure.add_subplot(111)
        # Index of this canvas within the owner's canvas list — TODO confirm.
        self.index = 0
        # this is the Canvas Widget that displays the `figure`
        # it takes the `figure` instance as a parameter to __init__
        self.canvas = FigureCanvas(self.figure)
        FigureCanvas.setSizePolicy(self.canvas,
                                   QtWidgets.QSizePolicy.Expanding,
                                   QtWidgets.QSizePolicy.Expanding)
        # NOTE(review): updateGeometry is invoked on `self` (the QWidget)
        # rather than on the canvas — confirm this is intentional.
        FigureCanvas.updateGeometry(self)
        # this is the Navigation widget
        # it takes the Canvas widget and a parent
        self.toolbar = NavigationToolbar(self.canvas, self)
        # Just some plotButton connected to `plot` method
        self.plotButton = QtWidgets.QPushButton('Plot', parent=self)
        self.exportButton = QtWidgets.QPushButton('Export', parent=self)
        # set the layout: toolbar on top, canvas, then the two buttons.
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self.toolbar)
        layout.addWidget(self.canvas)
        buttonHBox = QtWidgets.QHBoxLayout()
        buttonHBox.addWidget(self.plotButton)
        buttonHBox.addWidget(self.exportButton)
        layout.addLayout(buttonHBox)
        self.setLayout(layout)
class AppWindow(QtWidgets.QMainWindow):
    def __init__(self, parent=None):
        """Create the main QCLAS window, sized relative to the screen."""
        super(AppWindow, self).__init__(parent)
        self.setWindowTitle('QCLAS')
        # Available screen geometry as an (x, y, width, height) tuple.
        self.scrsz = QtWidgets.QDesktopWidget().availableGeometry().getRect()
        # Heuristic DPI for the embedded figures, derived from screen width.
        self.dpi = int(self.scrsz[2] / 25)
        self.initUI()
    def resizeEvent(self, resizeEvent):
        # Keep the embedded matplotlib canvases sized to the window.
        self.updateCanvasGeometry()
    def initUI(self):
        """Build menus, status bar, frames, canvases and the calibration
        dialog, then size the window to 80% of the available screen."""
        # Action: set HAPI database Directory
        self.initMenu()
        # Permanent status-bar label listing the currently configured gases.
        self.gasListLabel = QtWidgets.QLabel()
        self.calGasList = []
        self.canvasList = []
        self.setGasListLabel()
        self.statusBar().addPermanentWidget(self.gasListLabel)
        self.mainWidget = QtWidgets.QWidget()
        self.setFrames()
        self.setLeftColumn()
        self.setCanvas()
        # No laser configuration loaded until the user picks a file.
        self.laserSpec = None
        self.setCalibFileDialog()
        self.setCentralWidget(self.mainWidget)
        self.resize(0.8 * self.scrsz[2], 0.8 * self.scrsz[3])
    def initMenu(self):
        """Create the HITRAN-data, laser-config and calibration-mode menus."""
        # HITRAN Data menu: manage the local HAPI line database.
        chHapiDir = QtWidgets.QAction('&Change HITRAN Dir', self)
        chHapiDir.triggered.connect(self.setHapiDir)
        dnldData = QtWidgets.QAction('&Download HITRAN Data', self)
        dnldData.triggered.connect(self.fetchData)
        saveHData = QtWidgets.QAction('&Save HITRAN Data', self)
        saveHData.triggered.connect(self.commitHData)
        listHData = QtWidgets.QAction('Available HITRAN Data', self)
        listHData.triggered.connect(self.listHData)
        self.HapiMenu = QtWidgets.QMenu('&HITRAN Data', self)
        self.HapiMenu.addAction(chHapiDir)
        self.HapiMenu.addAction(dnldData)
        self.HapiMenu.addAction(saveHData)
        self.HapiMenu.addAction(listHData)
        # Laser setting menu
        loadLaserFile = QtWidgets.QAction('&Load laser config', self)
        loadLaserFile.triggered.connect(self.getLaserConfig)
        self.laserMenu = QtWidgets.QMenu('&Laser Config')
        self.laserMenu.addAction(loadLaserFile)
        # Calibration mode menu: a checkable toggle handled by showCaliMode.
        self.enterCaliMode = QtWidgets.QAction('Calibration mode', self, checkable=True)
        self.enterCaliMode.triggered.connect(self.showCaliMode)
        self.caliModeMenu = QtWidgets.QMenu('&Advance')
        self.caliModeMenu.addAction(self.enterCaliMode)
        # Save results menu
        # saveResults = QtGui.QAction('&')
        self.menuBar().addMenu(self.HapiMenu)
        self.menuBar().addMenu(self.laserMenu)
        self.menuBar().addMenu(self.caliModeMenu)
##### BLOCK 1: HAPI data management
    def getLaserConfig(self):
        """Ask the user for a laser config file and load it via specCal."""
        fileName = self.getFileNameDialog()
        self.laserSpec = specCal.read_config(fileName)
    def getDasDir(self):
        """Pick the DAS data directory, then bring the calibration dialog
        back to the front (the directory dialog steals focus)."""
        dirStr = self.getFileDirDialog()
        self.dasDir.setText(dirStr)
        self.calibDialog.raise_()
        self.calibDialog.activateWindow()
    def getWmsDir(self):
        """Pick the WMS data directory, then bring the calibration dialog
        back to the front (the directory dialog steals focus)."""
        wmsStr = self.getFileDirDialog()
        self.wmsDir.setText(wmsStr)
        self.calibDialog.raise_()
        self.calibDialog.activateWindow()
    def showCaliMode(self):
        """Toggle calibration mode: show/hide the calibration widgets and
        enable/disable the normal plotting controls accordingly."""
        if self.enterCaliMode.isChecked():
            # Entering calibration mode: lock the number panel on button 4
            # and disable the total-plot option.
            self.calibDialog.show()
            self.numPanel.button(4).setChecked(True)
            self.plotTotalCheck.setEnabled(False)
            for button in self.numPanel.buttons():
                button.setEnabled(False)
            self.setCanvas()
            self.calibModeWidget.show()
        else:
            # Leaving calibration mode: restore the normal controls.
            self.plotTotalCheck.setEnabled(True)
            for button in self.numPanel.buttons():
                button.setEnabled(True)
            self.calibModeWidget.hide()
        # Process pending events so the re-layout is applied immediately.
        QtWidgets.QApplication.processEvents()
    def setCalibMode(self):
        """Build the (initially hidden) calibration-mode side panel:
        baseline fitting controls, valid-range entry, absorbance display and
        DAS fit settings."""
        self.calibModeWidget = QtWidgets.QWidget()
        vBox = QtWidgets.QVBoxLayout()
        # Baseline name and polynomial order.
        bslLabel = QtWidgets.QLabel('Baseline name: ')
        self.bslName = QtWidgets.QLineEdit()
        bslOrderLabel = QtWidgets.QLabel('Order')
        self.bslOrder = QtWidgets.QLineEdit()
        bslHBox = QtWidgets.QHBoxLayout()
        bslHBox.addWidget(bslLabel)
        bslHBox.addWidget(self.bslName)
        bslHBox.addWidget(bslOrderLabel)
        bslHBox.addWidget(self.bslOrder)
        # Range of samples used for the baseline fit.
        bslRngLabel = QtWidgets.QLabel('Baseline fit range:')
        self.bslRng = QtWidgets.QLineEdit()
        bslRngHBox = QtWidgets.QHBoxLayout()
        bslRngHBox.addWidget(bslRngLabel)
        bslRngHBox.addWidget(self.bslRng)
        fitButton = QtWidgets.QPushButton('Fit baseline')
        fitButton.clicked.connect(self.showBslFit)
        # Spectral range considered valid for the absorbance computation.
        validRngLabel = QtWidgets.QLabel('Valid spec range:')
        validRngBox = QtWidgets.QHBoxLayout()
        self.validRng = QtWidgets.QLineEdit()
        validRngBox.addWidget(validRngLabel)
        validRngBox.addWidget(self.validRng)
        showAbsorbanceButton = QtWidgets.QPushButton('Show absorbance')
        showAbsorbanceButton.clicked.connect(self.calcAbsorbance)
        # Fit settings: peak wavenumber and laser tuning rate.
        fitSettingBox = QtWidgets.QHBoxLayout()
        pkLocLabel = QtWidgets.QLabel('Peak nu: ')
        self.pkLoc = QtWidgets.QLineEdit()
        trateLabel = QtWidgets.QLabel('Tuning rate: ')
        self.trate = QtWidgets.QLineEdit()
        fitSettingBox.addWidget(pkLocLabel)
        fitSettingBox.addWidget(self.pkLoc)
        fitSettingBox.addWidget(trateLabel)
        fitSettingBox.addWidget(self.trate)
        # NOTE(review): this button is created without a label — it will
        # render blank; confirm whether a caption was intended.
        fitAbsButton = QtWidgets.QPushButton()
        fitAbsButton.clicked.connect(self.dasFit)
        fitSettingBox.addWidget(fitAbsButton)
        vBox.addLayout(bslHBox)
        vBox.addLayout(bslRngHBox)
        vBox.addWidget(fitButton)
        vBox.addLayout(validRngBox)
        vBox.addWidget(showAbsorbanceButton)
        vBox.addLayout(fitSettingBox)
        self.calibModeWidget.setLayout(vBox)
        # Hidden until the user enables calibration mode from the menu.
        self.calibModeWidget.hide()
def setCalibFileDialog(self):
self.calibDialog = QtWidgets.QDialog()
self.calibDialog.setWindowTitle('Open calibration files')
vBox = QtWidgets.QVBoxLayout()
calibTitle = QtWidgets.QLabel('Calibration setting')
dasDirHBox = QtWidgets.QHBoxLayout()
dasDirLabel = QtWidgets.QLabel('DAS dir: ')
self.dasDir = QtWidgets.QLineEdit()
dasDirButton = QtWidgets.QPushButton('...')
dasDirButton.clicked.connect(self.getDasDir)
dasDirHBox.addWidget(dasDirLabel)
dasDirHBox.addWidget(self.dasDir)
dasDirHBox.addWidget(dasDirButton)
dasPrefixHBox = QtWidgets.QHBoxLayout()
dasPrefixLabel = QtWidgets.QLabel('DAS prefix')
self.dasPrefix = QtWidgets.QLineEdit()
dasIdxRngLabel = QtWidgets.QLabel('Range (start:end):')
self.dasIdxRng = QtWidgets.QLineEdit()
dasPrefixHBox.addWidget(dasPrefixLabel)
dasPrefixHBox.addWidget(self.dasPrefix)
dasPrefixHBox.addWidget(dasIdxRngLabel)
dasPrefixHBox.addWidget(self.dasIdxRng)
wmsDirHBox = QtWidgets.QHBoxLayout()
wmsDirLabel = QtWidgets.QLabel('WMS dir: ')
self.wmsDir = QtWidgets.QLineEdit()
wmsDirButton = QtWidgets.QPushButton('...')
wmsDirButton.clicked.connect(self.getWmsDir)
wmsDirHBox.addWidget(wmsDirLabel)
wmsDirHBox.addWidget(self.wmsDir)
wmsDirHBox.addWidget(wmsDirButton)
wmsPrefixHBox = QtWidgets.QHBoxLayout()
wmsPrefixLabel = QtWidgets.QLabel('DAS prefix')
self.wmsPrefix = QtWidgets.QLineEdit()
wmsPrefixHBox.addWidget(wmsPrefixLabel)
wmsPrefixHBox.addWidget(self.wmsPrefix)
okButton = QtWidgets.QPushButton('Import data')
okButton.clicked.connect(self.readDasData)
vBox.addWidget(calibTitle)
vBox.addLayout(dasDirHBox)
vBox.addLayout(dasPrefixHBox)
vBox.addLayout(wmsDirHBox)
vBox.addLayout(wmsPrefixHBox)
vBox.addWidget(okButton)
self.calibDialog.setLayout(vBox)
def readDasData(self):
print self.dasIdxRng.text()
dasIdxRng = map(int, str(self.dasIdxRng.text()).split(':'))
print dasIdxRng
self.dasMeas = dasRead.dasSignal(location=str(self.dasDir.text()),
idx_range=range(dasIdxRng[0], dasIdxRng[
1]),
prefix=str(self.dasPrefix.text()))
canvas = self.canvasList[0]
canvas.axes.plot(self.dasMeas.data)
canvas.figure.tight_layout()
canvas.canvas.draw()
canvas.canvas.updateGeometry()
def getFileNameDialog(self):
fileName = QtWidgets.QFileDialog.getOpenFileName(self)[0]
return fileName
def getFileDirDialog(self):
DirName = QtWidgets.QFileDialog.getExistingDirectory(self)
return DirName
def getGasList(self):
self.gasList = hapi.getTableList()
hapi.getTableList()
self.gasList.remove('sampletab')
def setGasListLabel(self):
self.getGasList()
if len(self.gasList) == 0:
self.gasListLabel.setText('No Data')
else:
self.gasListLabel.setText('Data ready for: ' + ', '.join(self.gasList))
def setHapiDir(self):
dbDir = self.getFileDirDialog()
hapi.db_begin_pickle(dbDir)
for gas in self.gasList:
hapi.dropTable(gas)
self.gasList = []
self.setGasListLabel()
self.scrollGasPanel.gasList = self.gasList
self.scrollGasPanel.updateAll()
hapi.tableList()
def fetchData(self):
inputStr, ok = QtWidgets.QInputDialog.getText(self, 'Add data',
"Temporarily add data to the database; the data will not be saved." +
"\nLarge database will slow down loading processes when start the program." +
"\nTo Save the data, use 'Save HITRAN data'." +
"\n\nEnter Gas name, min, and max wavenumber separated by ',' (e.g. H2O,1000,2000)")
if ok:
params = str(inputStr).split(',')
for i in range(40):
try:
name = hapi.moleculeName(i + 1)
if name == params[0]:
M = i + 1
print M
except:
pass
# try:
print params[0]
hapi.fetch_pickle(params[0], M, 1, int(params[1]), int(params[2]))
nu = hapi.getColumn(params[0], 'nu')
self.statusBar().showMessage(
str(len(nu)) + ' lines' + ' added for ' + params[0] + ' ' + params[
1] + '<nu<' + params[2])
# except:
# self.statusBar().showMessage('Data fetch failed')
self.setGasListLabel()
self.scrollGasPanel.gasList = self.gasList
self.scrollGasPanel.updateAll()
def commitHData(self):
hapi.db_commit_pickle()
self.statusBar().showMessage('HITRAN data saved')
##### End of BLOCK 1.
    def setWaveRangeWidget(self):
        """Build the wavenumber-range row: min nu, max nu and point count.

        The line edits are read back as floats/ints elsewhere (calPlot,
        dasFit) to build the nu axis with np.linspace.
        """
        # TODO add cm-1, nm, um conversion
        self.waveRangeWidget = QtWidgets.QWidget(self.mainWidget)
        hbox = QtWidgets.QHBoxLayout()
        label1 = QtWidgets.QLabel('Nu range: ')
        label2 = QtWidgets.QLabel('to')
        self.minNu = QtWidgets.QLineEdit(self.mainWidget)
        self.minNu.setText('1000')  # default lower bound
        self.minNu.setMaximumWidth(90)
        self.maxNu = QtWidgets.QLineEdit(self.mainWidget)
        self.maxNu.setText('1100')  # default upper bound
        self.maxNu.setMaximumWidth(90)
        hbox.addWidget(label1)
        hbox.addWidget(self.minNu)
        hbox.addWidget(label2)
        hbox.addWidget(self.maxNu)
        labelNumPt = QtWidgets.QLabel('; # of point: ')
        self.numPt = QtWidgets.QLineEdit(self.mainWidget)
        self.numPt.setText('1000')  # default number of spectral points
        self.numPt.setMaximumWidth(70)
        hbox.addWidget(labelNumPt)
        hbox.addWidget(self.numPt)
        hbox.setAlignment(QtCore.Qt.AlignLeft)
        hbox.setContentsMargins(0, 0, 0, 0)
        self.waveRangeWidget.setLayout(hbox)
    def setSpecWidget(self):
        """Build the DAS/WMS radio pair plus the dependent plot-type combo."""
        self.specWidget = QtWidgets.QWidget(self.mainWidget)
        hbox = QtWidgets.QHBoxLayout()
        labelWhatPlot = QtWidgets.QLabel(' Plot: ')
        # Button-group ids: 1 = DAS, 2 = WMS; the checked id selects the
        # calculation branch in calPlot and the combo contents below.
        self.specChecks = QtWidgets.QButtonGroup(self.mainWidget)
        wmsCheck = QtWidgets.QRadioButton(self.mainWidget)
        dasCheck = QtWidgets.QRadioButton(self.mainWidget)
        wmsCheck.setText('WMS')
        dasCheck.setText('DAS')
        # Re-populate the plot-type combo whenever the mode changes.
        dasCheck.clicked.connect(self.chComboWhatPlot)
        wmsCheck.clicked.connect(self.chComboWhatPlot)
        self.comboWhatPlot = QtWidgets.QComboBox(self.mainWidget)
        self.specChecks.addButton(dasCheck, 1)
        self.specChecks.addButton(wmsCheck, 2)
        dasCheck.setChecked(True)  # DAS is the default mode
        self.chComboWhatPlot()     # fill the combo for the default mode
        hbox.addWidget(dasCheck)
        hbox.addWidget(wmsCheck)
        hbox.addWidget(labelWhatPlot)
        hbox.addWidget(self.comboWhatPlot)
        hbox.setAlignment(QtCore.Qt.AlignLeft)
        hbox.setContentsMargins(0, 0, 0, 0)
        hbox.setSpacing(10)
        self.specWidget.setLayout(hbox)
def setLineShapeWidget(self):
self.lineShapeWidget = QtWidgets.QWidget(self.mainWidget)
lineShapeLabel = QtWidgets.QLabel('Line shape profile: ')
self.comboLineShape = QtWidgets.QComboBox(self.mainWidget)
self.comboLineShape.addItems(['Voigt', 'HT', 'Lorentz', 'Doppler'])
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(lineShapeLabel)
hbox.addWidget(self.comboLineShape)
hbox.setAlignment(QtCore.Qt.AlignLeft)
hbox.setContentsMargins(0, 0, 0, 0)
self.lineShapeWidget.setLayout(hbox)
    def setNumPanelWidget(self):
        """Build the panel-count radio group (1/2/4) and 'Plot total' box.

        The button ids equal the number of panels; setCanvas reads
        ``self.numPanel.checkedId()`` to decide how many canvases to build.
        """
        self.numPanelWidget = QtWidgets.QWidget()
        self.numPanel = QtWidgets.QButtonGroup(self.mainWidget)
        onePanel = QtWidgets.QRadioButton(self.mainWidget)
        onePanel.setText('1')
        onePanel.setChecked(True)  # single panel by default
        onePanel.clicked.connect(self.setCanvas)
        # NOTE(review): twoPanel/fourPanel are parented to self while
        # onePanel uses self.mainWidget — looks unintentional; confirm.
        twoPanel = QtWidgets.QRadioButton(self)
        twoPanel.setText('2')
        twoPanel.clicked.connect(self.setCanvas)
        fourPanel = QtWidgets.QRadioButton(self)
        fourPanel.setText('4')
        fourPanel.clicked.connect(self.setCanvas)
        numLabel = QtWidgets.QLabel('# of panel: ')
        self.plotTotalCheck = QtWidgets.QCheckBox('Plot total')
        self.plotTotalCheck.setChecked(True)
        hbox = QtWidgets.QHBoxLayout()
        hbox.addWidget(numLabel)
        hbox.addWidget(onePanel)
        hbox.addWidget(twoPanel)
        hbox.addWidget(fourPanel)
        hbox.addWidget(self.plotTotalCheck)
        # Ids double as the panel count requested.
        self.numPanel.addButton(onePanel, 1)
        self.numPanel.addButton(twoPanel, 2)
        self.numPanel.addButton(fourPanel, 4)
        hbox.setAlignment(QtCore.Qt.AlignLeft)
        hbox.setContentsMargins(0, 0, 0, 0)
        self.numPanelWidget.setLayout(hbox)
def setWmsMethodWidget(self):
self.wmsMethodWidget = QtWidgets.QWidget(self.mainWidget)
hbox = QtWidgets.QHBoxLayout()
labelWmsMethod = QtWidgets.QLabel('WMS calculation method: ')
self.wmsMethod = QtWidgets.QComboBox(self.mainWidget)
self.wmsMethod.addItem('Theoretical', 1)
self.wmsMethod.addItem('Simulation w/ params', 2)
hbox.addWidget(labelWmsMethod)
hbox.addWidget(self.wmsMethod)
hbox.setContentsMargins(0, 0, 0, 0)
self.wmsMethodWidget.setLayout(hbox)
def setWmsModWidget(self):
self.wmsModWidget = QtWidgets.QWidget(self.mainWidget)
hbox = QtWidgets.QHBoxLayout()
labelWmsMod = QtWidgets.QLabel('WMS modulation: ')
labelModUnit = QtWidgets.QLabel('cm -1')
self.leWmsMod = QtWidgets.QLineEdit('0.01')
hbox.addWidget(labelWmsMod)
hbox.addWidget(self.leWmsMod)
hbox.addWidget(labelModUnit)
hbox.setContentsMargins(0, 0, 0, 0)
self.wmsModWidget.setLayout(hbox)
def setICutWidget(self):
self.iCutWidget = QtWidgets.QWidget(self.mainWidget)
hbox = QtWidgets.QHBoxLayout()
labelIcut = QtWidgets.QLabel('Intensity threshold: ')
labelIcut.setMaximumWidth(150)
self.leICut = QtWidgets.QLineEdit(self.mainWidget)
self.leICut.setText('1e-30')
self.leICut.setMaximumWidth(90)
self.leICut.setAlignment(QtCore.Qt.AlignLeft)
hbox.addWidget(labelIcut)
hbox.addWidget(self.leICut)
hbox.setSpacing(10)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.setAlignment(QtCore.Qt.AlignLeft)
self.iCutWidget.setLayout(hbox)
    def setLeftColumn(self):
        """Populate the left column: gas panel on top, plot settings below.

        Builds every settings sub-widget, then stacks them in vboxBottom;
        the order of addWidget calls defines the on-screen order.
        """
        vboxTop = QtWidgets.QVBoxLayout()
        vboxBottom = QtWidgets.QVBoxLayout()
        self.scrollGasPanel = gasPropertyWidget.scrollPanel(self.mainWidget,
                                                           gasList=self.gasList)
        # Each call creates one settings widget as an attribute on self.
        self.setWaveRangeWidget()
        self.setSpecWidget()
        self.setWmsMethodWidget()
        self.setWmsModWidget()
        self.setLineShapeWidget()
        self.setICutWidget()
        self.setNumPanelWidget()
        self.setCalibMode()
        vboxBottom.addWidget(self.waveRangeWidget)
        vboxBottom.addWidget(self.specWidget)
        vboxBottom.addWidget(self.wmsMethodWidget)
        vboxBottom.addWidget(self.wmsModWidget)
        vboxBottom.addWidget(self.lineShapeWidget)
        vboxBottom.addWidget(self.iCutWidget)
        vboxBottom.addWidget(self.numPanelWidget)
        vboxBottom.addWidget(self.calibModeWidget)  # hidden until calib mode
        vboxBottom.setAlignment(QtCore.Qt.AlignTop)
        vboxTop.addWidget(self.scrollGasPanel)
        self.leftTop.setLayout(vboxTop)
        self.leftBottom.setLayout(vboxBottom)
def genUnitDict(self):
"""
Generate unit dict to be passed down to specCal.
Returns
-------
unitDict: dict
A dict has following keys: 'c', 'p', 't', 'l'
"""
unitDict = {}
unitDict['c'] = str(self.scrollGasPanel.concUnit.currentText())
unitDict['p'] = str(self.scrollGasPanel.pressUnit.currentText())
unitDict['t'] = str(self.scrollGasPanel.tempUnit.currentText())
unitDict['l'] = str(self.scrollGasPanel.lengthUnit.currentText())
return unitDict
    def setCanvas(self):
        """(Re)create the plot canvases to match the selected panel count.

        Tears down the existing canvases, then lays out 1, 2 or 4 mplCanvas
        widgets in a grid and resets self.resultList (one dict per canvas).
        """
        numPanel = self.numPanel.checkedId()  # button id == panel count
        self.grid = QtWidgets.QGridLayout()
        # Dispose of the previous canvases.
        # NOTE(review): removeWidget is called on the freshly created grid,
        # not the one the canvases were added to — likely a no-op; confirm.
        for canvas in self.canvasList:
            self.grid.removeWidget(canvas)
            canvas.deleteLater()
            canvas.close()
            canvas.setParent(None)
        self.canvasList = []
        self.resultList = []
        # Grid slots for up to four panels: (row, column).
        position = [[1, 0], [2, 0], [1, 1], [2, 1]]
        for i in range(numPanel):
            canvas = mplCanvas(self, dpi=self.dpi)
            canvas.plotButton.clicked.connect(self.calPlot)
            canvas.exportButton.clicked.connect(self.exportData)
            canvas.index = i  # lets calPlot/exportData find their result slot
            self.canvasList.append(canvas)
            self.resultList.append({})
            self.grid.addWidget(canvas, position[i][0], position[i][1])
        # canvas.draw()
        self.vboxRight.addLayout(self.grid)
def chComboWhatPlot(self):
if self.specChecks.checkedId() == 1:
self.comboWhatPlot.clear()
self.comboWhatPlot.addItem('Absorp coeff')
self.comboWhatPlot.addItem('Absorbance')
self.comboWhatPlot.addItem('Transmission')
else:
self.comboWhatPlot.clear()
for i in range(12):
self.comboWhatPlot.addItem(str(i + 1) + 'f')
    def setFrames(self):
        """Create the three main group boxes and the splitter layout.

        The left column is split vertically (gas properties over plot
        properties); the right 'Results' box holds the canvases and gets
        extra stretch via setStretchFactor.
        """
        self.leftTop = QtWidgets.QGroupBox(self.mainWidget)
        self.leftTop.setTitle('Set gas properties')
        self.leftBottom = QtWidgets.QGroupBox(self.mainWidget)
        self.leftBottom.setTitle('Set plot properties')
        self.right = QtWidgets.QGroupBox(self.mainWidget)
        self.right.setTitle('Results')
        self.vboxRight = QtWidgets.QVBoxLayout()
        self.right.setLayout(self.vboxRight)
        self.split1 = QtWidgets.QSplitter(QtCore.Qt.Vertical)
        self.split1.addWidget(self.leftTop)
        self.split1.addWidget(self.leftBottom)
        self.split2 = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
        self.split2.addWidget(self.split1)
        self.split2.addWidget(self.right)
        self.split2.setStretchFactor(1, 2)  # results pane grows faster
        hbox = QtWidgets.QHBoxLayout()
        hbox.addWidget(self.split2)
        self.mainWidget.setLayout(hbox)
def showError(self, errStr, details):
errBox = QtWidgets.QMessageBox(self)
errBox.setIcon(QtWidgets.QMessageBox.Information)
errBox.setText(errStr)
errBox.setDetailedText(details)
errBox.setWindowTitle('Error message')
errBox.setStandardButtons(QtWidgets.QMessageBox.Ok)
def calPlot(self):
canvas = self.sender().parent()
print canvas.index
unitDict = self.genUnitDict()
nuMin = float(self.minNu.text())
nuMax = float(self.maxNu.text())
numPt = int(self.numPt.text())
nu = np.linspace(nuMin, nuMax, numPt)
iCut = float(self.leICut.text())
gasParamsList = self.scrollGasPanel.getGasInfo()
profile = self.comboLineShape.currentText()
mode = self.comboWhatPlot.currentText()
self.statusBar().showMessage('Calculating...')
if self.specChecks.checkedId() == 1:
dasResults = specCal.calDas(gasParamsList, nu, profile, mode,
iCut=iCut, unitDict=unitDict)
if type(dasResults) is str:
errorMessage = QtWidgets.QMessageBox()
errorMessage.setText(dasResults)
errorMessage.exec_()
self.statusBar().showMessage(dasResults)
else:
specCal.plotDas(canvas.axes, dasResults, mode,
showTotal=self.plotTotalCheck.isChecked(),
unitDict=unitDict)
self.statusBar().showMessage('Done.')
self.resultList[canvas.index] = dasResults
else:
method = self.wmsMethod.currentText()
dNu = float(self.leWmsMod.text())
nf = int(mode.replace('f', ''))
if method == 'Theoretical':
wmsResults = specCal.calWms(gasParamsList, nu, profile, nf, method,
dNu=dNu, unitDict=unitDict)
else:
if self.laserSpec is None:
self.showError('No laser configuration.', 'Please go to Laser '
'config and load a '
'laser configuration.')
wmsResults = 'No laser configuration.'
else:
# self.laserSpec['central_wavelength'] = (nuMin + nuMax) / 2.
# self.laserSpec['aRamp'] = (nuMax -
# nuMin) / 200 * 1e3 / self.laserSpec[
# 'tRamp']
wmsResults = specCal.calWms(gasParamsList, nu, profile, nf,
'Simulation with parameters',
laserSpec=self.laserSpec,
unitDict=unitDict)
if type(wmsResults) is str:
errorMessage = QtWidgets.QMessageBox()
errorMessage.setText(wmsResults)
errorMessage.exec_()
self.statusBar().showMessage(wmsResults)
else:
self.statusBar().showMessage('Done.')
specCal.plotWms(canvas.axes, wmsResults,
showTotal=self.plotTotalCheck.isChecked(),
unitDict=unitDict)
self.resultList[canvas.index] = wmsResults
canvas.figure.tight_layout()
canvas.canvas.draw()
canvas.canvas.updateGeometry()
def exportData(self):
canvas = self.sender().parent()
filename, pat = QtWidgets.QFileDialog.getSaveFileName(self, "Export "
"data "
"to csv file",
"output.csv",
filter=self.tr(
"CSV "
"files (*.csv)"))
specCal.csvOutput(filename, self.resultList[canvas.index])
def showBslFit(self):
rngStrs = str(self.bslRng.text()).split(',')
bslRng = []
for rng in rngStrs:
idx = map(int, rng.split(':'))
bslRng.append(slice(idx[0], idx[1]))
self.dasMeas.bslFit(str(self.bslName.text()), bslRng, silent=True,
order=int(str(self.bslOrder.text())))
canvas = self.canvasList[0]
# canvas.axes.clf()
canvas.axes.plot(self.dasMeas.bsl)
canvas.figure.tight_layout()
canvas.canvas.draw()
canvas.canvas.updateGeometry()
def calcAbsorbance(self):
validRng = map(int, str(self.validRng.text()).split(':'))
self.dasMeas.getAbsorp(validRng)
canvas = self.canvasList[2]
canvas.axes.plot(self.dasMeas.absorbance)
canvas.figure.tight_layout()
canvas.canvas.draw()
canvas.canvas.updateGeometry()
def dasFit(self):
nuMin = float(self.minNu.text())
nuMax = float(self.maxNu.text())
numPt = int(self.numPt.text())
nu = np.linspace(nuMin, nuMax, numPt)
self.dasConc = specCal.dasFit(self.scrollGasPanel.getGasInfo(), float(str(
self.trate.text())), self.dasMeas.absorbance, nu, silence=False)
def updateCanvasGeometry(self):
for canvas in self.canvasList:
canvas.figure.tight_layout()
canvas.canvas.updateGeometry()
def listHData(self):
dialog = QtWidgets.QDialog(self.mainWidget)
vboxScroll = QtWidgets.QVBoxLayout()
scrollWidget = QtWidgets.QWidget(self.mainWidget)
scrollArea = QtWidgets.QScrollArea(self.mainWidget)
closeButton = QtWidgets.QPushButton('Close')
closeButton.clicked.connect(dialog.close)
vboxDialog = QtWidgets.QVBoxLayout()
for gas in self.gasList:
nu = np.array(hapi.getColumn(gas, 'nu'))
gasInfo = gas + ' :' + str(nu.min()) + ' to ' + str(nu.max()) + ' cm -1'
labelGasInfo = QtWidgets.QLabel(gasInfo)
vboxScroll.addWidget(labelGasInfo)
vboxScroll.setAlignment(QtCore.Qt.AlignTop)
scrollWidget.setLayout(vboxScroll)
scrollArea.setWidget(scrollWidget)
vboxDialog.addWidget(scrollArea)
vboxDialog.addWidget(closeButton)
dialog.setWindowTitle('Available HITRAN data')
dialog.setMinimumWidth(0.3 * self.scrsz[2])
dialog.setLayout(vboxDialog)
dialog.show()
def resource_path(relative):
    """Resolve a bundled-resource path.

    Honors PyInstaller's ``sys._MEIPASS`` extraction directory when the app
    runs frozen; otherwise returns the path relative to the working dir.
    """
    base = getattr(sys, "_MEIPASS", None)
    if base is None:
        return os.path.join(relative)
    return os.path.join(base, relative)
def main():
    """Create the Qt application, show the main window, run the event loop.

    Cleanup: removed the unused local ``filename = 'defaultSettings.txt'``.
    """
    app = QtWidgets.QApplication(sys.argv)
    appWindow = AppWindow()
    appWindow.show()
    # exec_() blocks until the GUI exits; propagate its return code.
    sys.exit(app.exec_())
# Standard script entry point: run the GUI only when executed directly.
if __name__ == '__main__':
    main()
# License: MIT