repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
sanghoon/tf-exercise-gan | datasets/data_celeba.py | 1 | 3494 | from common import plot
import os.path
import glob
import cv2
import random
import numpy as np
class ImgDataset:
    """Lazily-loaded image dataset backed by a directory of ``*.jpg`` files.

    Images are optionally center-cropped and resized, rescaled to [0, 1]
    float32, and converted from OpenCV's BGR channel order to RGB.
    """
    def __init__(self, dataDir, i_from=0, i_to=None, shuffle=False, crop=None, resize=None):
        """
        :param dataDir: directory containing the *.jpg files
        :param i_from: start index into the sorted file list
        :param i_to: end index into the sorted file list (None = to the end)
        :param shuffle: shuffle file order (also reshuffled after each epoch)
        :param crop: square crop size (pixels) taken at the image center
        :param resize: expected output width and height (pixels)
        """
        self.dataDir = dataDir
        self.img_list = glob.glob(os.path.join(dataDir, "*.jpg"))
        self.img_list = sorted(self.img_list)[i_from:i_to]
        self.shuffle = shuffle
        self._i = 0
        self.resize = resize    # Expected width and height
        self.crop = crop        # Cropsize at center
        if shuffle:
            random.shuffle(self.img_list)
        self.preloaded = False
        self.images = [self[0]]     # Dummy image for size calculation in other codes
    def preload(self):
        """Read and preprocess every image into memory up front."""
        # BUGFIX: list(...) is required here.  Under Python 3, `map` returns a
        # lazy iterator, so `self.images[item]` in __getitem__ would raise
        # TypeError and the iterator would be exhausted after a single pass.
        images = list(map(cv2.imread, self.img_list))
        images = list(map(self.crop_and_resize, images))
        self.images = images
        self.preloaded = True
    def crop_and_resize(self, im):
        """Center-crop, resize, rescale to [0, 1] and convert BGR -> RGB."""
        # Crop
        if self.crop:
            h, w, = im.shape[:2]
            j = int(round((h - self.crop) / 2.))
            i = int(round((w - self.crop) / 2.))
            im = im[j:j+self.crop, i:i+self.crop, :]
        if self.resize:
            im = cv2.resize(im, (self.resize, self.resize))
        # rescale (range: 0.0~1.0)
        im = im.astype(np.float32) / 255.0
        # Reverse RGB-ordering (Please refer http://www.pyimagesearch.com/2014/11/03/display-matplotlib-rgb-image/)
        im = im[:, :, (2,1,0)]
        return im
    def __getitem__(self, item):
        if self.preloaded:
            return self.images[item]
        if isinstance(item, tuple) or isinstance(item, slice):
            # BUGFIX: materialize as a list so callers can take len() and
            # concatenate batches (Python 3 `map` is a one-shot iterator).
            im = list(map(cv2.imread, self.img_list[item]))
            im = list(map(self.crop_and_resize, im))
        else:
            # Read image
            im = cv2.imread(self.img_list[item])
            im = self.crop_and_resize(im)
        return im
    def __len__(self):
        return len(self.img_list)
    def next_batch(self, batch_size):
        """Return ``(samples, labels)``; labels are always None.

        Wraps around at the end of an epoch, reshuffling if requested, and
        recursively tops the batch up to `batch_size` samples.
        """
        samples = self[self._i : self._i + batch_size]
        self._i += batch_size
        # If reached the end of the dataset
        if self._i >= len(self):
            # Re-initialize
            self._i = 0
            if self.shuffle:
                random.shuffle(self.img_list)
            n_more = batch_size - len(samples)
            samples = samples + self.next_batch(n_more)[0]
        return samples, None
class CelebA:
    """Standard CelebA split: train / validation / test ImgDataset triplet.

    All splits use a 108-pixel center crop resized to 64x64.
    """
    # TODO: Follow the original set's train/val/test pratition
    # TODO: Provide label info.
    # TODO: refactoring
    def __init__(self, dataDir):
        preprocess = dict(crop=108, resize=64)
        self.train = ImgDataset(dataDir, i_from=0, i_to=150000, shuffle=True, **preprocess)
        self.validation = ImgDataset(dataDir, i_from=150000, i_to=160000, **preprocess)
        self.test = ImgDataset(dataDir, i_from=160000, i_to=None, **preprocess)
        # for evaluation
        self.validation.preload()
    def plot(self, img_generator, fig_id=None):
        """Sample 16 images from `img_generator` and plot them on a grid."""
        generated = img_generator(16)
        return plot(generated, fig_id, shape=self.train.images[0].shape)
if __name__ == '__main__':
    # Quick visual smoke test: display sample batches from the train and
    # test splits of the dataset directory given on the command line.
    import sys
    dataDir = sys.argv[1]
    data = CelebA(dataDir)
    ims, _ = data.train.next_batch(16)
    for i in range(16):
        # Images are stored RGB in [0, 1]; flip channels back to BGR for cv2.imshow
        cv2.imshow('image', ims[i][:, :, (2,1,0)])
        cv2.waitKey(0)
    ims, _ = data.test.next_batch(16)
    for i in range(16):
        cv2.imshow('image', ims[i][:, :, (2,1,0)])
cv2.waitKey(0) | mit |
mdaniel/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_thrift.py | 9 | 22067 | """Contains methods for building Thrift structures for interacting with IDE
The methods from this file are used for Python console interaction. Please
note that the debugger still uses XML structures with the similar methods
contained in `pydevd_xml.py` file.
"""
import sys
import traceback
from _pydev_bundle import pydev_log
from _pydevd_bundle import pydevd_extension_utils
from _pydevd_bundle import pydevd_resolver
from _pydevd_bundle.pydevd_constants import dict_iter_items, dict_keys, IS_PY3K, \
MAXIMUM_VARIABLE_REPRESENTATION_SIZE, RETURN_VALUES_DICT, LOAD_VALUES_POLICY, DEFAULT_VALUES_DICT, NUMPY_NUMERIC_TYPES
from _pydevd_bundle.pydevd_extension_api import TypeResolveProvider, StrPresentationProvider
from _pydevd_bundle.pydevd_utils import take_first_n_coll_elements, is_pandas_container, is_string, pandas_to_str, \
should_evaluate_full_value, should_evaluate_shape
from _pydevd_bundle.pydevd_vars import get_label, array_default_format, is_able_to_format_number, MAXIMUM_ARRAY_SIZE, \
get_column_formatter_by_type, get_formatted_row_elements, DEFAULT_DF_FORMAT, DATAFRAME_HEADER_LOAD_MAX_SIZE
from pydev_console.pydev_protocol import DebugValue, GetArrayResponse, ArrayData, ArrayHeaders, ColHeader, RowHeader, \
UnsupportedArrayTypeException, ExceedingArrayDimensionsException
# Frames get a dedicated resolver later on; grab the concrete frame type if
# the interpreter exposes it (broad except: some runtimes lack types.FrameType).
try:
    import types
    frame_type = types.FrameType
except:
    frame_type = None
class ExceptionOnEvaluate:
    """Marker wrapper: evaluating an expression raised; `result` holds what was raised."""
    def __init__(self, result):
        self.result = result
# True when running under Jython (sys.platform starts with 'java' there).
_IS_JYTHON = sys.platform.startswith("java")
def _create_default_type_map():
    """Build the ordered (type, resolver) list used for default type resolution.

    A resolver of None means the type is a plain scalar (not a compound
    variable).  Order matters: entries inserted at the front (django types)
    must be matched before `dict`/the default instance resolver.  Types that
    only exist on some interpreters/versions are appended inside try/except.
    """
    if not _IS_JYTHON:
        default_type_map = [
            # None means that it should not be treated as a compound variable
            # isintance does not accept a tuple on some versions of python, so, we must declare it expanded
            (type(None), None,),
            (int, None),
            (float, None),
            (complex, None),
            (str, None),
            (tuple, pydevd_resolver.tupleResolver),
            (list, pydevd_resolver.tupleResolver),
            (dict, pydevd_resolver.dictResolver),
        ]
        try:
            default_type_map.append((long, None))  # @UndefinedVariable
        except:
            pass  # not available on all python versions
        try:
            default_type_map.append((unicode, None))  # @UndefinedVariable
        except:
            pass  # not available on all python versions
        try:
            default_type_map.append((set, pydevd_resolver.setResolver))
        except:
            pass  # not available on all python versions
        try:
            default_type_map.append((frozenset, pydevd_resolver.setResolver))
        except:
            pass  # not available on all python versions
        try:
            from django.utils.datastructures import MultiValueDict
            default_type_map.insert(0, (MultiValueDict, pydevd_resolver.multiValueDictResolver))
            # we should put it before dict
        except:
            pass  # django may not be installed
        try:
            from django.forms import BaseForm
            default_type_map.insert(0, (BaseForm, pydevd_resolver.djangoFormResolver))
            # we should put it before instance resolver
        except:
            pass  # django may not be installed
        try:
            from collections import deque
            default_type_map.append((deque, pydevd_resolver.dequeResolver))
        except:
            pass
        if frame_type is not None:
            default_type_map.append((frame_type, pydevd_resolver.frameResolver))
    else:
        # Jython: map the core Py* types instead of the CPython builtins.
        from org.python import core  # @UnresolvedImport
        default_type_map = [
            (core.PyNone, None),
            (core.PyInteger, None),
            (core.PyLong, None),
            (core.PyFloat, None),
            (core.PyComplex, None),
            (core.PyString, None),
            (core.PyTuple, pydevd_resolver.tupleResolver),
            (core.PyList, pydevd_resolver.tupleResolver),
            (core.PyDictionary, pydevd_resolver.dictResolver),
            (core.PyStringMap, pydevd_resolver.dictResolver),
        ]
        if hasattr(core, 'PyJavaInstance'):
            # Jython 2.5b3 removed it.
            default_type_map.append((core.PyJavaInstance, pydevd_resolver.instanceResolver))
    return default_type_map
class TypeResolveHandler(object):
    """Maps a value's type to its resolver / str-presentation provider.

    Results are cached per type.  Extension-supplied providers are consulted
    before the built-in default type map, so plugins can override the stock
    behavior.
    """

    NO_PROVIDER = []  # Sentinel value (any mutable object to be used as a constant would be valid).

    def __init__(self):
        # Note: don't initialize with the types we already know about so that the extensions can override
        # the default resolvers that are already available if they want.
        self._type_to_resolver_cache = {}
        self._type_to_str_provider_cache = {}
        self._initialized = False

    def _initialize(self):
        # Deferred initialization: extension discovery is comparatively
        # expensive and not always needed.
        self._default_type_map = _create_default_type_map()
        self._resolve_providers = pydevd_extension_utils.extensions_of_type(TypeResolveProvider)
        self._str_providers = pydevd_extension_utils.extensions_of_type(StrPresentationProvider)
        self._initialized = True

    def get_type(self, o):
        """Return (type_object, type_name, resolver) for *o*; never raises."""
        try:
            try:
                # Faster than type(o) as we don't need the function call.
                type_object = o.__class__
            except:
                # Not all objects have __class__ (i.e.: there are bad bindings around).
                type_object = type(o)
            type_name = type_object.__name__
        except:
            # This happens for org.python.core.InitModule
            return 'Unable to get Type', 'Unable to get Type', None
        return self._get_type(o, type_object, type_name)

    def _get_type(self, o, type_object, type_name):
        resolver = self._type_to_resolver_cache.get(type_object)
        if resolver is not None:
            return type_object, type_name, resolver
        if not self._initialized:
            self._initialize()
        try:
            # Extensions take precedence over the default map.
            for resolver in self._resolve_providers:
                if resolver.can_provide(type_object, type_name):
                    # Cache it
                    self._type_to_resolver_cache[type_object] = resolver
                    return type_object, type_name, resolver
            for t in self._default_type_map:
                if isinstance(o, t[0]):
                    # Cache it
                    resolver = t[1]
                    self._type_to_resolver_cache[type_object] = resolver
                    return (type_object, type_name, resolver)
        except:
            traceback.print_exc()
        # No match return default (and cache it).
        resolver = pydevd_resolver.defaultResolver
        self._type_to_resolver_cache[type_object] = resolver
        return type_object, type_name, resolver

    if _IS_JYTHON:
        # On Jython, wrap _get_type to special-case Java instances/arrays
        # before delegating to the regular lookup above.
        _base_get_type = _get_type

        def _get_type(self, o, type_object, type_name):
            if type_name == 'org.python.core.PyJavaInstance':
                return type_object, type_name, pydevd_resolver.instanceResolver
            if type_name == 'org.python.core.PyArray':
                return type_object, type_name, pydevd_resolver.jyArrayResolver
            # BUGFIX: type_name was passed twice here; the second positional
            # argument of _base_get_type is the type *object*, not the name.
            return self._base_get_type(o, type_object, type_name)

    def str_from_providers(self, o, type_object, type_name):
        """Return a custom string for *o* from an extension provider, or None."""
        provider = self._type_to_str_provider_cache.get(type_object)
        if provider is self.NO_PROVIDER:
            return None
        if provider is not None:
            return provider.get_str(o)
        if not self._initialized:
            self._initialize()
        for provider in self._str_providers:
            if provider.can_provide(type_object, type_name):
                self._type_to_str_provider_cache[type_object] = provider
                return provider.get_str(o)
        # Cache the miss so we don't rescan the providers for this type.
        self._type_to_str_provider_cache[type_object] = self.NO_PROVIDER
        return None
_TYPE_RESOLVE_HANDLER = TypeResolveHandler()
"""
def get_type(o):
Receives object and returns a triple (typeObject, typeString, resolver).
resolver != None means that variable is a container, and should be displayed as a hierarchy.
Use the resolver to get its attributes.
All container objects should have a resolver.
"""
get_type = _TYPE_RESOLVE_HANDLER.get_type
_str_from_providers = _TYPE_RESOLVE_HANDLER.str_from_providers
def frame_vars_to_struct(frame_f_locals, hidden_ns=None):
    """Returns frame variables as the list of `DebugValue` structures

    Variables are sorted by name; entries found under RETURN_VALUES_DICT are
    flagged as return values and moved to the front of the resulting list.
    Names present in `hidden_ns` (e.g. IPython internals) are flagged with
    isIPythonHidden.  Any per-variable failure is logged and skipped, so one
    bad value never breaks the whole frame dump.
    """
    values = []
    keys = dict_keys(frame_f_locals)
    if hasattr(keys, 'sort'):
        keys.sort()  # Python 3.0 does not have it
    else:
        keys = sorted(keys)  # Jython 2.1 does not have it
    return_values = []
    for k in keys:
        try:
            v = frame_f_locals[k]
            eval_full_val = should_evaluate_full_value(v)
            if k == RETURN_VALUES_DICT:
                # Special pseudo-variable holding {function_name: return_value}.
                for name, val in dict_iter_items(v):
                    value = var_to_struct(val, name)
                    value.isRetVal = True
                    return_values.append(value)
            else:
                if hidden_ns is not None and k in hidden_ns:
                    value = var_to_struct(v, str(k), evaluate_full_value=eval_full_val)
                    value.isIPythonHidden = True
                    values.append(value)
                else:
                    value = var_to_struct(v, str(k), evaluate_full_value=eval_full_val)
                    values.append(value)
        except Exception:
            traceback.print_exc()
            pydev_log.error("Unexpected error, recovered safely.\n")
    # Show return values as the first entry.
    return return_values + values
def var_to_struct(val, name, format='%s', do_trim=True, evaluate_full_value=True):
    """ single variable or dictionary to Thrift struct representation

    :param val: value to describe (may be an ExceptionOnEvaluate wrapper)
    :param name: variable name to report
    :param format: %-style format used to render scalar values
    :param do_trim: truncate representations longer than
        MAXIMUM_VARIABLE_REPRESENTATION_SIZE
    :param evaluate_full_value: when False, a placeholder is reported
        instead of computing the real string value
    :return: a filled-in DebugValue
    """
    debug_value = DebugValue()
    try:
        # This should be faster than isinstance (but we have to protect against not having a '__class__' attribute).
        is_exception_on_eval = val.__class__ == ExceptionOnEvaluate
    except:
        is_exception_on_eval = False
    if is_exception_on_eval:
        v = val.result
    else:
        v = val
    _type, typeName, resolver = get_type(v)
    type_qualifier = getattr(_type, "__module__", "")
    if not evaluate_full_value:
        value = DEFAULT_VALUES_DICT[LOAD_VALUES_POLICY]
    else:
        try:
            str_from_provider = _str_from_providers(v, _type, typeName)
            if str_from_provider is not None:
                value = str_from_provider
            elif hasattr(v, '__class__'):
                if v.__class__ == frame_type:
                    value = pydevd_resolver.frameResolver.get_frame_name(v)
                elif v.__class__ in (list, tuple):
                    if len(v) > pydevd_resolver.MAX_ITEMS_TO_HANDLE:
                        # Only render a prefix of huge collections.
                        value = '%s' % take_first_n_coll_elements(
                            v, pydevd_resolver.MAX_ITEMS_TO_HANDLE)
                        value = value.rstrip(')]}') + '...'
                    else:
                        value = '%s' % str(v)
                else:
                    value = format % v
            else:
                value = str(v)
        except:
            try:
                value = repr(v)
            except:
                value = 'Unable to get repr for %s' % v.__class__
    debug_value.name = name
    debug_value.type = typeName
    if type_qualifier:
        debug_value.qualifier = type_qualifier
    # cannot be too big... communication may not handle it.
    if len(value) > MAXIMUM_VARIABLE_REPRESENTATION_SIZE and do_trim:
        value = value[0:MAXIMUM_VARIABLE_REPRESENTATION_SIZE]
        value += '...'
    # fix to work with unicode values
    try:
        if not IS_PY3K:
            if value.__class__ == unicode:  # @UndefinedVariable
                value = value.encode('utf-8')
        else:
            if value.__class__ == bytes:
                # BUGFIX: bytes objects have no .encode() -- the previous
                # value.encode('utf-8') raised AttributeError (not caught by
                # the TypeError handler below).  Decode to str instead.
                value = value.decode('utf-8', 'replace')
    except TypeError:  # in java, unicode is a function
        pass
    if is_pandas_container(type_qualifier, typeName, v):
        value = pandas_to_str(v, typeName, value, pydevd_resolver.MAX_ITEMS_TO_HANDLE)
    debug_value.value = value
    try:
        if should_evaluate_shape():
            # Prefer a real .shape attribute; fall back to len() for sized
            # non-string objects.
            if hasattr(v, 'shape') and not callable(v.shape):
                debug_value.shape = str(tuple(v.shape))
            elif hasattr(v, '__len__') and not is_string(v):
                debug_value.shape = str(len(v))
    except:
        pass
    if is_exception_on_eval:
        debug_value.isErrorOnEval = True
    else:
        if resolver is not None:
            debug_value.isContainer = True
        else:
            pass
    return debug_value
def var_to_str(val, format, do_trim=True, evaluate_full_value=True):
    """Render a single value as its (possibly trimmed) string representation."""
    rendered = var_to_struct(val, '', format, do_trim, evaluate_full_value).value
    if rendered is None:
        return ''
    return rendered
# from pydevd_vars.py
def array_to_thrift_struct(array, name, roffset, coffset, rows, cols, format):
    """Build a GetArrayResponse chunk for a numpy ndarray view.

    `roffset`/`coffset` give the requested row/column offsets; `rows`/`cols`
    the requested view size ((-1, -1) means "everything", capped at
    MAXIMUM_ARRAY_SIZE).  The metadata (slice string, bounds, format) is
    computed by array_to_meta_thrift_struct; this function then slices the
    array down to the requested window and fills in the cell data.
    """
    array, array_chunk, r, c, f = array_to_meta_thrift_struct(array, name, format)
    format = '%' + f
    if rows == -1 and cols == -1:
        rows = r
        cols = c
    rows = min(rows, MAXIMUM_ARRAY_SIZE)
    cols = min(cols, MAXIMUM_ARRAY_SIZE)
    # there is no obvious rule for slicing (at least 5 choices)
    if len(array) == 1 and (rows > 1 or cols > 1):
        array = array[0]
    if array.size > len(array):
        # True 2D case: apply both offsets.
        array = array[roffset:, coffset:]
        rows = min(rows, len(array))
        cols = min(cols, len(array[0]))
        if len(array) == 1:
            array = array[0]
    elif array.size == len(array):
        # Effectively 1D: only one of the offsets applies.
        if roffset == 0 and rows == 1:
            array = array[coffset:]
            cols = min(cols, len(array))
        elif coffset == 0 and cols == 1:
            array = array[roffset:]
            rows = min(rows, len(array))
    def get_value(row, col):
        # Indexing helper that copes with 1D rows/columns and 2D arrays alike.
        value = array
        if rows == 1 or cols == 1:
            if rows == 1 and cols == 1:
                value = array[0]
            else:
                value = array[(col if rows == 1 else row)]
                if "ndarray" in str(type(value)):
                    value = value[0]
        else:
            value = array[row][col]
        return value
    array_chunk.data = array_data_to_thrift_struct(rows, cols, lambda r: (get_value(r, c) for c in range(cols)), format)
    return array_chunk
def array_to_meta_thrift_struct(array, name, format):
    """Compute metadata (slice string, shape, bounds, format) for an ndarray.

    Returns (array, GetArrayResponse, rows, cols, format) where `array` may
    have been reduced to its leading 2D slice for arrays of higher rank and
    `format` is the %-directive body (without the leading '%').

    Raises ExceedingArrayDimensionsException for arrays of more than two
    dimensions after the initial reduction.
    NOTE(review): a 0-d array would leave rows/cols unassigned and raise
    NameError below -- presumably callers never pass scalars; confirm.
    """
    type = array.dtype.kind
    slice = name
    l = len(array.shape)
    # initial load, compute slice
    if format == '%':
        if l > 2:
            # Reduce an N-d array to its first 2D slice and record that in
            # the slice expression shown to the user.
            slice += '[0]' * (l - 2)
            for r in range(l - 2):
                array = array[0]
        # Pick a default format from the dtype kind.
        if type == 'f':
            format = '.5f'
        elif type == 'i' or type == 'u':
            format = 'd'
        else:
            format = 's'
    else:
        format = format.replace('%', '')
    l = len(array.shape)
    reslice = ""
    if l > 2:
        raise ExceedingArrayDimensionsException
    elif l == 1:
        # special case with 1D arrays arr[i, :] - row, but arr[:, i] - column with equal shape and ndim
        # http://stackoverflow.com/questions/16837946/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns
        # explanation: http://stackoverflow.com/questions/15165170/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1
        # we use kind of a hack - get information about memory from C_CONTIGUOUS
        is_row = array.flags['C_CONTIGUOUS']
        if is_row:
            rows = 1
            cols = len(array)
            # NOTE(review): this guard can never fire (cols was just set to
            # len(array)); kept as-is to preserve behavior.
            if cols < len(array):
                reslice = '[0:%s]' % (cols)
                array = array[0:cols]
        else:
            cols = 1
            rows = len(array)
            # NOTE(review): dead guard, see above.
            if rows < len(array):
                reslice = '[0:%s]' % (rows)
                array = array[0:rows]
    elif l == 2:
        rows = array.shape[-2]
        cols = array.shape[-1]
        # NOTE(review): dead guard, see above.
        if cols < array.shape[-1] or rows < array.shape[-2]:
            reslice = '[0:%s, 0:%s]' % (rows, cols)
            array = array[0:rows, 0:cols]
    # avoid slice duplication
    if not slice.endswith(reslice):
        slice += reslice
    bounds = (0, 0)
    if type in NUMPY_NUMERIC_TYPES and array.size != 0:
        bounds = (array.min(), array.max())
    array_chunk = GetArrayResponse()
    array_chunk.slice = slice
    array_chunk.rows = rows
    array_chunk.cols = cols
    array_chunk.format = "%" + format
    array_chunk.type = type
    array_chunk.max = "%s" % bounds[1]
    array_chunk.min = "%s" % bounds[0]
    return array, array_chunk, rows, cols, format
def dataframe_to_thrift_struct(df, name, roffset, coffset, rows, cols, format):
    """Build a GetArrayResponse chunk for a pandas DataFrame or Series.

    (rows, cols) == (-1, -1) requests the full table; (0, 0) requests a
    header-only response.  Column bounds are computed over the *whole*
    columns before slicing the requested window.

    :type df: pandas.core.frame.DataFrame
    :type name: str
    :type coffset: int
    :type roffset: int
    :type rows: int
    :type cols: int
    :type format: str
    """
    original_df = df
    dim = len(df.axes)  # 1 for a Series, 2 for a DataFrame
    num_rows = df.shape[0]
    num_cols = df.shape[1] if dim > 1 else 1
    array_chunk = GetArrayResponse()
    array_chunk.slice = name
    array_chunk.rows = num_rows
    array_chunk.cols = num_cols
    array_chunk.type = ""
    array_chunk.max = "0"
    array_chunk.min = "0"
    format = format.replace("%", "")
    if not format:
        # No explicit format requested: derive one from the dtype kind.
        if num_rows > 0 and num_cols == 1:  # series or data frame with one column
            try:
                kind = df.dtype.kind
            except AttributeError:
                try:
                    kind = df.dtypes[0].kind
                except (IndexError, KeyError):
                    kind = "O"
            format = array_default_format(kind)
        else:
            format = array_default_format(DEFAULT_DF_FORMAT)
    array_chunk.format = "%" + format
    if (rows, cols) == (-1, -1):
        rows, cols = num_rows, num_cols
    elif (rows, cols) == (0, 0):
        # return header only
        r = min(num_rows, DATAFRAME_HEADER_LOAD_MAX_SIZE)
        c = min(num_cols, DATAFRAME_HEADER_LOAD_MAX_SIZE)
        array_chunk.headers = header_data_to_thrift_struct(r, c, [""] * num_cols, [(0, 0)] * num_cols, lambda x: DEFAULT_DF_FORMAT, original_df, dim)
        array_chunk.data = array_data_to_thrift_struct(rows, cols, None, format)
        return array_chunk
    rows = min(rows, MAXIMUM_ARRAY_SIZE)
    cols = min(cols, MAXIMUM_ARRAY_SIZE, num_cols)
    # need to precompute column bounds here before slicing!
    col_bounds = [None] * cols
    dtypes = [None] * cols
    if dim > 1:
        for col in range(cols):
            dtype = df.dtypes.iloc[coffset + col].kind
            dtypes[col] = dtype
            if dtype in NUMPY_NUMERIC_TYPES and df.size != 0:
                cvalues = df.iloc[:, coffset + col]
                bounds = (cvalues.min(), cvalues.max())
            else:
                bounds = (0, 0)
            col_bounds[col] = bounds
    else:
        dtype = df.dtype.kind
        dtypes[0] = dtype
        col_bounds[0] = (df.min(), df.max()) if dtype in NUMPY_NUMERIC_TYPES and df.size != 0 else (0, 0)
    # Slice out the requested window, then re-read the actual shape.
    df = df.iloc[roffset: roffset + rows, coffset: coffset + cols] if dim > 1 else df.iloc[roffset: roffset + rows]
    rows = df.shape[0]
    cols = df.shape[1] if dim > 1 else 1
    def col_to_format(c):
        return get_column_formatter_by_type(format, dtypes[c])
    # .iat only works with unique column labels; fall back to .iloc otherwise.
    iat = df.iat if dim == 1 or len(df.columns.unique()) == len(df.columns) else df.iloc
    def formatted_row_elements(row):
        return get_formatted_row_elements(row, iat, dim, cols, format, dtypes)
    array_chunk.headers = header_data_to_thrift_struct(rows, cols, dtypes, col_bounds, col_to_format, df, dim)
    array_chunk.data = array_data_to_thrift_struct(rows, cols, formatted_row_elements, format)
    return array_chunk
def array_data_to_thrift_struct(rows, cols, get_row, format):
    """Fill an ArrayData struct: `rows` rows of stringified cells.

    `get_row(r)` must yield the raw cell values of row `r`; it is never
    called when `rows` is 0 (header-only responses pass get_row=None).
    """
    result = ArrayData()
    result.rows = rows
    result.cols = cols
    # `ArrayData.data`
    result.data = [
        [var_to_str(cell, format) for cell in get_row(r)]
        for r in range(rows)
    ]
    return result
def header_data_to_thrift_struct(rows, cols, dtypes, col_bounds, col_to_format, df, dim):
    """Build the ArrayHeaders struct (column and row headers) for a table."""
    headers = ArrayHeaders()

    column_headers = []
    for c in range(cols):
        fmt = col_to_format(c)
        low, high = col_bounds[c]
        col_header = ColHeader()
        # col_header.index = col
        # Series (dim == 1) have no column axis; fall back to the position.
        col_header.label = get_label(df.axes[1].values[c]) if dim > 1 else str(c)
        col_header.type = dtypes[c]
        col_header.format = fmt
        col_header.max = ("%" + fmt) % high
        col_header.min = ("%" + fmt) % low
        column_headers.append(col_header)

    row_headers = []
    for r in range(rows):
        row_header = RowHeader()
        row_header.index = r
        row_header.label = get_label(df.axes[0].values[r])
        row_headers.append(row_header)

    headers.colHeaders = column_headers
    headers.rowHeaders = row_headers
    return headers
# Dispatch table: type name -> converter.  pandas and geopandas table types
# all share the DataFrame converter.
TYPE_TO_THRIFT_STRUCT_CONVERTERS = {
    "ndarray": array_to_thrift_struct,
    "DataFrame": dataframe_to_thrift_struct,
    "Series": dataframe_to_thrift_struct,
    "GeoDataFrame": dataframe_to_thrift_struct,
    "GeoSeries": dataframe_to_thrift_struct
}
def table_like_struct_to_thrift_struct(array, name, roffset, coffset, rows, cols, format):
    """Returns `GetArrayResponse` structure for table-like structure

    The `array` might be either `numpy.ndarray`, `pandas.DataFrame` or
    `pandas.Series`; dispatch is by type name via
    TYPE_TO_THRIFT_STRUCT_CONVERTERS.

    Raises UnsupportedArrayTypeException for any other type.
    """
    _, type_name, _ = get_type(array)
    if not is_able_to_format_number(format):
        format = '%'
    converter = TYPE_TO_THRIFT_STRUCT_CONVERTERS.get(type_name)
    if converter is None:
        raise UnsupportedArrayTypeException(type_name)
    return converter(array, name, roffset, coffset, rows, cols, format)
| apache-2.0 |
tomchor/pymicra | pymicra/io.py | 1 | 10722 | """
Defines some useful functions to aid on the input/output of data
"""
from __future__ import absolute_import, print_function, division
#-------------------------------------------
#-------------------------------------------
# INPUT OF DATA
#-------------------------------------------
#-------------------------------------------
def readDataFile(fname, variables=None, only_named_cols=True, **kwargs):
    """
    Reads one datafile using pandas.read_csv()

    Parameters
    ----------
    fname: str
        path of the file to read
    variables: dict
        keys are column indices and values are names of each variable;
        date-related columns carry a '%%' format directive in their name
        and are therefore read as strings (parsed into dates later)
    only_named_cols: bool
        if True, don't read columns that don't appear on variables' keys
    kwargs: dict
        dictionary with kwargs of pandas' read_csv function
        see http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html for more detail

    Returns
    ---------
    pandas.DataFrame:
        pandas.DataFrame object
    """
    import pandas as pd

    #------------
    # This makes it easier to read dates: columns whose name holds a date
    # format directive ('%') must stay strings for later parsing.
    # NOTE(review): the dtype keys come from enumerate(), i.e. positions in
    # the dict, which only matches the csv columns when variables' keys are
    # contiguous from 0 -- confirm that configs always satisfy this.
    try:
        dtypes = { i : str for i,key in enumerate(variables.values()) if r'%' in key }
    except AttributeError:
        # variables is None (or has no .values()): let pandas infer dtypes
        dtypes = None
    #------------

    #------------
    # If only_named_cols == False, read all columns in the file
    if not only_named_cols:
        usedcols = None
    else:
        usedcols = sorted(variables.keys())
    #------------

    #------------
    # Should work, but just in case it doesn't
    try:
        data = pd.read_csv(fname, usecols=usedcols, dtype=dtypes, **kwargs)
    except ValueError:
        print('WARNING: Ignoring dtypes for date columns. This may cause problems parsing dates')
        print(usedcols)
        print(kwargs)
        print(fname)
        data = pd.read_csv(fname, usecols=usedcols, **kwargs)
    #------------

    #------------
    # Renaming columns according to our variables
    data = data.rename(columns = variables)
    #------------
    return data
def readDataFiles(flist, verbose=0, **kwargs):
    """
    Reads data from a list of files by calling readDataFile individually for each entry

    Parameters
    -----------
    flist: sequence of strings
        files to be parsed
    verbose: bool
        whether to print progress messages
    **kwargs:
        passed through to readDataFile

    Returns
    --------
    pandas.DataFrame
        the concatenated data (index rebuilt from scratch)
    """
    import pandas as pd

    if len(flist) == 0:
        raise ValueError('Passed a list of files of zero length to be read.')

    pieces = []
    for fname in flist:
        # Per-file message only for verbose == 1 exactly (historical behavior).
        if verbose == 1:
            print('Reading', fname)
        pieces.append(readDataFile(fname, **kwargs))

    if verbose:
        print('Concatenating DataFrames...')
    joined = pd.concat(pieces, ignore_index=True)
    if verbose:
        print('Done!')
    return joined
def timeSeries(flist, datalogger, parse_dates=True, verbose=False,
        read_data_kw={}, parse_dates_kw={}, clean_dates=True, return_units=True, only_named_cols=True):
    """
    Creates a micrometeorological time series from a file or list of files.

    Parameters
    ----------
    flist: list or string
        either list or names of files (dataFrame will be one concatenated dataframe) or the name of one file
    datalogger: pymicra.fileConfig object
        configuration of the datalogger which is from where all the configurations of the file will be taken
    parse_dates: bool
        whether or not to index the data by date. Note that if this is False many of the functionalities
        of pymicra will be lost.
        (i.d. there are repeated timestamps)
    verbose: int, bool
        verbose level

    Returns
    -------
    pandas.DataFrame
        data contained in the files in flist
    dict (optional)
        units of the data
    """
    from . import algs
    #--------------
    # If datalogger is a string it should be the path to a .dlc file
    # (BUGFIX: removed a leftover debug print('TESTING') here)
    if isinstance(datalogger, str):
        # NOTE(review): assumes `fileConfig` is importable at module level -- confirm.
        datalogger = fileConfig(datalogger)
        #datalogger = _read_dlc(datalogger)
    #--------------

    #------------
    # We read the file(s)
    if isinstance(flist, str):
        flist=[flist]
    header_lines=datalogger.header_lines
    skiprows=datalogger.skiprows
    columns_separator=datalogger.columns_separator
    if columns_separator=='whitespace':
        timeseries=readDataFiles(flist, header=None, skiprows=skiprows, delim_whitespace=True,
                    variables = datalogger.variables, only_named_cols=only_named_cols, **read_data_kw)
    else:
        timeseries=readDataFiles(flist, header=None, skiprows=skiprows, sep=columns_separator,
                    variables = datalogger.variables, only_named_cols=only_named_cols, **read_data_kw)
    #------------

    #------------
    # We parse de dates
    if parse_dates:
        if verbose: print('Starting to parse the dates')
        timeseries=algs.parseDates(timeseries, dataloggerConfig=datalogger, **parse_dates_kw)
    #------------

    #------------
    # We clean the dates (if not cleaned already)
    if clean_dates:
        if verbose: print('Cleaning the date columns')
        timeseries = timeseries[ [ col for col in timeseries.columns if col not in datalogger.date_col_names ] ]
    #------------

    if return_units:
        return timeseries, datalogger.units.copy()
    else:
        return timeseries
def read_fileConfig(dlcfile):
    """
    Reads a file (metadata) configuration file and builds a fileConfig object.

    The .config file is plain Python source whose assignments become the
    keyword arguments of fileConfig.

    WARNING! When defining the .config file note that by default columns
    enclosed between doublequotes will appear without the doublequotes, e.g.
    "2013-04-05 00:00:00", .345, .344, ... should use
    variables={0:'%Y-%m-%d %H:%M:%S',1:'u',2:'v'}.  This is the default csv
    format of CampbellSci dataloggers.  To disable this feature, parse the
    file with read_csv using the kw: quoting=3.
    """
    from .core import fileConfig

    global_namespace = {}
    config_vars = {}
    try:
        # Python 2 path
        execfile(dlcfile, global_namespace, config_vars)
    except NameError:
        # Python 3: execfile() no longer exists
        exec(open(dlcfile).read(), global_namespace, config_vars)
    return fileConfig(**config_vars)
def read_site(sitefile):
    """
    Reads .site configuration file, which holds siteConfig definitions

    The .site should have definitions as regular python syntax (in meters!):
        measurement_height = 10
        canopy_height = 5
        displacement_height = 3
        roughness_length = 1.0

    Parameters
    ----------
    sitefile: str
        path to the site configuration file

    Returns
    -------
    pymicra.siteConfig
        pymicra site configuration object
    """
    # BUGFIX: fileConfig must also be imported here, otherwise the fallback
    # `return fileConfig(...)` below raised NameError.
    from .core import siteConfig, fileConfig
    globs={}
    sitevars={}
    try:
        # Python 2's execfile; NameError on Python 3 triggers the exec fallback
        execfile(sitefile, globs, sitevars)
    except NameError:
        exec(open(sitefile).read(), globs, sitevars)
    #--------
    # First try new class, if not possible, try old one
    try:
        return siteConfig(**sitevars)
    except Exception:
        return fileConfig(**sitevars)
    #--------
def readUnitsCsv(filename, **kwargs):
    """
    Reads a csv file in which the first line is the name of the variables
    and the second line contains the units

    Parameters
    ----------
    filename: string
        path of the csv file to read
    **kwargs:
        to be passed to pandas.read_csv

    Returns
    --------
    df: pandas.DataFrame
        dataframe with the data
    unitsdic: dictionary
        dictionary with the variable names as keys and the units as values
    """
    import pandas as pd
    from .algs import parseUnits

    #------
    # Reads units and parses them
    # BUGFIX: dropped `squeeze=False` -- the parameter was removed in
    # pandas 2.0 and False was the default anyway; .iloc[0] already extracts
    # the units row as a Series.
    units = pd.read_csv(filename, nrows=1, skiprows=0, header=0, index_col=None).iloc[0]
    unitsdict = units.dropna().to_dict()
    unitsdic = parseUnits(unitsdict)
    #------

    #------
    # Reads the rest of the csv ignoring the units
    # NOTE(review): skiprows=[1,2] skips the units line *and* the line after
    # it -- presumably the format has two unit/metadata lines; confirm.
    df = pd.read_csv(filename, header=0, skiprows=[1,2], **kwargs)
    #------

    return df, unitsdic
def _read_dlc(dlcfile):
    """
    Obsolete. Should use fileConfig() or read_fileConfig().

    Reads datalogger configuration file.

    When defining the .dlc note that by default columns that are enclosed between doublequotes
    will appear without the doublequotes. So if your file is of the form :

    "2013-04-05 00:00:00", .345, .344, ...

    Then the .dlc should have: variables={0:'%Y-%m-%d %H:%M:%S',1:'u',2:'v'}. This is the default csv format of
    CampbellSci dataloggers. To disable this feature, you should parse the file with read_csv using the kw: quoting=3.
    """
    from .core import fileConfig
    globs={}
    dlcvars={}
    try:
        # BUGFIX: previously referenced the undefined names sitefile/sitevars
        # (copy-paste from read_site), so this function always raised NameError.
        execfile(dlcfile, globs, dlcvars)
    except NameError:
        exec(open(dlcfile).read(), globs, dlcvars)
    return fileConfig(**dlcvars)
#-------------------------------------------
#-------------------------------------------
# OUTPUT OF DATA
#-------------------------------------------
#-------------------------------------------
def write_as_fconfig(data, fname, fileconfig):
    """
    Writes a pandas DataFrame to *fname* following the conventions of a
    fileConfig object (or the path to one).
    """
    import pandas as pd

    #--------------
    # If the path to the dlc is provided, we read it as a dataloggerConfig object
    if isinstance(fileconfig, str):
        from . import fileConfig
        fileconfig = fileConfig(fileconfig)
    #--------------

    if fileconfig.columns_separator == 'whitespace':
        # Whitespace-separated output: disable quoting and use a fixed
        # scientific float format.
        import csv
        csv_options = dict(sep=" ", escapechar=" ", quoting=csv.QUOTE_NONE,
                           float_format='%14.7e')
    else:
        csv_options = dict(sep=fileconfig.columns_separator, escapechar=None,
                           quoting=None, float_format=None)

    data.to_csv(fname, index=False, header=fileconfig.header, **csv_options)
    return
def _get_printable(data, units, to_tex_cols=True, to_tex_units=True):
    """
    Returns a csv that is pandas-printable. It does so changing the column names to add units to it.

    Greek-letter column names are prefixed with a backslash for LaTeX, and
    (optionally) each column label is rewritten as '$\\rm name (unit)$'.
    NOTE(review): assumes every column has an entry in `units` when
    to_tex_units is True -- a missing key would raise KeyError; confirm.
    """
    if to_tex_cols==True:
        from .constants import greek_alphabet
        columns=[ u'\\'+c if c in greek_alphabet.values() else c for c in data.columns ]
        units={ u'\\'+ c if c in greek_alphabet.values() else c : v for c,v in units.items() }
    if to_tex_units==True:
        from .util import _printUnit as pru
        units={ k : pru(v) for k,v in units.items() }
        columns=[ r'$\rm '+fl+r'\, \left({0}\right)$'.format(units[fl]) for fl in columns ]
    # Work on a copy so the caller's DataFrame keeps its original column names.
    df=data.copy()
    df.columns=columns
    return df
| gpl-3.0 |
nhuntwalker/astroML | examples/datasets/plot_sdss_S82standards.py | 5 | 2253 | """
SDSS Standard Star catalog
--------------------------
This demonstrates how to fetch and plot the colors of the SDSS Stripe 82
standard stars, both alone and with the cross-matched 2MASS colors.
"""
# Author: Jake VanderPlas <vanderplas@astro.washington.edu>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from astroML.datasets import fetch_sdss_S82standards
from astroML.plotting import MultiAxes
#------------------------------------------------------------
# Plot SDSS data alone
data = fetch_sdss_S82standards()
# Build an (n_stars, 4) array of adjacent-band color indices from the
# median magnitudes ('mmu_*' columns) of the standard-star catalog.
colors = np.zeros((len(data), 4))
colors[:, 0] = data['mmu_u'] - data['mmu_g']
colors[:, 1] = data['mmu_g'] - data['mmu_r']
colors[:, 2] = data['mmu_r'] - data['mmu_i']
colors[:, 3] = data['mmu_i'] - data['mmu_z']
labels = ['u-g', 'g-r', 'r-i', 'i-z']
# Histogram bin edges for each color axis of the scatter-matrix plot.
bins = [np.linspace(0.0, 3.5, 100),
        np.linspace(0, 2, 100),
        np.linspace(-0.2, 1.8, 100),
        np.linspace(-0.2, 1.0, 100)]
fig = plt.figure(figsize=(10, 10))
# MultiAxes draws a triangular grid of pairwise 2D density plots.
ax = MultiAxes(4, hspace=0.05, wspace=0.05, fig=fig)
ax.density(colors, bins=bins)
ax.set_labels(labels)
ax.set_locators(plt.MaxNLocator(5))
plt.suptitle('SDSS magnitudes')
#------------------------------------------------------------
# Plot data cross-matched with 2MASS
data = fetch_sdss_S82standards(crossmatch_2mass=True)
# Same adjacent-band SDSS colors as above, extended with the three
# infrared colors bridging into the 2MASS J/H/K bands.
colors = np.zeros((len(data), 7))
colors[:, 0] = data['mmu_u'] - data['mmu_g']
colors[:, 1] = data['mmu_g'] - data['mmu_r']
colors[:, 2] = data['mmu_r'] - data['mmu_i']
colors[:, 3] = data['mmu_i'] - data['mmu_z']
colors[:, 4] = data['mmu_z'] - data['J']
colors[:, 5] = data['J'] - data['H']
colors[:, 6] = data['H'] - data['K']
labels = ['u-g', 'g-r', 'r-i', 'i-z', 'z-J', 'J-H', 'H-K']
bins = [np.linspace(0.0, 3.5, 100),
        np.linspace(0, 2, 100),
        np.linspace(-0.2, 1.8, 100),
        np.linspace(-0.2, 1.0, 100),
        np.linspace(0.5, 2.0, 100),
        np.linspace(0.0, 1.0, 100),
        np.linspace(-0.4, 0.8, 100)]
fig = plt.figure(figsize=(10, 10))
ax = MultiAxes(7, hspace=0.05, wspace=0.05, fig=fig)
ax.density(colors, bins=bins)
ax.set_labels(labels)
ax.set_locators(plt.MaxNLocator(5))
fig.suptitle('SDSS+2MASS magnitudes')
plt.show()
| bsd-2-clause |
eadgarchen/tensorflow | tensorflow/python/client/notebook.py | 109 | 4791 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
# Parsed command-line flags; populated by argparse in the __main__ block below.
FLAGS = None
# Snapshot of the original argv; main() restores it before dispatching, since
# the __main__ block may strip flags before calling app.run().
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
  """Start the notebook web server, or an IPython kernel in subprocess mode.

  Behavior is selected by the module-level IS_KERNEL flag: the parent
  process starts the notebook server; child processes (re-invoked with
  argv[1] == "kernel") start an IPython kernel instead.
  """
  # Restore the argv saved at import time before any flag stripping below.
  sys.argv = ORIG_ARGV
  if not IS_KERNEL:
    # Drop all flags.
    sys.argv = [sys.argv[0]]
    # NOTE(sadovsky): For some reason, putting this import at the top level
    # breaks inline plotting.  It's probably a bug in the stone-age version of
    # matplotlib.
    from IPython.html.notebookapp import NotebookApp  # pylint: disable=g-import-not-at-top
    notebookapp = NotebookApp.instance()
    notebookapp.open_browser = True
    # password functionality adopted from quality/ranklab/main/tools/notebook.py
    # add options to run with "password"
    if FLAGS.password:
      from IPython.lib import passwd  # pylint: disable=g-import-not-at-top
      # Binding to 0.0.0.0 makes the server publicly reachable, hence the
      # password requirement in this mode.
      notebookapp.ip = "0.0.0.0"
      notebookapp.password = passwd(FLAGS.password)
    else:
      print ("\nNo password specified; Notebook server will only be available"
             " on the local machine.\n")
    notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
    if notebookapp.ip == "0.0.0.0":
      proto = "https" if notebookapp.certfile else "http"
      url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
                              notebookapp.base_project_url)
      print("\nNotebook server will be publicly available at: %s\n" % url)
    notebookapp.start()
    return
  # Kernel subprocess path from here on.
  # Drop the --flagfile flag so that notebook doesn't complain about an
  # "unrecognized alias" when parsing sys.argv.
  sys.argv = ([sys.argv[0]] +
              [z for z in sys.argv[1:] if not z.startswith("--flagfile")])
  from IPython.kernel.zmq.kernelapp import IPKernelApp  # pylint: disable=g-import-not-at-top
  kernelapp = IPKernelApp.instance()
  kernelapp.initialize()
  # Enable inline plotting. Equivalent to running "%matplotlib inline".
  ipshell = kernelapp.shell
  ipshell.enable_matplotlib("inline")
  kernelapp.start()
if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  parser.add_argument(
      "--password",
      type=str,
      default=None,
      help="""\
      Password to require. If set, the server will allow public access. Only
      used if notebook config file does not exist.\
      """)
  parser.add_argument(
      "--notebook_dir",
      type=str,
      default="experimental/brain/notebooks",
      help="root location where to store notebooks")
  # When the user starts the main notebook process, we don't touch sys.argv.
  # When the main process launches kernel subprocesses, it writes all flags
  # to a tmpfile and sets --flagfile to that tmpfile, so for kernel
  # subprocesses here we drop all flags *except* --flagfile, then call
  # app.run(), and then (in main) restore all flags before starting the
  # kernel app.
  if IS_KERNEL:
    # Drop everything except --flagfile.
    sys.argv = ([sys.argv[0]] +
                [x for x in sys.argv[1:] if x.startswith("--flagfile")])
  # app.run invokes main() with whatever argv entries argparse did not consume.
  FLAGS, unparsed = parser.parse_known_args()
  app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
tensorflow/models | research/delf/delf/python/detect_to_retrieve/image_reranking.py | 1 | 12294 | # Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library to re-rank images based on geometric verification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import matplotlib.pyplot as plt
import numpy as np
from scipy import spatial
from skimage import feature
from skimage import measure
from skimage import transform
from delf import feature_io
# Extensions.
_DELF_EXTENSION = '.delf'
# Pace to log: print progress every this many geometric-verification steps.
_STATUS_CHECK_GV_ITERATIONS = 10
# Re-ranking / geometric verification parameters.
_NUM_TO_RERANK = 100  # Max number of top-ranked (non-junk) images to re-rank.
_NUM_RANSAC_TRIALS = 1000  # RANSAC iteration budget.
_MIN_RANSAC_SAMPLES = 3  # Min correspondences needed to fit an affine model.
def MatchFeatures(query_locations,
                  query_descriptors,
                  index_image_locations,
                  index_image_descriptors,
                  ransac_seed=None,
                  descriptor_matching_threshold=0.9,
                  ransac_residual_threshold=10.0,
                  query_im_array=None,
                  index_im_array=None,
                  query_im_scale_factors=None,
                  index_im_scale_factors=None,
                  use_ratio_test=False):
  """Matches local features using geometric verification.

  First, finds putative local feature matches by matching `query_descriptors`
  against a KD-tree from the `index_image_descriptors`. Then, attempts to fit an
  affine transformation between the putative feature corresponces using their
  locations.

  Args:
    query_locations: Locations of local features for query image. NumPy array of
      shape [#query_features, 2].
    query_descriptors: Descriptors of local features for query image. NumPy
      array of shape [#query_features, depth].
    index_image_locations: Locations of local features for index image. NumPy
      array of shape [#index_image_features, 2].
    index_image_descriptors: Descriptors of local features for index image.
      NumPy array of shape [#index_image_features, depth].
    ransac_seed: Seed used by RANSAC. If None (default), no seed is provided.
    descriptor_matching_threshold: Threshold below which a pair of local
      descriptors is considered a potential match, and will be fed into RANSAC.
      If use_ratio_test==False, this is a simple distance threshold. If
      use_ratio_test==True, this is Lowe's ratio test threshold.
    ransac_residual_threshold: Residual error threshold for considering matches
      as inliers, used in RANSAC algorithm.
    query_im_array: Optional. If not None, contains a NumPy array with the query
      image, used to produce match visualization, if there is a match.
    index_im_array: Optional. Same as `query_im_array`, but for index image.
    query_im_scale_factors: Optional. If not None, contains a NumPy array with
      the query image scales, used to produce match visualization, if there is a
      match. If None and a visualization will be produced, [1.0, 1.0] is used
      (ie, feature locations are not scaled).
    index_im_scale_factors: Optional. Same as `query_im_scale_factors`, but for
      index image.
    use_ratio_test: If True, descriptor matching is performed via ratio test,
      instead of distance-based threshold.

  Returns:
    score: Number of inliers of match. If no match is found, returns 0.
    match_viz_bytes: Encoded image bytes with visualization of the match, if
      there is one, and if `query_im_array` and `index_im_array` are properly
      set. Otherwise, it's an empty bytes string.

  Raises:
    ValueError: If local descriptors from query and index images have different
      dimensionalities.
  """
  num_features_query = query_locations.shape[0]
  num_features_index_image = index_image_locations.shape[0]
  if not num_features_query or not num_features_index_image:
    # Nothing to match against; report zero inliers and no visualization.
    return 0, b''
  local_feature_dim = query_descriptors.shape[1]
  if index_image_descriptors.shape[1] != local_feature_dim:
    raise ValueError(
        'Local feature dimensionality is not consistent for query and index '
        'images.')
  # Construct KD-tree used to find nearest neighbors.
  index_image_tree = spatial.cKDTree(index_image_descriptors)
  if use_ratio_test:
    # Lowe's ratio test: keep a putative match only when the nearest neighbor
    # is sufficiently closer than the second-nearest one.
    distances, indices = index_image_tree.query(
        query_descriptors, k=2, n_jobs=-1)
    query_locations_to_use = np.array([
        query_locations[i,]
        for i in range(num_features_query)
        if distances[i][0] < descriptor_matching_threshold * distances[i][1]
    ])
    index_image_locations_to_use = np.array([
        index_image_locations[indices[i][0],]
        for i in range(num_features_query)
        if distances[i][0] < descriptor_matching_threshold * distances[i][1]
    ])
  else:
    # Distance-threshold matching: cKDTree reports the sentinel index
    # `num_features_index_image` when no neighbor lies within the bound.
    _, indices = index_image_tree.query(
        query_descriptors,
        distance_upper_bound=descriptor_matching_threshold,
        n_jobs=-1)
    # Select feature locations for putative matches.
    query_locations_to_use = np.array([
        query_locations[i,]
        for i in range(num_features_query)
        if indices[i] != num_features_index_image
    ])
    index_image_locations_to_use = np.array([
        index_image_locations[indices[i],]
        for i in range(num_features_query)
        if indices[i] != num_features_index_image
    ])
  # If there are not enough putative matches, early return 0.
  if query_locations_to_use.shape[0] <= _MIN_RANSAC_SAMPLES:
    return 0, b''
  # Perform geometric verification using RANSAC.
  _, inliers = measure.ransac(
      (index_image_locations_to_use, query_locations_to_use),
      transform.AffineTransform,
      min_samples=_MIN_RANSAC_SAMPLES,
      residual_threshold=ransac_residual_threshold,
      max_trials=_NUM_RANSAC_TRIALS,
      random_state=ransac_seed)
  match_viz_bytes = b''
  if inliers is None:
    # RANSAC failed to fit a model; score below becomes 0.
    inliers = []
  elif query_im_array is not None and index_im_array is not None:
    # Draw the inlier correspondences side by side and encode them as JPEG
    # bytes (figure chrome is stripped so only the images remain).
    if query_im_scale_factors is None:
      query_im_scale_factors = [1.0, 1.0]
    if index_im_scale_factors is None:
      index_im_scale_factors = [1.0, 1.0]
    inlier_idxs = np.nonzero(inliers)[0]
    _, ax = plt.subplots()
    ax.axis('off')
    ax.xaxis.set_major_locator(plt.NullLocator())
    ax.yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    plt.margins(0, 0)
    feature.plot_matches(
        ax,
        query_im_array,
        index_im_array,
        query_locations_to_use * query_im_scale_factors,
        index_image_locations_to_use * index_im_scale_factors,
        np.column_stack((inlier_idxs, inlier_idxs)),
        only_matches=True)
    match_viz_io = io.BytesIO()
    plt.savefig(match_viz_io, format='jpeg', bbox_inches='tight', pad_inches=0)
    match_viz_bytes = match_viz_io.getvalue()
  # sum(inliers) counts True entries (or is 0 for the empty-list case above).
  return sum(inliers), match_viz_bytes
def RerankByGeometricVerification(input_ranks,
                                  initial_scores,
                                  query_name,
                                  index_names,
                                  query_features_dir,
                                  index_features_dir,
                                  junk_ids,
                                  local_feature_extension=_DELF_EXTENSION,
                                  ransac_seed=None,
                                  descriptor_matching_threshold=0.9,
                                  ransac_residual_threshold=10.0,
                                  use_ratio_test=False):
  """Re-ranks retrieval results using geometric verification.

  Args:
    input_ranks: 1D NumPy array with indices of top-ranked index images, sorted
      from the most to the least similar.
    initial_scores: 1D NumPy array with initial similarity scores between query
      and index images. Entry i corresponds to score for image i.
    query_name: Name for query image (string).
    index_names: List of names for index images (strings).
    query_features_dir: Directory where query local feature file is located
      (string).
    index_features_dir: Directory where index local feature files are located
      (string).
    junk_ids: Set with indices of junk images which should not be considered
      during re-ranking.
    local_feature_extension: String, extension to use for loading local feature
      files.
    ransac_seed: Seed used by RANSAC. If None (default), no seed is provided.
    descriptor_matching_threshold: Threshold used for local descriptor matching.
    ransac_residual_threshold: Residual error threshold for considering matches
      as inliers, used in RANSAC algorithm.
    use_ratio_test: If True, descriptor matching is performed via ratio test,
      instead of distance-based threshold.

  Returns:
    output_ranks: 1D NumPy array with index image indices, sorted from the most
      to the least similar according to the geometric verification and initial
      scores.

  Raises:
    ValueError: If `input_ranks`, `initial_scores` and `index_names` do not have
      the same number of entries.
  """
  num_index_images = len(index_names)
  if len(input_ranks) != num_index_images:
    raise ValueError('input_ranks and index_names have different number of '
                     'elements: %d vs %d' %
                     (len(input_ranks), len(index_names)))
  if len(initial_scores) != num_index_images:
    raise ValueError('initial_scores and index_names have different number of '
                     'elements: %d vs %d' %
                     (len(initial_scores), len(index_names)))
  # Filter out junk images from list that will be re-ranked.
  input_ranks_for_gv = []
  for ind in input_ranks:
    if ind not in junk_ids:
      input_ranks_for_gv.append(ind)
  num_to_rerank = min(_NUM_TO_RERANK, len(input_ranks_for_gv))
  # Load query image features.
  query_features_path = os.path.join(query_features_dir,
                                     query_name + local_feature_extension)
  query_locations, _, query_descriptors, _, _ = feature_io.ReadFromFile(
      query_features_path)
  # Initialize list containing number of inliers and initial similarity scores.
  # Entry i is [inlier_count, initial_score] for index image i; images that
  # are not re-ranked keep an inlier count of 0.
  inliers_and_initial_scores = []
  for i in range(num_index_images):
    inliers_and_initial_scores.append([0, initial_scores[i]])
  # Loop over top-ranked images and get results.
  print('Starting to re-rank')
  for i in range(num_to_rerank):
    if i > 0 and i % _STATUS_CHECK_GV_ITERATIONS == 0:
      print('Re-ranking: i = %d out of %d' % (i, num_to_rerank))
    index_image_id = input_ranks_for_gv[i]
    # Load index image features.
    index_image_features_path = os.path.join(
        index_features_dir,
        index_names[index_image_id] + local_feature_extension)
    (index_image_locations, _, index_image_descriptors, _,
     _) = feature_io.ReadFromFile(index_image_features_path)
    # The visualization bytes returned by MatchFeatures are discarded here.
    inliers_and_initial_scores[index_image_id][0], _ = MatchFeatures(
        query_locations,
        query_descriptors,
        index_image_locations,
        index_image_descriptors,
        ransac_seed=ransac_seed,
        descriptor_matching_threshold=descriptor_matching_threshold,
        ransac_residual_threshold=ransac_residual_threshold,
        use_ratio_test=use_ratio_test)
  # Sort based on (inliers_score, initial_score).
  def _InliersInitialScoresSorting(k):
    """Helper function to sort list based on two entries.

    Args:
      k: Index into `inliers_and_initial_scores`.

    Returns:
      Tuple containing inlier score and initial score.
    """
    return (inliers_and_initial_scores[k][0], inliers_and_initial_scores[k][1])
  # Inlier count dominates; the initial similarity score breaks ties.
  output_ranks = sorted(
      range(num_index_images), key=_InliersInitialScoresSorting, reverse=True)
  return output_ranks
| apache-2.0 |
ndingwall/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 18 | 2882 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from itertools import cycle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets

X, y = datasets.load_diabetes(return_X_y=True)
X /= X.std(axis=0)  # Standardize data (easier to set the l1_ratio parameter)

# Compute the four regularization paths.  A smaller eps gives a longer path.
eps = 5e-3
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps=eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
    X, y, eps=eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
    X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positive elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
    X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)


def _neg_log(alphas):
    """Return -log10 of the alpha grid (conventional x-axis for paths)."""
    return -np.log10(alphas)


# Display results: one figure per comparison, one color per feature.
# The color cycle is shared (and consumed) across all three figures.
plt.figure(1)
color_cycle = cycle(['b', 'r', 'g', 'c', 'k'])
xs_lasso = _neg_log(alphas_lasso)
xs_enet = _neg_log(alphas_enet)
for lasso_coef, enet_coef, color in zip(coefs_lasso, coefs_enet, color_cycle):
    line_a = plt.plot(xs_lasso, lasso_coef, c=color)
    line_b = plt.plot(xs_enet, enet_coef, linestyle='--', c=color)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((line_a[-1], line_b[-1]), ('Lasso', 'Elastic-Net'),
           loc='lower left')
plt.axis('tight')

plt.figure(2)
xs_positive_lasso = _neg_log(alphas_positive_lasso)
for lasso_coef, pos_lasso_coef, color in zip(coefs_lasso,
                                             coefs_positive_lasso,
                                             color_cycle):
    line_a = plt.plot(xs_lasso, lasso_coef, c=color)
    line_b = plt.plot(xs_positive_lasso, pos_lasso_coef, linestyle='--',
                      c=color)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((line_a[-1], line_b[-1]), ('Lasso', 'positive Lasso'),
           loc='lower left')
plt.axis('tight')

plt.figure(3)
xs_positive_enet = _neg_log(alphas_positive_enet)
for enet_coef, pos_enet_coef, color in zip(coefs_enet, coefs_positive_enet,
                                           color_cycle):
    line_a = plt.plot(xs_enet, enet_coef, c=color)
    line_b = plt.plot(xs_positive_enet, pos_enet_coef, linestyle='--', c=color)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((line_a[-1], line_b[-1]), ('Elastic-Net', 'positive Elastic-Net'),
           loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ibis-project/ibis-bigquery | ibis_bigquery/backcompat.py | 1 | 3812 | """Helpers to make this backend compatible with Ibis versions < 2.0.
Keep in sync with:
https://github.com/ibis-project/ibis/blob/master/ibis/backends/base/__init__.py
TODO: Remove this after Ibis 2.0 release and support for earlier versions of
Ibis < 2.0 is dropped.
"""
import abc
try:
from ibis.common.exceptions import TranslationError
except ImportError:
# 1.2
from ibis.common import TranslationError
__all__ = ('BaseBackend',)
class BaseBackend(abc.ABC):
    """
    Base backend class.
    All Ibis backends are expected to subclass this `Backend` class,
    and implement all the required methods.
    """
    @property
    @abc.abstractmethod
    def name(self) -> str:
        """
        Name of the backend, for example 'sqlite'.
        """
        pass
    @property
    @abc.abstractmethod
    def kind(self):
        """
        Backend kind. One of:
        sqlalchemy
            Backends using a SQLAlchemy dialect.
        sql
            SQL based backends, not based on a SQLAlchemy dialect.
        pandas
            Backends using pandas to store data and perform computations.
        spark
            Spark based backends.
        """
        pass
    @property
    @abc.abstractmethod
    def builder(self):
        # Query builder class used by compile(); supplied by each backend.
        pass
    @property
    @abc.abstractmethod
    def translator(self):
        # Expression translator class attached to the dialect in `dialect`.
        pass
    @property
    def dialect(self):
        """
        Dialect class of the backend.
        We generate it dynamically to avoid repeating the code for each
        backend.
        """
        # TODO importing dialects inside the function to avoid circular
        # imports. In the future instead of this if statement we probably
        # want to create subclasses for each of the kinds
        # (e.g. `BaseSQLAlchemyBackend`)
        # TODO check if the below dialects can be merged into a single one
        if self.kind == 'sqlalchemy':
            from ibis.backends.base_sqlalchemy.alchemy import AlchemyDialect
            dialect_class = AlchemyDialect
        elif self.kind in ('sql', 'pandas'):
            # The compiler module moved between Ibis versions; try the new
            # location first and fall back to the pre-2.0 one.
            try:
                from ibis.backends.base_sqlalchemy.compiler import Dialect
            except ImportError:
                from ibis.sql.compiler import Dialect
            dialect_class = Dialect
        elif self.kind == 'spark':
            from ibis.backends.base_sql.compiler import BaseDialect
            dialect_class = BaseDialect
        else:
            raise ValueError(
                f'Backend class "{self.kind}" unknown. '
                'Expected one of "sqlalchemy", "sql", '
                '"pandas" or "spark".'
            )
        # Attach this backend's translator so the dialect compiles with it.
        dialect_class.translator = self.translator
        return dialect_class
    @abc.abstractmethod
    def connect(connection_string, **options):
        """
        Connect to the underlying database and return a client object.
        """
        pass
    def register_options(self):
        """
        If the backend has custom options, register them here.
        They will be prefixed with the name of the backend.
        """
        pass
    def compile(self, expr, params=None):
        """
        Compile the expression.
        """
        context = self.dialect.make_context(params=params)
        builder = self.builder(expr, context=context)
        query_ast = builder.get_result()
        # TODO make all builders return a QueryAST object
        if isinstance(query_ast, list):
            query_ast = query_ast[0]
        compiled = query_ast.compile()
        return compiled
    def verify(self, expr, params=None):
        """
        Verify `expr` is an expression that can be compiled.
        """
        # EAFP: a TranslationError from compile() means "not compilable".
        try:
            self.compile(expr, params=params)
            return True
        except TranslationError:
            return False
| apache-2.0 |
bennames/AeroComBAT-Project | Tutorials/Validations/flutterValidation.py | 1 | 3742 | # =============================================================================
# HEPHAESTUS VALIDATION 4 - MESHER AND CROSS-SECTIONAL ANALYSIS
# =============================================================================
# IMPORTS:
import sys
import os
import cProfile
sys.path.append(os.path.abspath('..'))
from AeroComBAT.Structures import MaterialLib
from AeroComBAT.AircraftParts import Wing
from AeroComBAT.FEM import Model
import numpy as np
# Add the material property
matLib = MaterialLib()
matLib.addMat(1,'AS43501-6','trans_iso',[20.6e6,1.42e6,.3,.34,.87e6,0.057/386.09],0.005)
matLib.addMat(3,'AS43501-6*','trans_iso',[20.6e6,1.42e6,.34,.42,.87e6,0.057/386.09],0.005)
matLib.addMat(2,'AL','iso',[9900000.,.33,2.53881E-4],.005)
# Layup 3 Configuration: ply counts, material IDs and orientations for the
# four laminates of the box-beam cross-section.
n_i_1 = [1,1,1,1,1,1]
m_i_1 = [2,2,2,2,2,2]
th_1 = [-15,-15,-15,-15,-15,-15]
n_i_2 = [1,1,1,1,1,1]
m_i_2 = [2,2,2,2,2,2]
th_2 = [15,-15,15,-15,15,-15]
n_i_3 = [1,1,1,1,1,1]
m_i_3 = [2,2,2,2,2,2]
th_3 = [15,15,15,15,15,15]
n_i_4 = [1,1,1,1,1,1]
m_i_4 = [2,2,2,2,2,2]
th_4 = [-15,15,-15,15,-15,15]
# Concatenated layup schedule arrays
n_ply = n_i_1+n_i_2+n_i_3+n_i_4
m_ply = m_i_1+m_i_2+m_i_3+m_i_4
th_ply = th_1+th_2+th_3+th_4
# Define the chord length of the box beam
croot = 0.53
ctip = 0.53
# Define nd starting and stopping points of the cross-section
x1 = -0.8990566037735849
x2 = 0.8990566037735849
p1 = np.array([0.,0.,0.])
p2 = np.array([0.,8.05,0.])
Y_rib = np.linspace(0.,1.,2)
noe_dens = 6
chordVec=np.array([-1.,0.,0.])
wing1 = Wing(1,p1,p2,croot,ctip,x1,x2,Y_rib,n_ply,m_ply,matLib,name='box',\
    noe=noe_dens,chordVec=chordVec,ref_ax='origin',th_ply=th_ply,typeXSect='rectBox',n_orients=6)
sbeam1 = wing1.wingSects[0].SuperBeams[0]
# Add lifting surface to wing
x1 = np.array([-4*croot,0.,0.])
x2 = np.array([4*croot,0.,0.])
x3 = np.array([4*croot,p2[1],0.])
x4 = np.array([-4*croot,p2[1],0.])
# Integer division so the panel counts stay integral on Python 3 as well
# (plain `/` would yield floats there, changing the values passed below).
nspan = 36//2
nchord = 20//2
wing1.addLiftingSurface(1,x1,x2,x3,x4,nspan,nchord)
# Make a FEM model
model = Model()
model.addAircraftParts([wing1])
model.plotRigidModel(numXSects=10)
# Apply the constraint for the model
model.applyConstraints(0,'fix')
#cProfile.run('model.normalModesAnalysis()',sort='tottime')
# Composite Normal Mode Analysis
model.normalModesAnalysis()
freqs = model.freqs
'''
model.plotDeformedModel(figName='Normal Mode 1',numXSects=10,contLim=[0,293000],\
    warpScale=25,displScale=10,contour='none',mode=1)
model.plotDeformedModel(figName='normalMode 2',numXSects=10,contLim=[0,293000],\
    warpScale=25,displScale=10,contour='none',mode=2)
model.plotDeformedModel(figName='normalMode 3',numXSects=10,contLim=[0,293000],\
    warpScale=25,displScale=10,contour='none',mode=3)
model.plotDeformedModel(figName='normalMode 4',numXSects=10,contLim=[0,293000],\
    warpScale=25,displScale=10,contour='none',mode=4)
model.plotDeformedModel(figName='normalMode 5',numXSects=10,contLim=[0,293000],\
    warpScale=25,displScale=10,contour='none',mode=5)
'''
# Flutter Analysis
# Note units are inches, seconds, and pounds
U_vec = np.linspace(1,100,100)
kr_vec = np.array([.001,.005,.01,.05,.1,.5,1.,5.,10.,50.,100.,500.,1000.,5000.])
M_vec = [0.]*len(kr_vec)
# In slinch
rho_0 = 1.225
nmodes = 6
model.flutterAnalysis(U_vec,kr_vec,M_vec,croot*4,rho_0,nmodes,symxz=True,g=.01)
# flutter plots
import matplotlib.pyplot as plt
plt.figure(1)
# NOTE(review): plt.hold was deprecated in matplotlib 2.0 and removed in 3.0
# (holding is the default there); these calls only work with old matplotlib.
plt.hold(True)
# .items() instead of the Python-2-only .iteritems(): iterates on both
# Python 2 and Python 3 with identical behavior here.
for PID, point in model.flutterPoints.items():
    plt.plot(U_vec,point.gamma,label='mode'+str(PID))
plt.legend(loc=3)
#plt.ylim([-.001,150])
plt.grid(True)
plt.hold(False)
plt.figure(2)
plt.hold(True)
for PID, point in model.flutterPoints.items():
    plt.plot(U_vec,point.omega,label='mode'+str(PID))
plt.legend(loc=1)
#plt.ylim([0,150])
plt.grid(True)
plt.hold(False) | mit |
lfairchild/PmagPy | programs/foldtest.py | 1 | 5959 | #!/usr/bin/env python
import sys
import numpy
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pylab
import pmagpy.pmag as pmag
from pmag_env import set_env
import pmagpy.pmagplotlib as pmagplotlib
def main():
    """
    NAME
        foldtest.py

    DESCRIPTION
        does a fold test (Tauxe, 2010) on data

    INPUT FORMAT
        dec inc dip_direction dip

    SYNTAX
        foldtest.py [command line options]

    OPTIONS
        -h prints help message and quits
        -f FILE file with input data
        -F FILE for confidence bounds on fold test
        -u ANGLE (circular standard deviation) for uncertainty on bedding poles
        -b MIN MAX bounds for quick search of percent untilting [default is -10 to 150%]
        -n NB  number of bootstrap samples [default is 1000]
        -fmt FMT, specify format - default is svg
        -sav saves figures and quits

    INPUT FILE
        Dec Inc Dip_Direction Dip in space delimited file

    OUTPUT PLOTS
        Geographic: is an equal area projection of the input data in
                    original coordinates
        Stratigraphic: is an equal area projection of the input data in
                    tilt adjusted coordinates
        % Untilting: The dashed (red) curves are representative plots of
                    maximum eigenvalue (tau_1) as a function of untilting
                    The solid line is the cumulative distribution of the
                    % Untilting required to maximize tau for all the
                    bootstrapped data sets.  The dashed vertical lines
                    are 95% confidence bounds on the % untilting that yields
                    the most clustered result (maximum tau_1).
        Command line: prints out the bootstrapped iterations and
                    finally the confidence bounds on optimum untilting.
        If the 95% conf bounds include 0, then a post-tilt magnetization is indicated
        If the 95% conf bounds include 100, then a pre-tilt magnetization is indicated
        If the 95% conf bounds exclude both 0 and 100, syn-tilt magnetization is
                    possible as is vertical axis rotation or other pathologies

    OPTIONAL OUTPUT FILE:
        The output file has the % untilting within the 95% confidence bounds
        and the number of bootstrap samples
    """
    kappa=0
    fmt,plot='svg',0
    nb=1000 # number of bootstraps
    # NOTE(review): 'min'/'max' (and 'file' below) shadow builtins; kept
    # as-is for compatibility with the original CLI behavior.
    min,max=-10,150
    if '-h' in sys.argv: # check if help is needed
        print(main.__doc__)
        sys.exit() # graceful quit
    if '-F' in sys.argv:
        ind=sys.argv.index('-F')
        outfile=open(sys.argv[ind+1],'w')
    else:
        outfile=""
    if '-f' in sys.argv:
        ind=sys.argv.index('-f')
        file=sys.argv[ind+1]
        DIDDs=numpy.loadtxt(file)
    else:
        print(main.__doc__)
        sys.exit()
    if '-fmt' in sys.argv:
        ind=sys.argv.index('-fmt')
        fmt=sys.argv[ind+1]
    if '-sav' in sys.argv:plot=1
    if '-b' in sys.argv:
        ind=sys.argv.index('-b')
        min=int(sys.argv[ind+1])
        max=int(sys.argv[ind+2])
    if '-n' in sys.argv:
        ind=sys.argv.index('-n')
        nb=int(sys.argv[ind+1])
    if '-u' in sys.argv:
        ind=sys.argv.index('-u')
        csd=float(sys.argv[ind+1])
        kappa=(81. / csd)**2
    #
    # get to work
    #
    PLTS={'geo':1,'strat':2,'taus':3} # make plot dictionary
    pmagplotlib.plot_init(PLTS['geo'],5,5)
    pmagplotlib.plot_init(PLTS['strat'],5,5)
    pmagplotlib.plot_init(PLTS['taus'],5,5)
    pmagplotlib.plot_eq(PLTS['geo'],DIDDs,'Geographic')
    D,I=pmag.dotilt_V(DIDDs)
    TCs=numpy.array([D,I]).transpose()
    pmagplotlib.plot_eq(PLTS['strat'],TCs,'Stratigraphic')
    if not set_env.IS_WIN:
        if plot==0:pmagplotlib.draw_figs(PLTS)
    Percs=list(range(min,max))
    Cdf,Untilt=[],[]
    pylab.figure(num=PLTS['taus'])
    print('doing ',nb,' iterations...please be patient.....')
    for n in range(nb): # do bootstrap data sets - plot first 25 as dashed red line
        if n%50==0:print(n)
        Taus=[] # set up lists for taus
        PDs=pmag.pseudo(DIDDs)
        if kappa!=0:
            # Perturb each bedding pole with Fisher-distributed noise to
            # propagate the bedding-orientation uncertainty (-u option).
            for k in range(len(PDs)):
                d,i=pmag.fshdev(kappa)
                dipdir,dip=pmag.dodirot(d,i,PDs[k][2],PDs[k][3])
                PDs[k][2]=dipdir
                PDs[k][3]=dip
        for perc in Percs:
            tilt=numpy.array([1.,1.,1.,0.01*perc])
            D,I=pmag.dotilt_V(PDs*tilt)
            TCs=numpy.array([D,I]).transpose()
            ppars=pmag.doprinc(TCs) # get principal directions
            Taus.append(ppars['tau1'])
        if n<25:pylab.plot(Percs,Taus,'r--')
        Untilt.append(Percs[Taus.index(numpy.max(Taus))]) # tilt that gives maximum tau
        Cdf.append(float(n) / float(nb))
    pylab.plot(Percs,Taus,'k')
    pylab.xlabel('% Untilting')
    pylab.ylabel('tau_1 (red), CDF (green)')
    Untilt.sort() # now for CDF of tilt of maximum tau
    pylab.plot(Untilt,Cdf,'g')
    lower=int(.025*nb)
    upper=int(.975*nb)
    pylab.axvline(x=Untilt[lower],ymin=0,ymax=1,linewidth=1,linestyle='--')
    pylab.axvline(x=Untilt[upper],ymin=0,ymax=1,linewidth=1,linestyle='--')
    tit= '%i - %i %s'%(Untilt[lower],Untilt[upper],'Percent Unfolding')
    print(tit)
    print('range of all bootstrap samples: ', Untilt[0], ' - ', Untilt[-1])
    pylab.title(tit)
    outstring= '%i - %i; %i\n'%(Untilt[lower],Untilt[upper],nb)
    if outfile!="":outfile.write(outstring)
    files={}
    for key in list(PLTS.keys()):
        files[key]=('foldtest_'+'%s'%(key.strip()[:2])+'.'+fmt)
    if plot==0:
        pmagplotlib.draw_figs(PLTS)
        ans= input('S[a]ve all figures, <Return> to quit   ')
        if ans!='a':
            print("Good bye")
            sys.exit()
    pmagplotlib.save_plots(PLTS,files)

# Guard the entry point so importing this module no longer runs the CLI
# (previously main() was called unconditionally at import time).
if __name__ == "__main__":
    main()
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/projections/__init__.py | 3 | 2213 | from geo import AitoffAxes, HammerAxes, LambertAxes, MollweideAxes
from polar import PolarAxes
from matplotlib import axes
class ProjectionRegistry(object):
    """
    Manages the set of projections available to the system.
    """
    def __init__(self):
        # Maps projection name -> Axes subclass implementing it.
        self._all_projection_types = {}

    def register(self, *projections):
        """
        Register a new set of projection(s).

        Each projection class must provide a ``name`` attribute, which is
        used as its lookup key; registering the same name twice replaces
        the earlier class.
        """
        for projection in projections:
            self._all_projection_types[projection.name] = projection

    def get_projection_class(self, name):
        """
        Get a projection class from its *name*.

        Raises KeyError if no projection with that name is registered.
        """
        return self._all_projection_types[name]

    def get_projection_names(self):
        """
        Get a sorted list of the names of all projections currently
        registered.
        """
        # sorted() replaces the old keys()/sort()/return dance.
        return sorted(self._all_projection_types)
# Single module-level registry shared by the helper functions below.
projection_registry = ProjectionRegistry()
# Register the projections that ship with matplotlib.
projection_registry.register(
    axes.Axes,
    PolarAxes,
    AitoffAxes,
    HammerAxes,
    LambertAxes,
    MollweideAxes)
def register_projection(cls):
    # Public hook for third parties to add a projection class (must have
    # a ``name`` attribute) to the module-level registry.
    projection_registry.register(cls)
def get_projection_class(projection=None):
    """
    Get a projection class from its name.

    If *projection* is None, a standard rectilinear projection is
    returned.  Raises ValueError for an unrecognized name.
    """
    name = 'rectilinear' if projection is None else projection
    try:
        return projection_registry.get_projection_class(name)
    except KeyError:
        raise ValueError("Unknown projection '%s'" % name)
def projection_factory(projection, figure, rect, **kwargs):
    """
    Get a new projection instance.

    *projection* is a projection name.

    *figure* is a figure to add the axes to.

    *rect* is a :class:`~matplotlib.transforms.Bbox` object specifying
    the location of the axes within the figure.

    Any other kwargs are passed along to the specific projection
    constructor being used.
    """
    axes_class = get_projection_class(projection)
    return axes_class(figure, rect, **kwargs)
def get_projection_names():
    """
    Get a list of acceptable projection names.
    """
    # Delegates to the module-level registry.
    return projection_registry.get_projection_names()
| gpl-2.0 |
ErBa508/data-science-from-scratch | code/linear_algebra.py | 49 | 3637 | # -*- coding: iso-8859-15 -*-
from __future__ import division # want 3 / 2 == 1.5
import re, math, random # regexes, math functions, random numbers
import matplotlib.pyplot as plt # pyplot
from collections import defaultdict, Counter
from functools import partial
#
# functions for working with vectors
#
def vector_add(v, w):
    """adds two vectors componentwise"""
    return [a + b for a, b in zip(v, w)]
def vector_subtract(v, w):
    """subtracts two vectors componentwise"""
    return [a - b for a, b in zip(v, w)]
def vector_sum(vectors):
    """sums a list of vectors componentwise

    zip(*vectors) groups the i-th components together; summing each group
    replaces the bare ``reduce``, which is not a builtin on Python 3.
    As a side benefit, an empty input now yields [] instead of raising
    TypeError.
    """
    return [sum(components) for components in zip(*vectors)]
def scalar_multiply(c, v):
    """multiplies every component of v by the scalar c"""
    return [c * component for component in v]
def vector_mean(vectors):
    """compute the vector whose i-th element is the mean of the
    i-th elements of the input vectors"""
    count = len(vectors)
    scale = 1 / count
    # multiply each componentwise sum by the reciprocal, exactly as
    # scalar_multiply(1/n, vector_sum(vectors)) would
    return [scale * sum(components) for components in zip(*vectors)]
def dot(v, w):
    """v_1 * w_1 + ... + v_n * w_n"""
    total = 0
    for a, b in zip(v, w):
        total += a * b
    return total
def sum_of_squares(v):
    """v_1 * v_1 + ... + v_n * v_n"""
    return sum(v_i * v_i for v_i in v)
def magnitude(v):
    """Euclidean length of v: sqrt of the sum of squared components"""
    return math.sqrt(sum(v_i * v_i for v_i in v))
def squared_distance(v, w):
    """(v_1 - w_1) ** 2 + ... + (v_n - w_n) ** 2"""
    return sum((a - b) * (a - b) for a, b in zip(v, w))
def distance(v, w):
    """Euclidean distance between v and w"""
    return math.sqrt(sum((a - b) * (a - b) for a, b in zip(v, w)))
#
# functions for working with matrices
#
def shape(A):
    """returns (# rows, # columns) of the matrix A"""
    if A:
        return len(A), len(A[0])
    return len(A), 0
def get_row(A, i):
    # i-th row of A (returned by reference, not copied)
    return A[i]
def get_column(A, j):
    """j-th column of A, as a new list"""
    return [row[j] for row in A]
def make_matrix(num_rows, num_cols, entry_fn):
    """returns a num_rows x num_cols matrix
    whose (i,j)-th entry is entry_fn(i, j)"""
    matrix = []
    for i in range(num_rows):
        row = [entry_fn(i, j) for j in range(num_cols)]
        matrix.append(row)
    return matrix
def is_diagonal(i, j):
    """1's on the 'diagonal', 0's everywhere else"""
    return int(i == j)
# 5 x 5 identity matrix built from the helpers above
identity_matrix = make_matrix(5, 5, is_diagonal)

#          user 0  1  2  3  4  5  6  7  8  9
#
# symmetric adjacency matrix: friendships[i][j] == 1 iff users i and j
# are friends
friendships = [[0, 1, 1, 0, 0, 0, 0, 0, 0, 0],  # user 0
               [1, 0, 1, 1, 0, 0, 0, 0, 0, 0],  # user 1
               [1, 1, 0, 1, 0, 0, 0, 0, 0, 0],  # user 2
               [0, 1, 1, 0, 1, 0, 0, 0, 0, 0],  # user 3
               [0, 0, 0, 1, 0, 1, 0, 0, 0, 0],  # user 4
               [0, 0, 0, 0, 1, 0, 1, 1, 0, 0],  # user 5
               [0, 0, 0, 0, 0, 1, 0, 0, 1, 0],  # user 6
               [0, 0, 0, 0, 0, 1, 0, 0, 1, 0],  # user 7
               [0, 0, 0, 0, 0, 0, 1, 1, 0, 1],  # user 8
               [0, 0, 0, 0, 0, 0, 0, 0, 1, 0]]  # user 9
#####
# DELETE DOWN
#
def matrix_add(A, B):
    """entrywise sum of A and B; raises if their shapes differ"""
    rows_a, cols_a = len(A), (len(A[0]) if A else 0)
    rows_b, cols_b = len(B), (len(B[0]) if B else 0)
    if (rows_a, cols_a) != (rows_b, cols_b):
        raise ArithmeticError("cannot add matrices with different shapes")
    return [[A[i][j] + B[i][j] for j in range(cols_a)]
            for i in range(rows_a)]
def make_graph_dot_product_as_vector_projection(plt):
    """Draw a figure illustrating the dot product as vector projection.

    *plt* is the matplotlib.pyplot module (passed in by the caller).
    """
    v = [2, 1]
    # w is a unit vector: 0.25 + 0.75 == 1
    w = [math.sqrt(.25), math.sqrt(.75)]
    c = dot(v, w)
    # vonw is the projection of v onto w, i.e. (v . w) w
    vonw = scalar_multiply(c, w)
    o = [0,0]

    plt.arrow(0, 0, v[0], v[1],
              width=0.002, head_width=.1, length_includes_head=True)
    plt.annotate("v", v, xytext=[v[0] + 0.1, v[1]])
    plt.arrow(0 ,0, w[0], w[1],
              width=0.002, head_width=.1, length_includes_head=True)
    plt.annotate("w", w, xytext=[w[0] - 0.1, w[1]])
    plt.arrow(0, 0, vonw[0], vonw[1], length_includes_head=True)
    plt.annotate(u"(v•w)w", vonw, xytext=[vonw[0] - 0.1, vonw[1] + 0.1])
    # dotted line from v down to its projection on w
    plt.arrow(v[0], v[1], vonw[0] - v[0], vonw[1] - v[1],
              linestyle='dotted', length_includes_head=True)
    plt.scatter(*zip(v,w,o),marker='.')
    plt.axis('equal')
    plt.show()
| unlicense |
googlearchive/rgc-models | response_model/python/metric_learning/metric_learn_hard_examples.py | 1 | 5512 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r""""Learn a simple metric by mining hard examples.
Args:
--save_suffix='_hard_examples' --lam_l1=0.001 --data_train='example_long_wn_2rep_ON_OFF.mat' --triplet_type='a' --model='quadratic'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from google3.pyglib import app
import retina.response_model.python.metric_learning.config as config
import retina.response_model.python.metric_learning.data_util as du
import retina.response_model.python.metric_learning.score_fcns.quadratic_score as quad
import retina.response_model.python.metric_learning.score_fcns.mrf as mrf
FLAGS = tf.app.flags.FLAGS
def main(unused_argv=()):
  """Train a metric model, mining hard triplets after a warm-up period."""
  # get details to store model
  model_savepath, model_filename = config.get_filepaths()
  print('lam l1 is: '+ str(FLAGS.lam_l1))

  # load responses to two trials of long white noise
  data_wn = du.DataUtilsMetric(os.path.join(FLAGS.data_path, FLAGS.data_train))

  # quadratic score function
  with tf.Session() as sess:
    # Initialize the model.
    tf.logging.info('Model : %s ' % FLAGS.model)
    if FLAGS.model == 'quadratic':
      score = quad.QuadraticScore(sess, model_savepath,
                                  model_filename,
                                  n_cells=data_wn.n_cells,
                                  time_window=FLAGS.time_window,
                                  lr=FLAGS.learning_rate,
                                  lam_l1=FLAGS.lam_l1)

    if FLAGS.model == 'mrf':
      score = mrf.MRFScore(sess, model_savepath, model_filename,
                           n_cells=data_wn.n_cells,
                           time_window=FLAGS.time_window,
                           lr=FLAGS.learning_rate,
                           lam_l1=FLAGS.lam_l1,
                           cell_centers=data_wn.get_centers(),
                           neighbor_threshold=FLAGS.neighbor_threshold)

    # Learn the metric.
    # Set triplet type.
    if FLAGS.triplet_type == 'a':
      triplet_fcn = data_wn.get_triplets
    if FLAGS.triplet_type == 'b':
      triplet_fcn = data_wn.get_tripletsB

    # Get test data (a fixed triplet batch reused for every evaluation).
    outputs = triplet_fcn(batch_size=FLAGS.batch_size_train,
                          time_window=FLAGS.time_window)
    anchor_test = outputs[0]
    pos_test = outputs[1]
    neg_test = outputs[2]
    triplet_test = [anchor_test, pos_test, neg_test]

    # Learn the metric.
    loss_test_log = []
    loss_train_log = []
    hard_iters = 200  # when to start showing hard examples.
    # plt.ion()
    for score.iter in range(score.iter, FLAGS.max_iter):
      # Get new training batch.  After warm-up, draw a 3x oversized batch
      # so the hardest third can be kept below.
      if score.iter > hard_iters:
        batch_scale = 3
      else:
        batch_scale = 1
      outputs = triplet_fcn(batch_size=FLAGS.batch_size_train*batch_scale,
                            time_window=FLAGS.time_window)
      anchor_batch = outputs[0]
      pos_batch = outputs[1]
      neg_batch = outputs[2]

      if score.iter > hard_iters:
        # Hard-example mining: keep the triplets with the largest
        # (positive distance - negative distance), i.e. the most-violated.
        dd_neg = score.get_distance(anchor_batch, neg_batch)
        dd_pos = score.get_distance(anchor_batch, pos_batch)
        dd_diff = dd_pos - dd_neg
        top_examples = np.argsort(dd_diff)[::-1]
        choose_examples = top_examples[:FLAGS.batch_size_train]
        anchor_batch = anchor_batch[choose_examples, :, :]
        pos_batch = pos_batch[choose_examples, :, :]
        neg_batch = neg_batch[choose_examples, :, :]
        # from IPython import embed; embed()

      triplet_batch = [anchor_batch, pos_batch, neg_batch]
      loss_train = score.update(triplet_batch)

      if score.iter % 10 == 0:
        # Run tests regularly.
        loss_test = sess.run(score.loss, {score.anchor:
                                          triplet_test[0],
                                          score.pos: triplet_test[1],
                                          score.neg: triplet_test[2]})
        loss_test_log += [loss_test]  # This log is unused right now.
        loss_train_log += [loss_train]

      if score.iter % 10 == 0:
        tf.logging.info('Iteration: %d, Loss : %.3f, Loss test : %.3f' %
                        (score.iter, loss_train, loss_test))
        # The triple-quoted string below is disabled plotting code kept
        # as-is; it is a no-op expression at runtime.
        '''
        plt.clf()
        plt.subplot(1, 3, 1)
        plt.plot(loss_train_log, 'k')
        plt.title('Train')

        plt.subplot(1, 3, 2)
        plt.plot(loss_test_log, 'k')
        plt.title('Test')

        plt.subplot(1, 3, 3)
        plt.imshow(sess.run(score.A_symm), interpolation='nearest', cmap='gray')
        plt.title('A')
        plt.show()
        plt.draw()
        plt.pause(0.1)
        '''

      if score.iter % 1000 == 0:
        score.save_model()
        tf.logging.info('Model saved')
if __name__ == '__main__':
  # google3 app wrapper; parses flags and then invokes main().
  app.run()
| apache-2.0 |
DataDog/vbench | vbench/git.py | 3 | 9948 | from dateutil import parser
import subprocess
import os
import shutil
import numpy as np
from pandas import Series, DataFrame, Panel
from vbench.utils import run_cmd
import logging
log = logging.getLogger('vb.git')
class Repo(object):
    """Abstract base class for repository readers; not instantiable."""

    def __init__(self):
        raise NotImplementedError
class GitRepo(Repo):
    """
    Read some basic statistics about a git repository
    """

    def __init__(self, repo_path):
        log.info("Initializing GitRepo to look at %s" % repo_path)
        self.repo_path = repo_path
        # Reusable "git --git-dir=... --work-tree=... " command prefix.
        self.git = _git_command(self.repo_path)
        (self.shas, self.messages,
         self.timestamps, self.authors) = self._parse_commit_log()

    @property
    def commit_date(self):
        # Commit timestamps truncated to date resolution (midnight).
        from pandas.core.datetools import normalize_date
        return self.timestamps.map(normalize_date)

    def _parse_commit_log(self):
        """Parse ``git log`` output into four aligned pandas Series
        (shas, messages, timestamps, authors), oldest commit first."""
        log.debug("Parsing the commit log of %s" % self.repo_path)
        githist = self.git + ('log --graph --pretty=format:'
                              '\"::%h::%cd::%s::%an\" > githist.txt')
        # Uses a temporary file in the CWD rather than capturing stdout.
        os.system(githist)
        githist = open('githist.txt').read()
        os.remove('githist.txt')

        shas = []
        timestamps = []
        messages = []
        authors = []
        for line in githist.split('\n'):
            # skip commits not in mainline
            if not line[0] == '*':
                continue

            # split line into three real parts, ignoring git-graph in front
            _, sha, stamp, message, author = line.split('::', 4)

            # parse timestamp into datetime object
            stamp = parser.parse(stamp)

            # avoid duplicate timestamps by ignoring them
            # presumably there is a better way to deal with this
            if stamp in timestamps:
                continue

            shas.append(sha)
            timestamps.append(stamp)
            messages.append(message)
            authors.append(author)

        # to UTC for now
        timestamps = _convert_timezones(timestamps)

        shas = Series(shas, timestamps)
        messages = Series(messages, shas)
        timestamps = Series(timestamps, shas)
        authors = Series(authors, shas)
        # reverse: git log is newest-first, callers want oldest-first
        return shas[::-1], messages[::-1], timestamps[::-1], authors[::-1]

    def get_churn(self, omit_shas=None, omit_paths=None):
        """Total churn (insertions + deletions) per commit date."""
        churn = self.get_churn_by_file()

        if omit_paths is not None:
            churn = churn.drop(omit_paths, axis='major')

        if omit_shas is not None:
            churn = churn.drop(omit_shas, axis='minor')

        # sum files and add insertions + deletions
        by_commit = churn.sum('major').sum(1)
        by_date = by_commit.groupby(self.commit_date).sum()
        return by_date

    def get_churn_by_file(self):
        """Panel of per-file insertions/deletions between consecutive
        commits, keyed by the newer commit's sha."""
        hashes = self.shas.values
        prev = hashes[0]
        insertions = {}
        deletions = {}
        for cur in hashes[1:]:
            i, d = self.diff(cur, prev)
            insertions[cur] = i
            deletions[cur] = d
            prev = cur
        return Panel({'insertions': DataFrame(insertions),
                      'deletions': DataFrame(deletions)},
                     minor_axis=hashes)

    def diff(self, sha, prev_sha):
        """Return ({path: insertions}, {path: deletions}) between two shas
        using ``git diff --numstat``; unparseable lines are skipped."""
        cmdline = self.git.split() + ['diff', sha, prev_sha, '--numstat']
        stdout = subprocess.Popen(cmdline, stdout=subprocess.PIPE).stdout
        stdout = stdout.read()

        insertions = {}
        deletions = {}

        for line in stdout.split('\n'):
            try:
                i, d, path = line.split('\t')
                insertions[path] = int(i)
                deletions[path] = int(d)
            except Exception:  # EAFP
                pass

        # statline = stdout.split('\n')[-2]

        # match = re.match('.*\s(.*)\sinsertions.*\s(.*)\sdeletions', statline)

        # insertions = int(match.group(1))
        # deletions = int(match.group(2))
        return insertions, deletions

    def checkout(self, sha):
        # NOTE(review): intentionally a no-op stub; BenchRepo handles
        # checkouts for benchmarking.
        pass
class BenchRepo(object):
    """
    Manage an isolated copy of a repository for benchmarking
    """
    def __init__(self, source_url, target_dir, build_cmds, prep_cmd,
                 clean_cmd=None, dependencies=None, always_clean=False):
        self.source_url = source_url
        self.target_dir = target_dir
        # Pristine clone kept next to the working copy for fast resets.
        self.target_dir_tmp = target_dir + '_tmp'
        self.build_cmds = build_cmds
        self.prep_cmd = prep_cmd
        self.clean_cmd = clean_cmd
        self.dependencies = dependencies
        self.always_clean = always_clean
        self._clean_checkout()
        self._copy_repo()

    def _clean_checkout(self):
        # Fresh clone of the source into the pristine tmp directory.
        log.debug("Clean checkout of %s from %s"
                  % (self.source_url, self.target_dir_tmp))
        self._clone(self.source_url, self.target_dir_tmp, rm=True)

    def _copy_repo(self):
        # Rebuild the working copy from the pristine clone, then prep it.
        log.debug("Repopulating %s" % self.target_dir)
        self._clone(self.target_dir_tmp, self.target_dir, rm=True)
        self._prep()

    def _clone(self, source, target, rm=False):
        """git-clone *source* into *target*, optionally removing an
        existing target first (raises RuntimeError otherwise)."""
        log.info("Cloning %s over to %s" % (source, target))
        if os.path.exists(target):
            if rm:
                log.info('Deleting %s first' % target)
                # response = raw_input('%s exists, delete? y/n' % self.target_dir)
                # if response == 'n':
                #     raise Exception('foo')
                # yoh: no need to divert from Python
                #run_cmd('rm -rf %s' % self.target_dir)
                shutil.rmtree(target)
            else:
                raise RuntimeError("Target directory %s already exists. "
                                   "Can't clone into it" % target)
        run_cmd(['git', 'clone', source, target])

    def _copy_benchmark_scripts_and_deps(self):
        # Copy the runner script plus any user-listed dependency files
        # into the working copy so benchmarks can import them.
        pth, _ = os.path.split(os.path.abspath(__file__))
        deps = [os.path.join(pth, 'scripts/vb_run_benchmarks.py')]
        if self.dependencies is not None:
            deps.extend(self.dependencies)

        for dep in deps:
            proc = run_cmd('cp %s %s' % (dep, self.target_dir), shell=True)

    def switch_to_revision(self, rev):
        """
        rev: git SHA
        """
        log.info("Switching to revision %s", rev)
        if self.always_clean:
            self.hard_clean()
        else:
            self._clean()
        self._checkout(rev)
        self._copy_benchmark_scripts_and_deps()
        self._clean_pyc_files()
        self._build()

    def _checkout(self, rev):
        git = _git_command(self.target_dir)
        rest = 'checkout -f %s' % rev
        args = git.split() + rest.split()
        # checkout of a detached commit would always produce stderr
        proc = run_cmd(args, stderr_levels=('debug', 'error'))

    def _build(self):
        # Join non-empty lines of the build script with ';' and run once.
        cmd = ';'.join([x for x in self.build_cmds.split('\n')
                        if len(x.strip()) > 0])
        proc = run_cmd(cmd, shell=True, cwd=self.target_dir)

    def _prep(self):
        cmd = ';'.join([x for x in self.prep_cmd.split('\n')
                        if len(x.strip()) > 0])
        proc = run_cmd(cmd, shell=True, cwd=self.target_dir)

    def _clean(self):
        # No-op when no clean command was configured.
        if not self.clean_cmd:
            return
        cmd = ';'.join([x for x in self.clean_cmd.split('\n')
                        if len(x.strip()) > 0])
        proc = run_cmd(cmd, shell=True, cwd=self.target_dir)

    def hard_clean(self):
        # Throw away the working copy and restore it from the pristine clone.
        self._copy_repo()

    def _clean_pyc_files(self, extensions=('.pyc', '.pyo')):
        # Remove stale bytecode so benchmarks run against fresh builds.
        clean_me = []
        for root, dirs, files in list(os.walk(self.target_dir)):
            for f in files:
                if os.path.splitext(f)[-1] in extensions:
                    clean_me.append(os.path.join(root, f))

        for path in clean_me:
            try:
                os.unlink(path)
            except Exception:
                pass
def _convert_timezones(stamps):
# tz = config.TIME_ZONE
def _convert(dt):
offset = dt.tzinfo.utcoffset(dt)
dt = dt.replace(tzinfo=None)
dt = dt - offset
return dt
return [_convert(x) for x in stamps]
def _git_command(repo_path):
return ('git --git-dir=%s/.git --work-tree=%s ' % (repo_path, repo_path))
def get_commit_history():
    # return TimeSeries
    # NOTE(review): legacy helper that references names not defined in
    # this module (`rungithist`, `dates`, `shas`, `hists`); calling it
    # raises NameError.  GitRepo._parse_commit_log is the working path.
    rungithist()

    githist = open('githist.txt').read()
    os.remove('githist.txt')

    sha_date = []
    for line in githist.split('\n'):
        sha_date.append(line.split()[:2])

    return Series(dates, shas), hists
def get_commit_churn(sha, prev_sha):
    """Return ({path: insertions}, {path: deletions}) between two commits.

    Runs ``git diff --numstat`` in the current working directory.  Lines
    that do not parse as three tab-separated fields with integer counts
    (e.g. binary files, which show '-') are skipped.
    """
    # TODO: handle stderr
    stdout = subprocess.Popen(['git', 'diff', sha, prev_sha, '--numstat'],
                              stdout=subprocess.PIPE).stdout
    stdout = stdout.read()

    insertions = {}
    deletions = {}

    for line in stdout.split('\n'):
        try:
            i, d, path = line.split('\t')
            insertions[path] = int(i)
            deletions[path] = int(d)
        except ValueError:
            # EAFP: the bare `except:` here used to swallow everything,
            # including KeyboardInterrupt; unpack/int failures on
            # non-numstat lines raise ValueError specifically.
            pass
    return insertions, deletions
def get_code_churn(commits):
    """Build a Panel of per-file insertions/deletions for a commit Series.

    *commits* is a Series indexed by sha (newest first); churn is
    computed between each consecutive pair of commits, keyed by the
    newer sha.
    """
    shas = commits.index[::-1]

    prev = shas[0]

    # (The old `insertions = [np.nan]` / `deletions = [np.nan]` list
    # initializers were dead code, immediately shadowed by these dicts.)
    insertions = {}
    deletions = {}

    for cur in shas[1:]:
        i, d = get_commit_churn(cur, prev)

        insertions[cur] = i
        deletions[cur] = d

        prev = cur

    return Panel({'insertions': DataFrame(insertions),
                  'deletions': DataFrame(deletions)}, minor_axis=shas)
if __name__ == '__main__':
    # Ad-hoc smoke test against a local pandas checkout.
    repo_path = '/home/wesm/code/pandas'  # XXX: specific?
    repo = GitRepo(repo_path)
    # NOTE(review): `by_commit = 5` looks like leftover scratch; nothing
    # reads it here.
    by_commit = 5
AndreasMadsen/tensorflow | tensorflow/examples/learn/text_classification_cnn.py | 13 | 4470 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
# Populated by parse_known_args() in the __main__ block.
FLAGS = None

# Documents are truncated/padded to this many word ids.
MAX_DOCUMENT_LENGTH = 100
# Dimensionality of the learned word embeddings.
EMBEDDING_SIZE = 20
# Number of filters in each convolutional layer.
N_FILTERS = 10
# Convolution window (in words).
WINDOW_SIZE = 20
# [height, width] filter shapes for the two conv layers.
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
# Max-pool window and stride applied after layer 1.
POOLING_WINDOW = 4
POOLING_STRIDE = 2
# Vocabulary size; set in main() after fitting the VocabularyProcessor.
n_words = 0
def cnn_model(features, target):
  """2 layer ConvNet to predict from sequence of words to a class."""
  # Convert indexes of words into embeddings.
  # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
  # maps word indexes of the sequence into [batch_size, sequence_length,
  # EMBEDDING_SIZE].
  # Targets are one-hot encoded over the 15 DBpedia classes.
  target = tf.one_hot(target, 15, 1, 0)
  word_vectors = tf.contrib.layers.embed_sequence(
      features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
  # Add a channels dimension so conv2d sees a 4-D tensor.
  word_vectors = tf.expand_dims(word_vectors, 3)
  with tf.variable_scope('CNN_Layer1'):
    # Apply Convolution filtering on input sequence.
    conv1 = tf.contrib.layers.convolution2d(word_vectors, N_FILTERS,
                                            FILTER_SHAPE1, padding='VALID')
    # Add a RELU for non linearity.
    conv1 = tf.nn.relu(conv1)
    # Max pooling across output of Convolution+Relu.
    pool1 = tf.nn.max_pool(
        conv1, ksize=[1, POOLING_WINDOW, 1, 1],
        strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
    # Transpose matrix so that n_filters from convolution becomes width.
    pool1 = tf.transpose(pool1, [0, 1, 3, 2])
  with tf.variable_scope('CNN_Layer2'):
    # Second level of convolution filtering.
    conv2 = tf.contrib.layers.convolution2d(pool1, N_FILTERS,
                                            FILTER_SHAPE2, padding='VALID')
    # Max across each filter to get useful features for classification.
    pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
  # Apply regular WX + B and classification.
  logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
  train_op = tf.contrib.layers.optimize_loss(
      loss, tf.contrib.framework.get_global_step(),
      optimizer='Adam', learning_rate=0.01)
  # Returns (predictions dict, loss, train_op) per the Estimator model_fn
  # contract used by tf.contrib.learn.
  return (
      {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
      loss, train_op)
def main(unused_argv):
  """Load DBpedia, train the CNN classifier, and print test accuracy."""
  global n_words
  # Prepare training and testing data
  dbpedia = learn.datasets.load_dataset(
      'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
  # Column 1 of the data holds the document text.
  x_train = pandas.DataFrame(dbpedia.train.data)[1]
  y_train = pandas.Series(dbpedia.train.target)
  x_test = pandas.DataFrame(dbpedia.test.data)[1]
  y_test = pandas.Series(dbpedia.test.target)

  # Process vocabulary
  vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
  x_train = np.array(list(vocab_processor.fit_transform(x_train)))
  x_test = np.array(list(vocab_processor.transform(x_test)))
  # cnn_model reads n_words at graph-build time, hence the global.
  n_words = len(vocab_processor.vocabulary_)
  print('Total words: %d' % n_words)

  # Build model
  classifier = learn.Estimator(model_fn=cnn_model)

  # Train and predict
  classifier.fit(x_train, y_train, steps=100)
  y_predicted = [
      p['class'] for p in classifier.predict(x_test, as_iterable=True)]
  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--test_with_fake_data',
      default=False,
      help='Test the example code with fake data.',
      action='store_true'
  )
  # Forward unparsed args to tf.app.run so TF's own flags still work.
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
jplourenco/bokeh | examples/interactions/us_marriages_divorces/us_marriages_divorces_interactive.py | 26 | 3437 | # coding: utf-8
# Plotting U.S. marriage and divorce statistics
#
# Example code by Randal S. Olson (http://www.randalolson.com)
from bokeh.plotting import figure, show, output_file, ColumnDataSource
from bokeh.models import HoverTool, NumeralTickFormatter
from bokeh.models import SingleIntervalTicker, LinearAxis
import pandas as pd
# Since the data set is loaded in the bokeh data repository, we can do this:
from bokeh.sampledata.us_marriages_divorces import data
# Work on a copy so the shared sample dataset is left untouched.
md_data = data.copy()

# Fill in missing data with a simple linear interpolation
md_data = md_data.interpolate(method='linear', axis=0).ffill().bfill()

# Tell Bokeh where to save the interactive chart
output_file('us_marriages_divorces_per_capita.html',
            # Tell Bokeh to use its minified JavaScript hosted on a
            # cdn instead of putting the Bokeh JS in the output file
            # Warning: This makes it so people can only view the
            # chart with an internet connection
            mode='cdn',
            title='144 years of marriage and divorce in the U.S.A.')

# Set up the data sources for the lines we'll be plotting.
# We need separate data sources for each line because we're
# displaying different data in the hover tool.
source_marriages = ColumnDataSource(
    data=dict(
        # x-axis (Years) for the chart
        x=md_data.Year.values,
        # y-axis (Marriages per capita) for the chart
        y=md_data.Marriages_per_1000.values,
        # The string version of the y-value that is displayed in the hover box
        y_text=md_data.Marriages_per_1000.apply(
            lambda x: '{}'.format(round(x, 1))),
        # Extra descriptive text that is displayed in the hover box
        desc=['marriages per 1,000 people'] * len(md_data),
    )
)

source_divorces = ColumnDataSource(
    data=dict(
        # x-axis (Years) for the chart
        x=md_data.Year.values,
        # y-axis (Divorces per capita) for the chart
        y=md_data.Divorces_per_1000.values,
        # The string version of the y-value that is displayed in the hover box
        y_text=md_data.Divorces_per_1000.apply(
            lambda x: '{}'.format(round(x, 1))),
        # Extra descriptive text that is displayed in the hover box
        desc=['divorces and annulments per 1,000 people'] * len(md_data),
    )
)

# Use HTML to mark up the tooltip that displays over the chart
# Note that the variables in the data sources (above) are referenced with a @
hover = HoverTool(
    tooltips='<font face="Arial" size="3">@y_text @desc in @x</font>')

# Select the tools that will be available to the chart
TOOLS = ['pan,wheel_zoom,box_zoom,reset,save,resize'] + [hover]

bplot = figure(tools=TOOLS, width=800, height=500, x_axis_type=None)

# Create a custom x-axis with 10-year intervals
ticker = SingleIntervalTicker(interval=10, num_minor_ticks=0)
xaxis = LinearAxis(ticker=ticker)
bplot.add_layout(xaxis, 'below')

# Customize the y-axis
bplot.yaxis.formatter = NumeralTickFormatter(format='0.0a')
bplot.yaxis.axis_label = '# per 1,000 people'

# Provide a descriptive title for the chart
bplot.title = '144 years of marriage and divorce in the U.S.'

# Finally, plot the data!
# Note that the data source determines what is plotted and what shows in
# the tooltips
bplot.line('x', 'y', color='#1f77b4', line_width=3, source=source_marriages)
bplot.line('x', 'y', color='#ff7f0e', line_width=3, source=source_divorces)

show(bplot)
| bsd-3-clause |
mrcslws/nupic.research | projects/rsm/util.py | 3 | 18918 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
import math

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix
from torch.nn.functional import cosine_similarity
def square_size(n):
    """Smallest integer side length s with s * s >= n.

    Uses math.isqrt, which is exact for arbitrarily large integers;
    int(np.sqrt(n)) can be off by one for large n due to float rounding.
    """
    side = math.isqrt(int(n))
    if side ** 2 < n:
        side += 1
    return side
def activity_square(vector):
    """Zero-pad *vector* to the next perfect square and reshape to 2-D."""
    count = len(vector)
    side = square_size(count)
    padded = torch.zeros(side ** 2)
    padded[:count] = vector
    return padded.view(side, side)
def fig2img(fig):
    """Render a matplotlib Figure to an RGB uint8 array of shape (H, W, 3)."""
    canvas = FigureCanvas(fig)
    canvas.draw()
    width, height = fig.get_size_inches() * fig.get_dpi()
    # np.fromstring is deprecated for binary input; frombuffer reads the
    # rendered RGB bytes directly.  The final .copy() keeps the result
    # writable, matching fromstring's old behavior (frombuffer alone
    # returns a read-only view of the canvas buffer).
    img = np.frombuffer(canvas.tostring_rgb(), dtype="uint8").reshape(
        int(height), int(width), 3
    ).copy()
    return img
def plot_confusion_matrix(
    y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues
):
    """
    This function plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    y_true/y_pred are expected to be torch tensors (they are moved to the
    CPU before being handed to sklearn); *classes* labels the axes.
    Returns (ax, fig) for the rendered matrix.
    """
    # Compute confusion matrix
    cm = confusion_matrix(y_true.cpu(), y_pred.cpu())
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]

    fig = Figure()
    ax = fig.gca()
    im = ax.imshow(cm, interpolation="nearest", cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(
        xticks=np.arange(cm.shape[1]),
        yticks=np.arange(cm.shape[0]),
        # ... and label them with the respective list entries
        xticklabels=classes,
        yticklabels=classes,
        title=title,
        ylabel="True label",
        xlabel="Predicted label",
    )

    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

    # Loop over data dimensions and create text annotations.
    fmt = ".2f" if normalize else "d"
    thresh = cm.max() / 2.0
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(
                j,
                i,
                format(cm[i, j], fmt),
                ha="center",
                va="center",
                # white text on dark cells, black on light ones
                color="white" if cm[i, j] > thresh else "black",
            )
    return ax, fig
def plot_activity_grid(distrs, n_labels=10):
    """
    For flattened models, plot cell activations for each combination of
    input and actual next input.

    *distrs* maps "i-j" keys to lists of activation tensors; axes for
    missing combinations are hidden.  Returns the assembled figure.
    """
    fig, axs = plt.subplots(
        n_labels,
        n_labels,
        dpi=300,
        gridspec_kw={"hspace": 0.7, "wspace": 0.7},
        sharex=True,
        sharey=True,
    )
    for i in range(n_labels):
        for j in range(n_labels):
            key = "%d-%d" % (i, j)
            # Bug fix: always select the (i, j) axis up front.  The
            # original only assigned `ax` inside the key-present branch,
            # so the missing-key branch hid/labeled a stale axis from a
            # previous iteration (and raised NameError if the very first
            # key was missing).
            ax = axs[i][j]
            if key in distrs:
                activity_arr = distrs[key]
                dist = torch.stack(activity_arr)
                mean_act = activity_square(dist.mean(dim=0).cpu())
                side = mean_act.size(0)
                ax.imshow(mean_act, origin="bottom", extent=(0, side, 0, side))
            else:
                ax.set_visible(False)
            ax.axis("off")
            ax.set_title(key, fontsize=5)
    return fig
def plot_activity(distrs, n_labels=10, level="column"):
    """
    Plot column activations for each combination of input and actual next input

    Should show mini-column union activity (subsets of column-level activity
    which predict next input) in the RSM model.

    *level* selects "column" (per-column max) or "cell" (per-cell plus an
    appended column-max row) activity.
    """
    n_plots = len(distrs.keys())
    fig, axs = plt.subplots(n_plots, 1, dpi=300, gridspec_kw={"hspace": 0.7})
    pi = 0
    for i in range(n_labels):
        for j in range(n_labels):
            key = "%d-%d" % (i, j)
            if key in distrs:
                activity_arr = distrs[key]
                # assumes each entry stacks to (batch, m columns, n cells)
                # — TODO confirm against callers
                dist = torch.stack(activity_arr)
                ax = axs[pi]
                pi += 1
                bsz, m, n = dist.size()
                no_columns = n == 1
                # per-column activity: max over the cells in each column
                col_act = dist.max(dim=2).values
                if level == "column" or no_columns:
                    act = col_act
                elif level == "cell":
                    # append the column max as an extra "cell" row
                    col = col_act.view(bsz, m, 1)
                    act = torch.cat((dist, col), 2).view(bsz, m, n + 1)
                mean_act = act.mean(dim=0).cpu()
                if no_columns:
                    mean_act = activity_square(mean_act)
                    side = mean_act.size(0)
                    ax.imshow(mean_act, origin="bottom", extent=(0, side, 0, side))
                else:
                    ax.imshow(
                        mean_act.t(), origin="bottom", extent=(0, m - 1, 0, n + 1)
                    )
                    # separator line between cell rows and the appended
                    # column-max row
                    ax.plot([0, m - 1], [n, n], linewidth=0.4)
                ax.axis("off")
                ax.set_title(key, fontsize=5)
    return fig
def _repr_similarity_grid(
    ax,
    activity_arr,
    cmap=plt.cm.Blues,
    normalize=False,
    labels=None,
    title=None,
    tick_fontsize=2,
    fontsize=1.2,
):
    """Draw a symmetric cosine-similarity grid for a list of activity
    vectors onto *ax*.  None entries are left at similarity 0.

    NOTE(review): the *normalize* parameter is currently unused.
    """
    n_labels = len(labels)
    grid = torch.zeros(n_labels, n_labels)

    # Compute grid (cosine similarity); only the lower triangle is
    # computed and mirrored into the upper one.
    for i, act1 in enumerate(activity_arr):
        for j, act2 in enumerate(activity_arr):
            if j > i:
                break
            if act1 is not None and act2 is not None:
                sim = cosine_similarity(act1, act2, dim=0)
                grid[i, j] = grid[j, i] = sim

    ax.imshow(grid, interpolation="nearest", cmap=cmap, vmin=0, vmax=1)
    # ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(
        xticks=np.arange(grid.shape[1]),
        yticks=np.arange(grid.shape[0]),
        # ... and label them with the respective list entries
        xticklabels=labels,
        yticklabels=labels,
        title=title,
    )
    ax.tick_params(labelsize=tick_fontsize)

    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")

    # Loop over data dimensions and create text annotations.
    thresh = grid.max() / 2.0
    for i in range(grid.shape[0]):
        for j in range(grid.shape[1]):
            ax.text(
                j,
                i,
                format(grid[i, j], ".2f"),
                ha="center",
                va="center",
                fontsize=fontsize,
                color="white" if grid[i, j] > thresh else "black",
            )
def plot_representation_similarity(
    distrs, n_labels=10, title=None, save=None, fontsize=1.6
):
    """
    Plot grid showing representation similarity between distributions passed
    into distrs dict.

    Produces a two-panel figure: column-level similarity (only when the
    stacked activity has a cell dimension) and cell-level similarity.
    Optionally saves to *save* and returns the figure.
    """
    fig, axs = plt.subplots(1, 2, dpi=300)

    ax_id = 0
    col_activities = []
    cell_activities = []
    labels = []
    for i in range(n_labels):
        for j in range(n_labels):
            key = "%d-%d" % (i, j)
            col_act = cell_act = None
            if key in distrs:
                activity_arr = distrs[key]
                dist = torch.stack(activity_arr)
                ax_id += 1
                size = dist.size()
                if len(size) == 3:
                    bsz, m, n = size
                    tc = m * n
                else:
                    bsz, m = size
                    tc = m
                if m != tc:
                    # column activity = per-column max, batch-averaged
                    col_act = (
                        dist.max(dim=-1).values.view(bsz, m).mean(dim=0).flatten().cpu()
                    )
                    col_activities.append(col_act)
                # TODO: Check reshaping here
                cell_act = dist.view(bsz, tc).mean(dim=0).flatten().cpu()
                labels.append(key)
                cell_activities.append(cell_act)

    if col_activities:
        _repr_similarity_grid(
            axs[0], col_activities, labels=labels, title="Column", fontsize=fontsize
        )
    _repr_similarity_grid(
        axs[1], cell_activities, labels=labels, title="Cell", fontsize=fontsize
    )
    suptitle = "Repr Similarity (Cos)"
    if title:
        suptitle += " - " + title
    fig.suptitle(suptitle)
    if save:
        fig.savefig(save)
    return fig
def get_grad_printer(msg):
    """
    Build a hook suitable for Tensor.register_hook that prints diagnostics
    about the gradient flowing through the backward pass, prefixed by *msg*.
    """
    def printer(grad):
        # Scalar gradients are printed verbatim; larger tensors get a summary.
        if grad.nelement() == 1:
            print(f"{msg} {grad}")
            return
        nonzero = len(grad.nonzero())
        print(
            f"{msg} shape: {grad.shape}"
            f" {nonzero}/{grad.numel()} nonzero"
            f" max: {grad.max()} min: {grad.min()}"
            f" mean: {grad.mean()}"
        )
    return printer
def count_parameters(model, exclude=None):
    """Return the number of trainable parameters in *model*, optionally
    skipping any parameter whose name contains the substring *exclude*."""
    total = 0
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if exclude is not None and exclude in name:
            continue
        total += param.numel()
    return total
def print_epoch_values(ret):
    """
    Return a copy of the epoch-values dict with the large image/histogram
    payloads (keys prefixed "img_" / "hist_") filtered out, so it can be
    printed compactly.
    """
    skip_prefixes = ("img_", "hist_")
    return {key: value for key, value in ret.items()
            if not key.startswith(skip_prefixes)}
def _plot_grad_flow(model, top=0.01):
    """
    Plots the gradients flowing through different layers in the net during
    training. Can be used for checking for possible gradient
    vanishing / exploding problems.

    Bars show max (cyan) and mean (blue) absolute gradient per non-bias
    parameter; layer names suffixed with " *" had a zero (or absent)
    gradient. Draws onto the current pyplot figure (no figure is returned).

    Usage: Plug this function in Trainer class after loss.backwards() as
    "plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow

    :param model: module whose named_parameters() are inspected.
    :param top: upper y-limit, zooming in on the small-gradient region.
    """
    ave_grads = []
    max_grads = []
    layers = []
    for n, p in model.named_parameters():
        # Biases are skipped; only trainable weights are plotted.
        if (p.requires_grad) and ("bias" not in n):
            # zg flags a zero (or missing) gradient for this parameter.
            zg = False
            if p.grad is not None:
                pmax = p.grad.abs().max()
                ave_grads.append(p.grad.abs().mean())
                max_grads.append(pmax)
                zg = pmax == 0
            else:
                # No gradient computed yet (e.g. unused parameter).
                ave_grads.append(0)
                max_grads.append(0)
                zg = True
            if zg:
                n += " *"
            layers.append(n)
    print("Gradients", max_grads)
    # Max bars first so the mean bars overlay them (both semi-transparent).
    plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
    plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
    plt.hlines(0, 0, len(ave_grads) + 1, lw=2, color="k")
    plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
    plt.xlim(left=0, right=len(ave_grads))
    plt.ylim(bottom=-0.001, top=top) # zoom in on the lower gradient regions
    plt.xlabel("Layers")
    plt.ylabel("average gradient")
    plt.title("Gradient flow (* indicates 0 grad)")
    plt.grid(True)
    labels = ["max-gradient", "mean-gradient", "zero-gradient"]
    plt.legend(
        [
            Line2D([0], [0], color="c", lw=4),
            Line2D([0], [0], color="b", lw=4),
            Line2D([0], [0], color="k", lw=4),
        ],
        labels,
    )
def plot_cluster_weights(model):
    """
    Scatter-plot 3-component PCA embeddings of the model's input weight
    matrices: FF input (linear_a) on the left panel, recurrent input
    (linear_b) on the right. Returns the matplotlib figure.
    """
    # Switched to standard PCA.
    # To identify column formation we'll need to combine weights
    # linear_a_int/linear_b_int since clusters may include cells
    # across FF and predictive partitions.
    pca = PCA(n_components=3)
    fig, axs = plt.subplots(1, 2, dpi=200)
    weights_a = model.linear_a.weight.data.cpu()
    emb_a = pca.fit_transform(weights_a)
    axs[0].scatter(emb_a[:, 0], emb_a[:, 1], c=emb_a[:, 2], s=1.5, alpha=0.6)
    axs[0].set_title("FF input - %d cells" % weights_a.shape[0])
    weights_b = model.linear_b.weight.data.cpu()
    # The recurrent partition may be empty; only plot when it has rows.
    if len(weights_b):
        emb_b = pca.fit_transform(weights_b)
        axs[1].scatter(emb_b[:, 0], emb_b[:, 1], c=emb_b[:, 2], s=1.5, alpha=0.6)
        axs[1].set_title("Rec input - %d cells" % weights_b.shape[0])
    return fig
def print_aligned_sentences(s1, s2, labels=None):
    """
    Print two whitespace-tokenised sentences on consecutive lines, padding
    each token so that corresponding words line up in columns. Trailing
    tokens of the longer sentence are ignored (pairwise zip).
    """
    words1 = s1.split()
    words2 = s2.split()
    line1 = ""
    line2 = ""
    for w1, w2 in zip(words1, words2):
        # Pad both words to the wider of the pair, plus one separator space.
        width = max(len(w1), len(w2)) + 1
        line1 += w1.ljust(width)
        line2 += w2.ljust(width)
    name1 = labels[0] if labels else "s1"
    name2 = labels[1] if labels else "s2"
    print("%s: %s" % (name1, line1))
    print("%s: %s" % (name2, line2))
def _is_long(x):
if hasattr(x, "data"):
x = x.data
return isinstance(x, torch.LongTensor) or isinstance(x, torch.cuda.LongTensor)
def onehot(indexes, n=None, ignore_index=None):
    """
    Create a one-hot (byte) representation of a long-tensor of indexes
    with N possible entries. When N is omitted it is inferred from the
    maximum index present. Rows whose index equals *ignore_index* are
    all-zero in the result.
    """
    num_classes = indexes.max() + 1 if n is None else n
    shape = list(indexes.size())
    # Allocate an uninitialised byte tensor of shape (*shape, num_classes)
    # on the same device as the input, then clear it.
    output = indexes.new().byte().resize_(*shape, num_classes).zero_()
    output.scatter_(-1, indexes.unsqueeze(-1), 1)
    if ignore_index is not None and ignore_index >= 0:
        output.masked_fill_(indexes.eq(ignore_index).unsqueeze(-1), 0)
    return output
def smoothed_cross_entropy(
    inputs,
    target,
    weight=None,
    ignore_index=-100,
    reduction="mean",
    smooth_eps=None,
    smooth_dist=None,
    from_logits=True,
):
    """Cross entropy loss, with support for target distributions and label
    smoothing (https://arxiv.org/abs/1512.00567).

    :param inputs: logits (or log-probabilities when from_logits is False),
        class dimension last.
    :param target: either a LongTensor of class indexes or a float tensor
        holding a target distribution over classes.
    :param weight: optional per-class weight, multiplied into the
        log-probabilities.
    :param ignore_index: class index whose rows contribute zero loss
        (only honoured for long targets).
    :param reduction: "mean", "sum", or anything else for no reduction.
    :param smooth_eps: label-smoothing epsilon (None/0 disables smoothing).
    :param smooth_dist: optional smoothing distribution; when given together
        with smooth_eps > 0, the target is interpolated towards it.
        NOTE: the interpolation uses in-place lerp_, so a float `target`
        passed by the caller is mutated.
    :param from_logits: when True, a log-softmax is applied to inputs.
    """
    smooth_eps = smooth_eps or 0
    # Fast path: hard labels and no smoothing — defer to the fused
    # torch.nn.functional implementations.
    if _is_long(target) and smooth_eps == 0:
        if from_logits:
            return F.cross_entropy(
                inputs, target, weight, ignore_index=ignore_index, reduction=reduction
            )
        else:
            return F.nll_loss(
                inputs, target, weight, ignore_index=ignore_index, reduction=reduction
            )
    if from_logits:
        # log-softmax of inputs
        lsm = F.log_softmax(inputs, dim=-1)
    else:
        lsm = inputs
    masked_indices = None
    num_classes = inputs.size(-1)
    if _is_long(target) and ignore_index >= 0:
        masked_indices = target.eq(ignore_index)
    if smooth_eps > 0 and smooth_dist is not None:
        # Convert hard labels to one-hot, then blend towards smooth_dist.
        if _is_long(target):
            target = onehot(target, num_classes).type_as(inputs)
        if smooth_dist.dim() < target.dim():
            smooth_dist = smooth_dist.unsqueeze(0)
        target.lerp_(smooth_dist, smooth_eps)
    if weight is not None:
        lsm = lsm * weight.unsqueeze(0)
    if _is_long(target):
        # Hard labels with uniform smoothing: eps mass spread over classes.
        eps_sum = smooth_eps / num_classes
        eps_nll = 1.0 - eps_sum - smooth_eps
        likelihood = lsm.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
        loss = -(eps_nll * likelihood + eps_sum * lsm.sum(-1))
    else:
        # Soft targets: standard cross-entropy against the distribution.
        loss = -(target * lsm).sum(-1)
    if masked_indices is not None:
        loss.masked_fill_(masked_indices, 0)
    if reduction == "sum":
        loss = loss.sum()
    elif reduction == "mean":
        if masked_indices is None:
            loss = loss.mean()
        else:
            # Average only over the non-ignored rows.
            loss = loss.sum() / float(loss.size(0) - masked_indices.sum())
    return loss
class SmoothedCrossEntropyLoss(nn.CrossEntropyLoss):
    """
    Drop-in replacement for nn.CrossEntropyLoss that additionally accepts
    target distributions and optional label smoothing; the actual
    computation is delegated to smoothed_cross_entropy().
    """
    def __init__(
        self,
        weight=None,
        ignore_index=-100,
        reduction="mean",
        smooth_eps=None,
        smooth_dist=None,
        from_logits=True,
    ):
        super().__init__(
            weight=weight, ignore_index=ignore_index, reduction=reduction
        )
        self.smooth_eps = smooth_eps
        self.smooth_dist = smooth_dist
        self.from_logits = from_logits

    def forward(self, x, target, smooth_dist=None):
        # A per-call smoothing distribution overrides the one set at init.
        dist = self.smooth_dist if smooth_dist is None else smooth_dist
        return smoothed_cross_entropy(
            x,
            target,
            weight=self.weight,
            ignore_index=self.ignore_index,
            reduction=self.reduction,
            smooth_eps=self.smooth_eps,
            smooth_dist=dist,
            from_logits=self.from_logits,
        )
def plot_tensors(model, tuples, detailed=False, return_fig=False):
    """
    Plot the first item in the batch for each labelled tensor across all
    model layers: one subplot column per (label, value) tuple, one row per
    layer (top row = deepest layer).

    :param model: module exposing n_layers and per-layer children with
        total_cells / m / n attributes — assumed; TODO confirm against the
        model class.
    :param tuples: iterable of (label, value) pairs; value is either a
        stacked tensor indexed [layer, 0] or a per-layer list (entries may
        be None for layers without output).
    :param detailed: when True, append size/range/sum stats to each title.
    :param return_fig: return the figure instead of calling plt.show().
    """
    n_tensors = len(tuples)
    fig, axs = plt.subplots(model.n_layers, n_tensors, dpi=144)
    for i, (label, val) in enumerate(tuples):
        for layer in range(model.n_layers):
            # Rows are flipped so the last layer appears at the top.
            layer_idx = model.n_layers - layer - 1
            ax = axs[layer_idx][i] if n_tensors > 1 else axs[layer_idx]
            # Get layer's values (from either list or tensor)
            # Outputs can't be stored in tensors since dimension heterogeneous
            if isinstance(val, list):
                if val[layer] is None:
                    ax.set_visible(False)
                    t = None
                else:
                    t = val[layer].detach()[0]
            else:
                t = val.detach()[layer, 0]
            mod = list(model.children())[layer]
            if t is not None:
                size = t.numel()
                # Cell-level tensors (m columns x n cells) get a 2D layout;
                # anything else is rendered as a square via activity_square.
                is_cell_level = t.numel() == mod.total_cells and mod.n > 1
                if is_cell_level:
                    ax.imshow(
                        t.view(mod.m, mod.n).t(),
                        origin="bottom",
                        extent=(0, mod.m - 1, 0, mod.n + 1),
                    )
                else:
                    ax.imshow(activity_square(t))
                tmin = t.min()
                tmax = t.max()
                tsum = t.sum()
                title = "L%d %s" % (layer + 1, label)
                if detailed:
                    title += " (%s, rng: %.3f-%.3f, sum: %.3f)" % (
                        size,
                        tmin,
                        tmax,
                        tsum,
                    )
                ax.set_title(title)
    if return_fig:
        return fig
    else:
        plt.show()
| agpl-3.0 |
kernc/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
# Public API of this submodule: names re-exported from the supervised,
# unsupervised and bicluster modules imported above.
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
           "adjusted_rand_score", "completeness_score", "contingency_matrix",
           "expected_mutual_information", "homogeneity_completeness_v_measure",
           "homogeneity_score", "mutual_info_score", "v_measure_score",
           "entropy", "silhouette_samples", "silhouette_score",
           "consensus_score"]
| bsd-3-clause |
ChinaQuants/tushare | tushare/util/store.py | 40 | 1124 | # -*- coding:utf-8 -*-
"""
Created on 2015/02/04
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
import pandas as pd
import tushare as ts
from pandas import compat
import os
class Store(object):
    """
    Thin wrapper around a pandas DataFrame that remembers a default file
    name and directory and can derive an export path.

    :param data: a pandas.DataFrame; any other type raises RuntimeError.
    :param name: default base file name used by save_as when none is given.
    :param path: default directory used by save_as when none is given.
    """
    def __init__(self, data=None, name=None, path=None):
        if isinstance(data, pd.DataFrame):
            self.data = data
        else:
            raise RuntimeError('data type is incorrect')
        self.name = name
        self.path = path

    def save_as(self, name, path, to='csv'):
        """
        Build the target file path '<path>/<name>.<to>', creating the
        directory if needed. None arguments fall back to the defaults
        stored on the instance; an unusable name prints 'input error'.
        """
        if name is None:
            name = self.name
        if path is None:
            path = self.path
        file_path = '%s%s%s.%s'
        # BUGFIX: the original tested `name is not ''` — an identity
        # comparison, which is not a reliable emptiness check; use != .
        # isinstance(name, str) replaces the removed pandas
        # compat.string_types (equivalent to (str,) on Python 3).
        if isinstance(name, str) and name != '':
            if (path is None) or (path == ''):
                file_path = '.'.join([name, to])
            else:
                # Narrowed from a bare except: only filesystem errors from
                # os.path.exists / os.mkdir are expected here.
                try:
                    if os.path.exists(path) is False:
                        os.mkdir(path)
                    file_path = file_path % (path, '/', name, to)
                except OSError:
                    pass
        else:
            print('input error')
| bsd-3-clause |
Microsoft/multiverso | binding/python/examples/theano/cnn.py | 6 | 5128 | #!/usr/bin/env python
# coding:utf8
"""
This code is adapted from
https://github.com/benanne/theano-tutorial/blob/master/6_convnet.py
The MIT License (MIT)
Copyright (c) 2015 Sander Dieleman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
import load_data
from theano.tensor.nnet import conv
from theano.tensor.signal import downsample
# MULTIVERSO: import multiverso
import multiverso as mv
# MULTIVERSO: the sharedvar in theano_ext acts same like Theano's
# sharedVariables. But it use multiverso as the backend
from multiverso.theano_ext import sharedvar
# Load CIFAR-10 and derive integer test labels from the one-hot targets.
x_train, t_train, x_test, t_test = load_data.load_cifar10()
labels_test = np.argmax(t_test, axis=1)
# reshape data to NCHW layout (batch, 3 channels, 32x32) for conv2d
x_train = x_train.reshape((x_train.shape[0], 3, 32, 32))
x_test = x_test.reshape((x_test.shape[0], 3, 32, 32))
# define symbolic Theano variables (x: image batch, t: one-hot targets)
x = T.tensor4()
t = T.matrix()
# define model: neural network
def floatX(x):
    """Cast *x* to a NumPy array using Theano's configured float dtype."""
    target_dtype = theano.config.floatX
    return np.asarray(x, dtype=target_dtype)
def init_weights(shape, name):
    """Create a named, multiverso-backed shared variable initialised with
    small Gaussian noise (std 0.1) of the given shape."""
    # MULTIVERSO: mv_shared replaces theano.shared as the storage backend.
    initial = floatX(np.random.randn(*shape) * 0.1)
    return sharedvar.mv_shared(initial, name=name)
def momentum(cost, params, learning_rate, momentum):
    """Return Theano update pairs implementing SGD with classical momentum:
    a per-parameter velocity accumulator plus the parameter step."""
    updates = []
    for param, grad in zip(params, theano.grad(cost, params)):
        # MULTIVERSO: the velocity state is an mv_shared variable so it is
        # synchronised across workers like the parameters themselves.
        velocity = sharedvar.mv_shared(
            np.zeros(param.get_value().shape, dtype=theano.config.floatX))
        step = momentum * velocity - learning_rate * grad
        updates.append((velocity, step))
        updates.append((param, param + step))
    return updates
def model(x, w_c1, b_c1, w_c2, b_c2, w_h3, b_h3, w_o, b_o):
    """Two ReLU conv+maxpool stages, a ReLU hidden layer, and a softmax
    output; returns the symbolic class-probability expression."""
    conv1 = T.maximum(0, conv.conv2d(x, w_c1) + b_c1.dimshuffle('x', 0, 'x', 'x'))
    pool1 = downsample.max_pool_2d(conv1, (3, 3))
    conv2 = T.maximum(0, conv.conv2d(pool1, w_c2) + b_c2.dimshuffle('x', 0, 'x', 'x'))
    pool2 = downsample.max_pool_2d(conv2, (2, 2))
    flat = pool2.flatten(2)
    hidden = T.maximum(0, T.dot(flat, w_h3) + b_h3)
    return T.nnet.softmax(T.dot(hidden, w_o) + b_o)
# NOTE(review): this file is Python 2 (see the `print` statement below);
# `start / batch_size` relies on Python 2 integer division.
# MULTIVERSO: you should call mv.init before calling any multiverso APIs.
mv.init()
worker_id = mv.worker_id()
# MULTIVERSO: every process has a distinct worker id
workers_num = mv.workers_num()
# Network parameters: two conv stages, one hidden layer, softmax output.
w_c1 = init_weights((4, 3, 3, 3), name="w_c1")
b_c1 = init_weights((4,), name="b_c1")
w_c2 = init_weights((8, 4, 3, 3), name="w_c2")
b_c2 = init_weights((8,), name="b_c2")
w_h3 = init_weights((8 * 4 * 4, 100), name="w_h3")
b_h3 = init_weights((100,), name="b_h3")
w_o = init_weights((100, 10), name="w_o")
b_o = init_weights((10,), name="b_o")
params = [w_c1, b_c1, w_c2, b_c2, w_h3, b_h3, w_o, b_o]
p_y_given_x = model(x, *params)
y = T.argmax(p_y_given_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(p_y_given_x, t))
updates = momentum(cost, params, learning_rate=0.01, momentum=0.9)
# compile theano functions
train = theano.function([x, t], cost, updates=updates, allow_input_downcast=True)
predict = theano.function([x], y, allow_input_downcast=True)
# MULTIVERSO: all the workers will synchronize at the place you call barrier
mv.barrier()
# train model: 50 epochs, each worker handling an interleaved subset of batches
batch_size = 50
for i in range(50):
    for start in range(0, len(x_train), batch_size):
        # every process only trains the batches assigned to itself
        if start / batch_size % workers_num != worker_id:
            continue
        x_batch = x_train[start:start + batch_size]
        t_batch = t_train[start:start + batch_size]
        cost = train(x_batch, t_batch)
        # MULTIVERSO: sync values with multiverso after every batch
        sharedvar.sync_all_mv_shared_vars()
    # MULTIVERSO: all the workers will synchronize at the place you call barrier
    mv.barrier() # barrier every epoch
    # only the master worker computes/report the test accuracy
    if mv.is_master_worker():
        predictions_test = predict(x_test)
        accuracy = np.mean(predictions_test == labels_test)
        print "epoch %d - accuracy: %.4f" % (i + 1, accuracy)
# MULTIVERSO: You must call shutdown at the end of the file
mv.shutdown()
| mit |
armenvod/phone_match | testing_plz.py | 1 | 1251 | '''phone1 = 'Samsung Galaxy S8 Smartphone, Android, 5.8", 4G LTE, SIM Free, 64GB'
print(phone1.replace(',',''))'''
'''def brand_search(phone):
result = []
phone_split = phone.split()
string_length = len(phone_split)
i=0
while i < string_length:
empty = []
phones = fon.getdevice(phone_split[i])
try:
for phone in phones:
empty.append(phone['DeviceName'])
except:
empty.append(phones)
i =i+1
result.append(empty)
# print(result)
#seen = set()
# uniq = [x for x in result if x not in seen and not seen.add(x)]
#print(uniq)
gross = [[1,2,3,],[2,5,6]]
seen = set()
repeated = set()
for l in result:
for i in set(l):
if i in seen:
repeated.add(i)
else:
seen.add(i)
print(repeated)'''
import csv
import pandas as pd
df = pd.read_csv('18_oct.csv')
#saved_column = df['Name'] #you can also use df['column_name']
"""included_cols = [1,2]
with open('18_oct.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
for row in spamreader:
content = list(row[i] for i in included_cols)
print(content)"""
| mit |
samhollenbach/Galaxy2 | Reader.py | 1 | 2657 | import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import sys
# Replay the simulation file in a loop while the window stays open.
REPEAT = True
# When True, updateplot() also writes each frame to frames/frame_0NN.png.
SAVE_IMAGES = False
def randrange(n, vmin, vmax):
    """Return *n* uniform random floats in the half-open range [vmin, vmax)."""
    span = vmax - vmin
    return vmin + span * np.random.rand(n)
# Module-level 3D figure set up at import time; updateplot()/read_sim()
# draw into these globals.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Flag toggled by the close-event handler so read_sim() can stop early.
ax.has_been_closed = False
ax.set_axis_bgcolor('black')
n = 100
# Half-extent of the plotted cube, in parsecs (axes run -size..+size).
figsizeX = 10000
figsizeY = 10000
figsizeZ = 10000
plt.xlim([-figsizeX, figsizeX])
plt.ylim([-figsizeY, figsizeY])
ax.set_zlim(bottom=-figsizeZ, top=figsizeZ, emit=True, auto=False)
particle_num = 0
plt.tick_params(axis='both', color='r', labelcolor='r')
def on_close(event):
    """Matplotlib close-event handler: flag the figure's first axes as
    closed so the reading loop can terminate."""
    closing_axes = event.canvas.figure.axes[0]
    closing_axes.has_been_closed = True
# Red axis labels to stand out on the black background.
ax.xaxis.label.set_color('red')
ax.yaxis.label.set_color('red')
ax.zaxis.label.set_color('red')
# Wire the close handler so closing the window stops read_sim().
fig.canvas.mpl_connect('close_event', on_close)
def updateplot(iteration, xs, ys, zs, galaxy_num, particle_num):
    """Redraw the 3D scatter for one simulation iteration.

    Clears the module-level axes, prints the iteration/galaxy/particle
    counters in the corner, scatters the particle positions, optionally
    saves the frame (SAVE_IMAGES), and yields briefly to the GUI loop.
    """
    ax.clear()
    ax.autoscale(enable=False)
    ax.set_xlabel('(pc)')
    ax.set_ylabel('(pc)')
    ax.set_zlabel('(pc)')
    ax.text2D(0.05, 0.95, "Iteration: " + repr(iteration), transform=ax.transAxes, color='red')
    ax.text2D(0.05, 0.90, "Galaxies: " + repr(galaxy_num), transform=ax.transAxes, color='red')
    ax.text2D(0.05, 0.85, "Particles: " + repr(particle_num), transform=ax.transAxes, color='red')
    ax.scatter(xs, ys, zs, c='r', marker='o', s=7)
    if (SAVE_IMAGES):
        # Zero-padded frame number so files sort in order.
        plt.savefig("frames/frame_0{0:0>2}.png".format(iteration))
    # Short pause lets matplotlib process events and repaint.
    plt.pause(0.001)
def read_sim():
    """Stream sim_data.txt and animate it.

    Expected format: an optional "HEAD:<galaxies>,<particles>" line, then
    one "<iteration>,<x>,<y>,<z>" line per particle. Positions are
    accumulated until the iteration number changes, at which point the
    collected frame is drawn via updateplot(). Stops early if the window
    has been closed. NOTE(review): the positions gathered for the very
    last iteration are never plotted (the flush only happens on an
    iteration change) — confirm whether that is intended.
    """
    with open('sim_data.txt', 'r') as f:
        iter = -1
        xs = []
        ys = []
        zs = []
        for line in f:
            if ax.has_been_closed:
                break
            if line.startswith("HEAD:"):
                head_data = line[5:].rstrip().split(',')
                galaxies = int(head_data[0])
                particles = int(head_data[1])
                continue
            data = line.split(',')
            i = data[0]
            if i != iter:
                # New iteration: plot the frame accumulated so far.
                iter = i
                try:
                    updateplot(iter, xs, ys, zs, galaxies, particles)
                except UnboundLocalError:
                    # galaxies/particles were never set: no HEAD line seen.
                    print("Improper HEAD line in your sim_data file, please fix this error and try again")
                    sys.exit()
                xs = []
                ys = []
                zs = []
            x = float(data[1])
            y = float(data[2])
            z = float(data[3])
            xs.append(x)
            ys.append(y)
            zs.append(z)
# First pass (may save frames), then replay without saving while the
# window remains open. Note SAVE_IMAGES is overwritten globally here.
read_sim()
while REPEAT and plt.fignum_exists(fig.number):
    SAVE_IMAGES = False
    read_sim()
plt.close() | mit |
cellnopt/cellnopt | cno/boolean/steady.py | 1 | 36274 | from cno.core.base import CNOBase
from cno.core.results import BooleanResults
from cno.core.models import BooleanModels
from cno.misc.profiler import do_profile
from cno.io.reactions import Reaction
import pandas as pd
import numpy as np
import pylab
import time
import bottleneck as bn
from collections import defaultdict
import collections
from easydev import AttrDict
class SteadyMultiRun(CNOBase):
    """Run the Steady optimiser several times and collect the best scores.

    Example of multi-run steady-state analysis; results accumulate in
    self.mr (a cno MultiResults instance).
    """
    def __init__(self, pknmodel, data, N=10, verbose=True):
        # NOTE(review): the N parameter is unused here (runs are set by
        # run()'s own N argument) — confirm whether it can be dropped.
        super(SteadyMultiRun, self).__init__(pknmodel, data, verbose)
        from cno.optimisers import multiresults
        self.mr = multiresults.MultiResults()
    def run(self, N=10, maxgens=20, popsize=50):
        """Optimise N independent Steady instances and store each run's
        best score in self.mr; a progress bar tracks the runs."""
        from easydev import progressbar
        pb = progressbar.progress_bar(N, interval=1)
        for i in range(0,N):
            s = Steady(self.pknmodel, self.data)
            s.optimise(maxtime=1e6, maxgens=maxgens, popsize=popsize, maxstallgen=200)
            self.mr.add_scores(s.results.results.best_score)
            pb.animate(i+1)
        # Record the population size used, for downstream plotting.
        self.mr.interval = popsize
class Steady(CNOBase):
"""Naive implementation of Steady state to help in designing the API
Here is the algorithm:
In CellNOptR, FCs are normalised between -1 and 1. Then, FC are transformed into
values between 0 and 1 (even negative !!)
In order to recognise the negative values, the X0 values is set to 1 and the negative
FC is transformed into 1 - FC (i.e., a large FC .
This is obviously important and is reflected in the mathematical equations
set on the edges: if a link is an inhibitory link then the output f(x) = 1 - x.
If values are kept as -1, 0, 1, the boolean formalism cannot be implemented.
The time X0 should be the control that is where FC is zero. For positive FC, X0 is zero.
For negative FC, X0 is 1. This can be simulated by setting the inhibitors and stimuli
to zero.
"""
    def __init__(self, pknmodel, data, verbose=True):
        """Build a steady-state simulator from a PKN model and a MIDAS data set."""
        super(Steady, self).__init__(pknmodel, data, verbose)
        # Work on a copy so the original PKN model is left untouched.
        self.model = self.pknmodel.copy()
        # to speed up code later on
        self.model.buffer_reactions = self.model.reactions[:]
        # if you change the model or data, call _init_model_data() again
        self._init_model_data()
        self.debug = True
        self.counter = 0
        self.length_buffer = 10000
        # affects the simulation and its stopping criteria;
        # this is an arbitrary additional tick to match CNOR simulation
        self._shift = -1
        self._shift0 = -1
        # Scoring parameters (see score()): NA and model-size penalties.
        self._params = {}
        self._params['include_time_zero'] = True
        self._params['NAFac'] = 1
        self._params['sizeFac'] = 1e-4
        self._params = AttrDict(**self._params)
        self.debug_score = False
        self.stopcount = None
        # Drug-effect overrides used by _simulate: nodes forced to 1 / 0
        # when a given inhibitor is active.
        self.paradoxical = {}
        self.repressors = {}
        self._previous_fit = 0
    def _init_model_data(self):
        """Cache stimuli/inhibitor matrices and measured data from self.data."""
        # By default simulate the second time point of the MIDAS file.
        self.time = self.data.times[1]
        # just a reference to the conditions; _all/_none variants are used to
        # switch between t>0 (real conditions) and t=0 (all off) simulations
        self.inhibitors = self.data.inhibitors.copy()
        self._inhibitors_none = self.data.inhibitors.copy() * 0
        self._inhibitors_all = self.data.inhibitors.copy()
        self.stimuli = self.data.stimuli.copy()
        self._stimuli_none = self.data.stimuli.copy() * 0
        self._stimuli_all = self.data.stimuli.copy()
        self.inhibitors_names = self.data.inhibitors.columns
        self.stimuli_names = self.data.stimuli.columns
        # Number of experiments (rows at time zero).
        self.N = self.data.df.query('time==0').shape[0]
        self.results = BooleanResults() # for time T1
        # (source, target) pairs of inhibitory edges; their inputs are
        # flipped (1 - x) during simulation
        self.toflip = [x[0:2] for x in self.model.edges(data=True) if x[2]['link'] == '-']
        self.init(self.time)
        self.buffering = True
        self.buffer = {}
        self.simulated = {}
#@do_profile()
    def _init_values(self, time=False):
        """Reset node values; for time>0 apply the real stimuli/inhibitor
        conditions, otherwise use the all-zero (control) conditions."""
        self.values = {}
        for node in self.model.nodes():
            # Do we really want NAs ? probably not. fold changes are by
            # definition 0
            #self.values[node] = np.array([np.nan for x in range(0,self.N)])
            self.values[node] = np.zeros(self.N)
        if time > 0:
            self.stimuli = self._stimuli_all
            self.inhibitors = self._inhibitors_all
            # Stimulated nodes start at their experimental values.
            for this in self.stimuli_names:
                self.values[this] = self.stimuli[this].values.copy()
            #for this in self.inhibitors_names:
            #    self.values[this] = 1. - self.inhibitors[this].values.copy()
            #toflip = [x[1] for x in self.toflip]
            #for this in toflip:
            #    self.values[this] = np.ones(10)
        else:
            # time 0: control condition, every stimulus/inhibitor off.
            self.stimuli = self._stimuli_none
            self.inhibitors = self._inhibitors_none
    def init(self, time):
        """Precompute graph lookups (predecessors, AND gates, reaction
        mappings) and the measured-data matrices used by the simulator.

        Here, we define the values of the stimuli and inhibitors
        based on the order provided inside self.stimuli and
        self.inhibitors. Later, one has to be cautious with the measured
        data, whose order may be different !!
        """
        assert time in self.data.times
        self._init_values(time=time)
        # AND gates are encoded as nodes containing "^" in their name.
        self.and_gates = [x for x in self.model.nodes() if "^" in x]
        self.predecessors = {}
        for node in self.model.nodes():
            self.predecessors[node] = self.model.predecessors(node)
        self.number_predecessors = {}
        for node in self.predecessors.keys():
            # NOTE(review): this overwrites the dict with an int on every
            # iteration — presumably meant self.number_predecessors[node]
            # = len(...). The attribute appears unused later; confirm.
            self.number_predecessors = len(self.predecessors[node])
        self.successors = {}
        for node in self.model.nodes():
            self.successors[node] = self.model.successors(node)
        # Total number of input edges; used as nInTot in score().
        self.nInputs = np.sum([len(self.model.predecessors(x)) for x in self.model.nodes()])
        self.nInputs -= len(self.model._find_and_nodes()) # get rid of the output edge on AND gates
        # Nodes that are updated at each tick (everything except stimuli
        # and AND gates, which are handled separately).
        self.tochange = [x for x in self.model.nodes() if x not in self.stimuli_names
                        and x not in self.and_gates]
        self._reactions = [Reaction(r) for r in self.model.reactions]
        self.N_reactions = len(self._reactions)
        self._np_reactions = np.array(self.model.reactions)
        # Map each reaction string to (target node, list of input species).
        self._reac2pred = {}
        for r in self.model.reactions:
            reac = Reaction(r)
            if "^" in reac.lhs:
                self._reac2pred[r] = (r, reac.lhs_species)
            else:
                self._reac2pred[r] = (reac.rhs, reac.lhs_species)
        # note that the order of the rows is experiments as defined in
        # data.df not data.experiments
        self.measures = {}
        # FIXME No need for time zero but if so, need to re-order the experiments
        #self.measures[0] = self.data.df.query("time==0").reset_index(drop=True).values
        for time in self.midas.times:
            df = self.data.df.query("time==@time")
            df = df.ix[self.data.cellLine]
            # Re-order rows to match the stimuli matrix ordering.
            df = df.ix[self.stimuli.index]
            df = df.reset_index(drop=True).values
            self.measures[time] = df.copy()
        self.inhibitors_failed = []
    def preprocessing(self, expansion=True, compression=True, cutnonc=True):
        """Apply the standard CNO preprocessing (expansion / compression /
        non-controllable-non-observable cut) and re-initialise the caches."""
        self.model.midas = self.data
        self.model.preprocessing(expansion=expansion, compression=compression,
                cutnonc=cutnonc)
        # The graph changed: rebuild all precomputed lookups.
        self.init(self.time)
        self.model.buffer_reactions = self.model.reactions
        # we should be using _model from the beginning?
        self._model = self.model
#@do_profile()
    def simulate(self, reactions=None, time=None, ntic=None):
        """Run the boolean steady-state simulation at time 0 (control) and
        at self.time, filling self.simulated[0] and self.simulated[time].

        :param reactions: subset of reactions to simulate (defaults to the
            buffered model reactions).
        :param time: optional time point (must be in the MIDAS times).
        :param ntic: optional fixed number of update ticks (overrides the
            residual-based stopping criterion).
        """
        if time != None:
            assert time in self.data.times
            self.time = time
        # sometimes the shift required to agree with CNOR is not the same
        # at time0 and time1...
        # time T0: only needed when inhibitory edges exist (otherwise the
        # control simulation is identically zero)
        if len(self.toflip):
            self._init_values(0)
            tmp = self._shift
            self._shift = self._shift0
            self._simulate(reactions=reactions, time=0, ntic=ntic)
            self._shift = tmp
        else:
            self.simulated[0] = np.zeros(self.measures[0].shape)
        # time T1
        self._init_values(self.time)
        self._simulate(reactions=reactions, time=None, ntic=ntic)
#@do_profile()
    def _simulate(self, reactions=None, time=None, ntic=None):
        """Synchronous boolean update loop.

        Iterates AND gates then ordinary nodes (OR over predecessors, with
        inhibitory edges flipped as 1-x), applies drug inhibition and the
        paradoxical/repressor overrides, until the state stops changing or
        the tick budget is exhausted. Nodes that never converged are set to
        NaN; the result (signals only, transposed to experiments x signals)
        is stored in self.simulated[time].
        """
        # pandas is very convenient but slower than numpy.
        # The DataFrame instanciation is costly as well;
        # for small models it has a non-negligeable cost.
        # inhibitors will be changed if not ON
        #self.tochange = [x for x in self.model.nodes() if x not in self.stimuli_names
        #    and x not in self.and_gates]
        if time is None:
            time = self.time
        # what about a species that is both inhibited and measured ?
        testVal = 1e-3
        import copy
        #values = copy.deepcopy(self.values)
        values = self.values # !! reference but should be reset when calling _init_values / simulate()
        if self.debug:
            self.debug_values = [values.copy()]
        self.residuals = []
        self.penalties = []
        self.count = 0
        self.nSp = len(values)
        residual = 1.
        frac = 1.2
        # _shift is set to +1 FIXME +1 is to have same results as in CellNOptR.
        # It means that with cycles you may not end up with the same results;
        # this happens if you have cycles with inhibitions
        # and an odd number of edges.
        if reactions is None:
            reactions = self.model.buffer_reactions
        # AND gates ("^") count as extra edges for the size penalty.
        self.number_edges = len([r for r in reactions]) + sum([this.count('^') for this in reactions])
        # 10 % time here
        #predecessors = self.reactions_to_predecessors(reactions)
        predecessors = defaultdict(collections.deque)
        for r in reactions:
            k, v = self._reac2pred[r]
            predecessors[k].extend(v)
        # speed up
        keys = sorted(self.values.keys())
        length_predecessors = dict([(node, len(predecessors[node])) for node in keys])
        #self._length_predecessors = length_predecessors
        # if there is an inhibition/drug, the node is 0
        # FIXME is this required ??
        for inh in self.inhibitors_names:
            if length_predecessors[inh] == 0:
                #values[inh] = np.array([np.nan for x in range(0,self.N)])
                values[inh] = np.zeros(self.N)
        # to get the same results as in CNOR, it is sometimes required
        # to add one more count
        # (to match time 0 in LiverDream, +3 is required).
        if ntic is None:
            ntic = self.nSp * frac + self._shift
        else: # we want to use ntic as the unique stopping criterion
            testVal = -1
        while ((self.count < ntic) and residual > testVal):
            self.previous = values.copy()
            #self.X0 = pd.DataFrame(self.values)
            #self.X0 = self.values.copy()
            # compute AND gates first, before updating the other nodes
            #for inh in self.paradoxical.keys():
            #    if node in self.paradoxical[inh]:
            #        values[node][(self.inhibitors[inh]==1).values] = 1
            #    #values[inh][(self.inhibitors[inh]==1).values] = 1
            for node in self.and_gates:
                # AND gate = min over predecessor values
                # (there are always predecessors when the gate is wired)
                if length_predecessors[node] != 0:
                    values[node] = bn.nanmin(np.array([self.previous[x] for x in predecessors[node]]), axis=0)
                else:
                    #assert 1==0, "%s %s" % (node, predecessors[node])
                    values[node] = self.previous[node]
            for node in self.tochange:
                # easy one, just the value of predecessors
                #if len(self.predecessors[node]) == 1:
                #    self.values[node] = self.values[self.predecessors[node][0]].copy()
                if length_predecessors[node] == 0:
                    pass # nothing to change
                else:
                    # OR = max over predecessors; inhibitory inputs flipped.
                    dummy = np.array([self.previous[x] if (x,node) not in self.toflip
                        else 1 - self.previous[x] for x in predecessors[node]])
                    try:
                        values[node] = bn.nanmax(dummy,  axis=0)
                    except:
                        # in some simple cases, we must reset the type. why.
                        values[node] = bn.nanmax(dummy.astype('int'), axis=0)
                # take inhibitors into account:
                # if the inhibitor is on (1), multiply by 0;
                # if the inhibitor is not active (0), do nothing
                if node in self.inhibitors_names and node not in self.inhibitors_failed:
                    values[node] *= 1 - self.inhibitors[node].values
                # paradoxical effects induced by drugs: force to 1 / 0
                for inh in self.paradoxical.keys():
                    if node in self.paradoxical[inh]:
                        values[node][(self.inhibitors[inh]==1).values] = 1
                for inh in self.repressors.keys():
                    if node in self.repressors[inh]:
                        values[node][(self.inhibitors[inh]==1).values] = 0
            # here NAs are set automatically to zero because of the int16 cast,
            # which also speeds up the code by removing the need to handle
            # NAs (with nansum, NAs are ignored even when 1 is compared to NA)
            self.m1 = np.array([self.previous[k] for k in keys ], dtype=np.int16)
            self.m2 = np.array([values[k] for k in keys ], dtype=np.int16)
            residual = bn.nansum(np.square(self.m1 - self.m2))
            #residual = np.nansum(np.square(self.m1 - self.m2))
            # TODO stop criteria should account for the length of the path to
            # the node itself, so count < nSp should be honoured whatever the residual.
            #
            if self.debug:
                self.debug_values.append(self.values.copy())
            self.residuals.append(residual)
            # stopcount: force at least 10 iterations by inflating the residual
            if self.stopcount :
                if self.count <10:
                    residual+=1
            self.count += 1
        #if self.debug is True:
        #    # add the latest values simulated in the while loop
        #    self.debug_values.append(values.copy())
        #self._values2 = values
        # Nodes still changing at the last tick did not converge: set to NA.
        mask = self.m1 != self.m2
        data = np.array([values[k] for k in keys], dtype=float)
        data[mask] = np.nan
        self.dd = data
        # Keep only measured signals, ordered as in the MIDAS dataframe.
        indices = [keys.index(x) for x in self.data.df.columns]
        if time == 0:
            self.simulated[0] = data[indices,:].transpose()
        else:
            self.simulated[self.time] = data[indices,:].transpose()
def get_errors_rates(self):
FN0 = ((self.simulated[0] - self.measures[0])<-0.5).sum(axis=0)
FP0 = ((self.simulated[0] - self.measures[0])>0.5).sum(axis=0)
FN = ((self.simulated[self.time] - self.measures[self.time])<-0.5).sum(axis=0)
FP = ((self.simulated[self.time] - self.measures[self.time])>0.5).sum(axis=0)
df = pd.DataFrame({'FP':FP, 'FN':FN, 'FN0':FN0, 'FP0':FP0}, index=self.midas.df.columns)
df /= len(self.midas.experiments)
return df
#@do_profile()
    def score(self):
        """Return the CellNOptR-style objective: normalised squared deviation
        between simulated and measured data (time 0 and self.time), plus a
        model-size penalty and an NA penalty.

        Reference trace from CellNOptR for cross-checking (T2 -> 0.03805909):

            Akt Hsp27 NFkB Erk p90RSK Jnk cJun
            [1,] 0.0081 0.00 0.7396 0.04 0.0144 0 0
            [2,] 0.0324 0.09 0.0100 0.00 0.0000 0 0
            [3,] 0.0081 0.09 0.0100 0.04 0.0144 0 0
            [4,] 0.0081 0.00 0.7396 0.00 0.0000 0 0
            [5,] 0.0324 0.09 0.0100 0.00 0.0000 0 0
            [6,] 0.0081 0.09 0.0100 0.00 0.0000 0 0
            [7,] 0.0000 0.00 0.0000 0.04 0.0144 0 0
            [8,] 0.0000 0.09 0.0100 0.00 0.0000 0 0
            [9,] 0.0000 0.09 0.0100 0.04 0.0144 0 0
            "nDataPts= 63"
            "nInputs= 13"
            "nInTot= 22"
            "deviationPen= 2.394"
            "NAPen= 0"
            "sizePen= 0.00372272727272727"
            0.03805909

        :return: the score S (lower is better).
        """
        # squared deviations at self.time, and optionally at time 0
        #self.diff = np.square(self.measures[self.time] - self.simulated[self.time])
        diff1 = (self.measures[self.time] - self.simulated[self.time])**2
        if self._params['include_time_zero'] is True:
            diff0 = (self.measures[0] - self.simulated[0])**2
        else:
            diff0 = 0
        # FIXME we could have an option to ignore time 0
        diff = diff1 + diff0
        N = diff.shape[0] * diff.shape[1]
        # FIXME Another issue with CNOR is that NAs are taken from the simulated data only, not the data itself...
        Nna1 = np.isnan(diff1).sum()
        Nna0 = np.isnan(diff0).sum()
        # we should check for NAs in both the measured and simulated data
        # (i.e. in the diff above) but in CNOR this comes from the
        # simulated data only....
        Nna1 = np.isnan(self.simulated[self.time]).sum()
        # FIXME in CNOR, NAs at time 0 are ignored. why ?
        Nna = np.isnan(self.measures[self.time]).sum()
        N-= Nna
        #nInTot = number of edges on in global model
        #nInTot = len(self.model.reactions)
        nInTot = self.nInputs # should be correct
        nDataPts = diff.shape[0] * diff.shape[1]
        nDataP = N # N points excluding the NAs if any
        #NAPen = NAFac * sum(self.simulated[self.time].isnull())
        # nInTot: number of inputs of the expanded model
        # nInputs: number of inputs of the cut model
        # In CNO:
        # nDataPts = number of points irrespective of NA
        # nDataP = sum(!is.na(CNOlist@signals[[timeIndex]]))
        # nInputs = number of inputs of the cut model;
        # for now, assume it is the same as the number of reactions
        # TODO AND gates should count for 1 edge
        nInputs = self.number_edges
        # sizePen should be same as in CNOR
        sizePen = nDataPts * self._params.sizeFac * nInputs / float(nInTot)
        debug = self.debug_score
        if debug:
            print("----")
            print("nDataPts=%s" % nDataPts)
            print("nInputs=%s" % nInputs)
            print("nInTot=%s" % nInTot)
            print('sizePen=%s' %sizePen)
            print('diff0=%s', (bn.nansum(diff0)) )
            print('diff1=%s', (bn.nansum(diff1 )))
        # TODO
        self.diff0 = diff0
        self.diff1 = diff1
        # average the two time points (NAs ignored via nansum)
        deviationPen = (bn.nansum(diff1) + bn.nansum(diff0))/ 2.
        #self.diff = diff / 2.
        #if self._params['include_time_zero'] is True:
        #    deviationPen *=2 # does not really matter but agrees with CNOR
        if debug:
            print("deviationPen=%s"% deviationPen)
            print("Nna=(%s %s)"% (Nna0, Nna1))
            print("nDataP=%s"% nDataP)
            print("deviationPen=%s"% deviationPen)
        if nDataP !=0:
            deviationPen /= float(nDataP)
            S = deviationPen + sizePen / nDataP
        else:
            # degenerate case: every point is NA, no normalisation possible
            S = deviationPen
        self._na_contrib = Nna/float(nDataPts)
        # NA penalty: fraction of NA simulated points, weighted by NAFac
        S = (S + self._params.NAFac * Nna1/float(nDataPts))
        #print self._previous_fit
        if debug:
            print("score=%s" %S)
        return S
def get_df(self, time, columns=None):
if columns is None:
columns = self.data.df.columns
import pandas as pd
df = pd.DataFrame(self.simulated[time], columns=self.data.df.columns)
#return df.columns[columns]
return df
def plot(self):
self.model.plot()
#@do_profile()
def test(self, N=100):
# N = 100, all bits on
# on EBI laptop:
# 23 April
# LiverDREAM 1s
# ToyMMB: 0.3s
# ToyPB: 4.7 (lots of feedback and NAs
# ExtLiverPCB: 1.54s
# CellNOptR on LiverDREAM 0.85 seconds. 0.58 in cno
# CellNOptR on LiverDREAM preprocessed) on:0.75 seconds. 1.42 in cno
# 0.2574948 in CellNOptR sometimes, we can reach a score=0.019786867202
# 0.27
# CellNOptR on ToyMMB : 0.13 ; 0.22s in cno
#
# 0.09467
# process and "EGF=Raf" "EGF+TNFa=PI3K" "Erk+TNFa=Hsp27" off
# then MSE is 0.10838/2
# CellNOptR on ExtLiverPCB : 1.4 seconds ; 1.7s in cno
# 0.29199
# cost of pruning models ?
"""
library(CellNOptR)
cnolist = ...
pknmodel = ...
system.time(replicate(100,computeScoreT1(cnolist, pknmodel, rep(58) ) ) )
"""
t1 = time.time()
reactions = []
while len(reactions)==0:
import random
threshold = np.random.uniform(0,1,1)
reactions = [r for r in self.model.reactions if random.uniform(0,1)>threshold]
self.simulate()
for i in range(0,N):
#self.init(self.time)
self.simulate()
self.score()
t2 = time.time()
print(str(t2-t1) + " seconds")
return t2-t1
def plotsim(self, experiments=None, fontsize=16, vmin=0, vmax=1, cmap='gray'):
"""
:param experiments: if None, shows the steady state for each experiment and species
if provided, must be a valid experiment name (see midas.experiments attribute)
in which case, for that particular experiment, the steady state and all previous
states are shown for each species.
A simulation must be performed using :meth:`simulate`
::
# those 2 calls are identical
s.plotsim(experiments=8)
s.plotsim(experiments=8)
# This plot the steady states for all experiments
s.plotsim()
"""
# This is for all experiments is experiments is None
cm = pylab.get_cmap(cmap)
pylab.clf()
if experiments is None: # takes the latest (steady state) of each experiments
data = pd.DataFrame(self.debug_values[-1]).fillna(0.5)
else:
exp_name = self.midas.experiments.ix[experiments].name
index_exp = list(self.midas.experiments.index).index(exp_name)
data = [(k, [self.debug_values[i][k][index_exp] for i in range(0, len(self.debug_values))])
for k in self.debug_values[0].keys()]
data = dict(data)
data = pd.DataFrame(data).fillna(0.5)
data = data.ix[data.index[::-1]]
self.dummy = data
pylab.pcolor(data, cmap=cm, vmin=vmin, vmax=vmax,
shading='faceted', edgecolors='gray')
pylab.colorbar()
ax1 = pylab.gca()
ax1.set_xticks([])
Ndata = len(data.columns)
ax1.set_xlim(0, Ndata)
ax1.set_ylim(0, len(data))
ax = pylab.twiny()
# FIXME seems shifted. could not fix it xticklabels seems to reset the position of the ticks
xr = pylab.linspace(0.5, Ndata-1.5, Ndata)
ax.set_xticks(xr)
ax.set_xticklabels(data.columns, fontsize=fontsize, rotation=90)
times = list(data.index)
Ntimes = len(times)
ax1.set_yticks([x+0.5 for x in times])
ax1.set_yticklabels(times[::-1],
fontsize=fontsize)
pylab.sca(ax1)
#pylab.title("Steady state for all experiments(x-axis)\n\n\n\n")
pylab.tight_layout()
def plot_errors(self, columns=None, reactions=None, show=True):
# use eval_func with debug one
debug = self.debug
self.debug = True
buffering = self.buffering
self.buffering = False
if reactions is None:
try:
self.eval_func(self.ga.results['Best_bitString'][-1])
except:
self.eval_func(self.best_bitstring)
else:
self.eval_func(self.reactions2parameters(reactions))
self.buffering = buffering
self.debug = debug
self.data.sim = self.get_sim()
if show is True:
self.data.plot(mode='mse')
score = self.score()
m = self.data.copy()
return m
def get_sim(self, columns=None):
if columns is None:
columns = self.data.df.columns
X0 = self.get_df(0, columns=columns)
X1 = self.get_df(self.time, columns=columns)
N = X1.shape[0]
X0['time'] = [0] * N
X0['cell'] = [self.data.cellLine] * N
X0['experiment'] = self.data.experiments.index
X0.set_index(['cell', 'experiment', 'time'], inplace=True)
sim = self.data.sim.copy()
sim.ix[X0.index] = X0 #.fillna(2)
X1['time'] = [self.time] * N
X1['cell'] = [self.data.cellLine] * N
X1['experiment'] = self.data.experiments.index
X1.set_index(['cell', 'experiment', 'time'], inplace=True)
sim.ix[X1.index] = X1 #.fillna(2)
return sim
def optimise2(self, time=None, verbose=True):
assert len(self.data.times) >= 2, "Must have at least 2 time points in the data"
time1 = self.time
if time is None:
self.time = self.data.times[2]
else:
self.time = time
self.init(self.time)
prior = list(self.results.results.best_bitstring) # best is the prior
self.simulate(self.parameters2reactions(list(self.best_bitstring)))
self._previous_fit = self.score()
self.optimise(prior=prior, verbose=verbose)
# reset to time1 FIXME why ?
self.init(time1)
def exhaustive(self):
from cno.optimisers.binary_tools import permutations
# create all
scores = []
sizes = []
from easydev import progress_bar
N = len(self.model.reactions)
pb = progress_bar(2**N)
for i,this in enumerate(permutations(N)):
self.simulate(self.parameters2reactions(this))
scores.append(self.score())
pb.animate(i)
sizes.append(sum(this))
#self._fill_results()
self.scores = scores
self.sizes = sizes
return scores
def parameters2reactions(self, chromosome):
reactions = [x for c,x in zip(chromosome, self._np_reactions) if c==1]
return reactions
def reactions2parameters(self, reactions):
reactions_off = [x for x in self.model.reactions if x not in reactions]
return self.prior2parameters(reactions, reactions_off)
def prior2parameters(self, reactions_on=[], reactions_off=[]):
prior = [None] * len(self.model.reactions)
assert len(set(reactions_on).intersection(set(reactions_off))) == 0,\
"Error. Found reactions in both lists."
for this in reactions_on:
idx = self.model.reactions.index(this)
prior[idx] = 1
for this in reactions_off:
idx = self.model.reactions.index(this)
prior[idx] = 0
return prior
#@do_profile()
def eval_func(self, chromosome, prior=[]):
"""
:param prior: a list of same length as chromosome made of 0/1/None
"""
# TODO limnit the buffering ?
for i, this in enumerate(prior):
if this is not None:
chromosome[i] = this
# using string or tuple takes about the same time but faster than a list
str_chrome = tuple(chromosome)
if self.buffering and len(self.buffer)<self.length_buffer and str_chrome in self.buffer.keys():
return self.buffer[str_chrome]
else:
# 110 times faster using numpy array instead of a list...
reactions = [x for c,x in zip(chromosome, self._np_reactions) if c==1]
self.simulate(reactions=reactions)
score = self.score()
if self.buffering is True and len(self.buffer)<self.length_buffer:
self.buffer[str_chrome] = score
self.counter +=1
return score
#@do_profile()
def optimise(self, verbose=False, popsize=50, maxgens=500, show=False, reltol=0.1,
pmutation=0.5,
maxtime=60, elitism=5, prior=[], guess=None, reuse_best=True, maxstallgen=100):
"""Using the CellNOptR-like GA"""
from cno.optimisers import genetic_algo
self._previous_fit = 0
ga = genetic_algo.GABinary(len(self.model.reactions), verbose=verbose,
maxgens=maxgens, popsize=popsize, maxtime=maxtime, reltol=reltol,
maxstallgen=maxstallgen, elitism=elitism, pmutation=pmutation)
# TODO: check length of init guess
if reuse_best is True:
try:
guess = self.results.results.best_bitstring
except:
pass
if guess is not None:
self.logging.debug('Setting guess')
ga.guess = guess
ga.init()
def eval_func_in(x):
return self.eval_func(x, prior=prior)
self.counter = 0
ga.getObj = eval_func_in
ga.run(show=show)
self.ga = ga
self._fill_results()
return ga
def _fill_results(self):
# FIXME this could be simplified a lot
from easydev import AttrDict
res = AttrDict(**self.ga.results)
results = pd.DataFrame(self.ga.results)
columns_int = ['Generation', 'Stall_Generation']
columns_float = ['Best_score', 'Avg_Score_Gen', 'Best_Score_Gen', 'Iter_time']
results[columns_int] = results[columns_int].astype(int)
results[columns_float] = results[columns_float].astype(float)
results = {
'best_score': res.Best_score,
'best_bitstring': res.Best_bitString[-1],
'all_scores': self.ga.popTolScores,
'all_bitstrings': self.ga.popTol,
'reactions': self.model.reactions,
#'sim_results': self.session.sim_results, # contains mse and sim at t0,t1,
'results': results,
#'models': models,
}
results['pkn'] = self.pknmodel
results['midas'] = self.data
#self.results.models = models
all_bs = self.ga.popTol
df = pd.DataFrame(all_bs, columns=self.model.reactions)
models = BooleanModels(df)
models.scores = results['all_scores']
self.results.results = results
self.results.models = models
self.results.models.cnograph.midas = self.data # to get the MIDAS annotation
self.best_bitstring = self.results.results.best_bitstring
def plot_models(self, filename=None, model_number=None, tolerance=None):
# if model_number set to float, models are filtered
# with scores < (1+model_number) times best score
self.results.models.plot(filename=None, model_number=model_number, tolerance=tolerance)
def _plot_essentiality(self, best_score, scores, threshold=1e-4, new_reactions=None, fontsize=16):
reactions = scores.keys()
pylab.clf()
pylab.axhline(best_score, label='score (all reactions)')
#pylab.axhline(best_score+ threshold, label=)
keys = sorted(scores.keys())
values = [scores[k] for k in keys]
pylab.plot(values, 'or', markersize=8)
N = len(keys)
pylab.xticks(range(0, N), keys, rotation=90, fontsize=fontsize)
pylab.yticks(fontsize=fontsize)
if new_reactions is not None:
self.simulate(reactions=new_reactions)
score = self.score()
pylab.axhline(score, color='k', lw=2 , ls='--', label='score (essential reactions)')
pylab.legend(fontsize=fontsize)
pylab.xlim(-1, len(values))
pylab.grid(True)
pylab.tight_layout()
def essentiality(self, reactions=None, threshold=1e-4, show=True, fontsize=20):
if reactions is None:
best_bitstring = list(self.results.results.best_bitstring)
reactions = self.parameters2reactions(self.best_bitstring)
self.simulate(reactions)
best_score = self.score()
scores = {}
for reac in reactions:
pruned_reactions = [r for r in reactions if r!=reac]
self.simulate(pruned_reactions)
scores[reac] = self.score()
new_reactions = reactions[:]
for reac in scores.keys():
if scores[reac] <= best_score + threshold:
new_reactions.remove(reac)
if show is True:
self._plot_essentiality(best_score, scores, threshold=threshold, new_reactions=new_reactions, fontsize=fontsize)
return scores, new_reactions
def essentiality_ands(self, reactions, threshold=1e-4):
"""checks essiality each reaons and all andre remov.
"""
self.simulate(reactions)
best_score = self.score()
scores = {}
for reac in reactions:
pruned_reactions = [r for r in reactions if r!=reac]
self.simulate(pruned_reactions)
scores[reac] = self.score()
new_reactions = reactions[:]
for reac in scores.keys():
if scores[reac] <= best_score + threshold:
new_reactions.remove(reac)
self._plot_essentiality(best_score, scores, threshold=threshold, new_reactions=new_reactions)
noands = [r for r in reactions if "^" not in r]
self.simulate(noands)
score_noands = self.score()
print('Scores with all reactions =%s.' % best_score)
print('Scores with no AND reactions =%s.' % score_noands)
pylab.axhline(score_noands, color='g', lw=4 , ls='-', alpha=0.3, label='score (no ands)')
pylab.legend()
return scores, noands
def prun_model(self, reactions):
self.simulate(reactions)
best_score = self.score()
scores, newr = self.essentiality(reactions, show=False)
self.simulate(newr)
new_score = self.score()
return newr
def clean_models(self, tolerance=0.1):
models = self.results.models.copy()
models.midas = self.midas
print("Found %s models within the tolerance" % len(models.df))
models.drop_duplicates()
print("Removing duplicates found %s" % len(models.df))
models.drop_scores_above(tolerance=tolerance)
print("Keeping within tolerance, found %s" % len(models.df))
from easydev import progress_bar
pb = progress_bar(len(models))
count = 0
changed = 0
for index in models.df.index:
count +=1
reactions = list(models.df.columns[models.df.ix[index]==1])
self.simulate(reactions)
score = self.score()
#if models.scores[index] != score:
# print(index, models.scores[index], score)
# compute essentiality to simplify models
dummy, newr = self.essentiality(reactions, show=False)
self.simulate(newr)
new_score = self.score()
#print score, new_score, len(reactions), len(newr)
if new_score <= score:
# keep that pruned model
models.df.ix[index] = self.reactions2parameters(newr)
models.scores.ix[index] = new_score
changed += 1
else:
# keep original
pass
pb.animate(count)
print('Simplified %s %% of the model' % float(changed/float(len(models.df))))
models.drop_duplicates()
print("Removing duplicaes found %s" % len(models.df))
models.drop_scores_above(tolerance=tolerance)
print("Keeping within tolerance, found %s" % len(models.df))
return models
| bsd-2-clause |
KennyCandy/HAR | _module123/CC_64_32.py | 1 | 17631 | # Note that the dataset must be already downloaded for this script to work, do:
# $ cd data/
# $ python download_dataset.py
# quoc_trinh
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import sys
import datetime
# get current file_name as [0] of array
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print(" File Name:")
print(file_name)
print("")
# FLAG to know that whether this is traning process or not.
FLAG = 'train'
N_HIDDEN_CONFIG = 32
save_path_name = file_name + "/model.ckpt"
print(datetime.datetime.now())
# Write to file: time to start, type, time to end
f = open(file_name + '/time.txt', 'a+')
f.write("------------- \n")
f.write("This is time \n")
f.write("Started at \n")
f.write(str(datetime.datetime.now())+'\n')
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
# Those are separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "../data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
row.replace(' ', ' ').strip().split(' ') for row in file
]]
)
file.close()
"""Examples
--------
>> > x = np.arange(4).reshape((2, 2))
>> > x
array([[0, 1],
[2, 3]])
>> > np.transpose(x)
array([[0, 2],
[1, 3]])
>> > x = np.ones((1, 2, 3))
>> > np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
X_test = load_X(X_test_signals_paths) # [7352, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
"""convert label from dense to one hot
argument:
label: ndarray dense label ,shape: [sample_num,1]
return:
one_hot_label: ndarray one hot, shape: [sample_num,n_class]
"""
label_num = len(label)
new_label = label.reshape(label_num) # shape : [sample_num]
# because max is 5, and we will create 6 columns
n_values = np.max(new_label) + 1
return np.eye(n_values)[np.array(new_label, dtype=np.int32)]
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
row.replace(' ', ' ').strip().split(' ') for row in file
]],
dtype=np.int32
)
file.close()
# Subtract 1 to each output class for friendly 0-based indexing
return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
class Config(object):
"""
define a class to store parameters,
the input should be feature mat of training and testing
"""
def __init__(self, X_train, X_test):
# Input data
self.train_count = len(X_train) # 7352 training series
self.test_data_count = len(X_test) # 2947 testing series
self.n_steps = len(X_train[0]) # 128 time_steps per series
# Training
self.learning_rate = 0.0025
self.lambda_loss_amount = 0.0015
self.training_epochs = 300
self.batch_size = 1000
# LSTM structure
self.n_inputs = len(X_train[0][0]) # Features count is of 9: three 3D sensors features over time
self.n_hidden = N_HIDDEN_CONFIG # nb of neurons inside the neural network
self.n_classes = 6 # Final output classes
self.W = {
'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])), # [9, 32]
'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes])) # [32, 6]
}
self.biases = {
'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)), # [32]
'output': tf.Variable(tf.random_normal([self.n_classes])) # [6]
}
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
# [none, 128, 9]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
# tra ve 1 gia tri random theo thuat toan truncated_ normal
initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
def bias_varibale(shape):
initial = tf.constant(0.1, shape=shape, name='Bias')
return tf.Variable(initial)
# Convolution and Pooling
def conv2d(x, W):
# Must have `strides[0] = strides[3] = 1 `.
# For the most common case of the same horizontal and vertices strides, `strides = [1, stride, stride, 1] `.
return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
def max_pool_2x2(x):
return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1], padding='SAME', name='max_pool')
def LSTM_Network(feature_mat, config):
"""model a LSTM Network,
it stacks 2 LSTM layers, each layer has n_hidden=32 cells
and 1 output layer, it is a full connet layer
argument:
feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs]
config: class containing config of network
return:
: matrix output shape [batch_size,n_classes]
"""
W_conv1 = weight_variable([3, 3, 1, 64])
b_conv1 = bias_varibale([64])
# x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
print("----feature_mat_image-----")
print(feature_mat_image.get_shape())
h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Second Convolutional Layer
W_conv2 = weight_variable([3, 3, 64, 128])
b_conv2 = weight_variable([128])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# Third Convolutional Layer
W_conv3 = weight_variable([3, 3, 128, 1])
b_conv3 = weight_variable([1])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
h_pool3 = h_conv3
h_pool3 = tf.reshape(h_pool3, shape=[-1, 32, 36])
feature_mat = h_pool3
print("----feature_mat-----")
print(feature_mat)
# exit()
# W_fc1 = weight_variable([8 * 9 * 1, 1024])
# b_fc1 = bias_varibale([1024])
# h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
# h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# print("----h_fc1_drop-----")
# print(h_fc1)
# exit()
#
# # keep_prob = tf.placeholder(tf.float32)
# keep_prob = tf.placeholder(1.0)
# h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
# print("----h_fc1_drop-----")
# print(h_fc1_drop)
# exit()
#
# W_fc2 = weight_variable([1024, 10])
# b_fc2 = bias_varibale([10])
#
# y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# print("----y_conv-----")
# print(y_conv)
# exit()
# Exchange dim 1 and dim 0
# Start at: [0,1,2] = [batch_size, 128, 9] => [batch_size, 32, 36]
feature_mat = tf.transpose(feature_mat, [1, 0, 2])
# New feature_mat's shape: [time_steps, batch_size, n_inputs] [128, batch_size, 9]
print("----feature_mat-----")
print(feature_mat)
# exit()
# Temporarily crush the feature_mat's dimensions
feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs]) # 9
# New feature_mat's shape: [time_steps*batch_size, n_inputs] # 128 * batch_size, 9
# Linear activation, reshaping inputs to the LSTM's number of hidden:
hidden = tf.nn.relu(tf.matmul(
feature_mat, config.W['hidden']
) + config.biases['hidden'])
# New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [128*batch_size, 32]
print("--n_steps--")
print(config.n_steps)
print("--hidden--")
print(hidden)
# Split the series because the rnn cell needs time_steps features, each of shape:
hidden = tf.split(0, config.n_steps, hidden) # (0, 128, [128*batch_size, 32])
# New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
# Define LSTM cell of first hidden layer:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
# Stack two LSTM layers, both layers has the same shape
lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
# Get LSTM outputs, the states are internal to the LSTM cells,they are not our attention here
outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)
# outputs' shape: a list of lenght "time_step" containing tensors of shape [batch_size, n_hidden]
print("------------------list-------------------")
print(outputs)
# Get last time step's output feature for a "many to one" style classifier,
# as in the image describing RNNs at the top of this page
lstm_last_output = outputs[-1] # Get the last element of the array: [?, 32]
print("------------------last outputs-------------------")
print (lstm_last_output)
# Linear activation
return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
pred_Y = LSTM_Network(X, config) # shape[?,6]
print("------------------pred_Y-------------------")
print(pred_Y)
# Loss,train_step,evaluation
l2 = config.lambda_loss_amount * \
sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax loss and L2
cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
learning_rate=config.learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
# Initializing the variables
init = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
best_accuracy = 0.0
# sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
if (FLAG == 'train') : # If it is the training mode
with tf.Session() as sess:
# tf.initialize_all_variables().run()
sess.run(init) # .run()
f.write("---Save model \n")
# Start training for each batch and loop epochs
for i in range(config.training_epochs):
for start, end in zip(range(0, config.train_count, config.batch_size), # (0, 7352, 1500)
range(config.batch_size, config.train_count + 1,
config.batch_size)): # (1500, 7353, 1500)
print(start)
print(end)
sess.run(train_step, feed_dict={X: X_train[start:end],
Y: y_train[start:end]})
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
print("traing iter: {},".format(i) + \
" test accuracy : {},".format(accuracy_out) + \
" loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
# Save the model in this session
save_path = saver.save(sess, file_name + "/model.ckpt")
print("Model saved in file: %s" % save_path)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
else :
# Running a new session
print("Starting 2nd session...")
with tf.Session() as sess:
# Initialize variables
sess.run(init)
f.write("---Restore model \n")
# Restore model weights from previously saved model
saver.restore(sess, file_name+ "/model.ckpt")
print("Model restored from file: %s" % save_path_name)
# Test completely at every epoch: calculate accuracy
pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
X: X_test, Y: y_test})
# print("traing iter: {}," + \
# " test accuracy : {},".format(accuracy_out) + \
# " loss : {}".format(loss_out))
best_accuracy = max(best_accuracy, accuracy_out)
print("")
print("final loss: {}").format(loss_out)
print("final test accuracy: {}".format(accuracy_out))
print("best epoch's test accuracy: {}".format(best_accuracy))
print("")
# Write all output to file
f.write("final loss:" + str(format(loss_out)) +" \n")
f.write("final test accuracy:" + str(format(accuracy_out)) +" \n")
f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
f.write("Ended at \n")
f.write(str(datetime.datetime.now())+'\n')
f.write("------------- \n")
f.close() | mit |
github4ry/pathomx | pathomx/plugins/multivariate/pca.py | 2 | 1960 | from sklearn.decomposition import PCA
pca = PCA(n_components=config['number_of_components'])
pca.fit(input_data.values)
import pandas as pd
import numpy as np
# Build scores into a dso no_of_samples x no_of_principal_components
scores = pd.DataFrame(pca.transform(input_data.values))
scores.index = input_data.index
columns = ['Principal Component %d (%0.2f%%)' % (n + 1, pca.explained_variance_ratio_[0] * 100.) for n in range(0, scores.shape[1])]
scores.columns = columns
weights = pd.DataFrame(pca.components_)
weights.columns = input_data.columns
dso_pc = {}
weightsi = []
# Generate simple result figure (using pathomx libs)
from pathomx.figures import spectra, scatterplot, plot_point_cov
for n in range(0, pca.components_.shape[0]):
pcd = pd.DataFrame(weights.values[n:n + 1, :])
pcd.columns = input_data.columns
vars()['PC%d' % (n + 1)] = spectra(pcd, styles=styles)
weightsi.append("PC %d" % (n + 1))
weights.index = weightsi
if config['plot_sample_numbers']:
label_index = 'Sample'
else:
label_index = None
# Build scores plots for all combinations up to n
score_combinations = list( set([ (a,b) for a in range(0,n) for b in range(a+1, n+1)]) )
for sc in score_combinations:
vars()['Scores %dv%d' % (sc[0]+1, sc[1]+1)] = scatterplot(scores.iloc[:,sc], styles=styles, label_index=label_index)
pcd = None
# Clean up
if config['filter_data']:
ffilter = None
for sc in score_combinations:
e = plot_point_cov( scores.iloc[:, score_combinations[0]])
filterset = []
for n in range(0, scores.shape[0]):
v = scores.iloc[n, score_combinations[0]].values
filterset.append( e.contains_point(v, radius=0))
filterset = np.array(filterset, dtype=np.bool)
if ffilter is not None:
ffilter = ffilter & filterset
else:
ffilter = filterset
filtered_data = input_data.iloc[filterset]
else:
filtered_data = None | gpl-3.0 |
"""
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""

from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
                            DictionaryLearning, MiniBatchDictionaryLearning,
                            SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation

# Public API of sklearn.decomposition.
__all__ = ['DictionaryLearning',
           'FactorAnalysis',
           'FastICA',
           'IncrementalPCA',
           'KernelPCA',
           'LatentDirichletAllocation',
           'MiniBatchDictionaryLearning',
           'MiniBatchSparsePCA',
           'NMF',
           'PCA',
           'ProjectedGradientNMF',
           'RandomizedPCA',
           'SparseCoder',
           'SparsePCA',
           'TruncatedSVD',
           'dict_learning',
           'dict_learning_online',
           'fastica',
           'randomized_svd',
           'sparse_encode']
"""Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
    """Spectral clustering recovers the two block clusters of S.

    Exercises every (eigen_solver, assign_labels, dense/sparse) combination
    and checks that a fitted model survives a pickle round-trip.
    """
    # Block-structured affinity matrix: rows 0-2 vs rows 3-6.
    S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])

    for eigen_solver in ('arpack', 'lobpcg'):
        for assign_labels in ('kmeans', 'discretize'):
            for mat in (S, sparse.csr_matrix(S)):
                model = SpectralClustering(random_state=0, n_clusters=2,
                                           affinity='precomputed',
                                           eigen_solver=eigen_solver,
                                           assign_labels=assign_labels
                                           ).fit(mat)
                labels = model.labels_
                # Cluster ids are arbitrary; normalise polarity first.
                if labels[0] == 0:
                    labels = 1 - labels
                assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])

                # The fitted model must survive pickling unchanged.
                model_copy = loads(dumps(model))
                assert_equal(model_copy.n_clusters, model.n_clusters)
                assert_equal(model_copy.eigen_solver, model.eigen_solver)
                assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
    """Test the amg eigen_solver of spectral_clustering.

    Runs the solver when pyamg is installed, otherwise checks that a
    ValueError is raised for the unavailable solver.
    """
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False
    if amg_loaded:
        labels = spectral_clustering(S, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # We don't care too much that it's good, just that it *worked*.
        # There does have to be some lower limit on the performance though.
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        assert_raises(ValueError, spectral_embedding, S,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
    """Test that spectral_clustering fails with an unknown eigen_solver."""
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    """Test that spectral_clustering fails with an unknown assign_labels."""
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    """Spectral clustering on a sparse, thresholded RBF affinity matrix."""
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)

    S = rbf_kernel(X, gamma=1)
    # Sparsify by clipping near-zero similarities before conversion.
    S = np.maximum(S - 1e-4, 0)
    S = sparse.coo_matrix(S)

    labels = SpectralClustering(random_state=0, n_clusters=2,
                                affinity='precomputed').fit(S).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
    """Exercise the different affinity options of SpectralClustering."""
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                            random_state=0)
    assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
    assert_equal(adjusted_rand_score(y, sp.labels_), 1)

    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)

    X = check_random_state(10).rand(10, 5) * 10

    kernels_available = kernel_metrics()
    for kern in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kern != 'additive_chi2':
            sp = SpectralClustering(n_clusters=2, affinity=kern,
                                    random_state=0)
            labels = sp.fit(X).labels_
            assert_equal((X.shape[0],), labels.shape)

    sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()

    sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    """Test discretize() against noisy class-assignment matrices."""
    # Test the discretize using a noise assignment matrix
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # random class labels
            y_true = random_state.random_integers(0, n_class, n_samples)
            y_true = np.array(y_true, np.float)
            # noise class assignment matrix
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                             (np.arange(n_samples),
                                              y_true)),
                                            shape=(n_samples,
                                                   n_class + 1))
            y_true_noisy = (y_indicator.toarray()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            # Discretization should stay close to the true assignment.
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
# License: MIT
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import copy as cp
import os
from math import floor, ceil, log
import itertools as itt
import warnings
import six
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from .io.write import start_file, end_file
from .io.proj import (make_projector, _proj_equal, activate_proj,
_has_eeg_average_ref_proj)
from .io import fiff_open
from .io.pick import (pick_types, channel_indices_by_type, pick_channels_cov,
pick_channels, pick_info, _picks_by_type)
from .io.constants import FIFF
from .io.meas_info import read_bad_channels
from .io.proj import _read_proj, _write_proj
from .io.tag import find_tag
from .io.tree import dir_tree_find
from .io.write import (start_block, end_block, write_int, write_name_list,
write_double, write_float_matrix, write_string)
from .defaults import _handle_default
from .epochs import _is_good
from .utils import (check_fname, logger, verbose, estimate_rank,
_compute_row_norms, check_sklearn_version, _time_mask)
from .utils import deprecated
from .externals.six.moves import zip
from .externals.six import string_types
def _check_covs_algebra(cov1, cov2):
if cov1.ch_names != cov2.ch_names:
raise ValueError('Both Covariance do not have the same list of '
'channels.')
projs1 = [str(c) for c in cov1['projs']]
projs2 = [str(c) for c in cov1['projs']]
if projs1 != projs2:
raise ValueError('Both Covariance do not have the same list of '
'SSP projections.')
def _get_tslice(epochs, tmin, tmax):
    """Return the slice of epochs.times covering [tmin, tmax].

    A bound of None leaves the corresponding side of the slice open.
    """
    mask = _time_mask(epochs.times, tmin, tmax)
    if tmin is None:
        tstart = None
    else:
        tstart = np.where(mask)[0][0]
    if tmax is None:
        tend = None
    else:
        # +1 so the slice includes the last in-range sample.
        tend = np.where(mask)[0][-1] + 1
    return slice(tstart, tend, None)
class Covariance(dict):
"""Noise covariance matrix.
.. warning:: This class should not be instantiated directly, but
instead should be created using a covariance reading or
computation function.
Parameters
----------
data : array-like
The data.
names : list of str
Channel names.
bads : list of str
Bad channels.
projs : list
Projection vectors.
nfree : int
Degrees of freedom.
eig : array-like | None
Eigenvalues.
eigvec : array-like | None
Eigenvectors.
method : str | None
The method used to compute the covariance.
loglik : float
The log likelihood.
Attributes
----------
data : array of shape (n_channels, n_channels)
The covariance.
ch_names : list of string
List of channels' names.
nfree : int
Number of degrees of freedom i.e. number of time points used.
See Also
--------
compute_covariance
compute_raw_covariance
make_ad_hoc_cov
read_cov
"""
def __init__(self, data, names, bads, projs, nfree, eig=None, eigvec=None,
method=None, loglik=None):
"""Init of covariance."""
diag = True if data.ndim == 1 else False
self.update(data=data, dim=len(data), names=names, bads=bads,
nfree=nfree, eig=eig, eigvec=eigvec, diag=diag,
projs=projs, kind=FIFF.FIFFV_MNE_NOISE_COV)
if method is not None:
self['method'] = method
if loglik is not None:
self['loglik'] = loglik
@property
def data(self):
"""Numpy array of Noise covariance matrix."""
return self['data']
@property
def ch_names(self):
"""Channel names."""
return self['names']
@property
def nfree(self):
"""Number of degrees of freedom."""
return self['nfree']
def save(self, fname):
"""Save covariance matrix in a FIF file.
Parameters
----------
fname : str
Output filename.
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
fid = start_file(fname)
try:
_write_cov(fid, self)
except Exception as inst:
fid.close()
os.remove(fname)
raise inst
end_file(fid)
def as_diag(self, copy=True):
"""Set covariance to be processed as being diagonal.
Parameters
----------
copy : bool
If True, return a modified copy of the covarince. If False,
the covariance is modified in place.
Returns
-------
cov : dict
The covariance.
Notes
-----
This function allows creation of inverse operators
equivalent to using the old "--diagnoise" mne option.
"""
if self['diag'] is True:
return self.copy() if copy is True else self
if copy is True:
cov = cp.deepcopy(self)
else:
cov = self
cov['diag'] = True
cov['data'] = np.diag(cov['data'])
cov['eig'] = None
cov['eigvec'] = None
return cov
def __repr__(self):
if self.data.ndim == 2:
s = 'size : %s x %s' % self.data.shape
else: # ndim == 1
s = 'diagonal : %s' % self.data.size
s += ", n_samples : %s" % self.nfree
s += ", data : %s" % self.data
return "<Covariance | %s>" % s
def __add__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
this_cov = cp.deepcopy(cov)
this_cov['data'] = (((this_cov['data'] * this_cov['nfree']) +
(self['data'] * self['nfree'])) /
(self['nfree'] + this_cov['nfree']))
this_cov['nfree'] += self['nfree']
this_cov['bads'] = list(set(this_cov['bads']).union(self['bads']))
return this_cov
def __iadd__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
self['data'][:] = (((self['data'] * self['nfree']) +
(cov['data'] * cov['nfree'])) /
(self['nfree'] + cov['nfree']))
self['nfree'] += cov['nfree']
self['bads'] = list(set(self['bads']).union(cov['bads']))
return self
@verbose
def plot(self, info, exclude=[], colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
"""Plot Covariance data.
Parameters
----------
info: dict
Measurement info.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
Plot also singular values of the noise covariance for each sensor
type. We show square roots ie. standard deviations.
show : bool
Call pyplot.show() as the end or not.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig_cov : instance of matplotlib.pyplot.Figure
The covariance plot.
fig_svd : instance of matplotlib.pyplot.Figure | None
The SVD spectra plot of the covariance.
"""
from .viz.misc import plot_cov
return plot_cov(self, info, exclude, colorbar, proj, show_svd, show)
###############################################################################
# IO
@verbose
def read_cov(fname, verbose=None):
    """Read a noise covariance from a FIF file.

    Parameters
    ----------
    fname : string
        The name of file containing the covariance matrix. It should end
        with -cov.fif or -cov.fif.gz.
    verbose : bool, str, int, or None (default None)
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    cov : Covariance
        The noise covariance matrix.

    See Also
    --------
    write_cov, compute_covariance, compute_raw_covariance
    """
    check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz'))
    fid, tree = fiff_open(fname)[:2]
    with fid:
        cov_kwargs = _read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV,
                               limited=True)
    return Covariance(**cov_kwargs)
###############################################################################
# Estimate from data
@verbose
def make_ad_hoc_cov(info, verbose=None):
    """Create an ad hoc noise covariance.

    Parameters
    ----------
    info : instance of mne.io.meas_info.Info
        Measurement info.
    verbose : bool, str, int, or None (default None)
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    cov : instance of Covariance
        The ad hoc diagonal noise covariance for the M/EEG data channels.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    info = pick_info(info, pick_types(info, meg=True, eeg=True, exclude=[]))
    info._check_consistency()

    # Standard deviations to be used per channel type.
    stds = {'grad': 5e-13, 'mag': 20e-15, 'eeg': 0.2e-6}
    logger.info('Using standard noise values '
                '(MEG grad : %6.1f fT/cm MEG mag : %6.1f fT EEG : %6.1f uV)'
                % (1e13 * stds['grad'], 1e15 * stds['mag'],
                   1e6 * stds['eeg']))

    # Fill the diagonal with the variance (std squared) of each type.
    data = np.zeros(len(info['ch_names']))
    for meg, eeg, std in (('grad', False, stds['grad']),
                          ('mag', False, stds['mag']),
                          (False, True, stds['eeg'])):
        data[pick_types(info, meg=meg, eeg=eeg)] = std * std
    return Covariance(data, info['ch_names'], info['bads'], info['projs'],
                      nfree=0)
def _check_n_samples(n_samples, n_chan):
    """Warn when too few samples are available for a reliable covariance."""
    if n_samples <= 0:
        raise ValueError('No samples found to compute the covariance matrix')
    # Heuristic minimum: ~5 samples per free parameter of the covariance.
    n_samples_min = 10 * (n_chan + 1) // 2
    if n_samples >= n_samples_min:
        return
    text = ('Too few samples (required : %d got : %d), covariance '
            'estimate may be unreliable' % (n_samples_min, n_samples))
    warnings.warn(text)
    logger.warning(text)
@deprecated('"compute_raw_data_covariance" is deprecated and will be '
            'removed in MNE-0.11. Please use compute_raw_covariance instead')
@verbose
def compute_raw_data_covariance(raw, tmin=None, tmax=None, tstep=0.2,
                                reject=None, flat=None, picks=None,
                                verbose=None):
    """Deprecated alias for :func:`compute_raw_covariance`.

    All arguments are forwarded unchanged; see compute_raw_covariance
    for their documentation.
    """
    return compute_raw_covariance(raw, tmin, tmax, tstep,
                                  reject, flat, picks, verbose)
@verbose
def compute_raw_covariance(raw, tmin=None, tmax=None, tstep=0.2,
                           reject=None, flat=None, picks=None,
                           verbose=None):
    """Estimate noise covariance matrix from a continuous segment of raw data.

    It is typically useful to estimate a noise covariance from empty room
    data or time intervals before starting the stimulation. Preloading the
    Raw data (``preload=True``) speeds up the computation.

    Parameters
    ----------
    raw : instance of Raw
        Raw data.
    tmin : float | None (default None)
        Beginning of time interval in seconds.
    tmax : float | None (default None)
        End of time interval in seconds.
    tstep : float (default 0.2)
        Length of data chunks for artefact rejection in seconds.
    reject : dict | None (default None)
        Rejection parameters based on peak-to-peak amplitude. Valid keys
        are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. If None, no rejection.
    flat : dict | None (default None)
        Rejection parameters based on flatness of signal. Valid keys are
        'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'; values are the minimum
        acceptable peak-to-peak amplitudes. If None, no rejection.
    picks : array-like of int | None (default None)
        Indices of channels to include (if None, all channels except bad
        channels are used).
    verbose : bool | str | int | None (default None)
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    cov : instance of Covariance
        Noise covariance matrix.

    See Also
    --------
    compute_covariance : Estimate noise covariance matrix from epochs.
    """
    sfreq = raw.info['sfreq']

    # Convert the time bounds to sample indices.
    start = 0 if tmin is None else int(floor(tmin * sfreq))
    if tmax is None:
        stop = int(raw.last_samp - raw.first_samp)
    else:
        stop = int(ceil(tmax * sfreq))
    step = int(ceil(tstep * raw.info['sfreq']))

    # don't exclude any bad channels, inverses expect all channels present
    if picks is None:
        picks = pick_types(raw.info, meg=True, eeg=True, eog=False,
                           ref_meg=False, exclude=[])

    data = 0
    n_samples = 0
    mu = 0

    info = pick_info(raw.info, picks)
    idx_by_type = channel_indices_by_type(info)

    # Accumulate the sum and the outer-product sum chunk by chunk so the
    # whole recording never needs to be in memory at once.
    for first in range(start, stop, step):
        last = min(first + step, stop)
        raw_segment, times = raw[picks, first:last]
        if _is_good(raw_segment, info['ch_names'], idx_by_type, reject,
                    flat, ignore_chs=info['bads']):
            mu += raw_segment.sum(axis=1)
            data += np.dot(raw_segment, raw_segment.T)
            n_samples += raw_segment.shape[1]
        else:
            logger.info("Artefact detected in [%d, %d]" % (first, last))

    _check_n_samples(n_samples, len(picks))
    # Turn the accumulated sums into an unbiased sample covariance.
    mu /= n_samples
    data -= n_samples * mu[:, None] * mu[None, :]
    data /= (n_samples - 1.0)
    logger.info("Number of samples used : %d" % n_samples)
    logger.info('[done]')

    ch_names = [raw.info['ch_names'][k] for k in picks]
    bads = [b for b in raw.info['bads'] if b in ch_names]
    projs = cp.deepcopy(raw.info['projs'])
    # XXX : do not compute eig and eigvec now (think it's better...)
    return Covariance(data, ch_names, bads, projs, nfree=n_samples)
@verbose
def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
projs=None, method='empirical', method_params=None,
cv=3, scalings=None, n_jobs=1, return_estimators=False,
verbose=None):
"""Estimate noise covariance matrix from epochs.
The noise covariance is typically estimated on pre-stim periods
when the stim onset is defined from events.
If the covariance is computed for multiple event types (events
with different IDs), the following two options can be used and combined.
A) either an Epochs object for each event type is created and
a list of Epochs is passed to this function.
B) an Epochs object is created for multiple events and passed
to this function.
Note: Baseline correction should be used when creating the Epochs.
Otherwise the computed covariance matrix will be inaccurate.
Note: For multiple event types, it is also possible to create a
single Epochs object with events obtained using
merge_events(). However, the resulting covariance matrix
will only be correct if keep_sample_mean is True.
Note: The covariance can be unstable if the number of samples is not
sufficient. In that case it is common to regularize a covariance
estimate. The ``method`` parameter of this function allows to
regularize the covariance in an automated way. It also allows
to select between different alternative estimation algorithms which
themselves achieve regularization. Details are described in [1].
Parameters
----------
epochs : instance of Epochs, or a list of Epochs objects
The epochs.
keep_sample_mean : bool (default true)
If False, the average response over epochs is computed for
each event type and subtracted during the covariance
computation. This is useful if the evoked response from a
previous stimulus extends into the baseline period of the next.
Note. This option is only implemented for method='empirical'.
tmin : float | None (default None)
Start time for baseline. If None start at first sample.
tmax : float | None (default None)
End time for baseline. If None end at last sample.
projs : list of Projection | None (default None)
List of projectors to use in covariance calculation, or None
to indicate that the projectors from the epochs should be
inherited. If None, then projectors from all epochs must match.
method : str | list | None (default 'empirical')
The method used for covariance estimation. If 'empirical' (default),
the sample covariance will be computed. A list can be passed to run a
set of the different methods.
If 'auto' or a list of methods, the best estimator will be determined
based on log-likelihood and cross-validation on unseen data as
described in ref. [1]. Valid methods are:
'empirical', the empirical or sample covariance,
'diagonal_fixed', a diagonal regularization as in mne.cov.regularize
(see MNE manual), 'ledoit_wolf', the Ledoit-Wolf estimator (see [2]),
'shrunk' like 'ledoit_wolf' with cross-validation for optimal alpha
(see scikit-learn documentation on covariance estimation), 'pca',
probabilistic PCA with low rank
(see [3]), and, 'factor_analysis', Factor Analysis with low rank
(see [4]). If 'auto', expands to::
['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
Note. 'ledoit_wolf' and 'pca' are similar to 'shrunk' and
'factor_analysis', respectively. They are not included to avoid
redundancy. In most cases 'shrunk' and 'factor_analysis' represent
more appropriate default choices.
.. versionadded:: 0.9.0
method_params : dict | None (default None)
Additional parameters to the estimation procedure. Only considered if
method is not None. Keys must correspond to the value(s) of `method`.
If None (default), expands to::
'empirical': {'store_precision': False, 'assume_centered': True},
'diagonal_fixed': {'grad': 0.01, 'mag': 0.01, 'eeg': 0.0,
'store_precision': False,
'assume_centered': True},
'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
'store_precision': False, 'assume_centered': True},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}
cv : int | sklearn cross_validation object (default 3)
The cross validation method. Defaults to 3, which will
internally trigger a default 3-fold shuffle split.
scalings : dict | None (default None)
Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
These defaults will scale magnetometers and gradiometers
at the same unit.
n_jobs : int (default 1)
Number of jobs to run in parallel.
return_estimators : bool (default False)
Whether to return all estimators or the best. Only considered if
method equals 'auto' or is a list of str. Defaults to False
verbose : bool | str | int | or None (default None)
If not None, override default verbose level (see mne.verbose).
Returns
-------
cov : instance of Covariance | list
The computed covariance. If method equals 'auto' or is a list of str
and return_estimators equals True, a list of covariance estimators is
returned (sorted by log-likelihood, from high to low, i.e. from best
to worst).
See Also
--------
compute_raw_covariance : Estimate noise covariance from raw data
References
----------
[1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG signals,
vol. 108, 328-342, NeuroImage.
[2] Ledoit, O., Wolf, M., (2004). A well-conditioned estimator for
large-dimensional covariance matrices. Journal of Multivariate
Analysis 88 (2), 365 - 411.
[3] Tipping, M. E., Bishop, C. M., (1999). Probabilistic principal
component analysis. Journal of the Royal Statistical Society: Series
B (Statistical Methodology) 61 (3), 611 - 622.
[4] Barber, D., (2012). Bayesian reasoning and machine learning.
Cambridge University Press., Algorithm 21.1
"""
accepted_methods = ('auto', 'empirical', 'diagonal_fixed', 'ledoit_wolf',
'shrunk', 'pca', 'factor_analysis',)
msg = ('Invalid method ({method}). Accepted values (individually or '
'in a list) are "%s"' % '" or "'.join(accepted_methods + ('None',)))
# scale to natural unit for best stability with MEG/EEG
if isinstance(scalings, dict):
for k, v in scalings.items():
if k not in ('mag', 'grad', 'eeg'):
raise ValueError('The keys in `scalings` must be "mag" or'
'"grad" or "eeg". You gave me: %s' % k)
scalings = _handle_default('scalings', scalings)
_method_params = {
'empirical': {'store_precision': False, 'assume_centered': True},
'diagonal_fixed': {'grad': 0.01, 'mag': 0.01, 'eeg': 0.0,
'store_precision': False, 'assume_centered': True},
'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
'store_precision': False, 'assume_centered': True},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}
}
if isinstance(method_params, dict):
for key, values in method_params.items():
if key not in _method_params:
raise ValueError('key (%s) must be "%s"' %
(key, '" or "'.join(_method_params)))
_method_params[key].update(method_params[key])
# for multi condition support epochs is required to refer to a list of
# epochs objects
def _unpack_epochs(epochs):
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
if not isinstance(epochs, list):
epochs = _unpack_epochs(epochs)
else:
epochs = sum([_unpack_epochs(epoch) for epoch in epochs], [])
# check for baseline correction
for epochs_t in epochs:
if epochs_t.baseline is None and epochs_t.info['highpass'] < 0.5:
warnings.warn('Epochs are not baseline corrected, covariance '
'matrix may be inaccurate')
for epoch in epochs:
epoch.info._check_consistency()
bads = epochs[0].info['bads']
if projs is None:
projs = cp.deepcopy(epochs[0].info['projs'])
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.proj != epochs[0].proj:
raise ValueError('Epochs must agree on the use of projections')
for proj_a, proj_b in zip(epochs_t.info['projs'], projs):
if not _proj_equal(proj_a, proj_b):
raise ValueError('Epochs must have same projectors')
else:
projs = cp.deepcopy(projs)
ch_names = epochs[0].ch_names
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.info['bads'] != bads:
raise ValueError('Epochs must have same bad channels')
if epochs_t.ch_names != ch_names:
raise ValueError('Epochs must have same channel names')
picks_list = _picks_by_type(epochs[0].info)
picks_meeg = np.concatenate([b for _, b in picks_list])
picks_meeg = np.sort(picks_meeg)
ch_names = [epochs[0].ch_names[k] for k in picks_meeg]
info = epochs[0].info # we will overwrite 'epochs'
if method == 'auto':
method = ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
if not isinstance(method, (list, tuple)):
method = [method]
ok_sklearn = check_sklearn_version('0.15') is True
if not ok_sklearn and (len(method) != 1 or method[0] != 'empirical'):
raise ValueError('scikit-learn is not installed, `method` must be '
'`empirical`')
if keep_sample_mean is False:
if len(method) != 1 or 'empirical' not in method:
raise ValueError('`keep_sample_mean=False` is only supported'
'with `method="empirical"`')
for p, v in _method_params.items():
if v.get('assume_centered', None) is False:
raise ValueError('`assume_centered` must be True'
' if `keep_sample_mean` is False')
# prepare mean covs
n_epoch_types = len(epochs)
data_mean = list(np.zeros(n_epoch_types))
n_samples = np.zeros(n_epoch_types, dtype=np.int)
n_epochs = np.zeros(n_epoch_types, dtype=np.int)
for ii, epochs_t in enumerate(epochs):
tslice = _get_tslice(epochs_t, tmin, tmax)
for e in epochs_t:
e = e[picks_meeg, tslice]
if not keep_sample_mean:
data_mean[ii] += e
n_samples[ii] += e.shape[1]
n_epochs[ii] += 1
n_samples_epoch = n_samples // n_epochs
norm_const = np.sum(n_samples_epoch * (n_epochs - 1))
data_mean = [1.0 / n_epoch * np.dot(mean, mean.T) for n_epoch, mean
in zip(n_epochs, data_mean)]
if not all(k in accepted_methods for k in method):
raise ValueError(msg.format(method=method))
info = pick_info(info, picks_meeg)
tslice = _get_tslice(epochs[0], tmin, tmax)
epochs = [ee.get_data()[:, picks_meeg, tslice] for ee in epochs]
picks_meeg = np.arange(len(picks_meeg))
picks_list = _picks_by_type(info)
if len(epochs) > 1:
epochs = np.concatenate(epochs, 0)
else:
epochs = epochs[0]
epochs = np.hstack(epochs)
n_samples_tot = epochs.shape[-1]
_check_n_samples(n_samples_tot, len(picks_meeg))
epochs = epochs.T # sklearn | C-order
if ok_sklearn:
cov_data = _compute_covariance_auto(epochs, method=method,
method_params=_method_params,
info=info,
verbose=verbose,
cv=cv,
n_jobs=n_jobs,
# XXX expose later
stop_early=True, # if needed.
picks_list=picks_list,
scalings=scalings)
else:
if _method_params['empirical']['assume_centered'] is True:
cov = epochs.T.dot(epochs) / n_samples_tot
else:
cov = np.cov(epochs.T, bias=1)
cov_data = {'empirical': {'data': cov}}
if keep_sample_mean is False:
cov = cov_data['empirical']['data']
# undo scaling
cov *= n_samples_tot
# ... apply pre-computed class-wise normalization
for mean_cov in data_mean:
cov -= mean_cov
cov /= norm_const
covs = list()
for this_method, data in cov_data.items():
cov = Covariance(data.pop('data'), ch_names, info['bads'], projs,
nfree=n_samples_tot)
logger.info('Number of samples used : %d' % n_samples_tot)
logger.info('[done]')
# add extra info
cov.update(method=this_method, **data)
covs.append(cov)
if ok_sklearn:
msg = ['log-likelihood on unseen data (descending order):']
logliks = [(c['method'], c['loglik']) for c in covs]
logliks.sort(reverse=True, key=lambda c: c[1])
for k, v in logliks:
msg.append('%s: %0.3f' % (k, v))
logger.info('\n '.join(msg))
if ok_sklearn and not return_estimators:
keys, scores = zip(*[(c['method'], c['loglik']) for c in covs])
out = covs[np.argmax(scores)]
logger.info('selecting best estimator: {0}'.format(out['method']))
elif ok_sklearn:
out = covs
out.sort(key=lambda c: c['loglik'], reverse=True)
else:
out = covs[0]
return out
def _compute_covariance_auto(data, method, info, method_params, cv,
                             scalings, n_jobs, stop_early, picks_list,
                             verbose):
    """Fit every requested covariance estimator and score it.

    Each estimator named in ``method`` is fitted on ``data`` (shape
    (n_samples, n_channels), already transposed to sklearn convention),
    then ranked by cross-validated Gaussian log-likelihood. Returns a dict
    mapping method name to {'loglik', 'data' (covariance, scaling undone),
    'estimator'} plus estimator-specific runtime info, if any.
    """
    from sklearn.grid_search import GridSearchCV
    from sklearn.covariance import (LedoitWolf, ShrunkCovariance,
                                    EmpiricalCovariance)

    # rescale to improve numerical stability
    _apply_scaling_array(data.T, picks_list=picks_list, scalings=scalings)
    estimator_cov_info = list()
    msg = 'Estimating covariance using %s'
    _RegCovariance, _ShrunkCovariance = _get_covariance_classes()
    for this_method in method:
        # Fresh copy per estimator: some estimators modify their input.
        data_ = data.copy()
        name = this_method.__name__ if callable(this_method) else this_method
        logger.info(msg % name.upper())
        if this_method == 'empirical':
            est = EmpiricalCovariance(**method_params[this_method])
            est.fit(data_)
            _info = None
            estimator_cov_info.append((est, est.covariance_, _info))
        elif this_method == 'diagonal_fixed':
            est = _RegCovariance(info=info, **method_params[this_method])
            est.fit(data_)
            _info = None
            estimator_cov_info.append((est, est.covariance_, _info))
        elif this_method == 'ledoit_wolf':
            # One Ledoit-Wolf shrinkage per channel type, applied jointly
            # through the block-wise _ShrunkCovariance wrapper.
            shrinkages = []
            lw = LedoitWolf(**method_params[this_method])
            for ch_type, picks in picks_list:
                lw.fit(data_[:, picks])
                shrinkages.append((
                    ch_type,
                    lw.shrinkage_,
                    picks
                ))
            sc = _ShrunkCovariance(shrinkage=shrinkages,
                                   **method_params[this_method])
            sc.fit(data_)
            _info = None
            estimator_cov_info.append((sc, sc.covariance_, _info))
        elif this_method == 'shrunk':
            # Grid-search the best shrinkage per channel type.
            shrinkage = method_params[this_method].pop('shrinkage')
            tuned_parameters = [{'shrinkage': shrinkage}]
            shrinkages = []
            gs = GridSearchCV(ShrunkCovariance(**method_params[this_method]),
                              tuned_parameters, cv=cv)
            for ch_type, picks in picks_list:
                gs.fit(data_[:, picks])
                shrinkages.append((
                    ch_type,
                    gs.best_estimator_.shrinkage,
                    picks
                ))
            # NOTE(review): zip() over a single iterable yields 1-tuples, so
            # c[0] is the original element -- this line is a no-op; possibly
            # a field extraction was intended. TODO confirm against history.
            shrinkages = [c[0] for c in zip(shrinkages)]
            sc = _ShrunkCovariance(shrinkage=shrinkages,
                                   **method_params[this_method])
            sc.fit(data_)
            _info = None
            estimator_cov_info.append((sc, sc.covariance_, _info))
        elif this_method == 'pca':
            # Low-rank model: best rank chosen by cross-validation.
            mp = method_params[this_method]
            pca, _info = _auto_low_rank_model(data_, this_method,
                                              n_jobs=n_jobs,
                                              method_params=mp, cv=cv,
                                              stop_early=stop_early)
            pca.fit(data_)
            estimator_cov_info.append((pca, pca.get_covariance(), _info))
        elif this_method == 'factor_analysis':
            mp = method_params[this_method]
            fa, _info = _auto_low_rank_model(data_, this_method, n_jobs=n_jobs,
                                             method_params=mp, cv=cv,
                                             stop_early=stop_early)
            fa.fit(data_)
            estimator_cov_info.append((fa, fa.get_covariance(), _info))
        else:
            raise ValueError('Oh no! Your estimator does not have'
                             ' a .fit method')
        logger.info('Done.')

    logger.info('Using cross-validation to select the best estimator.')
    estimators, _, _ = zip(*estimator_cov_info)
    # Score every fitted estimator on held-out data.
    logliks = np.array([_cross_val(data, e, cv, n_jobs) for e in estimators])

    # undo scaling
    for c in estimator_cov_info:
        _undo_scaling_cov(c[1], picks_list, scalings)

    out = dict()
    estimators, covs, runtime_infos = zip(*estimator_cov_info)
    cov_methods = [c.__name__ if callable(c) else c for c in method]
    runtime_infos, covs = list(runtime_infos), list(covs)
    my_zip = zip(cov_methods, runtime_infos, logliks, covs, estimators)
    for this_method, runtime_info, loglik, data, est in my_zip:
        out[this_method] = {'loglik': loglik, 'data': data, 'estimator': est}
        if runtime_info is not None:
            out[this_method].update(runtime_info)

    return out
def _logdet(A):
"""Compute the log det of a symmetric matrix."""
vals = linalg.eigh(A)[0]
vals = np.abs(vals) # avoid negative values (numerical errors)
return np.sum(np.log(vals))
def _gaussian_loglik_scorer(est, X, y=None):
    """Compute the mean Gaussian log likelihood of X under the model in est.

    Parameters
    ----------
    est : covariance estimator instance
        Fitted estimator exposing ``get_precision()``.
    X : ndarray, shape (n_samples, n_features)
        Test data, assumed centered consistently with the training data.
    y : None
        Unused; present for sklearn scorer API compatibility.

    Returns
    -------
    out : float
        Log-likelihood averaged over samples.
    """
    # compute empirical covariance of the test set
    precision = est.get_precision()
    n_features = X.shape[1]
    # Quadratic term -0.5 * x^T P x, one value per sample.
    # (BUG FIX: removed a dead `log_like = np.zeros(n_samples)` assignment
    # that was immediately overwritten.)
    log_like = -.5 * (X * (np.dot(X, precision))).sum(axis=1)
    # Gaussian normalization term: -0.5 * (d*log(2*pi) - log|P|).
    log_like -= .5 * (n_features * log(2. * np.pi) - _logdet(precision))
    out = np.mean(log_like)
    return out
def _cross_val(data, est, cv, n_jobs):
    """Return the mean cross-validated Gaussian log-likelihood of ``est``."""
    from sklearn.cross_validation import cross_val_score
    fold_scores = cross_val_score(est, data, cv=cv, n_jobs=n_jobs,
                                  scoring=_gaussian_loglik_scorer)
    return np.mean(fold_scores)
def _auto_low_rank_model(data, mode, n_jobs, method_params, cv,
                         stop_early=True, verbose=None):
    """Fit a latent-variable model (PCA or FA) choosing the rank by CV.

    Iterates over candidate ``n_components`` values (from
    ``method_params['iter_n_components']``, or 5..n_features step 5),
    scoring each by cross-validated log-likelihood. Search can stop early
    when the score decreases three times in a row. Returns the estimator
    (with the best rank set) and a runtime-info dict.
    """
    method_params = cp.deepcopy(method_params)
    iter_n_components = method_params.pop('iter_n_components')
    if iter_n_components is None:
        iter_n_components = np.arange(5, data.shape[1], 5)
    from sklearn.decomposition import PCA, FactorAnalysis
    if mode == 'factor_analysis':
        est = FactorAnalysis
    elif mode == 'pca':
        est = PCA
    else:
        raise ValueError('Come on, this is not a low rank estimator: %s' %
                         mode)
    est = est(**method_params)
    est.n_components = 1
    scores = np.empty_like(iter_n_components, dtype=np.float64)
    scores.fill(np.nan)

    # make sure we don't empty the thing if it's a generator
    max_n = max(list(cp.deepcopy(iter_n_components)))
    if max_n > data.shape[1]:
        warnings.warn('You are trying to estimate %i components on matrix '
                      'with %i features.' % (max_n, data.shape[1]))

    for ii, n in enumerate(iter_n_components):
        est.n_components = n
        try:  # this may fail depending on rank and split
            score = _cross_val(data=data, est=est, cv=cv, n_jobs=n_jobs)
        except ValueError:
            score = np.inf
        # A log-likelihood should be negative; inf or positive values
        # signal numerical failure, so the search stops here.
        if np.isinf(score) or score > 0:
            logger.info('... infinite values encountered. stopping estimation')
            break
        logger.info('... rank: %i - loglik: %0.3f' % (n, score))
        if score != -np.inf:
            scores[ii] = score

        if (ii >= 3 and np.all(np.diff(scores[ii - 3:ii]) < 0.) and
                stop_early is True):
            # early stop search when loglik has been going down 3 times
            logger.info('early stopping parameter search.')
            break

    # happens if rank is too low right from the beginning
    if np.isnan(scores).all():
        raise RuntimeError('Oh no! Could not estimate covariance because all '
                           'scores were NaN. Please contact the MNE-Python '
                           'developers.')

    # nanargmax: ignore candidates that were never scored.
    i_score = np.nanargmax(scores)
    best = est.n_components = iter_n_components[i_score]
    logger.info('... best model at rank = %i' % best)
    runtime_info = {'ranks': np.array(iter_n_components),
                    'scores': scores,
                    'best': best,
                    'cv': cv}
    return est, runtime_info
def _get_covariance_classes():
    """Prepare special cov estimators.

    Returns two sklearn-compatible estimator classes:

    - ``_RegCovariance``: empirical covariance followed by MNE's
      channel-type-aware ``regularize``.
    - ``_ShrunkCovariance``: empirical covariance with (optionally
      per-channel-type) shrinkage and zeroed MEG/EEG cross-terms.

    Defined inside a function so the sklearn import remains optional.
    """
    from sklearn.covariance import (EmpiricalCovariance, shrunk_covariance,
                                    ShrunkCovariance)

    class _RegCovariance(EmpiricalCovariance):
        """Aux class."""

        def __init__(self, info, grad=0.01, mag=0.01, eeg=0.0,
                     store_precision=False, assume_centered=False):
            self.info = info
            self.grad = grad
            self.mag = mag
            self.eeg = eeg
            self.store_precision = store_precision
            self.assume_centered = assume_centered

        def fit(self, X):
            """Fit the empirical covariance, then regularize it."""
            EmpiricalCovariance.fit(self, X)
            # Symmetrize to protect against numerical asymmetry.
            self.covariance_ = 0.5 * (self.covariance_ + self.covariance_.T)
            cov_ = Covariance(
                data=self.covariance_, names=self.info['ch_names'],
                bads=self.info['bads'], projs=self.info['projs'],
                nfree=len(self.covariance_))
            cov_ = regularize(cov_, self.info, grad=self.grad, mag=self.mag,
                              eeg=self.eeg, proj=False,
                              exclude='bads')  # ~proj == important!!
            self.covariance_ = cov_.data
            return self

    class _ShrunkCovariance(ShrunkCovariance):
        """Aux class."""

        def __init__(self, store_precision, assume_centered, shrinkage=0.1):
            self.store_precision = store_precision
            self.assume_centered = assume_centered
            self.shrinkage = shrinkage

        def fit(self, X):
            """Fit, applying shrinkage block-wise per channel type."""
            # Deliberately call EmpiricalCovariance.fit (not the parent
            # ShrunkCovariance.fit) so shrinkage can be applied manually
            # per channel-type block below.
            EmpiricalCovariance.fit(self, X)
            cov = self.covariance_

            # Normalize scalar shrinkage to the (type, value, picks) form.
            if not isinstance(self.shrinkage, (list, tuple)):
                shrinkage = [('all', self.shrinkage, np.arange(len(cov)))]
            else:
                shrinkage = self.shrinkage

            # Mark EEG/other cross-covariance blocks to be zeroed out.
            zero_cross_cov = np.zeros_like(cov, dtype=bool)
            for a, b in itt.combinations(shrinkage, 2):
                picks_i, picks_j = a[2], b[2]
                ch_ = a[0], b[0]
                if 'eeg' in ch_:
                    zero_cross_cov[np.ix_(picks_i, picks_j)] = True
                    zero_cross_cov[np.ix_(picks_j, picks_i)] = True

            self.zero_cross_cov_ = zero_cross_cov

            # Apply shrinkage to blocks
            for ch_type, c, picks in shrinkage:
                sub_cov = cov[np.ix_(picks, picks)]
                cov[np.ix_(picks, picks)] = shrunk_covariance(sub_cov,
                                                              shrinkage=c)

            # Apply shrinkage to cross-cov
            for a, b in itt.combinations(shrinkage, 2):
                shrinkage_i, shrinkage_j = a[1], b[1]
                picks_i, picks_j = a[2], b[2]
                # Geometric mean of the two blocks' (1 - shrinkage) factors.
                c_ij = np.sqrt((1. - shrinkage_i) * (1. - shrinkage_j))
                cov[np.ix_(picks_i, picks_j)] *= c_ij
                cov[np.ix_(picks_j, picks_i)] *= c_ij

            # Set to zero the necessary cross-cov
            if np.any(zero_cross_cov):
                cov[zero_cross_cov] = 0.0

            self.covariance_ = cov
            return self

        def score(self, X_test, y=None):
            """Compute the log-likelihood of a Gaussian data set with
            `self.covariance_` as an estimator of its covariance matrix.

            Parameters
            ----------
            X_test : array-like, shape = [n_samples, n_features]
                Test data of which we compute the likelihood, where n_samples
                is the number of samples and n_features is the number of
                features. X_test is assumed to be drawn from the same
                distribution as the data used in fit (including centering).
            y : not used, present for API consistence purpose.

            Returns
            -------
            res : float
                The likelihood of the data set with `self.covariance_` as an
                estimator of its covariance matrix.
            """
            from sklearn.covariance import empirical_covariance, log_likelihood
            # compute empirical covariance of the test set
            test_cov = empirical_covariance(X_test - self.location_,
                                            assume_centered=True)
            # Zero the same cross-terms as in fit so train/test match.
            if np.any(self.zero_cross_cov_):
                test_cov[self.zero_cross_cov_] = 0.
            res = log_likelihood(test_cov, self.get_precision())
            return res

    return _RegCovariance, _ShrunkCovariance
###############################################################################
# Writing
def write_cov(fname, cov):
    """Write a noise covariance matrix.

    Thin convenience wrapper that delegates to ``cov.save``.

    Parameters
    ----------
    fname : string
        The name of the file. It should end with -cov.fif or -cov.fif.gz.
    cov : Covariance
        The noise covariance matrix.

    See Also
    --------
    read_cov
    """
    cov.save(fname)
###############################################################################
# Prepare for inverse modeling
def _unpack_epochs(epochs):
"""Aux Function."""
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
def _get_ch_whitener(A, pca, ch_type, rank):
    """Get whitener params for a set of channels.

    Eigendecomposes ``A`` (ascending eigenvalues) and zeroes all but the
    largest ``rank`` eigenvalues. Returns (eig, eigvec) with eigenvectors
    as rows.
    """
    # whitening operator
    eig, eigvec = linalg.eigh(A, overwrite_a=True)
    eigvec = eigvec.T
    # eigh returns ascending eigenvalues: zero the smallest len(A)-rank.
    eig[:-rank] = 0.0
    logger.info('Setting small %s eigenvalues to zero.' % ch_type)
    if not pca:  # No PCA case.
        logger.info('Not doing PCA for %s.' % ch_type)
    else:
        logger.info('Doing PCA for %s.' % ch_type)
        # This line will reduce the actual number of variables in data
        # and leadfield to the true rank.
        # NOTE(review): eigvec[:-rank] keeps the rows paired with the
        # *zeroed* (smallest) eigenvalues; eigvec[-rank:] would keep the
        # retained subspace. Callers in this file pass pca=False, so the
        # branch appears unexercised here -- TODO confirm intent upstream.
        eigvec = eigvec[:-rank].copy()
    return eig, eigvec
@verbose
def prepare_noise_cov(noise_cov, info, ch_names, rank=None,
                      scalings=None, verbose=None):
    """Prepare noise covariance matrix.

    Parameters
    ----------
    noise_cov : Covariance
        The noise covariance to process.
    info : dict
        The measurement info (used to get channel types and bad channels).
    ch_names : list
        The channel names to be considered.
    rank : None | int | dict
        Specified rank of the noise covariance matrix. If None, the rank is
        detected automatically. If int, the rank is specified for the MEG
        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
        to specify the rank for each modality.
    scalings : dict | None
        Data will be rescaled before rank estimation to improve accuracy.
        If dict, it will override the following dict (default if None):
        dict(mag=1e12, grad=1e11, eeg=1e5)
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    noise_cov : Covariance
        A copy of the input, restricted to ``ch_names``, with SSP applied
        and the per-modality eigendecomposition ('eig', 'eigvec') filled in.
    """
    # Restrict the covariance to the requested channels.
    C_ch_idx = [noise_cov.ch_names.index(c) for c in ch_names]
    if noise_cov['diag'] is False:
        C = noise_cov.data[np.ix_(C_ch_idx, C_ch_idx)]
    else:
        C = np.diag(noise_cov.data[C_ch_idx])

    scalings = _handle_default('scalings_cov_rank', scalings)

    # Create the projection operator and apply it to the covariance.
    proj, ncomp, _ = make_projector(info['projs'], ch_names)
    if ncomp > 0:
        logger.info(' Created an SSP operator (subspace dimension = %d)'
                    % ncomp)
        C = np.dot(proj, np.dot(C, proj.T))

    pick_meg = pick_types(info, meg=True, eeg=False, ref_meg=False,
                          exclude='bads')
    pick_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
                          exclude='bads')
    meg_names = [info['chs'][k]['ch_name'] for k in pick_meg]
    C_meg_idx = [k for k in range(len(C)) if ch_names[k] in meg_names]
    eeg_names = [info['chs'][k]['ch_name'] for k in pick_eeg]
    C_eeg_idx = [k for k in range(len(C)) if ch_names[k] in eeg_names]

    has_meg = len(C_meg_idx) > 0
    has_eeg = len(C_eeg_idx) > 0

    # Get the specified noise covariance rank, per modality.
    if rank is not None:
        if isinstance(rank, dict):
            rank_meg = rank.get('meg', None)
            rank_eeg = rank.get('eeg', None)
        else:
            rank_meg = int(rank)
            rank_eeg = None
    else:
        rank_meg, rank_eeg = None, None

    if has_meg:
        C_meg = C[np.ix_(C_meg_idx, C_meg_idx)]
        this_info = pick_info(info, pick_meg)
        if rank_meg is None:
            # If some good MEG channels were excluded from the covariance,
            # estimate the rank from the restricted selection.
            if len(C_meg_idx) < len(pick_meg):
                this_info = pick_info(info, C_meg_idx)
            rank_meg = _estimate_rank_meeg_cov(C_meg, this_info, scalings)
        C_meg_eig, C_meg_eigvec = _get_ch_whitener(C_meg, False, 'MEG',
                                                   rank_meg)
    if has_eeg:
        C_eeg = C[np.ix_(C_eeg_idx, C_eeg_idx)]
        this_info = pick_info(info, pick_eeg)
        if rank_eeg is None:
            # BUG FIX: this condition previously compared the MEG counts
            # (len(C_meg_idx) < len(pick_meg)), a copy-paste from the MEG
            # branch above; it must compare the EEG selections to match
            # the pick_info(info, C_eeg_idx) call below.
            if len(C_eeg_idx) < len(pick_eeg):
                this_info = pick_info(info, C_eeg_idx)
            rank_eeg = _estimate_rank_meeg_cov(C_eeg, this_info, scalings)
        C_eeg_eig, C_eeg_eigvec = _get_ch_whitener(C_eeg, False, 'EEG',
                                                   rank_eeg)
        if not _has_eeg_average_ref_proj(info['projs']):
            warnings.warn('No average EEG reference present in info["projs"], '
                          'covariance may be adversely affected. Consider '
                          'recomputing covariance using a raw file with an '
                          'average eeg reference projector added.')

    # Assemble the block-diagonal eigendecomposition over all channels.
    n_chan = len(ch_names)
    eigvec = np.zeros((n_chan, n_chan), dtype=np.float)
    eig = np.zeros(n_chan, dtype=np.float)

    if has_meg:
        eigvec[np.ix_(C_meg_idx, C_meg_idx)] = C_meg_eigvec
        eig[C_meg_idx] = C_meg_eig
    if has_eeg:
        eigvec[np.ix_(C_eeg_idx, C_eeg_idx)] = C_eeg_eigvec
        eig[C_eeg_idx] = C_eeg_eig

    # Every selected channel must be either MEG or EEG.
    assert(len(C_meg_idx) + len(C_eeg_idx) == n_chan)

    noise_cov = cp.deepcopy(noise_cov)
    noise_cov.update(data=C, eig=eig, eigvec=eigvec, dim=len(ch_names),
                     diag=False, names=ch_names)

    return noise_cov
def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads',
               proj=True, verbose=None):
    """Regularize noise covariance matrix.

    This method works by adding a constant to the diagonal for each
    channel type separately. Special care is taken to keep the
    rank of the data constant.

    **Note:** This function is kept for reasons of backward-compatibility.
    Please consider explicitly using the ``method`` parameter in
    `compute_covariance` to directly combine estimation with regularization
    in a data-driven fashion see the
    `faq <http://martinos.org/mne/dev/faq.html#how-should-i-regularize-the-covariance-matrix>`_
    for more information.

    Parameters
    ----------
    cov : Covariance
        The noise covariance matrix.
    info : dict
        The measurement info (used to get channel types and bad channels).
    mag : float (default 0.1)
        Regularization factor for MEG magnetometers.
    grad : float (default 0.1)
        Regularization factor for MEG gradiometers.
    eeg : float (default 0.1)
        Regularization factor for EEG.
    exclude : list | 'bads' (default 'bads')
        List of channels to mark as bad. If 'bads', bads channels
        are extracted from both info['bads'] and cov['bads'].
    proj : bool (default true)
        Apply or not projections to keep rank of data.
    verbose : bool | str | int | None (default None)
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    reg_cov : Covariance
        The regularized covariance matrix.

    See Also
    --------
    compute_covariance
    """  # noqa
    cov = cp.deepcopy(cov)
    info._check_consistency()

    if exclude is None:
        raise ValueError('exclude must be a list of strings or "bads"')

    if exclude == 'bads':
        exclude = info['bads'] + cov['bads']

    # Partition the good channels by type.
    sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
                         exclude=exclude)
    sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
                         exclude=exclude)
    sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
                          exclude=exclude)
    info_ch_names = info['ch_names']
    ch_names_eeg = [info_ch_names[i] for i in sel_eeg]
    ch_names_mag = [info_ch_names[i] for i in sel_mag]
    ch_names_grad = [info_ch_names[i] for i in sel_grad]

    # This actually removes bad channels from the cov, which is not backward
    # compatible, so let's leave all channels in
    cov_good = pick_channels_cov(cov, include=info_ch_names, exclude=exclude)
    ch_names = cov_good.ch_names

    # Map each covariance row to its channel-type index list.
    idx_eeg, idx_mag, idx_grad = [], [], []
    for i, ch in enumerate(ch_names):
        if ch in ch_names_eeg:
            idx_eeg.append(i)
        elif ch in ch_names_mag:
            idx_mag.append(i)
        elif ch in ch_names_grad:
            idx_grad.append(i)
        else:
            raise Exception('channel is unknown type')

    C = cov_good['data']

    assert len(C) == (len(idx_eeg) + len(idx_mag) + len(idx_grad))

    if proj:
        projs = info['projs'] + cov_good['projs']
        projs = activate_proj(projs)

    for desc, idx, reg in [('EEG', idx_eeg, eeg), ('MAG', idx_mag, mag),
                           ('GRAD', idx_grad, grad)]:
        if len(idx) == 0 or reg == 0.0:
            logger.info(" %s regularization : None" % desc)
            continue

        logger.info(" %s regularization : %s" % (desc, reg))

        this_C = C[np.ix_(idx, idx)]
        if proj:
            this_ch_names = [ch_names[k] for k in idx]
            P, ncomp, _ = make_projector(projs, this_ch_names)
            # Basis of the subspace orthogonal to the projected-out
            # components (rows/cols beyond -ncomp are the projection).
            U = linalg.svd(P)[0][:, :-ncomp]
            if ncomp > 0:
                logger.info(' Created an SSP operator for %s '
                            '(dimension = %d)' % (desc, ncomp))
                # Regularize inside the projection subspace only, so the
                # rank of the data is preserved.
                this_C = np.dot(U.T, np.dot(this_C, U))

        sigma = np.mean(np.diag(this_C))
        this_C.flat[::len(this_C) + 1] += reg * sigma  # modify diag inplace
        if proj and ncomp > 0:
            # Project back to the full channel space.
            this_C = np.dot(U, np.dot(this_C, U.T))

        C[np.ix_(idx, idx)] = this_C

    # Put data back in correct locations
    idx = pick_channels(cov.ch_names, info_ch_names, exclude=exclude)
    cov['data'][np.ix_(idx, idx)] = C

    return cov
def _regularized_covariance(data, reg=None):
"""Compute a regularized covariance from data using sklearn.
Parameters
----------
data : ndarray, shape (n_channels, n_times)
Data for covariance estimation.
reg : float | str | None (default None)
If not None, allow regularization for covariance estimation
if float, shrinkage covariance is used (0 <= shrinkage <= 1).
if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')
or Oracle Approximating Shrinkage ('oas').
Returns
-------
cov : ndarray, shape (n_channels, n_channels)
The covariance matrix.
"""
if reg is None:
# compute empirical covariance
cov = np.cov(data)
else:
no_sklearn_err = ('the scikit-learn package is missing and '
'required for covariance regularization.')
# use sklearn covariance estimators
if isinstance(reg, float):
if (reg < 0) or (reg > 1):
raise ValueError('0 <= shrinkage <= 1 for '
'covariance regularization.')
try:
import sklearn
sklearn_version = LooseVersion(sklearn.__version__)
from sklearn.covariance import ShrunkCovariance
except ImportError:
raise Exception(no_sklearn_err)
if sklearn_version < '0.12':
skl_cov = ShrunkCovariance(shrinkage=reg,
store_precision=False)
else:
# init sklearn.covariance.ShrunkCovariance estimator
skl_cov = ShrunkCovariance(shrinkage=reg,
store_precision=False,
assume_centered=True)
elif isinstance(reg, six.string_types):
if reg == 'ledoit_wolf':
try:
from sklearn.covariance import LedoitWolf
except ImportError:
raise Exception(no_sklearn_err)
# init sklearn.covariance.LedoitWolf estimator
skl_cov = LedoitWolf(store_precision=False,
assume_centered=True)
elif reg == 'oas':
try:
from sklearn.covariance import OAS
except ImportError:
raise Exception(no_sklearn_err)
# init sklearn.covariance.OAS estimator
skl_cov = OAS(store_precision=False,
assume_centered=True)
else:
raise ValueError("regularization parameter should be "
"'lwf' or 'oas'")
else:
raise ValueError("regularization parameter should be "
"of type str or int (got %s)." % type(reg))
# compute regularized covariance using sklearn
cov = skl_cov.fit(data.T).covariance_
return cov
def compute_whitener(noise_cov, info, picks=None, rank=None,
                     scalings=None, verbose=None):
    """Compute whitening matrix.

    Parameters
    ----------
    noise_cov : Covariance
        The noise covariance.
    info : dict
        The measurement info.
    picks : array-like of int | None
        The channels indices to include. If None the data
        channels in info, except bad channels, are used.
    rank : None | int | dict
        Specified rank of the noise covariance matrix. If None, the rank is
        detected automatically. If int, the rank is specified for the MEG
        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
        to specify the rank for each modality.
    scalings : dict | None
        The rescaling method to be applied. See documentation of
        ``prepare_noise_cov`` for details.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    W : 2d array
        The whitening matrix.
    ch_names : list
        The channel names.
    """
    if picks is None:
        picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
                           exclude='bads')

    ch_names = [info['chs'][k]['ch_name'] for k in picks]

    # Work on a copy; prepare_noise_cov fills in the eigendecomposition.
    noise_cov = cp.deepcopy(noise_cov)
    noise_cov = prepare_noise_cov(noise_cov, info, ch_names,
                                  rank=rank, scalings=scalings)

    n_chan = len(ch_names)
    W = np.zeros((n_chan, n_chan), dtype=np.float)
    #
    #   Omit the zeroes due to projection
    #
    eig = noise_cov['eig']
    # Invert only strictly positive eigenvalues; zeros correspond to the
    # rank-deficient (projected-out) subspace.
    nzero = (eig > 0)
    W[nzero, nzero] = 1.0 / np.sqrt(eig[nzero])  # fancy-index the diagonal
    #
    #   Rows of eigvec are the eigenvectors
    #
    W = np.dot(W, noise_cov['eigvec'])
    W = np.dot(noise_cov['eigvec'].T, W)
    return W, ch_names
@verbose
def whiten_evoked(evoked, noise_cov, picks=None, diag=False, rank=None,
                  scalings=None, verbose=None):
    """Whiten evoked data using given noise covariance.

    Parameters
    ----------
    evoked : instance of Evoked
        The evoked data
    noise_cov : instance of Covariance
        The noise covariance
    picks : array-like of int | None
        The channel indices to whiten. Can be None to whiten MEG and EEG
        data.
    diag : bool (default False)
        If True, whiten using only the diagonal of the covariance.
    rank : None | int | dict (default None)
        Specified rank of the noise covariance matrix. If None, the rank is
        detected automatically. If int, the rank is specified for the MEG
        channels. A dictionary with entries 'eeg' and/or 'meg' can be used
        to specify the rank for each modality.
    scalings : dict | None (default None)
        To achieve reliable rank estimation on multiple sensors,
        sensors have to be rescaled. This parameter controls the
        rescaling. If dict, it will override the
        following default dict (default if None):

            dict(mag=1e12, grad=1e11, eeg=1e5)

    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    evoked_white : instance of Evoked
        The whitened evoked data.
    """
    # Operate on a copy; the input Evoked is left untouched.
    evoked = cp.deepcopy(evoked)
    if picks is None:
        picks = pick_types(evoked.info, meg=True, eeg=True)
    W = _get_whitener_data(evoked.info, noise_cov, picks,
                           diag=diag, rank=rank, scalings=scalings)
    # sqrt(nave) scaling: averaging nave trials reduces noise variance,
    # so the noise covariance of the average is noise_cov / nave.
    evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks])
    return evoked
@verbose
def _get_whitener_data(info, noise_cov, picks, diag=False, rank=None,
                       scalings=None, verbose=None):
    """Get whitening matrix for a set of data.

    Restricts ``noise_cov`` and ``info`` to ``picks`` and delegates to
    ``compute_whitener``. With ``diag=True`` only the diagonal of the
    covariance is used.
    """
    ch_names = [info['ch_names'][k] for k in picks]
    noise_cov = pick_channels_cov(noise_cov, include=ch_names, exclude=[])
    info = pick_info(info, picks)
    if diag:
        # Copy first so the caller's covariance is not modified.
        noise_cov = cp.deepcopy(noise_cov)
        noise_cov['data'] = np.diag(np.diag(noise_cov['data']))
    scalings = _handle_default('scalings_cov_rank', scalings)
    W = compute_whitener(noise_cov, info, rank=rank, scalings=scalings)[0]
    return W
@verbose
def _read_cov(fid, node, cov_kind, limited=False, verbose=None):
    """Read a noise covariance matrix.

    Scans the FIF tree under ``node`` for covariance blocks, returns the
    first one whose kind matches ``cov_kind`` as a dict (or None if no
    match). Full matrices are stored as the lower triangle and are
    symmetrized on read; ``limited=True`` strips the 'kind', 'dim' and
    'diag' entries from the result.
    """
    #   Find all covariance matrices
    covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV)
    if len(covs) == 0:
        raise ValueError('No covariance matrices found')

    #   Is any of the covariance matrices a noise covariance
    for p in range(len(covs)):
        tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND)

        if tag is not None and int(tag.data) == cov_kind:
            this = covs[p]

            #   Find all the necessary data
            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM)
            if tag is None:
                raise ValueError('Covariance matrix dimension not found')
            dim = int(tag.data)

            # Degrees of freedom; -1 when not stored.
            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE)
            if tag is None:
                nfree = -1
            else:
                nfree = int(tag.data)

            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_METHOD)
            if tag is None:
                method = None
            else:
                method = tag.data

            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_SCORE)
            if tag is None:
                score = None
            else:
                score = tag.data[0]

            tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES)
            if tag is None:
                names = []
            else:
                # Channel names are stored colon-separated.
                names = tag.data.split(':')
                if len(names) != dim:
                    raise ValueError('Number of names does not match '
                                     'covariance matrix dimension')

            tag = find_tag(fid, this, FIFF.FIFF_MNE_COV)
            if tag is None:
                tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG)
                if tag is None:
                    raise ValueError('No covariance matrix data found')
                else:
                    #   Diagonal is stored
                    data = tag.data
                    diag = True
                    logger.info(' %d x %d diagonal covariance (kind = '
                                '%d) found.' % (dim, dim, cov_kind))
            else:
                from scipy import sparse
                if not sparse.issparse(tag.data):
                    #   Lower diagonal is stored
                    vals = tag.data
                    data = np.zeros((dim, dim))
                    # Fill the lower triangle, mirror it, then halve the
                    # diagonal (which was added twice by data + data.T).
                    data[np.tril(np.ones((dim, dim))) > 0] = vals
                    data = data + data.T
                    data.flat[::dim + 1] /= 2.0
                    diag = False
                    logger.info(' %d x %d full covariance (kind = %d) '
                                'found.' % (dim, dim, cov_kind))
                else:
                    diag = False
                    data = tag.data
                    logger.info(' %d x %d sparse covariance (kind = %d)'
                                ' found.' % (dim, dim, cov_kind))

            #   Read the possibly precomputed decomposition
            tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES)
            tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS)
            if tag1 is not None and tag2 is not None:
                eig = tag1.data
                eigvec = tag2.data
            else:
                eig = None
                eigvec = None

            #   Read the projection operator
            projs = _read_proj(fid, this)

            #   Read the bad channel list
            bads = read_bad_channels(fid, this)

            #   Put it together
            assert dim == len(data)
            assert data.ndim == (1 if diag else 2)
            cov = dict(kind=cov_kind, diag=diag, dim=dim, names=names,
                       data=data, projs=projs, bads=bads, nfree=nfree, eig=eig,
                       eigvec=eigvec)
            if score is not None:
                cov['loglik'] = score
            if method is not None:
                cov['method'] = method
            if limited:
                del cov['kind'], cov['dim'], cov['diag']
            return cov

    logger.info(' Did not find the desired covariance matrix (kind = %d)'
                % cov_kind)

    return None
def _write_cov(fid, cov):
    """Write a noise covariance matrix to an open FIF file.

    Mirror of ``_read_cov``: full matrices are stored as the lower
    triangle only, diagonal covariances as a plain vector.
    """
    start_block(fid, FIFF.FIFFB_MNE_COV)

    #   Dimensions etc.
    write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind'])
    write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim'])
    if cov['nfree'] > 0:
        write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree'])

    #   Channel names
    if cov['names'] is not None and len(cov['names']) > 0:
        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names'])

    #   Data
    if cov['diag']:
        write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data'])
    else:
        # Store only lower part of covariance matrix
        dim = cov['dim']
        mask = np.tril(np.ones((dim, dim), dtype=np.bool)) > 0
        vals = cov['data'][mask].ravel()
        write_double(fid, FIFF.FIFF_MNE_COV, vals)

    #   Eigenvalues and vectors if present
    if cov['eig'] is not None and cov['eigvec'] is not None:
        write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec'])
        write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig'])

    #   Projection operator
    if cov['projs'] is not None and len(cov['projs']) > 0:
        _write_proj(fid, cov['projs'])

    #   Bad channels
    if cov['bads'] is not None and len(cov['bads']) > 0:
        start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
        write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads'])
        end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)

    #   estimator method
    if 'method' in cov:
        write_string(fid, FIFF.FIFF_MNE_COV_METHOD, cov['method'])

    #   negative log-likelihood score
    if 'loglik' in cov:
        write_double(
            fid, FIFF.FIFF_MNE_COV_SCORE, np.array(cov['loglik']))

    #   Done!
    end_block(fid, FIFF.FIFFB_MNE_COV)
def _apply_scaling_array(data, picks_list, scalings):
    """Scale ``data`` in-place, channel-type-dependently, for estimation.

    ``scalings`` may be a per-type dict, a per-channel array, or None;
    it is normalized by ``_check_scaling_inputs`` first.
    """
    scalings = _check_scaling_inputs(data, picks_list, scalings)
    if isinstance(scalings, dict):
        # Per-type factors: scale each channel group's rows separately.
        type_to_picks = dict(picks_list)
        for ch_type, factor in scalings.items():
            if ch_type in type_to_picks:
                data[type_to_picks[ch_type], :] *= factor
    else:
        # Per-channel factors broadcast across samples (columns).
        data *= scalings[:, np.newaxis]
def _undo_scaling_array(data, picks_list, scalings):
    """Invert, in-place, the scaling applied by ``_apply_scaling_array``."""
    scalings = _check_scaling_inputs(data, picks_list, scalings)
    if isinstance(scalings, dict):
        inverted = {ch_type: 1. / factor
                    for ch_type, factor in scalings.items()}
    elif isinstance(scalings, np.ndarray):
        inverted = 1. / scalings
    else:
        inverted = scalings
    return _apply_scaling_array(data, picks_list, inverted)
def _apply_scaling_cov(data, picks_list, scalings):
    """Scale a covariance matrix in-place, channel-type-dependently.

    Builds a per-channel scale vector and multiplies the covariance by
    its outer product (scaling rows and columns alike).
    """
    scalings = _check_scaling_inputs(data, picks_list, scalings)
    scales = None
    if isinstance(scalings, dict):
        n_channels = len(data)
        pick_groups = list(zip(*picks_list))[1]
        # Sanity check: the picks must partition all channels exactly.
        assert n_channels == sum(len(group) for group in pick_groups)
        assert list(sorted(np.concatenate(pick_groups))) == \
            list(range(n_channels))
        scales = np.zeros(n_channels)
        for ch_type, picks in picks_list:
            scales[picks] = scalings[ch_type]
    elif isinstance(scalings, np.ndarray):
        if len(scalings) != len(data):
            raise ValueError('Scaling factors and data are of incompatible '
                             'shape')
        scales = scalings
    elif scalings is None:
        pass
    else:
        raise RuntimeError('Arff...')
    if scales is not None:
        # A zero scale would irreversibly destroy information.
        assert np.sum(scales == 0.) == 0
        data *= (scales[None, :] * scales[:, None])
def _undo_scaling_cov(data, picks_list, scalings):
    """Invert, in-place, the scaling applied by ``_apply_scaling_cov``."""
    scalings = _check_scaling_inputs(data, picks_list, scalings)
    if isinstance(scalings, dict):
        inverted = {ch_type: 1. / factor
                    for ch_type, factor in scalings.items()}
    elif isinstance(scalings, np.ndarray):
        inverted = 1. / scalings
    else:
        inverted = scalings
    return _apply_scaling_cov(data, picks_list, inverted)
def _check_scaling_inputs(data, picks_list, scalings):
    """Normalize the ``scalings`` argument for the scaling helpers.

    Returns per-channel inverse row norms for 'norm', a defaults-merged
    dict for a dict, the array unchanged for an ndarray, and None for
    None. Anything else raises NotImplementedError.
    """
    defaults = dict(mag=1e15, grad=1e13, eeg=1e6)
    if scalings is None:
        return None
    if isinstance(scalings, string_types) and scalings == 'norm':
        return 1. / _compute_row_norms(data)
    if isinstance(scalings, dict):
        defaults.update(scalings)
        return defaults
    if isinstance(scalings, np.ndarray):
        return scalings
    raise NotImplementedError("No way! That's not a rescaling "
                              'option: %s' % scalings)
def _estimate_rank_meeg_signals(data, info, scalings, tol=1e-4,
                                return_singular=False, copy=True):
    """Estimate rank for M/EEG data.

    Parameters
    ----------
    data : np.ndarray of float, shape(n_channels, n_samples)
        The M/EEG signals.
    info : mne.io.measurement_info.Info
        The measurment info.
    scalings : dict | 'norm' | np.ndarray | None
        The rescaling method to be applied. If dict, it will override the
        following default dict:

            dict(mag=1e15, grad=1e13, eeg=1e6)

        If 'norm' data will be scaled by channel-wise norms. If array,
        pre-specified norms will be used. If None, no scaling will be applied.
    return_singular : bool
        If True, also return the singular values that were used
        to determine the rank.
    copy : bool
        If False, values in data will be modified in-place during
        rank estimation (saves memory).

    Returns
    -------
    rank : int
        Estimated rank of the data.
    s : array
        If return_singular is True, the singular values that were
        thresholded to determine the rank are also returned.
    """
    picks_list = _picks_by_type(info)
    _apply_scaling_array(data, picks_list, scalings)
    if data.shape[1] < data.shape[0]:
        # BUG FIX: a ValueError was instantiated here but never raised, so
        # the check had no effect at all. Emit a warning instead of raising
        # to keep the long-standing non-fatal behavior while actually
        # informing the user.
        warnings.warn("You've got fewer samples than channels, your "
                      "rank estimate might be inaccurate.")
    out = estimate_rank(data, tol=tol, norm=False,
                        return_singular=return_singular, copy=copy)
    rank = out[0] if isinstance(out, tuple) else out
    ch_type = ' + '.join(list(zip(*picks_list))[0])
    logger.info('estimated rank (%s): %d' % (ch_type, rank))
    # Undo the in-place rescaling so the caller's data is unchanged.
    _undo_scaling_array(data, picks_list, scalings)
    return out
def _estimate_rank_meeg_cov(data, info, scalings, tol=1e-4,
                            return_singular=False, copy=True):
    """Estimate rank for an M/EEG covariance.

    Parameters
    ----------
    data : np.ndarray of float, shape (n_channels, n_channels)
        The M/EEG covariance.
    info : mne.io.measurement_info.Info
        The measurement info.
    scalings : dict | 'norm' | np.ndarray | None
        The rescaling method to be applied. If dict, it will override the
        following default dict:

            dict(mag=1e12, grad=1e11, eeg=1e5)

        If 'norm' data will be scaled by channel-wise norms. If array,
        pre-specified norms will be used. If None, no scaling will be
        applied.
    tol : float
        Tolerance passed on to ``estimate_rank``.
    return_singular : bool
        If True, also return the singular values that were used
        to determine the rank.
    copy : bool
        If False, values in data will be modified in-place during
        rank estimation (saves memory).

    Returns
    -------
    rank : int
        Estimated rank of the data.
    s : array
        If return_singular is True, the singular values that were
        thresholded to determine the rank are also returned.
    """
    picks_list = _picks_by_type(info)
    scalings = _handle_default('scalings_cov_rank', scalings)
    _apply_scaling_cov(data, picks_list, scalings)
    if data.shape[1] < data.shape[0]:
        # BUG FIX: this ValueError used to be instantiated but never
        # raised (a silent no-op); warn instead, since the message only
        # flags a possibly inaccurate estimate.
        logger.warning("You've got fewer samples than channels, your "
                       "rank estimate might be inaccurate.")
    out = estimate_rank(data, tol=tol, norm=False,
                        return_singular=return_singular, copy=copy)
    rank = out[0] if isinstance(out, tuple) else out
    ch_type = ' + '.join(list(zip(*picks_list))[0])
    logger.info('estimated rank (%s): %d' % (ch_type, rank))
    # Undo the in-place rescaling so the caller's covariance is unchanged.
    _undo_scaling_cov(data, picks_list, scalings)
    return out
| bsd-3-clause |
alexeyum/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 47 | 2495 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix

# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Split the data into a training set and a test set
# (fixed random_state makes the split, and the figures, reproducible)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render confusion matrix ``cm`` as a colour-mapped image.

    Uses the module-level ``iris`` dataset for the axis tick labels and
    draws into the current matplotlib figure.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    labels = iris.target_names
    positions = np.arange(len(labels))
    plt.xticks(positions, labels, rotation=45)
    plt.yticks(positions, labels)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)

# Normalize the confusion matrix by row (i.e by the number of samples
# in each class); each row then shows the fraction of that true class
# assigned to each predicted class.
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')

plt.show()
| bsd-3-clause |
jpedroan/megua | megua/ug.py | 1 | 20733 | # coding=utf-8
r"""
UnifiedGraphics - This module defines common operations for graphics.
AUTHORS:
- Pedro Cruz (2016-01): First version (refactoring old "megua" for SMC).
DISCUSSION:
Image sources are:
- sage commands (plots, graphcs, etc)
- R commands
- Static images
- Python Matplotlib and other python libs
- LaTeX using "standalone" package
- TikZ (a particular case of the above)
- other sources...?
Exporting them to:
- <img> tags in html
- <svg> tags in html
- folder of images for some exercise (for example, a latex document)
- filesystem or database
EXAMPLES:
Test examples using:
::
sage -t ug.py
Defining a new exercise template:
::
sage: from megua.exbase import ExerciseBase
sage: class DrawSegment(ExerciseBase):
....: #class variables
....: _unique_name = "DrawSegment"
....: _suggestive_name = "Draw a segment"
....: _summary_text = "Draw a segment."
....: _problem_text = "Draw the segment a1 + a2@()x for x in [x1,x2]."
....: _answer_text = "<p>fig1</p><p>img1</p>"
....:
....: def make_random(self,edict=None):
....: self.a1 = ZZ.random_element(-10,10+1)
....: self.a2 = ZZ.random_element(-10,10+1)
....: self.x1 = ZZ.random_element(-5,0)
....: self.x2 = ZZ.random_element( 1,5)
....: p = plot( self.a1 + self.a2*x, x, self.x1, self.x2)
....: self.fig1 = self.sage_graphic(graphobj=p,varname="fig1",dimx=50,dimy=50,dpi=10)
....: self.img1 = self.static_image(url="http://www.sagemath.org/pix/sage-sticker-1x1_inch-small.png",dimx=50,dimy=50)
....: ExerciseBase.make_random(self,edict)
Plot using embed images with base64 and svg:
::
sage: ex = DrawSegment(ekey=0)
sage: #ex.print_instance() #render = base64, long textual answer
Plot using file and <img> tag:
::
sage: ex.update(ekey=0,render_method="imagefile") #update graphic links, keep ekey=0
sage: ex.print_instance()
------------------------
Instance of: DrawSegment
------------------------
==> Summary:
Draw a segment.
==> Problem instance
Draw the segment 1 + (-7)x for x in [-4,2].
==> Answer instance
<p>\n<img src='_output/DrawSegment/DrawSegment-fig1-0.png' alt='DrawSegment-fig1-0.png graphic' height='50' width='50'></img>\n</p><p>\n<img src='_output/DrawSegment/sage-sticker-1x1_inch-small.png' alt='sage-sticker-1x1_inch-small.png graphic' height='50' width='50'></img>\n</p>
Plot using ascii art:
::
sage: ex.update(ekey=0,render_method="asciiart") #must be called to update graphic links
sage: #ex.print_instance() #long output
Using LaTeX to generate graphics or extractions from LaTeX:
::
sage: class LaTexBasedImages(ExerciseBase):
....: #class variables
....: _unique_name = "LaTexBasedImages"
....: _suggestive_name = "LaTex Based Images"
....: _summary_text = "LaTex Based Images."
....: _problem_text = "Check this:"
....: _answer_text = r'''<latex 100%>\[\sqrt x\]</latex> '''\
....: r'''<latex 100%>\fbox{Olá}</latex>'''
sage: ex = LaTexBasedImages()
sage: ex.update(ekey=0,render_method="imagefile")
sage: print ex.latex_render(ex.answer()) #long output
<BLANKLINE>
<img src='_output/LaTexBasedImages/LaTexBasedImages-0-00.png' alt='LaTexBasedImages-0-00.png graphic' height='47' width='47'></img>
<BLANKLINE>
<img src='_output/LaTexBasedImages/LaTexBasedImages-0-01.png' alt='LaTexBasedImages-0-01.png graphic' height='59' width='47'></img>
<BLANKLINE>
Example with an ascii art graphic:
::
sage: class UnitCircle(ExerciseBase):
....: #class variables
....: _unique_name = "UnitCircle"
....: _suggestive_name = "Draw a segment."
....: _summary_text = "Plot a Circle"
....: _problem_text = "Draw a unit circle."
....: _answer_text = "\nplot1\n"
....: def make_random(self,edict=None):
....: c = circle( (0,0),1,thickness=2,fill=True,facecolor='black')
....: c.axes(False)
....: self.plot1 = self.sage_graphic(graphobj=c,varname="plot1",dimx=10,dimy=10)
sage: ex = UnitCircle(ekey=0,rendermethod="asciiart")
sage: print ex.answer() #expected graphic in asciiart
<BLANKLINE>
QQQQ??4QQQ
QQ' 4Q
Q' 4
Q ]
f
f
6 _
Q j
Q6 _Q
QQga _wQQ
<BLANKLINE>
sage: print ex.image_relativepathnames
['_output/UnitCircle/UnitCircle-plot1-0.png']
DEVELOPMENT:
    Install aalib into SageMath using this:
$ sage -pip install --user https://pypi.python.org/packages/2d/3d/dca492960070685bc1bc12535d274a840f35cf5267f2a4a6ee36f3eb3dd7/python-aalib-0.3.tar.gz#md5=00afa7ef3479649cec99046449c07ef9Collecting https://pypi.python.org/packages/2d/3d/dca492960070685bc1bc12535d274a840f35cf5267f2a4a6ee36f3eb3dd7/python-aalib-0.3.tar.gz#md5=00afa7ef3479649cec99046449c07ef9
Downloading python-aalib-0.3.tar.gz
Installing collected packages: python-aalib
Running setup.py install for python-aalib ... done
Successfully installed python-aalib
"""
#*****************************************************************************
# Copyright (C) 2016 Pedro Cruz <PedroCruz@ua.pt>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
#SAGEMATH modules
from sage.all import * #All Sage Graphics
#MEGUA modules
from megua.jinjatemplates import templates
from megua.platex import pcompile
#PYTHON modules
#import io
#import urllib2
import os
#TODO: postponed because of problems with python setup.py install
#import aalib
import PIL.Image
import re
import subprocess
class UnifiedGraphics:
    """Handle graphics and images for exercise templates.

    This class is used as a mixin: the host class (see megua.exbase) must
    provide the working-directory attributes ``wd_relative`` and
    ``wd_fullpath`` and override :meth:`get_ekey` and :meth:`unique_name`.
    """

    def __init__(self, rendermethod='imagefile'):
        # How images are rendered into the output text
        # ('imagefile', 'includegraphics', 'base64' or 'asciiart').
        self.render_method(rendermethod)

        # TODO: these default dimensions are not used yet.
        self.paperx_cm = 5    # cm
        self.papery_cm = 5    # cm
        self.screen_x = 100   # pixels
        self.screen_y = 100   # pixels
        self.dpi = 100

        # Is the same as exbase.working_dir (different for each exercise):
        assert(self.wd_relative)
        assert(self.wd_fullpath)

        # Sets are used to avoid duplicated image names:
        self.image_relativepathnames = set()
        self.image_fullpathnames = set()

    # TODO: review the need for this getter/setter method.
    def render_method(self, rendermethod=None):
        """Get or set the render method.

        With a valid method name the method is stored and returned; with
        None the current method is returned; any other value raises
        NotImplementedError.
        """
        if rendermethod in ['includegraphics', 'imagefile', 'base64', 'asciiart']:
            self._rendermethod = rendermethod
            return self._rendermethod
        elif not rendermethod:
            return self._rendermethod
        else:
            raise NotImplementedError("ug module: method '{0}' not implemented.".format(rendermethod))

    def __str__(self):
        return "UnifiedGraphics"

    def __repr__(self):
        return "UnifiedGraphics({0})".format(self.__dict__)

    def get_ekey(self):
        # Exercise key; must be provided by the host class.
        raise NotImplementedError

    def unique_name(self):
        # Exercise unique name; must be provided by the host class.
        raise NotImplementedError

    def _render(self, gfilename, paper_cm=None, scr_pixels=None):
        r"""Render image `gfilename` using the current render method.

        INPUT:

        - `gfilename`: name of the file (with extension) stored in the
          exercise image directory.
        - `paper_cm`: pair (x,y) in cm (used by 'includegraphics').
        - `scr_pixels`: pair (x,y) in pixels (used by 'imagefile' and
          'base64').

        OUTPUT:

        - a string: an <img> tag, a LaTeX \includegraphics command, an
          svg/base64 tag or asciiart, depending on self._rendermethod.

        This method also adds `gfilename` to the exercise's own image
        path lists.
        """
        assert(gfilename)

        relative_pathname = os.path.join(self.wd_relative, gfilename)
        full_pathname = os.path.join(self.wd_fullpath, gfilename)

        if self._rendermethod == 'imagefile':
            self.image_relativepathnames.add(relative_pathname)
            self.image_fullpathnames.add(full_pathname)
            return r"<img src='%s' alt='%s' height='%d' width='%d' style='background-color:white;'/>" % (relative_pathname, gfilename + ' graphic', scr_pixels[1], scr_pixels[0])
        elif self._rendermethod == 'includegraphics':
            self.image_relativepathnames.add(relative_pathname)
            self.image_fullpathnames.add(full_pathname)
            # BUG FIX: the width used the undefined name "papercm"
            # (NameError at runtime); both sizes come from paper_cm.
            return "\n\\includegraphics[height=%dcm,width=%dcm]{%s}\n" % (paper_cm[1], paper_cm[0], full_pathname)
        elif self._rendermethod == 'asciiart':
            print("ug.py say: 'asciiart' is not yet implemented")
            #screen = aalib.AsciiScreen(width=dimx, height=dimy)
            #image = PIL.Image.open(pathname).convert('L').resize(screen.virtual_size)
            #screen.put_image((0, 0), image)
            #return screen.render()
            return "ug.py say: 'asciiart' is not yet implemented"
        elif self._rendermethod == 'base64':
            # BUG FIX: this branch referenced an undefined "pathname" and
            # leaked the file handle; read from the full pathname and
            # close the file when done.
            with open(full_pathname, 'rb') as imgfile:
                data_uri = imgfile.read().encode('base64').replace('\n', '')
            img_tag = templates.render("ug_svg.html",
                                       dimx=scr_pixels[0],
                                       dimy=scr_pixels[1],
                                       base64=data_uri)
            return img_tag
        else:
            # BUG FIX: "raise(<string>)" raised a plain string, which is a
            # TypeError in modern Python; raise a proper exception.
            raise NotImplementedError("ug.py module: render method not implemented.")

    def static_image(self,
                     imagefilename=None,
                     url=None,
                     paper_cm=None,
                     scr_pixels=None):
        """Called by the author in make_random to include a static image.

        INPUT:

        - `imagefilename`: full path, or relative to MEGUA_EXERCISE_INPUT,
          of a graphic or picture stored in the filesystem.
        - `url`: full url (http://...) where the image is stored.
        - `paper_cm`: pair (x,y) in cm.
        - `scr_pixels`: pair (x,y) in pixels, or None to read the size
          from the image file.

        NOTES:

        - see also ``self.sage_graphic``.
        """
        if url:
            #TODO: use urllib instead of "wget":
            #fp = io.BytesIO(urllib2.urlopen(url).read())
            # SECURITY NOTE(review): url is interpolated into a shell
            # command; a crafted url could inject commands. Keep urls
            # author-controlled or switch to urllib.
            os.system(r"""cd {0}; wget -q '{1}'""".format(self.wd_fullpath, url))
            gfilename = os.path.split(url)[1]

        if imagefilename:
            # Copy always: the source file could have changed since the
            # last build.
            os.system('cp "{0}" "{1}"'.format(imagefilename, self.wd_fullpath))
            gfilename = os.path.split(imagefilename)[1]

        pathname = os.path.join(self.wd_fullpath, gfilename)

        if not scr_pixels:
            # Get dimensions from the image file itself.
            with PIL.Image.open(pathname) as f:
                scr_pixels = f.size

        return self._render(gfilename, paper_cm, scr_pixels)

    def sage_graphic(self,
                     graphic_object,
                     varname,
                     paper_cm=None,
                     scr_pixels=None,
                     dimx=400, dimy=400,
                     gtype='svg',
                     **kwargs):
        """Called by the author in make_random/answer to save a plot.

        INPUT:

        - `graphic_object`: a sage.plot.graphics.Graphics instance (saved
          via its own ``save``); anything else is treated as matplotlib
          and the *current* pyplot figure is saved.
        - `varname`: user supplied string that will be part of the filename.
        - `paper_cm`: pair (x,y) with size in centimeters.
        - `scr_pixels`: pair (x,y) with size in pixels (dpi=100 assumed).
        - `dimx,dimy`: kept for backward compatibility (old behaviour).
        - `gtype`: image type: svg, png, "etc".
        - `kwargs`: other keyword=value pairs for the sage or matplotlib
          save command.
        """
        gfilename = '%s-%s-%d.%s' % (self.unique_name(), varname, self.get_ekey(), gtype)
        if os.path.exists(self.wd_relative):
            gpathname = os.path.join(self.wd_relative, gfilename)
        else:
            gpathname = os.path.join(self.wd_fullpath, gfilename)

        #TODO: protect against too big images.
        if paper_cm:
            # convert cm to inches
            fsize = (paper_cm[0]/2.54, paper_cm[1]/2.54)
        elif scr_pixels:
            # convert pixels to inches (assume dpi=100)
            fsize = (scr_pixels[0]/100, scr_pixels[1]/100)
        else:
            fsize = (dimx/2.54, dimy/2.54)  # old behaviour

        # BUG FIX (robustness): isinstance also accepts Graphics
        # subclasses, which provide the same save() method.
        if isinstance(graphic_object, sage.plot.graphics.Graphics):
            graphic_object.save(
                gpathname,
                figsize=fsize,
                **kwargs)
        else:  # matplotlib assumed
            import matplotlib.pyplot as plt
            from pylab import savefig
            fig = plt.gcf()  # Get Current Figure: gcf
            savefig(gpathname, figsize=fsize, **kwargs)
            # TODO: savefig saves the current figure; the graphic_object
            # parameter is effectively ignored in this branch.

        # fsize is in inches; back-fill any missing size for _render().
        if not paper_cm:
            # BUG FIX: inches -> cm multiplies by 2.54 (was divided).
            paper_cm = (fsize[0]*2.54, fsize[1]*2.54)
        if not scr_pixels:
            scr_pixels = (fsize[0]*100, fsize[1]*100)

        return self._render(gfilename, paper_cm, scr_pixels)

    def latex_render(self, input_text):
        """Return `input_text` with every <latex percent%> ... </latex>
        pair replaced by an image rendered from the LaTeX inside the tags.

        INPUT:

        - `input_text` -- problem or answer text, possibly containing
          <latex percent%> ... </latex> tags.

        OUTPUT:

        - `string` -- the same text with images created from the latex
          inside the tags.

        NOTE:

        - Dimensions are specified in each <latex> tag as an ImageMagick
          geometry (e.g. "100%"), not through dimx/dimy.

        DEVELOPER NOTES:

        - Uses the LaTeX "standalone" package (crops the page around the
          picture) and the external ImageMagick "convert" tool for the
          pdf -> png conversion.
        """
        latex_pattern = re.compile(r'<\s*latex\s+(\d+%)\s*>(.+?)<\s*/latex\s*>', re.DOTALL | re.UNICODE)
        latex_error_pattern = re.compile(r"!.*?l\.(\d+)(.*?)$", re.DOTALL | re.M)

        # Cycle through the existing latex code and produce pdf/png files.
        graphic_number = 0
        match_iter = re.finditer(latex_pattern, input_text)
        for match in match_iter:
            # Graphic filename
            gfilename_base = '%s-%d-%02d' % (self.unique_name(), self.get_ekey(), graphic_number)
            gfilename = '%s-%d-%02d.png' % (self.unique_name(), self.get_ekey(), graphic_number)

            # Compile what is inside <latex>...</latex> to an image.
            latex_source = match.group(2)

            try:
                latex_document = templates.render("standalone_latex.tex",
                                                  gfilename=gfilename,
                                                  latex_source=latex_source)
                pcompile(latex_document, self.wd_fullpath, gfilename)
                # match.group(1) is the resize geometry, e.g. "100%".
                cmd = "cd {2};convert -density 100x100 '{0}.pdf' -quality 95 -resize {1} '{0}.png' 2>/dev/null".format(
                    gfilename_base, match.group(1), self.wd_fullpath)
                #TODO: check that "convert" is installed
                os.system(cmd)
                graphic_number += 1
            except subprocess.CalledProcessError as err:
                # Try to show the LaTeX compile error to the user.
                match = latex_error_pattern.search(err.output)
                if match:
                    print(match.group(0))
                else:
                    print("There was a problem with an latex image file.")
                raise err

        # Cycle through the produced images and build the new html string.
        new_text = input_text
        for gn in range(graphic_number):
            gfilename = '%s-%d-%02d.png' % (self.unique_name(), self.get_ekey(), gn)
            # Get image dimensions from the generated png.
            pathname = os.path.join(self.wd_fullpath, gfilename)
            with PIL.Image.open(pathname) as f:
                scr_pixels = f.size
            img_string = self._render(gfilename, scr_pixels=scr_pixels)
            (new_text, number) = latex_pattern.subn(img_string, new_text, count=1)
            assert(number)

        return new_text
###### def _render(self,gfilename,paper_cm,scr_pixels):
#end of ug.py
| gpl-3.0 |
ryfeus/lambda-packs | Skimage_numpy/source/scipy/stats/_multivariate.py | 13 | 99071 | #
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import math
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln
from scipy._lib._util import check_random_state
from scipy.linalg.blas import drot
__all__ = ['multivariate_normal',
'matrix_normal',
'dirichlet',
'wishart',
'invwishart',
'special_ortho_group',
'ortho_group',
'random_correlation']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
class _PSD(object):
    """
    Compute coordinated functions of a symmetric positive semidefinite matrix.

    A single call to ``scipy.linalg.eigh`` (instead of three separate
    decompositions) yields the pseudoinverse, the log of the
    pseudo-determinant and the numerical rank, all based on one common
    notion of which eigenvalues are negligibly small.  The functions are
    designed to coordinate with scipy.linalg.pinvh() but not necessarily
    with np.linalg.det() or np.linalg.matrix_rank().

    Parameters
    ----------
    M : array_like
        Symmetric positive semidefinite matrix (2-D).
    cond, rcond : float, optional
        Cutoff for small eigenvalues.
        Singular values smaller than rcond * largest_eigenvalue are
        considered zero.
        If None or -1, suitable machine precision is used.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower
        or upper triangle of M. (Default: lower)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Disabling may give a performance gain, but may result
        in problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.
    allow_singular : bool, optional
        Whether to allow a singular matrix. (Default: True)

    Notes
    -----
    The arguments are similar to those of scipy.linalg.pinvh().
    """

    def __init__(self, M, cond=None, rcond=None, lower=True,
                 check_finite=True, allow_singular=True):
        # eigh takes care of array conversion, chkfinite, and the
        # assertion that the matrix is square.
        eigvals, eigvecs = scipy.linalg.eigh(M, lower=lower,
                                             check_finite=check_finite)

        eps = _eigvalsh_to_eps(eigvals, cond, rcond)
        if np.min(eigvals) < -eps:
            raise ValueError('the input matrix must be positive semidefinite')
        above_cutoff = eigvals[eigvals > eps]
        if len(above_cutoff) < len(eigvals) and not allow_singular:
            raise np.linalg.LinAlgError('singular matrix')
        inverted = _pinv_1d(eigvals, eps)
        U = np.multiply(eigvecs, np.sqrt(inverted))

        # Eagerly precomputed attributes.
        self.rank = len(above_cutoff)
        self.U = U
        self.log_pdet = np.sum(np.log(above_cutoff))

        # Pseudo-inverse is computed lazily on first access.
        self._pinv = None

    @property
    def pinv(self):
        """Pseudo-inverse of M, computed on first access and cached."""
        if self._pinv is None:
            self._pinv = np.dot(self.U, self.U.T)
        return self._pinv
class multi_rv_generic(object):
    """
    Class which encapsulates common functionality between all multivariate
    distributions: storage and validation of the RandomState used to draw
    random variates.
    """

    def __init__(self, seed=None):
        super(multi_rv_generic, self).__init__()
        self._random_state = check_random_state(seed)

    @property
    def random_state(self):
        """ Get or set the RandomState object for generating random variates.

        This can be either None or an existing RandomState object.

        If None (or np.random), use the RandomState singleton used by np.random.
        If already a RandomState instance, use it.
        If an int, use a new RandomState instance seeded with seed.
        """
        return self._random_state

    @random_state.setter
    def random_state(self, seed):
        self._random_state = check_random_state(seed)

    def _get_random_state(self, random_state):
        # A non-None per-call argument overrides the stored state.
        if random_state is None:
            return self._random_state
        return check_random_state(random_state)
class multi_rv_frozen(object):
    """
    Class which encapsulates common functionality between all frozen
    multivariate distributions.
    """
    @property
    def random_state(self):
        # Delegate to the underlying (non-frozen) distribution's state;
        # subclasses are expected to set self._dist.
        return self._dist._random_state

    @random_state.setter
    def random_state(self, seed):
        # `seed` may be None, an int or a RandomState instance
        # (normalized by check_random_state).
        self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_mvn_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
r"""
A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_mvn_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
    def __init__(self, seed=None):
        super(multivariate_normal_gen, self).__init__(seed)
        # Substitute the shared parameter-doc fragments into the class doc.
        self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
    def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
        """
        Create a frozen multivariate normal distribution.

        See `multivariate_normal_frozen` for more information.
        """
        # Calling the generic object "freezes" the parameters: the
        # returned object exposes the same methods with mean/cov fixed.
        return multivariate_normal_frozen(mean, cov,
                                          allow_singular=allow_singular,
                                          seed=seed)
    def _process_parameters(self, dim, mean, cov):
        """
        Infer dimensionality from mean or covariance matrix, ensure that
        mean and covariance are full vector resp. matrix.
        """
        # Try to infer dimensionality: cov wins only when mean is absent.
        if dim is None:
            if mean is None:
                if cov is None:
                    dim = 1
                else:
                    cov = np.asarray(cov, dtype=float)
                    if cov.ndim < 2:
                        dim = 1
                    else:
                        dim = cov.shape[0]
            else:
                mean = np.asarray(mean, dtype=float)
                dim = mean.size
        else:
            if not np.isscalar(dim):
                raise ValueError("Dimension of random variable must be a scalar.")

        # Check input sizes and return full arrays for mean and cov if necessary
        if mean is None:
            mean = np.zeros(dim)
        mean = np.asarray(mean, dtype=float)

        if cov is None:
            cov = 1.0
        cov = np.asarray(cov, dtype=float)

        if dim == 1:
            # Scalars become length-1 vector / 1x1 matrix in place.
            mean.shape = (1,)
            cov.shape = (1, 1)

        if mean.ndim != 1 or mean.shape[0] != dim:
            raise ValueError("Array 'mean' must be a vector of length %d." % dim)
        if cov.ndim == 0:
            # Scalar covariance: isotropic, cov * identity.
            cov = cov * np.eye(dim)
        elif cov.ndim == 1:
            # Vector covariance: interpreted as the diagonal entries.
            cov = np.diag(cov)
        elif cov.ndim == 2 and cov.shape != (dim, dim):
            rows, cols = cov.shape
            if rows != cols:
                msg = ("Array 'cov' must be square if it is two dimensional,"
                       " but cov.shape = %s." % str(cov.shape))
            else:
                msg = ("Dimension mismatch: array 'cov' is of shape %s,"
                       " but 'mean' is a vector of length %d.")
                msg = msg % (str(cov.shape), len(mean))
            raise ValueError(msg)
        elif cov.ndim > 2:
            raise ValueError("Array 'cov' must be at most two-dimensional,"
                             " but cov.ndim = %d" % cov.ndim)

        return dim, mean, cov
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_mvn_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
    # A multivariate normal distribution with (mean, covariance) fixed at
    # construction time; all computation is delegated to a private
    # multivariate_normal_gen instance.
    def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
        """
        Create a frozen multivariate normal distribution.

        Parameters
        ----------
        mean : array_like, optional
            Mean of the distribution (default zero)
        cov : array_like, optional
            Covariance matrix of the distribution (default one)
        allow_singular : bool, optional
            If this flag is True then tolerate a singular
            covariance matrix (default False).
        seed : None or int or np.random.RandomState instance, optional
            This parameter defines the RandomState object to use for drawing
            random variates.
            If None (or np.random), the global np.random state is used.
            If integer, it is used to seed the local RandomState instance
            Default is None.

        Examples
        --------
        When called with the default parameters, this will create a 1D random
        variable with mean 0 and covariance 1:

        >>> from scipy.stats import multivariate_normal
        >>> r = multivariate_normal()
        >>> r.mean
        array([ 0.])
        >>> r.cov
        array([[1.]])
        """
        self._dist = multivariate_normal_gen(seed)
        # Canonicalize the parameters once, up front; dim is inferred here.
        self.dim, self.mean, self.cov = self._dist._process_parameters(
            None, mean, cov)
        # Precompute the covariance decomposition reused by every call.
        self.cov_info = _PSD(self.cov, allow_singular=allow_singular)

    def logpdf(self, x):
        x = self._dist._process_quantiles(x, self.dim)
        out = self._dist._logpdf(x, self.mean, self.cov_info.U,
                                 self.cov_info.log_pdet, self.cov_info.rank)
        return _squeeze_output(out)

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.mean, self.cov, size, random_state)

    def entropy(self):
        """
        Computes the differential entropy of the multivariate normal.

        Returns
        -------
        h : scalar
            Entropy of the multivariate normal distribution
        """
        # h = 0.5 * (rank * (log(2*pi) + 1) + log pseudo-det(cov)); rank and
        # pseudo-determinant make this valid for singular covariances too.
        log_pdet = self.cov_info.log_pdet
        rank = self.cov_info.rank
        return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
    method = multivariate_normal_gen.__dict__[name]
    method_frozen = multivariate_normal_frozen.__dict__[name]
    # Frozen methods get the generator's docstring with the parameter
    # placeholders blanked out; the generator keeps the full version.
    method_frozen.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
# Docstring fragments shared by matrix_normal_gen and matrix_normal_frozen.
_matnorm_doc_default_callparams = """\
mean : array_like, optional
    Mean of the distribution (default: `None`)
rowcov : array_like, optional
    Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
    Among-column covariance matrix of the distribution (default: `1`)
"""

# BUG FIX: the note previously read "will be be interpreted" (doubled word).
_matnorm_doc_callparams_note = \
    """If `mean` is set to `None` then a matrix of zeros is used for the mean.
    The dimensions of this matrix are inferred from the shape of `rowcov` and
    `colcov`, if these are provided, or set to `1` if ambiguous.

    `rowcov` and `colcov` can be two-dimensional array_likes specifying the
    covariance matrices directly. Alternatively, a one-dimensional array will
    be interpreted as the entries of a diagonal matrix, and a scalar or
    zero-dimensional array will be interpreted as this value times the
    identity matrix.
    """

_matnorm_doc_frozen_callparams = ""

_matnorm_doc_frozen_callparams_note = \
    """See class definition for a detailed description of parameters."""

# Substitution dictionaries consumed by doccer.docformat when building the
# public docstrings of the generator and frozen classes.
matnorm_docdict_params = {
    '_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
    '_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
    '_doc_random_state': _doc_random_state
}

matnorm_docdict_noparams = {
    '_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
    '_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
    '_doc_random_state': _doc_random_state
}
class matrix_normal_gen(multi_rv_generic):
    r"""
    A matrix normal random variable.

    The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
    among-row covariance matrix. The 'colcov' keyword specifies the
    among-column covariance matrix.

    Methods
    -------
    ``pdf(X, mean=None, rowcov=1, colcov=1)``
        Probability density function.
    ``logpdf(X, mean=None, rowcov=1, colcov=1)``
        Log of the probability density function.
    ``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``
        Draw random samples.

    Parameters
    ----------
    X : array_like
        Quantiles, with the last two axes of `X` denoting the components.
    %(_matnorm_doc_default_callparams)s
    %(_doc_random_state)s

    Alternatively, the object may be called (as a function) to fix the mean
    and covariance parameters, returning a "frozen" matrix normal
    random variable:

    rv = matrix_normal(mean=None, rowcov=1, colcov=1)
        - Frozen object with the same methods but holding the given
          mean and covariance fixed.

    Notes
    -----
    %(_matnorm_doc_callparams_note)s

    The covariance matrices specified by `rowcov` and `colcov` must be
    (symmetric) positive definite. If the samples in `X` are
    :math:`m \times n`, then `rowcov` must be :math:`m \times m` and
    `colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.

    The probability density function for `matrix_normal` is

    .. math::

        f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
               \exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
               (X-M)^T \right] \right),

    where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
    :math:`V` the among-column covariance matrix.

    The `allow_singular` behaviour of the `multivariate_normal`
    distribution is not currently supported. Covariance matrices must be
    full rank.

    The `matrix_normal` distribution is closely related to the
    `multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
    (the vector formed by concatenating the columns of :math:`X`) has a
    multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
    and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
    product). Sampling and pdf evaluation are
    :math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
    :math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
    making this equivalent form algorithmically inefficient.

    .. versionadded:: 0.17.0

    Examples
    --------
    >>> from scipy.stats import matrix_normal
    >>> M = np.arange(6).reshape(3,2); M
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> U = np.diag([1,2,3]); U
    array([[1, 0, 0],
           [0, 2, 0],
           [0, 0, 3]])
    >>> V = 0.3*np.identity(2); V
    array([[ 0.3,  0. ],
           [ 0. ,  0.3]])
    >>> X = M + 0.1; X
    array([[ 0.1,  1.1],
           [ 2.1,  3.1],
           [ 4.1,  5.1]])
    >>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
    0.023410202050005054

    >>> # Equivalent multivariate normal
    >>> from scipy.stats import multivariate_normal
    >>> vectorised_X = X.T.flatten()
    >>> equiv_mean = M.T.flatten()
    >>> equiv_cov = np.kron(V,U)
    >>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
    0.023410202050005054
    """

    def __init__(self, seed=None):
        super(matrix_normal_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)

    def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
        """
        Create a frozen matrix normal distribution.
        See `matrix_normal_frozen` for more information.
        """
        return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)

    def _process_parameters(self, mean, rowcov, colcov):
        """
        Infer dimensionality from mean or covariance matrices. Handle
        defaults. Ensure compatible dimensions.

        Returns ``(dims, mean, rowcov, colcov)`` where ``dims`` is the
        ``(numrows, numcols)`` shape of the variates, ``mean`` is a full
        matrix (zeros when it was None) and both covariances are full
        square matrices.
        """
        # Process mean
        if mean is not None:
            mean = np.asarray(mean, dtype=float)
            meanshape = mean.shape
            if len(meanshape) != 2:
                raise ValueError("Array `mean` must be two dimensional.")
            # BUG FIX: `np.any(meanshape == 0)` compared a tuple with an
            # int, which is always False, so empty means slipped through.
            if 0 in meanshape:
                raise ValueError("Array `mean` has invalid shape.")

        # Process among-row covariance: a scalar becomes a scaled identity,
        # a 1-D array becomes a diagonal matrix.
        rowcov = np.asarray(rowcov, dtype=float)
        if rowcov.ndim == 0:
            if mean is not None:
                rowcov = rowcov * np.identity(meanshape[0])
            else:
                rowcov = rowcov * np.identity(1)
        elif rowcov.ndim == 1:
            rowcov = np.diag(rowcov)
        rowshape = rowcov.shape
        if len(rowshape) != 2:
            raise ValueError("`rowcov` must be a scalar or a 2D array.")
        if rowshape[0] != rowshape[1]:
            raise ValueError("Array `rowcov` must be square.")
        if rowshape[0] == 0:
            raise ValueError("Array `rowcov` has invalid shape.")
        numrows = rowshape[0]

        # Process among-column covariance (same canonicalization as rowcov).
        colcov = np.asarray(colcov, dtype=float)
        if colcov.ndim == 0:
            if mean is not None:
                colcov = colcov * np.identity(meanshape[1])
            else:
                colcov = colcov * np.identity(1)
        elif colcov.ndim == 1:
            colcov = np.diag(colcov)
        colshape = colcov.shape
        if len(colshape) != 2:
            raise ValueError("`colcov` must be a scalar or a 2D array.")
        if colshape[0] != colshape[1]:
            raise ValueError("Array `colcov` must be square.")
        if colshape[0] == 0:
            raise ValueError("Array `colcov` has invalid shape.")
        numcols = colshape[0]

        # Ensure mean and covariances compatible
        # BUG FIX: both messages were missing a space at the string
        # concatenation boundary ("...must have thesame number of ...").
        if mean is not None:
            if meanshape[0] != numrows:
                raise ValueError("Arrays `mean` and `rowcov` must have the "
                                 "same number of rows.")
            if meanshape[1] != numcols:
                raise ValueError("Arrays `mean` and `colcov` must have the "
                                 "same number of columns.")
        else:
            mean = np.zeros((numrows, numcols))

        dims = (numrows, numcols)
        return dims, mean, rowcov, colcov

    def _process_quantiles(self, X, dims):
        """
        Adjust quantiles array so that last two axes labels the components of
        each data point.
        """
        X = np.asarray(X, dtype=float)
        if X.ndim == 2:
            # A single matrix: prepend a "sample" axis.
            X = X[np.newaxis, :]
        if X.shape[-2:] != dims:
            raise ValueError("The shape of array `X` is not compatible "
                             "with the distribution parameters.")
        return X

    def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
                col_prec_rt, log_det_colcov):
        """
        Parameters
        ----------
        dims : tuple
            Dimensions of the matrix variates
        X : ndarray
            Points at which to evaluate the log of the probability
            density function
        mean : ndarray
            Mean of the distribution
        row_prec_rt : ndarray
            A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
            is the inverse of the among-row covariance matrix
        log_det_rowcov : float
            Logarithm of the determinant of the among-row covariance matrix
        col_prec_rt : ndarray
            A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
            is the inverse of the among-column covariance matrix
        log_det_colcov : float
            Logarithm of the determinant of the among-column covariance matrix

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        numrows, numcols = dims
        # Move the column axis to the front so the tensordot below contracts
        # over columns for all samples at once.
        roll_dev = np.rollaxis(X - mean, axis=-1, start=0)
        scale_dev = np.tensordot(col_prec_rt.T,
                                 np.dot(roll_dev, row_prec_rt), 1)
        # Squared "Mahalanobis" term Tr[U^-1 (X-M) V^-1 (X-M)^T] per sample.
        maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
        return -0.5 * (numrows * numcols * _LOG_2PI + numcols * log_det_rowcov
                       + numrows * log_det_colcov + maha)

    def logpdf(self, X, mean=None, rowcov=1, colcov=1):
        """
        Log of the matrix normal probability density function.

        Parameters
        ----------
        X : array_like
            Quantiles, with the last two axes of `X` denoting the components.
        %(_matnorm_doc_default_callparams)s

        Returns
        -------
        logpdf : ndarray
            Log of the probability density function evaluated at `X`

        Notes
        -----
        %(_matnorm_doc_callparams_note)s
        """
        dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
                                                              colcov)
        X = self._process_quantiles(X, dims)
        rowpsd = _PSD(rowcov, allow_singular=False)
        colpsd = _PSD(colcov, allow_singular=False)
        out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
                           colpsd.log_pdet)
        return _squeeze_output(out)

    def pdf(self, X, mean=None, rowcov=1, colcov=1):
        """
        Matrix normal probability density function.

        Parameters
        ----------
        X : array_like
            Quantiles, with the last two axes of `X` denoting the components.
        %(_matnorm_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at `X`

        Notes
        -----
        %(_matnorm_doc_callparams_note)s
        """
        return np.exp(self.logpdf(X, mean, rowcov, colcov))

    def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
        """
        Draw random samples from a matrix normal distribution.

        Parameters
        ----------
        %(_matnorm_doc_default_callparams)s
        size : integer, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `dims`), where `dims` is the
            dimension of the random matrices.

        Notes
        -----
        %(_matnorm_doc_callparams_note)s
        """
        size = int(size)
        dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
                                                              colcov)
        rowchol = scipy.linalg.cholesky(rowcov, lower=True)
        colchol = scipy.linalg.cholesky(colcov, lower=True)
        random_state = self._get_random_state(random_state)
        # Draw iid standard normals with the column axis leading so the
        # tensordot/rollaxis layout matches _logpdf.
        std_norm = random_state.standard_normal(size=(dims[1], size, dims[0]))
        roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)
        out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis, :, :]
        if size == 1:
            # Return a single matrix rather than a 1-sample stack.
            out = out.reshape(mean.shape)
        return out
matrix_normal = matrix_normal_gen()
class matrix_normal_frozen(multi_rv_frozen):
    # A matrix normal distribution with (mean, rowcov, colcov) fixed at
    # construction time; the heavy lifting is delegated to a private
    # matrix_normal_gen instance.
    def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
        """
        Create a frozen matrix normal distribution.

        Parameters
        ----------
        %(_matnorm_doc_default_callparams)s
        seed : None or int or np.random.RandomState instance, optional
            If int or RandomState, use it for drawing the random variates.
            If None (or np.random), the global np.random state is used.
            Default is None.

        Examples
        --------
        >>> from scipy.stats import matrix_normal
        >>> distn = matrix_normal(mean=np.zeros((3,3)))
        >>> X = distn.rvs(); X
        array([[-0.02976962,  0.93339138, -0.09663178],
               [ 0.67405524,  0.28250467, -0.93308929],
               [-0.31144782,  0.74535536,  1.30412916]])
        >>> distn.pdf(X)
        2.5160642368346784e-05
        >>> distn.logpdf(X)
        -10.590229595124615
        """
        self._dist = matrix_normal_gen(seed)
        # Canonicalize parameters once, up front.
        self.dims, self.mean, self.rowcov, self.colcov = \
            self._dist._process_parameters(mean, rowcov, colcov)
        # Precompute the covariance decompositions reused by every call.
        self.rowpsd = _PSD(self.rowcov, allow_singular=False)
        self.colpsd = _PSD(self.colcov, allow_singular=False)

    def logpdf(self, X):
        X = self._dist._process_quantiles(X, self.dims)
        out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
                                 self.rowpsd.log_pdet, self.colpsd.U,
                                 self.colpsd.log_pdet)
        return _squeeze_output(out)

    def pdf(self, X):
        return np.exp(self.logpdf(X))

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
                              random_state)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
    method = matrix_normal_gen.__dict__[name]
    method_frozen = matrix_normal_frozen.__dict__[name]
    # Frozen methods get the generator's docstring with the parameter
    # placeholders blanked out; the generator keeps the full version.
    method_frozen.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) <= 0:
raise ValueError("Each entry in 'x' must be greater than zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""
Internal helper function to compute the log of the useful quotient
.. math::
B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
    r"""
    A Dirichlet random variable.

    The `alpha` keyword specifies the concentration parameters of the
    distribution.

    .. versionadded:: 0.15.0

    Methods
    -------
    ``pdf(x, alpha)``
        Probability density function.
    ``logpdf(x, alpha)``
        Log of the probability density function.
    ``rvs(alpha, size=1, random_state=None)``
        Draw random samples from a Dirichlet distribution.
    ``mean(alpha)``
        The mean of the Dirichlet distribution
    ``var(alpha)``
        The variance of the Dirichlet distribution
    ``entropy(alpha)``
        Compute the differential entropy of the Dirichlet distribution.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_dirichlet_doc_default_callparams)s
    %(_doc_random_state)s

    Alternatively, the object may be called (as a function) to fix
    concentration parameters, returning a "frozen" Dirichlet
    random variable:

    rv = dirichlet(alpha)
        - Frozen object with the same methods but holding the given
          concentration parameters fixed.

    Notes
    -----
    Each :math:`\alpha` entry must be positive. The distribution has only
    support on the simplex defined by

    .. math::
        \sum_{i=1}^{K} x_i \le 1

    The probability density function for `dirichlet` is

    .. math::
        f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}

    where

    .. math::
        \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
                                             {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}

    and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
    concentration parameters and :math:`K` is the dimension of the space
    where :math:`x` takes values.

    Note that the dirichlet interface is somewhat inconsistent.
    The array returned by the rvs function is transposed
    with respect to the format expected by the pdf and logpdf.
    """

    def __init__(self, seed=None):
        super(dirichlet_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)

    def __call__(self, alpha, seed=None):
        # Return a frozen distribution with `alpha` fixed.
        return dirichlet_frozen(alpha, seed=seed)

    def _logpdf(self, x, alpha):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function
        %(_dirichlet_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        # log f(x) = -log B(alpha) + sum_i (alpha_i - 1) log x_i; the
        # transposes broadcast (alpha - 1) across the component axis when
        # `x` is a 2-D stack of points.
        lnB = _lnB(alpha)
        return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)

    def logpdf(self, x, alpha):
        """
        Log of the Dirichlet probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            Log of the probability density function evaluated at `x`.
        """
        alpha = _dirichlet_check_parameters(alpha)
        x = _dirichlet_check_input(alpha, x)

        out = self._logpdf(x, alpha)
        return _squeeze_output(out)

    def pdf(self, x, alpha):
        """
        The Dirichlet probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            The probability density function evaluated at `x`.
        """
        alpha = _dirichlet_check_parameters(alpha)
        x = _dirichlet_check_input(alpha, x)

        out = np.exp(self._logpdf(x, alpha))
        return _squeeze_output(out)

    def mean(self, alpha):
        """
        Compute the mean of the dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        mu : ndarray or scalar
            Mean of the Dirichlet distribution
        """
        alpha = _dirichlet_check_parameters(alpha)

        # E[x_i] = alpha_i / alpha_0, with alpha_0 the sum of all entries.
        out = alpha / (np.sum(alpha))
        return _squeeze_output(out)

    def var(self, alpha):
        """
        Compute the variance of the dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        v : ndarray
            Variance of the Dirichlet distribution
        """
        alpha = _dirichlet_check_parameters(alpha)

        # Var[x_i] = alpha_i (alpha_0 - alpha_i) / (alpha_0^2 (alpha_0 + 1))
        alpha0 = np.sum(alpha)
        out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
        return out

    def entropy(self, alpha):
        """
        Compute the differential entropy of the dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the Dirichlet distribution
        """
        alpha = _dirichlet_check_parameters(alpha)

        # h = log B(alpha) + (alpha_0 - K) psi(alpha_0)
        #     - sum_i (alpha_i - 1) psi(alpha_i)
        alpha0 = np.sum(alpha)
        lnB = _lnB(alpha)
        K = alpha.shape[0]

        out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
            (alpha - 1) * scipy.special.psi(alpha))
        return _squeeze_output(out)

    def rvs(self, alpha, size=1, random_state=None):
        """
        Draw random samples from a Dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s
        size : int, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `N`), where `N` is the
            dimension of the random variable.
        """
        alpha = _dirichlet_check_parameters(alpha)
        random_state = self._get_random_state(random_state)
        # Delegate the actual sampling to numpy's Dirichlet generator.
        return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
class dirichlet_frozen(multi_rv_frozen):
    # A Dirichlet distribution with the concentration vector `alpha` fixed
    # at construction time; all methods delegate to a private dirichlet_gen.
    def __init__(self, alpha, seed=None):
        # Validate alpha once; every method below reuses the checked vector.
        self.alpha = _dirichlet_check_parameters(alpha)
        self._dist = dirichlet_gen(seed)

    def logpdf(self, x):
        return self._dist.logpdf(x, self.alpha)

    def pdf(self, x):
        return self._dist.pdf(x, self.alpha)

    def mean(self):
        return self._dist.mean(self.alpha)

    def var(self):
        return self._dist.var(self.alpha)

    def entropy(self):
        return self._dist.entropy(self.alpha)

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# dirichlet_gen and fill in default strings in class docstrings
# (NOTE: the original comment mistakenly said multivariate_normal_gen.)
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
    method = dirichlet_gen.__dict__[name]
    method_frozen = dirichlet_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, dirichlet_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""
A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix).
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(wishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis,np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df < dim:
raise ValueError("Degrees of freedom cannot be less than dimension"
" of scale matrix, but df = %d" % df)
return dim, df, scale
    def _process_quantiles(self, x, dim):
        """
        Adjust quantiles array so that last axis labels the components of
        each data point.

        The canonical layout is a 3-D array of shape ``(dim, dim, n)``:
        square matrices stacked along the last axis.
        """
        x = np.asarray(x, dtype=float)

        if x.ndim == 0:
            # Scalar: one matrix, the scalar times the identity.  Note the
            # result is 3-D, so the `x.ndim == 1` branch below (deliberately
            # a plain `if`, not `elif`) does not fire afterwards.
            x = x * np.eye(dim)[:, :, np.newaxis]
        if x.ndim == 1:
            if dim == 1:
                # A vector of scalar quantiles for a 1x1 Wishart.
                x = x[np.newaxis, np.newaxis, :]
            else:
                # Interpret the vector as the diagonal of a single matrix.
                x = np.diag(x)[:, :, np.newaxis]
        elif x.ndim == 2:
            if not x.shape[0] == x.shape[1]:
                raise ValueError("Quantiles must be square if they are two"
                                 " dimensional, but x.shape = %s."
                                 % str(x.shape))
            # A single matrix: append a "sample" axis.
            x = x[:, :, np.newaxis]
        elif x.ndim == 3:
            if not x.shape[0] == x.shape[1]:
                raise ValueError("Quantiles must be square in the first two"
                                 " dimensions if they are three dimensional"
                                 ", but x.shape = %s." % str(x.shape))
        elif x.ndim > 3:
            raise ValueError("Quantiles must be at most two-dimensional with"
                             " an additional dimension for multiple"
                             "components, but x.ndim = %d" % x.ndim)

        # Now we have 3-dim array; should have shape [dim, dim, *]
        if not x.shape[0:2] == (dim, dim):
            raise ValueError('Quantiles have incompatible dimensions: should'
                             ' be %s, got %s.' % ((dim, dim), x.shape[0:2]))

        return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
' Got size.ndim = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
    def _logpdf(self, x, dim, df, scale, log_det_scale, C):
        """
        Evaluate the log of the Wishart PDF at a batch of matrix quantiles.

        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function; shape (dim, dim, n) with components along the
            last axis, as produced by `_process_quantiles`.
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        scale : ndarray
            Scale matrix
        log_det_scale : float
            Logarithm of the determinant of the scale matrix
        C : ndarray
            Cholesky factorization of the scale matrix, lower triagular.

        Returns
        -------
        out : ndarray
            Log-density at each of the n quantiles.

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        # log determinant of x
        # Note: x has components along the last axis, so that x.T has
        # components alone the 0-th axis. Then since det(A) = det(A'), this
        # gives us a 1-dim vector of determinants
        # Retrieve tr(scale^{-1} x)
        log_det_x = np.zeros(x.shape[-1])
        scale_inv_x = np.zeros(x.shape)
        tr_scale_inv_x = np.zeros(x.shape[-1])
        for i in range(x.shape[-1]):
            # log|x_i| via its own Cholesky factor; scale^{-1} x_i reuses
            # the precomputed factor C of `scale`.
            _, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
            scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
            tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
        # Log PDF: standard Wishart log-density with log|scale| and the
        # multivariate gamma term shared across all n quantiles.
        out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
               (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
                multigammaln(0.5*df, dim)))
        return out
def logpdf(self, x, df, scale):
"""
Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""
Mean of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""
Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""
Variance of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
    def _standard_rvs(self, n, shape, dim, df, random_state):
        """
        Draw lower-triangular Bartlett factors A such that A A' is a
        standard Wishart W(df, I) variate.

        Parameters
        ----------
        n : integer
            Number of variates to generate
        shape : iterable
            Shape of the variates to generate
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        random_state : np.random.RandomState instance
            RandomState used for drawing the random variates.

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'rvs' instead.
        """
        # Random normal variates for off-diagonal elements
        n_tril = dim * (dim-1) // 2
        covariances = random_state.normal(
            size=n*n_tril).reshape(shape+(n_tril,))
        # Random chi-square variates for diagonal elements
        # (the i-th diagonal entry is the sqrt of a chi2(df - i) draw;
        # the reshape/.T shuffles the dim axis to the end).
        variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
                           for i in range(dim)]].reshape((dim,) + shape[::-1]).T
        # Create the A matri(ces) - lower triangular
        A = np.zeros(shape + (dim, dim))
        # Input the covariances
        size_idx = tuple([slice(None,None,None)]*len(shape))
        tril_idx = np.tril_indices(dim, k=-1)
        A[size_idx + tril_idx] = covariances
        # Input the variances
        diag_idx = np.diag_indices(dim)
        A[size_idx + diag_idx] = variances
        return A
    def _rvs(self, n, shape, dim, df, C, random_state):
        """
        Draw Wishart(df, scale) variates, given the lower Cholesky factor
        of the scale matrix.

        Parameters
        ----------
        n : integer
            Number of variates to generate
        shape : iterable
            Shape of the variates to generate
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
        %(_doc_random_state)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'rvs' instead.
        """
        random_state = self._get_random_state(random_state)
        # Calculate the matrices A, which are actually lower triangular
        # Cholesky factorizations of a matrix B such that B ~ W(df, I)
        A = self._standard_rvs(n, shape, dim, df, random_state)
        # Calculate SA = C A A' C', where SA ~ W(df, scale)
        # Note: this is the product of a (lower) (lower) (lower)' (lower)'
        #       or, denoting B = AA', it is C B C' where C is the lower
        #       triangular Cholesky factorization of the scale matrix.
        #       this appears to conflict with the instructions in [1]_, which
        #       suggest that it should be D' B D where D is the lower
        #       triangular factorization of the scale matrix. However, it is
        #       meant to refer to the Bartlett (1933) representation of a
        #       Wishart random variate as L A A' L' where L is lower triangular
        #       so it appears that understanding D' to be upper triangular
        #       is either a typo in or misreading of [1]_.
        # The transform is applied in place: each slice of A is replaced by
        # (C A)(C A)'.
        for index in np.ndindex(shape):
            CA = np.dot(C, A[index])
            A[index] = np.dot(CA, CA.T)
        return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random variates of shape (`size`) + (`dim`, `dim), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
    def _entropy(self, dim, df, log_det_scale):
        """
        Compute the differential entropy of the Wishart distribution.

        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        log_det_scale : float
            Logarithm of the determinant of the scale matrix

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'entropy' instead.
        """
        # Closed-form Wishart entropy; the psi-sum is the expectation of
        # log|W| minus dim*log(2) + log|scale|.
        return (
            0.5 * (dim+1) * log_det_scale +
            0.5 * dim * (dim+1) * _LOG_2 +
            multigammaln(0.5*df, dim) -
            0.5 * (df - dim - 1) * np.sum(
                [psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
            ) +
            0.5 * df * dim
        )
def entropy(self, df, scale):
"""
Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""
Compute Cholesky decomposition and determine (log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
# Public singleton instance of the Wishart generator.
wishart = wishart_gen()
class wishart_frozen(multi_rv_frozen):
    """
    Create a frozen Wishart distribution.

    Parameters
    ----------
    df : array_like
        Degrees of freedom of the distribution
    scale : array_like
        Scale matrix of the distribution
    seed : None or int or np.random.RandomState instance, optional
        This parameter defines the RandomState object to use for drawing
        random variates.
        If None (or np.random), the global np.random state is used.
        If integer, it is used to seed the local RandomState instance
        Default is None.
    """
    def __init__(self, df, scale, seed=None):
        # Validate parameters once and cache the Cholesky factor and
        # log-determinant so repeated pdf/rvs calls can reuse them.
        self._dist = wishart_gen(seed)
        self.dim, self.df, self.scale = self._dist._process_parameters(
            df, scale)
        self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)

    def logpdf(self, x):
        quantiles = self._dist._process_quantiles(x, self.dim)
        return _squeeze_output(
            self._dist._logpdf(quantiles, self.dim, self.df, self.scale,
                               self.log_det_scale, self.C))

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def mean(self):
        return _squeeze_output(
            self._dist._mean(self.dim, self.df, self.scale))

    def mode(self):
        out = self._dist._mode(self.dim, self.df, self.scale)
        if out is None:
            return None
        return _squeeze_output(out)

    def var(self):
        return _squeeze_output(
            self._dist._var(self.dim, self.df, self.scale))

    def rvs(self, size=1, random_state=None):
        n, shape = self._dist._process_size(size)
        return _squeeze_output(
            self._dist._rvs(n, shape, self.dim, self.df,
                            self.C, random_state))

    def entropy(self):
        return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
# (frozen methods get the no-params variant, since df/scale are fixed
# at construction time).
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
    method = wishart_gen.__dict__[name]
    method_frozen = wishart_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, wishart_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
class invwishart_gen(wishart_gen):
    r"""
    An inverse Wishart random variable.

    The `df` keyword specifies the degrees of freedom. The `scale` keyword
    specifies the scale matrix, which must be symmetric and positive definite.
    In this context, the scale matrix is often interpreted in terms of a
    multivariate normal covariance matrix.

    Methods
    -------
    ``pdf(x, df, scale)``
        Probability density function.
    ``logpdf(x, df, scale)``
        Log of the probability density function.
    ``rvs(df, scale, size=1, random_state=None)``
        Draw random samples from an inverse Wishart distribution.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_doc_default_callparams)s
    %(_doc_random_state)s

    Alternatively, the object may be called (as a function) to fix the degrees
    of freedom and scale parameters, returning a "frozen" inverse Wishart
    random variable:

    rv = invwishart(df=1, scale=1)
        - Frozen object with the same methods but holding the given
          degrees of freedom and scale fixed.

    See Also
    --------
    wishart

    Notes
    -----
    %(_doc_callparams_note)s

    The scale matrix `scale` must be a symmetric positive definite
    matrix. Singular matrices, including the symmetric positive semi-definite
    case, are not supported.

    The inverse Wishart distribution is often denoted

    .. math::

        W_p^{-1}(\nu, \Psi)

    where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
    :math:`p \times p` scale matrix.

    The probability density function for `invwishart` has support over positive
    definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
    then its PDF is given by:

    .. math::

        f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
               |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
               \exp\left( -tr(\Sigma S^{-1}) / 2 \right)

    If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
    :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).

    If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1(\nu, 1)` collapses to the
    inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
    and scale = :math:`\frac{1}{2}`.

    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
           Wiley, 1983.
    .. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
           Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import invwishart, invgamma
    >>> x = np.linspace(0.01, 1, 100)
    >>> iw = invwishart.pdf(x, df=6, scale=1)
    >>> iw[:3]
    array([  1.20546865e-15,   5.42497807e-06,   4.45813929e-03])
    >>> ig = invgamma.pdf(x, 6/2., scale=1./2)
    >>> ig[:3]
    array([  1.20546865e-15,   5.42497807e-06,   4.45813929e-03])
    >>> plt.plot(x, iw)

    The input quantiles can be any shape of array, as long as the last
    axis labels the components.
    """
    def __init__(self, seed=None):
        super(invwishart_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
    def __call__(self, df=None, scale=None, seed=None):
        """
        Create a frozen inverse Wishart distribution.

        See `invwishart_frozen` for more information.
        """
        return invwishart_frozen(df, scale, seed)
    def _logpdf(self, x, dim, df, scale, log_det_scale):
        """
        Evaluate the log of the inverse Wishart PDF at a batch of matrix
        quantiles.

        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function; shape (dim, dim, n) with components along
            the last axis.
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        scale : ndarray
            Scale matrix
        log_det_scale : float
            Logarithm of the determinant of the scale matrix

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        log_det_x = np.zeros(x.shape[-1])
        #scale_x_inv = np.zeros(x.shape)
        # x has components on the last axis; the transpose of the copy puts
        # each (dim, dim) quantile in the last two axes, the layout
        # _cho_inv_batch expects. The inversion overwrites x_inv in place.
        x_inv = np.copy(x).T
        if dim > 1:
            _cho_inv_batch(x_inv)  # works in-place
        else:
            x_inv = 1./x_inv
        tr_scale_x_inv = np.zeros(x.shape[-1])
        for i in range(x.shape[-1]):
            # log|x_i| from the Cholesky factor of each quantile.
            C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)
            log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
            #scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T
            tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
        # Log PDF
        out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
               (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
               multigammaln(0.5*df, dim))
        return out
    def logpdf(self, x, df, scale):
        """
        Log of the inverse Wishart probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            Log of the probability density function evaluated at `x`

        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, df, scale = self._process_parameters(df, scale)
        x = self._process_quantiles(x, dim)
        _, log_det_scale = self._cholesky_logdet(scale)
        out = self._logpdf(x, dim, df, scale, log_det_scale)
        return _squeeze_output(out)
    def pdf(self, x, df, scale):
        """
        Inverse Wishart probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at `x`

        Notes
        -----
        %(_doc_callparams_note)s
        """
        return np.exp(self.logpdf(x, df, scale))
    def _mean(self, dim, df, scale):
        """
        Compute the inverse Wishart mean without argument checking.

        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mean' instead.
        """
        # The mean exists only for df > dim + 1; None signals "undefined".
        if df > dim + 1:
            out = scale / (df - dim - 1)
        else:
            out = None
        return out
    def mean(self, df, scale):
        """
        Mean of the inverse Wishart distribution

        Only valid if the degrees of freedom are greater than the dimension of
        the scale matrix plus one.

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        mean : float or None
            The mean of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mean(dim, df, scale)
        return _squeeze_output(out) if out is not None else out
    def _mode(self, dim, df, scale):
        """
        Compute the inverse Wishart mode without argument checking.

        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mode' instead.
        """
        return scale / (df + dim + 1)
    def mode(self, df, scale):
        """
        Mode of the inverse Wishart distribution

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        mode : float
            The Mode of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mode(dim, df, scale)
        return _squeeze_output(out)
    def _var(self, dim, df, scale):
        """
        Compute the elementwise inverse Wishart variance without argument
        checking.

        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'var' instead.
        """
        # The variance exists only for df > dim + 3; None signals
        # "undefined".
        if df > dim + 3:
            var = (df - dim + 1) * scale**2
            diag = scale.diagonal()  # 1 x dim array
            var += (df - dim - 1) * np.outer(diag, diag)
            var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
        else:
            var = None
        return var
    def var(self, df, scale):
        """
        Variance of the inverse Wishart distribution

        Only valid if the degrees of freedom are greater than the dimension of
        the scale matrix plus three.

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        var : float
            The variance of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._var(dim, df, scale)
        return _squeeze_output(out) if out is not None else out
    def _rvs(self, n, shape, dim, df, C, random_state):
        """
        Draw inverse Wishart variates, given the lower Cholesky factor of
        the *inverted* scale matrix.

        Parameters
        ----------
        n : integer
            Number of variates to generate
        shape : iterable
            Shape of the variates to generate
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        C : ndarray
            Cholesky factorization of the inverse of the scale matrix,
            lower triagular.
        %(_doc_random_state)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'rvs' instead.
        """
        random_state = self._get_random_state(random_state)
        # Get random draws A such that A ~ W(df, I)
        A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
                                                      df, random_state)
        # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
        eye = np.eye(dim)
        trtrs = get_lapack_funcs(('trtrs'), (A,))
        for index in np.ndindex(A.shape[:-2]):
            # Calculate CA
            CA = np.dot(C, A[index])
            # Get (C A)^{-1} via triangular solver
            # (CA is lower triangular, so trtrs against the identity
            # yields its inverse; the 1x1 case is a scalar reciprocal).
            if dim > 1:
                CA, info = trtrs(CA, eye, lower=True)
                if info > 0:
                    raise LinAlgError("Singular matrix.")
                if info < 0:
                    raise ValueError('Illegal value in %d-th argument of'
                                     ' internal trtrs' % -info)
            else:
                CA = 1. / CA
            # Get SA
            A[index] = np.dot(CA.T, CA)
        return A
    def rvs(self, df, scale, size=1, random_state=None):
        """
        Draw random samples from an inverse Wishart distribution.

        Parameters
        ----------
        %(_doc_default_callparams)s
        size : integer or iterable of integers, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s

        Returns
        -------
        rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim), where `dim` is
            the dimension of the scale matrix.

        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, shape = self._process_size(size)
        dim, df, scale = self._process_parameters(df, scale)
        # Invert the scale
        # (_rvs expects the Cholesky factor of scale^{-1}, since the
        # inverse Wishart draws are built from inverted Wishart factors).
        eye = np.eye(dim)
        L, lower = scipy.linalg.cho_factor(scale, lower=True)
        inv_scale = scipy.linalg.cho_solve((L, lower), eye)
        # Cholesky decomposition of inverted scale
        C = scipy.linalg.cholesky(inv_scale, lower=True)
        out = self._rvs(n, shape, dim, df, C, random_state)
        return _squeeze_output(out)
    def entropy(self):
        # Need to find reference for inverse Wishart entropy
        raise AttributeError
# Public singleton instance of the inverse Wishart generator.
invwishart = invwishart_gen()
class invwishart_frozen(multi_rv_frozen):
    def __init__(self, df, scale, seed=None):
        """
        Create a frozen inverse Wishart distribution.

        Parameters
        ----------
        df : array_like
            Degrees of freedom of the distribution
        scale : array_like
            Scale matrix of the distribution
        seed : None or int or np.random.RandomState instance, optional
            This parameter defines the RandomState object to use for drawing
            random variates.
            If None (or np.random), the global np.random state is used.
            If integer, it is used to seed the local RandomState instance
            Default is None.
        """
        self._dist = invwishart_gen(seed)
        self.dim, self.df, self.scale = self._dist._process_parameters(
            df, scale
        )

        # Cache log|scale|, scale^{-1}, and the Cholesky factor of
        # scale^{-1}; logpdf and rvs reuse these on every call.
        factor, lower = scipy.linalg.cho_factor(self.scale, lower=True)
        self.log_det_scale = 2 * np.sum(np.log(factor.diagonal()))

        identity = np.eye(self.dim)
        self.inv_scale = scipy.linalg.cho_solve((factor, lower), identity)

        self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)

    def logpdf(self, x):
        quantiles = self._dist._process_quantiles(x, self.dim)
        return _squeeze_output(
            self._dist._logpdf(quantiles, self.dim, self.df, self.scale,
                               self.log_det_scale))

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def mean(self):
        out = self._dist._mean(self.dim, self.df, self.scale)
        if out is None:
            return None
        return _squeeze_output(out)

    def mode(self):
        return _squeeze_output(
            self._dist._mode(self.dim, self.df, self.scale))

    def var(self):
        out = self._dist._var(self.dim, self.df, self.scale)
        if out is None:
            return None
        return _squeeze_output(out)

    def rvs(self, size=1, random_state=None):
        n, shape = self._dist._process_size(size)
        return _squeeze_output(
            self._dist._rvs(n, shape, self.dim, self.df,
                            self.C, random_state))

    def entropy(self):
        # Need to find reference for inverse Wishart entropy
        raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
    method = invwishart_gen.__dict__[name]
    # BUG FIX: frozen docstrings must be attached to invwishart_frozen,
    # not wishart_frozen. The old code overwrote the already-formatted
    # wishart_frozen docstrings with inverse-Wishart text and left
    # invwishart_frozen methods undocumented.
    method_frozen = invwishart_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, wishart_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
class special_ortho_group_gen(multi_rv_generic):
    r"""
    A matrix-valued SO(N) random variable.

    Return a random rotation matrix, drawn from the Haar distribution
    (the only uniform distribution on SO(n)).

    The `dim` keyword specifies the dimension N.

    Methods
    -------
    ``rvs(dim=None, size=1, random_state=None)``
        Draw random samples from SO(N).

    Parameters
    ----------
    dim : scalar
        Dimension of matrices

    Notes
    ----------
    This class is wrapping the random_rot code from the MDP Toolkit,
    https://github.com/mdp-toolkit/mdp-toolkit

    Return a random rotation matrix, drawn from the Haar distribution
    (the only uniform distribution on SO(n)).
    The algorithm is described in the paper
    Stewart, G.W., "The efficient generation of random orthogonal
    matrices with an application to condition estimators", SIAM Journal
    on Numerical Analysis, 17(3), pp. 403-409, 1980.
    For more information see
    http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization

    See also the similar `ortho_group`.

    Examples
    --------
    >>> from scipy.stats import special_ortho_group
    >>> x = special_ortho_group.rvs(3)

    >>> np.dot(x, x.T)
    array([[  1.00000000e+00,   1.13231364e-17,  -2.86852790e-16],
           [  1.13231364e-17,   1.00000000e+00,  -1.46845020e-16],
           [ -2.86852790e-16,  -1.46845020e-16,   1.00000000e+00]])

    >>> import scipy.linalg
    >>> scipy.linalg.det(x)
    1.0

    This generates one random matrix from SO(3). It is orthogonal and
    has a determinant of 1.
    """
    def __init__(self, seed=None):
        super(special_ortho_group_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__)
    def __call__(self, dim=None, seed=None):
        """
        Create a frozen SO(N) distribution.

        See `special_ortho_group_frozen` for more information.
        """
        return special_ortho_group_frozen(dim, seed=seed)
    def _process_parameters(self, dim):
        """
        Dimension N must be specified; it cannot be inferred.
        """
        if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
            raise ValueError("""Dimension of rotation must be specified,
                                and must be a scalar greater than 1.""")
        return dim
    def rvs(self, dim, size=1, random_state=None):
        """
        Draw random samples from SO(N).

        Parameters
        ----------
        dim : integer
            Dimension of rotation space (N).
        size : integer, optional
            Number of samples to draw (default 1).

        Returns
        -------
        rvs : ndarray or scalar
            Random size N-dimensional matrices, dimension (size, dim, dim)
        """
        size = int(size)
        if size > 1:
            # Stack `size` independent draws along a new leading axis.
            return np.array([self.rvs(dim, size=1, random_state=random_state)
                             for i in range(size)])
        dim = self._process_parameters(dim)
        random_state = self._get_random_state(random_state)
        # Accumulate Householder reflections into H (Stewart's algorithm);
        # D records the sign extracted at each step.
        H = np.eye(dim)
        D = np.ones((dim,))
        for n in range(1, dim):
            x = random_state.normal(size=(dim-n+1,))
            D[n-1] = np.sign(x[0])
            x[0] -= D[n-1]*np.sqrt((x*x).sum())
            # Householder transformation
            Hx = (np.eye(dim-n+1)
                  - 2.*np.outer(x, x)/(x*x).sum())
            mat = np.eye(dim)
            mat[n-1:, n-1:] = Hx
            H = np.dot(H, mat)
        # Fix the last sign such that the determinant is 1
        D[-1] = (-1)**(1-(dim % 2))*D.prod()
        # Equivalent to np.dot(np.diag(D), H) but faster, apparently
        H = (D*H.T).T
        return H
# Public singleton instance of the SO(N) generator.
special_ortho_group = special_ortho_group_gen()
class special_ortho_group_frozen(multi_rv_frozen):
    def __init__(self, dim=None, seed=None):
        """
        Create a frozen SO(N) distribution.

        Parameters
        ----------
        dim : scalar
            Dimension of matrices
        seed : None or int or np.random.RandomState instance, optional
            This parameter defines the RandomState object to use for drawing
            random variates.
            If None (or np.random), the global np.random state is used.
            If integer, it is used to seed the local RandomState instance
            Default is None.

        Examples
        --------
        >>> from scipy.stats import special_ortho_group
        >>> g = special_ortho_group(5)
        >>> x = g.rvs()
        """
        # Validate the dimension once so every rvs() call can skip it.
        dist = special_ortho_group_gen(seed)
        self._dist = dist
        self.dim = dist._process_parameters(dim)

    def rvs(self, size=1, random_state=None):
        """Draw random samples from SO(N) with the frozen dimension."""
        return self._dist.rvs(self.dim, size, random_state)
class ortho_group_gen(multi_rv_generic):
    r"""
    A matrix-valued O(N) random variable.

    Return a random orthogonal matrix, drawn from the O(N) Haar
    distribution (the only uniform distribution on O(N)).

    The `dim` keyword specifies the dimension N.

    Methods
    -------
    ``rvs(dim=None, size=1, random_state=None)``
        Draw random samples from O(N).

    Parameters
    ----------
    dim : scalar
        Dimension of matrices

    Notes
    ----------
    This class is closely related to `special_ortho_group`.

    Some care is taken to avoid numerical error, as per the paper by Mezzadri.

    References
    ----------
    .. [1] F. Mezzadri, "How to generate random matrices from the classical
           compact groups", arXiv:math-ph/0609050v2.

    Examples
    --------
    >>> from scipy.stats import ortho_group
    >>> x = ortho_group.rvs(3)

    >>> np.dot(x, x.T)
    array([[  1.00000000e+00,   1.13231364e-17,  -2.86852790e-16],
           [  1.13231364e-17,   1.00000000e+00,  -1.46845020e-16],
           [ -2.86852790e-16,  -1.46845020e-16,   1.00000000e+00]])

    >>> import scipy.linalg
    >>> np.fabs(scipy.linalg.det(x))
    1.0

    This generates one random matrix from O(3). It is orthogonal and
    has a determinant of +1 or -1.
    """
    def __init__(self, seed=None):
        super(ortho_group_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__)
    def _process_parameters(self, dim):
        """
        Dimension N must be specified; it cannot be inferred.
        """
        if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
            raise ValueError("Dimension of rotation must be specified,"
                             "and must be a scalar greater than 1.")
        return dim
    def rvs(self, dim, size=1, random_state=None):
        """
        Draw random samples from O(N).

        Parameters
        ----------
        dim : integer
            Dimension of rotation space (N).
        size : integer, optional
            Number of samples to draw (default 1).

        Returns
        -------
        rvs : ndarray or scalar
            Random size N-dimensional matrices, dimension (size, dim, dim)
        """
        size = int(size)
        if size > 1:
            # Stack `size` independent draws along a new leading axis.
            return np.array([self.rvs(dim, size=1, random_state=random_state)
                             for i in range(size)])
        dim = self._process_parameters(dim)
        random_state = self._get_random_state(random_state)
        # Accumulate Householder reflections into H (per Mezzadri [1]_).
        H = np.eye(dim)
        for n in range(1, dim):
            x = random_state.normal(size=(dim-n+1,))
            # random sign, 50/50, but chosen carefully to avoid roundoff error
            D = np.sign(x[0])
            x[0] += D*np.sqrt((x*x).sum())
            # Householder transformation
            Hx = -D*(np.eye(dim-n+1)
                     - 2.*np.outer(x, x)/(x*x).sum())
            mat = np.eye(dim)
            mat[n-1:, n-1:] = Hx
            H = np.dot(H, mat)
        return H
# Public singleton instance of the O(N) generator.
ortho_group = ortho_group_gen()
class random_correlation_gen(multi_rv_generic):
    r"""
    A random correlation matrix.
    Return a random correlation matrix, given a vector of eigenvalues.
    The `eigs` keyword specifies the eigenvalues of the correlation matrix,
    and implies the dimension.
    Methods
    -------
    ``rvs(eigs=None, random_state=None)``
        Draw random correlation matrices, all with eigenvalues eigs.
    Parameters
    ----------
    eigs : 1d ndarray
        Eigenvalues of correlation matrix.
    Notes
    ----------
    Generates a random correlation matrix following a numerically stable
    algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)
    similarity transformation to construct a symmetric positive semi-definite
    matrix, and applies a series of Givens rotations to scale it to have ones
    on the diagonal.
    References
    ----------
    .. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation
           of correlation matrices and their factors", BIT 2000, Vol. 40,
           No. 4, pp. 640 651
    Examples
    --------
    >>> from scipy.stats import random_correlation
    >>> np.random.seed(514)
    >>> x = random_correlation.rvs((.5, .8, 1.2, 1.5))
    >>> x
    array([[ 1. , -0.20387311, 0.18366501, -0.04953711],
    [-0.20387311, 1. , -0.24351129, 0.06703474],
    [ 0.18366501, -0.24351129, 1. , 0.38530195],
    [-0.04953711, 0.06703474, 0.38530195, 1. ]])
    >>> import scipy.linalg
    >>> e, v = scipy.linalg.eigh(x)
    >>> e
    array([ 0.5, 0.8, 1.2, 1.5])
    """
    def __init__(self, seed=None):
        super(random_correlation_gen, self).__init__(seed)
        # Re-render the class docstring through the shared doccer templates.
        self.__doc__ = doccer.docformat(self.__doc__)
    def _process_parameters(self, eigs, tol):
        """Validate `eigs`: 1-d, length > 1, sum equal to the dimension
        (the trace of a correlation matrix), and all entries >= -tol."""
        eigs = np.asarray(eigs, dtype=float)
        dim = eigs.size
        if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:
            raise ValueError("Array 'eigs' must be a vector of length greater than 1.")
        if np.fabs(np.sum(eigs) - dim) > tol:
            raise ValueError("Sum of eigenvalues must equal dimensionality.")
        for x in eigs:
            if x < -tol:
                raise ValueError("All eigenvalues must be non-negative.")
        return dim, eigs
    def _givens_to_1(self, aii, ajj, aij):
        """Computes a 2x2 Givens matrix to put 1's on the diagonal for the input matrix.
        The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].
        The output matrix g is a 2x2 anti-symmetric matrix of the form [ c s ; -s c ];
        the elements c and s are returned.
        Applying the output matrix to the input matrix (as b=g.T M g)
        results in a matrix with bii=1, provided tr(M) - det(M) >= 1
        and floating point issues do not occur. Otherwise, some other
        valid rotation is returned. When tr(M)==2, also bjj=1.
        """
        aiid = aii - 1.
        ajjd = ajj - 1.
        if ajjd == 0:
            # ajj==1, so swap aii and ajj to avoid division by zero
            return 0., 1.
        dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))
        # The choice of t should be chosen to avoid cancellation [1]
        t = (aij + math.copysign(dd, aij)) / ajjd
        c = 1. / math.sqrt(1. + t*t)
        if c == 0:
            # Underflow
            s = 1.0
        else:
            s = c*t
        return c, s
    def _to_corr(self, m):
        """
        Given a psd matrix m, rotate to put one's on the diagonal, turning it
        into a correlation matrix. This also requires the trace equal the
        dimensionality. Note: modifies input matrix
        """
        # Check requirements for in-place Givens
        if not (m.flags.c_contiguous and m.dtype == np.float64 and m.shape[0] == m.shape[1]):
            raise ValueError()
        d = m.shape[0]
        for i in range(d-1):
            if m[i,i] == 1:
                continue
            elif m[i, i] > 1:
                # Pair an over-unit diagonal entry with a later under-unit one
                # so the rotation can move both toward 1.
                for j in range(i+1, d):
                    if m[j, j] < 1:
                        break
            else:
                for j in range(i+1, d):
                    if m[j, j] > 1:
                        break
            c, s = self._givens_to_1(m[i,i], m[j,j], m[i,j])
            # Use BLAS to apply Givens rotations in-place. Equivalent to:
            # g = np.eye(d)
            # g[i, i] = g[j,j] = c
            # g[j, i] = -s; g[i, j] = s
            # m = np.dot(g.T, np.dot(m, g))
            mv = m.ravel()
            # First drot rotates rows i and j; the second rotates columns i
            # and j (stride d walks down a column of the flattened matrix).
            drot(mv, mv, c, -s, n=d,
                 offx=i*d, incx=1, offy=j*d, incy=1,
                 overwrite_x=True, overwrite_y=True)
            drot(mv, mv, c, -s, n=d,
                 offx=i, incx=d, offy=j, incy=d,
                 overwrite_x=True, overwrite_y=True)
        return m
    def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
        """
        Draw random correlation matrices
        Parameters
        ----------
        eigs : 1d ndarray
            Eigenvalues of correlation matrix
        tol : float, optional
            Tolerance for input parameter checks
        diag_tol : float, optional
            Tolerance for deviation of the diagonal of the resulting
            matrix. Default: 1e-7
        Raises
        ------
        RuntimeError
            Floating point error prevented generating a valid correlation
            matrix.
        Returns
        -------
        rvs : ndarray or scalar
            Random size N-dimensional matrices, dimension (size, dim, dim),
            each having eigenvalues eigs.
        """
        dim, eigs = self._process_parameters(eigs, tol=tol)
        random_state = self._get_random_state(random_state)
        m = ortho_group.rvs(dim, random_state=random_state)
        m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m
        m = self._to_corr(m) # Carefully rotate to unit diagonal
        # Check diagonal
        if abs(m.diagonal() - 1).max() > diag_tol:
            raise RuntimeError("Failed to generate a valid correlation matrix")
        return m
# Module-level singleton: callers use random_correlation.rvs(eigs, ...) directly.
random_correlation = random_correlation_gen()
| mit |
lotrus28/TaboCom | linear_model/model_pick/random_forest/parallel_script.py | 1 | 3560 | import itertools
import os.path
import sys
import subprocess
import time
import fileinput
import numpy as np
import pandas as pd
# Enter 1 parameter: otu table with reads
path = sys.argv[1]
# Basename of the input table without its extension (condition label).
cond = path.split('/')[-1].split('.')[0]
def teach_predictor(path, params, same, job, wait):
    """Submit one teach_models.sh run to the grid engine via qsub.

    path   -- OTU table passed through to the shell script
    params -- numeric parameters, space-joined onto the command line
    same   -- flag triple, space-joined onto the command line
    job    -- qsub job name (-N)
    wait   -- job name to hold on (-hold_jid), or None for no dependency
    """
    time.sleep(1)
    param_str = ' '.join(str(v) for v in params)
    same_str = ' '.join(str(v) for v in same)
    base = 'echo "bash ./teach_models.sh ' + path + ' ' + param_str + ' ' + \
           same_str + '" | qsub -N ' + job
    if wait is not None:
        command = base + ' -hold_jid ' + wait + ' -cwd'
    else:
        command = base + ' -cwd'
    # Fire-and-forget: the scheduler owns the job from here on.
    subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return ()
# Create a df with all possible combinations of parameters
columns = ['Taxon_trim', 'SparCC_pval', 'SparCC_cor', 'Tax_adjacency',
           'Pair_fstat', 'RMSE_sign', 'Specificity', 'Sensitivity']
params = pd.DataFrame(columns=columns)
# Parameter grid swept by the pipeline below (single values here, so the
# "grid" currently degenerates to one combination per stage).
values = {}
values['Taxon_trim'] =[0.5]
values['SparCC_cor'] = [0.2]
values['SparCC_pval'] = [0.05]
# Erase brackets from tax_code, because later model calling can't work with them
# sed might not work for some reason
command = "sed -ie 's/\[/BRA/g;s/\]/KET/g;s/-/SLASH/g' %s" % path
subprocess.Popen(command, shell=True)
time.sleep(5)
command = "rm %se" % path
subprocess.Popen(command, shell=True)
# Belt-and-braces: redo the same substitution in pure Python in case sed failed.
with fileinput.FileInput(path, inplace=True, backup='.bak') as file:
    for line in file:
        print(line.replace(']', 'KET').replace('[','BRA').replace('-','SLASH'), end='')
# First, create SparCC-outputs
job = "Tax_trim"
for i in values['Taxon_trim']:
    # teach_predictor(path, [i, min(values['SparCC_pval']), 100], [0,1,1], job, None)
    teach_predictor(path, [i, 0.001, 100], [0, 1, 1], job, None)
wait = job
time.sleep(10)
# Then one level lower
job = "Spar_pval"
Tt_Sp = [x for x in itertools.product(values['Taxon_trim'],
                                      values['SparCC_pval'])]
# Make it so first all SparCC with lowest P-value are calculated
# Otherwise
Tt_Sp = sorted(Tt_Sp, key=lambda x: x[1])
for i in Tt_Sp:
    teach_predictor(path, [i[0], i[1], 0.2], [1,0,1], job, wait)
    time.sleep(5)
wait = job
# NOTE(review): fixed 1-hour sleep instead of polling qsub job state --
# jobs slower than this will race with the next stage; confirm intent.
time.sleep(3600)
# And we go all the way down
job = "Filter_sig"
Tt_Sp_Sc_Ta = [x for x in itertools.product(values['Taxon_trim'],
                                            values['SparCC_pval'],
                                            values['SparCC_cor'])]
for i in Tt_Sp_Sc_Ta:
    teach_predictor(path, [i[0], i[1], i[2]], [1,1,0], job, wait)
    time.sleep(3)
wait = job
time.sleep(2000)
wait = job
# Clean-up jobs are themselves submitted through qsub so they queue after
# the analysis jobs they depend on.
for f in ["Rplots.pdf", "cov_mat_SparCC.out", "get_pair_fstats.Rout"]:
    command = 'echo "rm %s" | qsub -N CleanUp -hold_jid pair_Fstat -cwd' % f
    subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
command = 'echo "find . -maxdepth 1 -type f -size 0 | xargs -d"\n" rm -f" | qsub -N Finish -hold_jid CleanUp -cwd'
subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# ls | grep -P '\.o' | xargs -d"\n" rm -f
# find . -maxdepth 1 -type f -size 0 | xargs -d"\n" rm -f
| apache-2.0 |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/matplotlib/testing/jpl_units/StrConverter.py | 23 | 5293 | #===========================================================================
#
# StrConverter
#
#===========================================================================
"""StrConverter module containing class StrConverter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import matplotlib.units as units
from matplotlib.cbook import iterable
# Place all imports before here.
#===========================================================================
__all__ = [ 'StrConverter' ]
#===========================================================================
class StrConverter( units.ConversionInterface ):
   """: A matplotlib converter class. Provides matplotlib conversion
   functionality for string data values.
   Valid units for string are:
   - 'indexed' : Values are indexed as they are specified for plotting.
   - 'sorted' : Values are sorted alphanumerically.
   - 'inverted' : Values are inverted so that the first value is on top.
   - 'sorted-inverted' : A combination of 'sorted' and 'inverted'
   """
   #------------------------------------------------------------------------
   @staticmethod
   def axisinfo( unit, axis ):
      """: Returns information on how to handle an axis that has string data.
      = INPUT VARIABLES
      - axis The axis using this converter.
      - unit The units to use for a axis with string data.
      = RETURN VALUE
      - Returns a matplotlib AxisInfo data structure that contains
      minor/major formatters, major/minor locators, and default
      label information.
      """
      # No special axis handling needed for strings.
      return None
   #------------------------------------------------------------------------
   @staticmethod
   def convert( value, unit, axis ):
      """: Convert value using unit to a float. If value is a sequence, return
      the converted sequence.
      = INPUT VARIABLES
      - axis The axis using this converter.
      - value The value or list of values that need to be converted.
      - unit The units to use for a axis with Epoch data.
      = RETURN VALUE
      - Returns the value parameter converted to floats.
      """
      if ( units.ConversionInterface.is_numlike( value ) ):
         return value
      if ( value == [] ):
         return []
      # we delay loading to make matplotlib happy
      ax = axis.axes
      if axis is ax.get_xaxis():
         isXAxis = True
      else:
         isXAxis = False
      axis.get_major_ticks()
      ticks = axis.get_ticklocs()
      labels = axis.get_ticklabels()
      labels = [ l.get_text() for l in labels if l.get_text() ]
      if ( not labels ):
         ticks = []
         labels = []
      if ( not iterable( value ) ):
         value = [ value ]
      # Collect the input strings not yet present as tick labels.
      newValues = []
      for v in value:
         if ( (v not in labels) and (v not in newValues) ):
            newValues.append( v )
      for v in newValues:
         if ( labels ):
            labels.append( v )
         else:
            labels = [ v ]
      #DISABLED: This is disabled because matplotlib bar plots do not
      #DISABLED: recalculate the unit conversion of the data values
      #DISABLED: this is due to design and is not really a bug.
      #DISABLED: If this gets changed, then we can activate the following
      #DISABLED: block of code. Note that this works for line plots.
      #DISABLED if ( unit ):
      #DISABLED if ( unit.find( "sorted" ) > -1 ):
      #DISABLED labels.sort()
      #DISABLED if ( unit.find( "inverted" ) > -1 ):
      #DISABLED labels = labels[ ::-1 ]
      # add padding (so they do not appear on the axes themselves)
      labels = [ '' ] + labels + [ '' ]
      ticks = list(xrange( len(labels) ))
      ticks[0] = 0.5
      ticks[-1] = ticks[-1] - 0.5
      # Side effect: rewrites the axis ticks/labels so that every string value
      # maps to an integer tick position.
      axis.set_ticks( ticks )
      axis.set_ticklabels( labels )
      # we have to do the following lines to make ax.autoscale_view work
      loc = axis.get_major_locator()
      loc.set_bounds( ticks[0], ticks[-1] )
      if ( isXAxis ):
         ax.set_xlim( ticks[0], ticks[-1] )
      else:
         ax.set_ylim( ticks[0], ticks[-1] )
      result = []
      for v in value:
         # If v is not in labels then something went wrong with adding new
         # labels to the list of old labels.
         errmsg = "This is due to a logic error in the StrConverter class. "
         errmsg += "Please report this error and its message in bugzilla."
         assert ( v in labels ), errmsg
         result.append( ticks[ labels.index(v) ] )
      ax.viewLim.ignore(-1)
      return result
   #------------------------------------------------------------------------
   @staticmethod
   def default_units( value, axis ):
      """: Return the default unit for value, or None.
      = INPUT VARIABLES
      - axis The axis using this converter.
      - value The value or list of values that need units.
      = RETURN VALUE
      - Returns the default units to use for value.
      Return the default unit for value, or None.
      """
      # The default behavior for string indexing.
      return "indexed"
| gpl-3.0 |
Solid-Mechanics/matplotlib-4-abaqus | matplotlib/testing/jpl_units/Epoch.py | 6 | 7147 | #===========================================================================
#
# Epoch
#
#===========================================================================
"""Epoch module."""
#===========================================================================
# Place all imports after here.
#
from __future__ import print_function
import math
import datetime as DT
from matplotlib.dates import date2num
#
# Place all imports before here.
#===========================================================================
#===========================================================================
class Epoch:
   # Frame conversion offsets in seconds
   # t(TO) = t(FROM) + allowed[ FROM ][ TO ]
   allowed = {
      "ET" : {
         "UTC" : +64.1839,
         },
      "UTC" : {
         "ET" : -64.1839,
         },
      }
   #-----------------------------------------------------------------------
   def __init__( self, frame, sec=None, jd=None, daynum=None, dt=None ):
      """Create a new Epoch object.
      Build an epoch 1 of 2 ways:
      Using seconds past a Julian date:
      # Epoch( 'ET', sec=1e8, jd=2451545 )
      or using a matplotlib day number
      # Epoch( 'ET', daynum=730119.5 )
      = ERROR CONDITIONS
      - If the input units are not in the allowed list, an error is thrown.
      = INPUT VARIABLES
      - frame The frame of the epoch. Must be 'ET' or 'UTC'
      - sec The number of seconds past the input JD.
      - jd The Julian date of the epoch.
      - daynum The matplotlib day number of the epoch.
      - dt A python datetime instance.
      """
      # Exactly one of (sec+jd), daynum, or dt must be supplied.
      if ( ( sec is None and jd is not None ) or
           ( sec is not None and jd is None ) or
           ( daynum is not None and ( sec is not None or jd is not None ) ) or
           ( daynum is None and dt is None and ( sec is None or jd is None ) ) or
           ( daynum is not None and dt is not None ) or
           ( dt is not None and ( sec is not None or jd is not None ) ) or
           ( (dt is not None) and not isinstance(dt, DT.datetime) ) ):
         msg = "Invalid inputs. Must enter sec and jd together, " \
               "daynum by itself, or dt (must be a python datetime).\n" \
               "Sec = %s\nJD = %s\ndnum= %s\ndt = %s" \
               % ( str( sec ), str( jd ), str( daynum ), str( dt ) )
         raise ValueError( msg )
      if frame not in self.allowed:
         msg = "Input frame '%s' is not one of the supported frames of %s" \
               % ( frame, str( self.allowed.keys() ) )
         raise ValueError( msg )
      self._frame = frame
      if dt is not None:
         daynum = date2num( dt )
      if daynum is not None:
         # 1-JAN-0001 in JD = 1721425.5
         jd = float( daynum ) + 1721425.5
         self._jd = math.floor( jd )
         self._seconds = ( jd - self._jd ) * 86400.0
      else:
         self._seconds = float( sec )
         self._jd = float( jd )
         # Resolve seconds down to [ 0, 86400 )
         deltaDays = int( math.floor( self._seconds / 86400.0 ) )
         self._jd += deltaDays
         self._seconds -= deltaDays * 86400.0
   #-----------------------------------------------------------------------
   def convert( self, frame ):
      # Return an equivalent Epoch expressed in 'frame' (self if already there).
      if self._frame == frame:
         return self
      offset = self.allowed[ self._frame ][ frame ]
      return Epoch( frame, self._seconds + offset, self._jd )
   #-----------------------------------------------------------------------
   def frame( self ):
      # Name of the time frame ('ET' or 'UTC') this epoch is expressed in.
      return self._frame
   #-----------------------------------------------------------------------
   def julianDate( self, frame ):
      # Julian date (float days) of this epoch, converted to 'frame' if needed.
      t = self
      if frame != self._frame:
         t = self.convert( frame )
      return t._jd + t._seconds / 86400.0
   #-----------------------------------------------------------------------
   def secondsPast( self, frame, jd ):
      # Seconds elapsed since Julian date 'jd', measured in 'frame'.
      t = self
      if frame != self._frame:
         t = self.convert( frame )
      delta = t._jd - jd
      return t._seconds + delta * 86400
   #-----------------------------------------------------------------------
   def __cmp__( self, rhs ):
      """Compare two Epoch's.
      = INPUT VARIABLES
      - rhs The Epoch to compare against.
      = RETURN VALUE
      - Returns -1 if self < rhs, 0 if self == rhs, +1 if self > rhs.
      """
      # NOTE(review): relies on the Python 2-only builtin cmp() and the
      # __cmp__ protocol; this method is inert under Python 3 -- confirm
      # the supported interpreter before porting.
      t = self
      if self._frame != rhs._frame:
         t = self.convert( rhs._frame )
      if t._jd != rhs._jd:
         return cmp( t._jd, rhs._jd )
      return cmp( t._seconds, rhs._seconds )
   #-----------------------------------------------------------------------
   def __add__( self, rhs ):
      """Add a duration to an Epoch.
      = INPUT VARIABLES
      - rhs The Duration to add.
      = RETURN VALUE
      - Returns a new Epoch shifted by the input Duration.
      """
      t = self
      if self._frame != rhs.frame():
         t = self.convert( rhs._frame )
      sec = t._seconds + rhs.seconds()
      return Epoch( t._frame, sec, t._jd )
   #-----------------------------------------------------------------------
   def __sub__( self, rhs ):
      """Subtract two Epoch's or a Duration from an Epoch.
      Valid:
      Duration = Epoch - Epoch
      Epoch = Epoch - Duration
      = INPUT VARIABLES
      - rhs The Epoch to subtract.
      = RETURN VALUE
      - Returns either the duration between to Epoch's or the a new
      Epoch that is the result of subtracting a duration from an epoch.
      """
      # Delay-load due to circular dependencies.
      import matplotlib.testing.jpl_units as U
      # Handle Epoch - Duration
      if isinstance( rhs, U.Duration ):
         return self + -rhs
      t = self
      if self._frame != rhs._frame:
         t = self.convert( rhs._frame )
      days = t._jd - rhs._jd
      sec = t._seconds - rhs._seconds
      return U.Duration( rhs._frame, days*86400 + sec )
   #-----------------------------------------------------------------------
   def __str__( self ):
      """Print the Epoch."""
      return "%22.15e %s" % ( self.julianDate( self._frame ), self._frame )
   #-----------------------------------------------------------------------
   def __repr__( self ):
      """Print the Epoch."""
      return str( self )
   #-----------------------------------------------------------------------
   def range( start, stop, step ):
      """Generate a range of Epoch objects.
      Similar to the Python range() method. Returns the range [
      start, stop ) at the requested step. Each element will be a
      Epoch object.
      = INPUT VARIABLES
      - start The starting value of the range.
      - stop The stop value of the range.
      - step Step to use.
      = RETURN VALUE
      - Returns a list containing the requested Epoch values.
      """
      elems = []
      i = 0
      while True:
         d = start + i * step
         if d >= stop:
            break
         elems.append( d )
         i += 1
      return elems
   range = staticmethod( range )
#===========================================================================
| mit |
jmmease/pandas | pandas/tests/io/formats/test_eng_formatting.py | 22 | 8085 | import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas.compat import u
import pandas.io.formats.format as fmt
from pandas.util import testing as tm
class TestEngFormatter(object):
    """Tests for pandas' engineering-notation float formatter."""
    def test_eng_float_formatter(self):
        # Global option round-trip: exponent form, SI-prefix form, accuracy=0.
        df = DataFrame({'A': [1.41, 141., 14100, 1410000.]})
        fmt.set_eng_float_format()
        result = df.to_string()
        expected = (' A\n'
                    '0 1.410E+00\n'
                    '1 141.000E+00\n'
                    '2 14.100E+03\n'
                    '3 1.410E+06')
        assert result == expected
        fmt.set_eng_float_format(use_eng_prefix=True)
        result = df.to_string()
        expected = (' A\n'
                    '0 1.410\n'
                    '1 141.000\n'
                    '2 14.100k\n'
                    '3 1.410M')
        assert result == expected
        fmt.set_eng_float_format(accuracy=0)
        result = df.to_string()
        expected = (' A\n'
                    '0 1E+00\n'
                    '1 141E+00\n'
                    '2 14E+03\n'
                    '3 1E+06')
        assert result == expected
        tm.reset_display_options()
    def compare(self, formatter, input, output):
        # Single-value helper: formatter(input) must equal the expected string.
        formatted_input = formatter(input)
        assert formatted_input == output
    def compare_all(self, formatter, in_out):
        """
        Parameters:
        -----------
        formatter: EngFormatter under test
        in_out: list of tuples. Each tuple = (number, expected_formatting)
        It is tested if 'formatter(number) == expected_formatting'.
        *number* should be >= 0 because formatter(-number) == fmt is also
        tested. *fmt* is derived from *expected_formatting*
        """
        for input, output in in_out:
            self.compare(formatter, input, output)
            self.compare(formatter, -input, "-" + output[1:])
    def test_exponents_with_eng_prefix(self):
        # Sweep 10**-24 .. 10**26: every SI prefix from yocto (y) to yotta (Y).
        formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
        f = np.sqrt(2)
        in_out = [
            (f * 10 ** -24, " 1.414y"), (f * 10 ** -23, " 14.142y"),
            (f * 10 ** -22, " 141.421y"), (f * 10 ** -21, " 1.414z"),
            (f * 10 ** -20, " 14.142z"), (f * 10 ** -19, " 141.421z"),
            (f * 10 ** -18, " 1.414a"), (f * 10 ** -17, " 14.142a"),
            (f * 10 ** -16, " 141.421a"), (f * 10 ** -15, " 1.414f"),
            (f * 10 ** -14, " 14.142f"), (f * 10 ** -13, " 141.421f"),
            (f * 10 ** -12, " 1.414p"), (f * 10 ** -11, " 14.142p"),
            (f * 10 ** -10, " 141.421p"), (f * 10 ** -9, " 1.414n"),
            (f * 10 ** -8, " 14.142n"), (f * 10 ** -7, " 141.421n"),
            (f * 10 ** -6, " 1.414u"), (f * 10 ** -5, " 14.142u"),
            (f * 10 ** -4, " 141.421u"), (f * 10 ** -3, " 1.414m"),
            (f * 10 ** -2, " 14.142m"), (f * 10 ** -1, " 141.421m"),
            (f * 10 ** 0, " 1.414"), (f * 10 ** 1, " 14.142"),
            (f * 10 ** 2, " 141.421"), (f * 10 ** 3, " 1.414k"),
            (f * 10 ** 4, " 14.142k"), (f * 10 ** 5, " 141.421k"),
            (f * 10 ** 6, " 1.414M"), (f * 10 ** 7, " 14.142M"),
            (f * 10 ** 8, " 141.421M"), (f * 10 ** 9, " 1.414G"),
            (f * 10 ** 10, " 14.142G"), (f * 10 ** 11, " 141.421G"),
            (f * 10 ** 12, " 1.414T"), (f * 10 ** 13, " 14.142T"),
            (f * 10 ** 14, " 141.421T"), (f * 10 ** 15, " 1.414P"),
            (f * 10 ** 16, " 14.142P"), (f * 10 ** 17, " 141.421P"),
            (f * 10 ** 18, " 1.414E"), (f * 10 ** 19, " 14.142E"),
            (f * 10 ** 20, " 141.421E"), (f * 10 ** 21, " 1.414Z"),
            (f * 10 ** 22, " 14.142Z"), (f * 10 ** 23, " 141.421Z"),
            (f * 10 ** 24, " 1.414Y"), (f * 10 ** 25, " 14.142Y"),
            (f * 10 ** 26, " 141.421Y")]
        self.compare_all(formatter, in_out)
    def test_exponents_without_eng_prefix(self):
        # Same sweep without prefixes: exponents rendered as E+/-nn multiples of 3.
        formatter = fmt.EngFormatter(accuracy=4, use_eng_prefix=False)
        f = np.pi
        in_out = [
            (f * 10 ** -24, " 3.1416E-24"),
            (f * 10 ** -23, " 31.4159E-24"),
            (f * 10 ** -22, " 314.1593E-24"),
            (f * 10 ** -21, " 3.1416E-21"),
            (f * 10 ** -20, " 31.4159E-21"),
            (f * 10 ** -19, " 314.1593E-21"),
            (f * 10 ** -18, " 3.1416E-18"),
            (f * 10 ** -17, " 31.4159E-18"),
            (f * 10 ** -16, " 314.1593E-18"),
            (f * 10 ** -15, " 3.1416E-15"),
            (f * 10 ** -14, " 31.4159E-15"),
            (f * 10 ** -13, " 314.1593E-15"),
            (f * 10 ** -12, " 3.1416E-12"),
            (f * 10 ** -11, " 31.4159E-12"),
            (f * 10 ** -10, " 314.1593E-12"),
            (f * 10 ** -9, " 3.1416E-09"),
            (f * 10 ** -8, " 31.4159E-09"),
            (f * 10 ** -7, " 314.1593E-09"),
            (f * 10 ** -6, " 3.1416E-06"),
            (f * 10 ** -5, " 31.4159E-06"),
            (f * 10 ** -4, " 314.1593E-06"),
            (f * 10 ** -3, " 3.1416E-03"),
            (f * 10 ** -2, " 31.4159E-03"),
            (f * 10 ** -1, " 314.1593E-03"),
            (f * 10 ** 0, " 3.1416E+00"),
            (f * 10 ** 1, " 31.4159E+00"),
            (f * 10 ** 2, " 314.1593E+00"),
            (f * 10 ** 3, " 3.1416E+03"),
            (f * 10 ** 4, " 31.4159E+03"),
            (f * 10 ** 5, " 314.1593E+03"),
            (f * 10 ** 6, " 3.1416E+06"),
            (f * 10 ** 7, " 31.4159E+06"),
            (f * 10 ** 8, " 314.1593E+06"),
            (f * 10 ** 9, " 3.1416E+09"),
            (f * 10 ** 10, " 31.4159E+09"),
            (f * 10 ** 11, " 314.1593E+09"),
            (f * 10 ** 12, " 3.1416E+12"),
            (f * 10 ** 13, " 31.4159E+12"),
            (f * 10 ** 14, " 314.1593E+12"),
            (f * 10 ** 15, " 3.1416E+15"),
            (f * 10 ** 16, " 31.4159E+15"),
            (f * 10 ** 17, " 314.1593E+15"),
            (f * 10 ** 18, " 3.1416E+18"),
            (f * 10 ** 19, " 31.4159E+18"),
            (f * 10 ** 20, " 314.1593E+18"),
            (f * 10 ** 21, " 3.1416E+21"),
            (f * 10 ** 22, " 31.4159E+21"),
            (f * 10 ** 23, " 314.1593E+21"),
            (f * 10 ** 24, " 3.1416E+24"),
            (f * 10 ** 25, " 31.4159E+24"),
            (f * 10 ** 26, " 314.1593E+24")]
        self.compare_all(formatter, in_out)
    def test_rounding(self):
        # Half-even style rounding at accuracy 3, 1 and 0 digits.
        formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
        in_out = [(5.55555, ' 5.556'), (55.5555, ' 55.556'),
                  (555.555, ' 555.555'), (5555.55, ' 5.556k'),
                  (55555.5, ' 55.556k'), (555555, ' 555.555k')]
        self.compare_all(formatter, in_out)
        formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
        in_out = [(5.55555, ' 5.6'), (55.5555, ' 55.6'), (555.555, ' 555.6'),
                  (5555.55, ' 5.6k'), (55555.5, ' 55.6k'), (555555, ' 555.6k')]
        self.compare_all(formatter, in_out)
        formatter = fmt.EngFormatter(accuracy=0, use_eng_prefix=True)
        in_out = [(5.55555, ' 6'), (55.5555, ' 56'), (555.555, ' 556'),
                  (5555.55, ' 6k'), (55555.5, ' 56k'), (555555, ' 556k')]
        self.compare_all(formatter, in_out)
        formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
        result = formatter(0)
        assert result == u(' 0.000')
    def test_nan(self):
        # Issue #11981
        formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
        result = formatter(np.nan)
        assert result == u('NaN')
        df = pd.DataFrame({'a': [1.5, 10.3, 20.5],
                           'b': [50.3, 60.67, 70.12],
                           'c': [100.2, 101.33, 120.33]})
        pt = df.pivot_table(values='a', index='b', columns='c')
        fmt.set_eng_float_format(accuracy=1)
        result = pt.to_string()
        assert 'NaN' in result
        tm.reset_display_options()
    def test_inf(self):
        # Issue #11981
        formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
        result = formatter(np.inf)
        assert result == u('inf')
| bsd-3-clause |
RoyNexus/python | Market Sim (3rd4th Homework)/marketsim.py | 1 | 7615 | # QSTK Imports
import QSTK.qstkutil.qsdateutil as du
#import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
# Third Party Imports
import datetime as dt
import math as math
import pandas as pd
import numpy as np
import sys as sys
import csv as csv
from order import Order
from metrics import Metrics
# Row indices into the 2-row [timestamps; values] matrices built below.
DATETIME = 0  # row of per-day datetime stamps
VALUE = 1     # row of per-day cash balances / holdings vectors
# Fallback starting cash when command-line arguments are missing/invalid.
DEFAULT_CASH = 100000
def read_arguments(arguments):
    """Parse the command-line argument list.

    Accepts either a bare [cash, orders_file, values_file] triple or a
    sys.argv-style list whose first element is the script name (which is
    what main() actually passes).  Returns the tuple
    (initial_cash, orders_file, values_file); falls back to the module
    defaults when the argument count is wrong.
    """
    # Bug fix: main() passes sys.argv, whose element 0 is the script path,
    # so the original 'len == 3' check either fell through to the defaults
    # or returned the script path as the cash amount.  Strip the program
    # name for argv-style input before validating.
    args = arguments[1:] if len(arguments) == 4 else arguments
    if len(args) == 3:
        return args[0], args[1], args[2]
    print('Incorrect number of arguments, assume default arguments')
    # raise Exception
    return DEFAULT_CASH, "orders.csv", "values.csv"
def read_orders(file):
    """Read an orders CSV and return an ndarray of string rows.

    Each input row has 7 comma-separated fields; the 7th (trailing) field
    is dropped, yielding [YEAR, MONTH, DAY, SYMBOL, ORDER, SHARES] rows.
    """
    # Fixes two defects of the original: the file handle was never closed
    # (leak), and mode 'rU' is deprecated and removed in Python 3.11 --
    # a context manager with plain 'r' covers both.
    header = ['YEAR', 'MONTH', 'DAY', 'SYMBOL', 'ORDER', 'SHARES']
    with open(file, 'r') as handle:
        reader = csv.reader(handle, delimiter=',')
        orders_array = header
        for row in reader:
            # Drop column 6 (the 7th field) from every data row.
            orders_array = np.vstack([orders_array, np.delete(row, [6], axis=0)])
    # Remove the synthetic header row used to seed the vstack.
    return np.delete(orders_array, [0], axis=0)
def get_orders_object(orders):
    """Wrap each raw order row in an Order instance and return the list."""
    return [Order(row) for row in orders]
def get_symbols_and_dates(orders):
    """Return (unique symbol list, list of all order dates) for the rows."""
    parsed = [Order(row) for row in orders]
    unique_symbols = {o.get_symbol() for o in parsed}
    all_dates = [o.get_date() for o in parsed]
    return list(unique_symbols), all_dates
def get_close_prices(start_date, end_date, symbols):
    """Fetch the 'close' price frame for `symbols` over NYSE trading days.

    Timestamps are taken at the 16:00 market close; data comes from the
    QSTK Yahoo cache (cachestalltime=0 forces a cache check every call).
    """
    close_of_day = dt.timedelta(hours=16)
    timestamps = du.getNYSEdays(start_date, end_date, close_of_day)
    data_source = da.DataAccess('Yahoo', cachestalltime=0)
    keys = ['close']
    frames = data_source.get_data(timestamps, symbols, keys)
    return dict(zip(keys, frames))['close']
def checkForOrders(orderObjects, currentDate):
    """Return the orders whose 16:00 execution timestamp equals currentDate."""
    return [order for order in orderObjects
            if order.get_date_16_00() == currentDate]
def get_current_price(prices, symbol, current_date):
    """Close price of `symbol` at `current_date`; 0 when the quote is NaN."""
    quote = prices[symbol][current_date]
    return 0 if math.isnan(quote) else quote
def get_sym_pos(symbols, symbol):
    """Index of `symbol` within `symbols` (last match wins); 0 if absent."""
    position = 0
    for idx, candidate in enumerate(symbols):
        if candidate == symbol:
            position = idx
    return position
def calc_cash(order, currentCash, prices):
    """Cash balance after executing `order` at its 16:00 close price."""
    unit_price = float(get_current_price(prices, order.get_symbol(),
                                         order.get_date_16_00()))
    trade_value = int(order.get_shares()) * unit_price
    if order.get_type() == 'Buy':
        return currentCash - trade_value
    return currentCash + trade_value
def calc_portfolio(order, currentPortfolio):
    """Number of shares held after applying `order` to the current count."""
    delta = int(order.get_shares())
    if order.get_type() == 'Buy':
        return currentPortfolio + delta
    return currentPortfolio - delta
def update_cash_forward(resultMatrix, index, lenCashMatrix):
    """Carry the cash balance at `index` forward to the end of the series.

    Mutates `resultMatrix` in place and returns it, so the balance persists
    until a later order overwrites it.
    """
    balance = resultMatrix[VALUE, index]
    for slot in xrange(index, lenCashMatrix):
        resultMatrix[VALUE, slot] = balance
    return resultMatrix
def update_portfolio_forward(resultMatrix, index, lenPortfolioMatrix):
    # For each symbol column, copy the share count recorded at `index` into
    # every later day (including `index` itself), so holdings persist until
    # a subsequent order changes them.  Mutates resultMatrix in place.
    for idxSym in xrange(0, len(resultMatrix[VALUE, index])):
        updatedValue = resultMatrix[VALUE, index][idxSym]
        for x in xrange(index, lenPortfolioMatrix):
            resultMatrix[VALUE, x][idxSym] = updatedValue
    return resultMatrix
def applyPricesToSymbols(currentValues, prices, symbols, currentDate):
    """Scale each share count by its symbol's price on currentDate (in place)."""
    for pos, sym in enumerate(symbols):
        currentValues[pos] = currentValues[pos] * get_current_price(prices, sym, currentDate)
    return currentValues
def applyCurrentDatePrices(resultMatrix, prices, symbols):
    """Re-price every daily holdings vector in place and return the matrix."""
    total_days = len(prices.values)
    for day in xrange(0, total_days):
        resultMatrix[VALUE, day] = applyPricesToSymbols(
            resultMatrix[VALUE, day], prices, symbols,
            resultMatrix[DATETIME, day])
    return resultMatrix
def calculate_portfolio(symbols, prices, orders):
    # Build a 2-row matrix: row DATETIME holds the trading timestamps and
    # row VALUE holds one share-count vector (len(symbols)) per trading day.
    lenPortfolioMatrix = len(prices.values)
    portfolio_matrix = np.zeros((lenPortfolioMatrix, len(symbols)))
    datetimesIndex = np.array(prices.index.to_pydatetime())
    resultMatrix = np.array([datetimesIndex, portfolio_matrix])
    orderObjects = get_orders_object(orders)
    for x in xrange(0, lenPortfolioMatrix):
        # Apply every order executed at this day's 16:00 close.
        dayOrders = checkForOrders(orderObjects, resultMatrix[DATETIME, x])
        for dayOrder in dayOrders:
            resultMatrix[VALUE, x][get_sym_pos(symbols, dayOrder.get_symbol())] = calc_portfolio(dayOrder, resultMatrix[VALUE, x][get_sym_pos(symbols, dayOrder.get_symbol())])
        # Carry today's holdings forward so later days start from them.
        resultMatrix = update_portfolio_forward(resultMatrix, x, lenPortfolioMatrix)
    # Finally convert share counts into market value at each day's close.
    resultMatrix = applyCurrentDatePrices(resultMatrix, prices, symbols)
    return resultMatrix
def calculate_cash(initial_cash, prices, orders):
    # Build a 2-row matrix: row DATETIME holds the trading timestamps and
    # row VALUE holds the cash balance per trading day, seeded everywhere
    # with the starting cash.
    lenCashMatrix = len(prices.values)
    cash_matrix = np.zeros(lenCashMatrix)
    cash_matrix.fill(initial_cash)
    datetimesIndex = np.array(prices.index.to_pydatetime())
    resultMatrix = np.array([datetimesIndex, cash_matrix])
    orderObjects = get_orders_object(orders)
    for x in xrange(0, lenCashMatrix):
        # Settle every order executed at this day's 16:00 close, then
        # propagate the resulting balance to all later days.
        dayOrders = checkForOrders(orderObjects, resultMatrix[DATETIME, x])
        for dayOrder in dayOrders:
            resultMatrix[VALUE, x] = calc_cash(dayOrder, resultMatrix[VALUE, x], prices)
        resultMatrix = update_cash_forward(resultMatrix, x, lenCashMatrix)
    #print resultMatrix[VALUE]
    return resultMatrix
def sum_portfolio_values(valueInCurrentDate):
    """Total market value across all symbol positions for a single day.

    Returns 0 for an empty sequence, matching the original accumulator.
    """
    # Idiom: the built-in sum() replaces the manual accumulation loop.
    return sum(valueInCurrentDate)
def sum_cash_and_portfolio(cash, portfolio, size):
    """Fold each day's total portfolio value into the cash series (in place)."""
    for day in xrange(0, size):
        cash[VALUE, day] = cash[VALUE, day] + sum_portfolio_values(portfolio[VALUE, day])
    return cash
def write_output_file(filename, values, size):
    """Write one 'date,total-value' CSV row per trading day.

    NOTE(review): binary mode 'wb' suits the Python 2 csv module this
    script targets; under Python 3 the writer would need text mode with
    newline='' -- confirm the intended runtime.
    """
    writer = csv.writer(open(filename, 'wb'), delimiter=',')
    for row_idx in xrange(0, size):
        writer.writerow([str(values[DATETIME, row_idx]),
                         str(values[VALUE, row_idx])])
def main(arguments):
    # End-to-end simulation: parse args, load orders and prices, track cash
    # and holdings day by day, write the total-value series and print metrics.
    try:
        initial_cash, orders_file, values_file = read_arguments(arguments)
    except Exception:
        # NOTE(review): if read_arguments raises, the three variables stay
        # unbound and the very next print raises NameError -- confirm whether
        # this branch should return/exit instead.
        print 'Call example: marketsim.py 1000000 orders.csv values.csv'
    print "Initial amount of cash: " + str(initial_cash) + "$"
    print "Reading from input file: " + str(orders_file)
    orders_array = read_orders(orders_file)
    symbols_list, dates_list = get_symbols_and_dates(orders_array)
    dates_list.sort()
    # Price window spans from the first order date through the day after the
    # last one, so the final 16:00 close is included.
    prices = get_close_prices(dates_list[0], dates_list[len(dates_list)-1] + dt.timedelta(days=1), symbols_list)
    cash = calculate_cash(initial_cash, prices, orders_array)
    portfolio = calculate_portfolio(symbols_list, prices, orders_array)
    total_values_by_date = sum_cash_and_portfolio(cash, portfolio, len(prices.values))
    print "Writing into output file: " + str(values_file)
    write_output_file(values_file, total_values_by_date, len(prices.values))
    print "Details of portfolio\n"
    metrics = Metrics(total_values_by_date[VALUE])
    print "Sharpe Ratio of Fund: " + str(metrics.get_sharpe_ratio())
    print "Total Return of Fund: " + str(metrics.get_cumulative_return())
    print "Standard Deviation of Fund: " + str(metrics.get_daily_std())
    print "Average Daily Return of Fund: " + str(metrics.get_daily_return())
if __name__ == '__main__':
main(sys.argv) | unlicense |
jhillairet/ICRH | WEST_design/plot_limitations.py | 2 | 3293 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 21:39:58 2015
In this script we plot the maximum coupled power limits vs coupling resistance.
The limitations come from the maximum current permitted in capacitors and the
maximum voltage. In fact, all the limits are due to the current limits only.
@author: hash
"""
import pandas as pd
from matplotlib.pylab import *
from scipy.optimize import curve_fit
# open the excel sheet with pandas
# The data comes from the final antenna model made with Designer
data = pd.read_excel(io='./data/TOPICA/ToreSupra_WEST/WEST_ICRH_Compilation_resultats_designer.xlsx',
sheetname=0)
#The information we want to plot correspond to the coupling resistance
#and the maximum power
Rc = data['Plasma Model Coupling Resistance [Ohm] (calculated)'].values
Pmax = data['Worse Power Limit [MW]'].values
# some data are missing : the Pmax value is either 0 or nan.
# Filter those data
idx = pd.notnull(Pmax) * (Pmax > 0)
Rc = Rc[idx]
Pmax = Pmax[idx]
# The max power is given for 1/2 antenna.
# We multiply to get the total power for three antennas
Pmax = 3*2*Pmax
# plot the raw data, just to see
figure(1)
clf()
plot(Rc, Pmax, '.')
# these data comes from two kinds of matching strategy : either match for a real
# impedance (the one of the feeder, almost 30 Ohms) or match for a complex impedance,
# adding an imaginary part which will increase the current (and symmetrize them as well)
# at the depends of an increase of the VSWR for the generator.
# Let's filter these two set of data.
Zmatch = data[u'Matching Impedace Target [Ohm]'].values
strategy1 = Zmatch[idx] == '29.74 - 0j'
strategy2 = Zmatch[idx] == '29.74 - 15j'
figure(2)
clf()
plot(Rc[strategy1], Pmax[strategy1], 'ko', ms=7, label='VSWR=1:1')
plot(Rc[strategy2], Pmax[strategy2], 'ks', ms=7, label='VSWR<1.7:1')
_Rc = np.linspace(0.01, 4, 101)
def func(x, a, b):
    """Power-law model ``a * (x - 0.1)**b`` fitted to the power-limit data.

    Parameters
    ----------
    x : float or array-like
        Coupling resistance [Ohm].
    a, b : float
        Scale factor and exponent of the power law.

    Returns
    -------
    Modelled maximum coupled power.

    Note: the former third parameter ``c`` never appeared in the
    expression, so ``curve_fit`` had to estimate a parameter with zero
    gradient (singular covariance matrix); it has been removed.
    """
    return a*(x-0.1)**b
popt, pcov = curve_fit(func, Rc[strategy1], Pmax[strategy1])
_Pmax_stgy1 = func(_Rc, *popt)
popt, pcov = curve_fit(func, Rc[strategy2], Pmax[strategy2])
_Pmax_stgy2 = func(_Rc, *popt)
#plot(_Rc, _Pmax_stgy1)
#plot(_Rc, _Pmax_stgy2)
fill_between(_Rc, _Pmax_stgy1, _Pmax_stgy2, alpha=0.2)
xlim(0.1, 3)
ylim(0, 2*3*2)
xlabel('Coupling Resistance [$\Omega$]', fontsize=16)
ylabel('Maximum Coupled RF Power [MW]', fontsize=16)
xticks(fontsize=14)
yticks(fontsize=14)
grid(True)
# H-mode coupling resistance range
#gca().add_patch(Rectangle((0.39, 0), 1.89-0.39, 12,
# facecolor=[.1,.1,.1], alpha=0.1))
annotate('H-mode', xy=(1,1), xytext=(1, 8.5), fontsize=16)
annotate('', (0.39, 8), (1.89, 8), arrowprops=dict(arrowstyle='<->', linewidth=2))
# L-mode coupling resistance range
#gca().add_patch(Rectangle((1.06, 0), 2.91-1.06, 12,
# facecolor=[.1,.1,.1], alpha=0.1))
annotate('L-mode', xy=(1,1), xytext=(1.9, 10.5), fontsize=16)
annotate('', (1.06, 10), (2.91, 10), arrowprops=dict(arrowstyle='<->', linewidth=2))
# Fill between strategy zones
fill_between(_Rc, _Pmax_stgy1, alpha=0.2, color= 'g')
fill_between(_Rc, _Pmax_stgy1, _Pmax_stgy2, alpha=0.1, color='r')
# only one marker in the legend (double marker per default...)
legend(numpoints=1, loc='lower right')
savefig('WEST_ICRH_MaximumPower_vs_Rc.png', dpi=600) | mit |
danielforsyth/keras | tests/manual/check_callbacks.py | 82 | 7540 | import numpy as np
import random
import theano
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
import keras.callbacks as cbks
from matplotlib import pyplot as plt
from matplotlib import animation
##############################
# model DrawActivations test #
##############################
print('Running DrawActivations test')
nb_classes = 10
batch_size = 128
nb_epoch = 10
max_train_samples = 512
max_test_samples = 1
np.random.seed(1337)
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1,1,28,28)[:max_train_samples]
X_train = X_train.astype("float32")
X_train /= 255
X_test = X_test.reshape(-1,1,28,28)[:max_test_samples]
X_test = X_test.astype("float32")
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
class Frames(object):
    """Accumulates per-subplot image frames plus one title per animation step."""

    def __init__(self, n_plots=16):
        # One frame list per subplot; _n_frames is kept for bookkeeping only.
        self._n_frames = 0
        self._framedata = [[] for _ in range(n_plots)]
        self._titles = []

    def add_frame(self, i, frame):
        """Append *frame* to the frame list of subplot *i*."""
        self._framedata[i].append(frame)

    def set_title(self, title):
        """Record the title to show for the next animation step."""
        self._titles.append(title)
class SubplotTimedAnimation(animation.TimedAnimation):
    """Timed animation that plays back a Frames object, one image per subplot.

    Each subplot i shows frames._framedata[i][j] at animation step j; the
    figure suptitle is taken from frames._titles when one exists for j.
    Assumes every per-subplot frame list has the same length -- TODO confirm.
    """

    def __init__(self, fig, frames, grid=(4, 4), interval=10, blit=False, **kwargs):
        self.n_plots = grid[0] * grid[1]
        self.axes = [fig.add_subplot(grid[0], grid[1], i + 1) for i in range(self.n_plots)]
        # hide tick marks -- only the images matter
        for axis in self.axes:
            axis.get_xaxis().set_ticks([])
            axis.get_yaxis().set_ticks([])
        self.frames = frames
        # seed each AxesImage with the first frame so imshow fixes the extent
        self.imgs = [self.axes[i].imshow(frames._framedata[i][0], interpolation='nearest', cmap='bone') for i in range(self.n_plots)]
        self.title = fig.suptitle('')
        super(SubplotTimedAnimation, self).__init__(fig, interval=interval, blit=blit, **kwargs)

    def _draw_frame(self, j):
        # Push frame j of every subplot into its image artist.
        for i in range(self.n_plots):
            self.imgs[i].set_data(self.frames._framedata[i][j])
        if len(self.frames._titles) > j:
            self.title.set_text(self.frames._titles[j])
        # tell TimedAnimation which artists changed (needed for blitting)
        self._drawn_artists = self.imgs

    def new_frame_seq(self):
        # Frame indices are driven by the length of the first subplot's list.
        return iter(range(len(self.frames._framedata[0])))

    def _init_draw(self):
        # Clear all images before the first frame is drawn.
        for img in self.imgs:
            img.set_data([[]])
def combine_imgs(imgs, grid=(1,1)):
    """Tile a stack of equally-sized 2-D images into one 2-D mosaic.

    :param imgs: array of shape (n_imgs, img_h, img_w)
    :param grid: (rows, cols); rows * cols must equal n_imgs
    :return: array of shape (rows * img_h, cols * img_w), images laid
             out in row-major order
    :raises ValueError: if the grid does not match the number of images
    """
    n_imgs, img_h, img_w = imgs.shape
    if n_imgs != grid[0] * grid[1]:
        raise ValueError('grid %s cannot hold %d images' % (grid, n_imgs))
    combined = np.zeros((grid[0] * img_h, grid[1] * img_w))
    for i in range(grid[0]):
        for j in range(grid[1]):
            # Row-major tiling: the image index is i*cols + j. The previous
            # grid[0]*i + j indexing was only correct for square grids and
            # went out of bounds for e.g. grid=(2, 1).
            combined[img_h*i:img_h*(i+1),img_w*j:img_w*(j+1)] = imgs[grid[1] * i + j]
    return combined
class DrawActivations(Callback):
    """Keras callback that snapshots layer activations during training and
    plays them back as an animation when training ends.

    Relies on the module-level ``X_test`` and the specific model built
    below: layers[1], layers[5] and layers[10] are presumably the two ReLU
    conv activations and the dense ReLU -- confirm against the model
    definition if the architecture changes.
    """

    def __init__(self, figsize):
        self.fig = plt.figure(figsize=figsize)

    def on_train_begin(self, logs={}):
        # Five animated panels: input, conv1, conv2, dense, softmax output.
        self.imgs = Frames(n_plots=5)
        # Random subset of feature maps so the mosaics stay small.
        layers_0_ids = np.random.choice(32, 16, replace=False)
        self.test_layer0 = theano.function([self.model.get_input()], self.model.layers[1].get_output(train=False)[0, layers_0_ids])
        layers_1_ids = np.random.choice(64, 36, replace=False)
        self.test_layer1 = theano.function([self.model.get_input()], self.model.layers[5].get_output(train=False)[0, layers_1_ids])
        self.test_layer2 = theano.function([self.model.get_input()], self.model.layers[10].get_output(train=False)[0])

    def on_epoch_begin(self, epoch, logs={}):
        # remember the epoch only for the frame titles
        self.epoch = epoch

    def on_batch_end(self, batch, logs={}):
        # Sample activations every 5th batch to keep the animation short.
        if batch % 5 == 0:
            self.imgs.add_frame(0, X_test[0,0])
            self.imgs.add_frame(1, combine_imgs(self.test_layer0(X_test), grid=(4, 4)))
            self.imgs.add_frame(2, combine_imgs(self.test_layer1(X_test), grid=(6, 6)))
            # 256-unit dense layer reshaped to a 16x16 tile
            self.imgs.add_frame(3, self.test_layer2(X_test).reshape((16,16)))
            self.imgs.add_frame(4, self.model._predict(X_test)[0].reshape((1,10)))
            self.imgs.set_title('Epoch #%d - Batch #%d' % (self.epoch, batch))

    def on_train_end(self, logs={}):
        # Assemble and show the collected frames as a 1x5 animation.
        anim = SubplotTimedAnimation(self.fig, self.imgs, grid=(1,5), interval=10, blit=False, repeat_delay=1000)
        # anim.save('test_gif.gif', fps=15, writer='imagemagick')
        plt.show()
# model = Sequential()
# model.add(Dense(784, 50))
# model.add(Activation('relu'))
# model.add(Dense(50, 10))
# model.add(Activation('softmax'))
model = Sequential()
model.add(Convolution2D(32, 1, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64*8*8, 256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, 10, W_regularizer = l2(0.1)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Fit the model
draw_weights = DrawActivations(figsize=(5.4, 1.35))
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, callbacks=[draw_weights])
##########################
# model checkpoint tests #
##########################
print('Running ModelCheckpoint test')
nb_classes = 10
batch_size = 128
nb_epoch = 20
# small sample size to overfit on training data
max_train_samples = 50
max_test_samples = 1000
np.random.seed(1337) # for reproducibility
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000,784)[:max_train_samples]
X_test = X_test.reshape(10000,784)[:max_test_samples]
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]
# Create a slightly larger network than required to test best validation save only
model = Sequential()
model.add(Dense(784, 500))
model.add(Activation('relu'))
model.add(Dense(500, 10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# test file location
path = "/tmp"
filename = "model_weights.hdf5"
import os
f = os.path.join(path, filename)
print("Test model checkpointer")
# only store best validation model in checkpointer
checkpointer = cbks.ModelCheckpoint(filepath=f, verbose=1, save_best_only=True)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test), callbacks =[checkpointer])
if not os.path.isfile(f):
raise Exception("Model weights were not saved to %s" % (f))
print("Test model checkpointer without validation data")
import warnings
warnings.filterwarnings('error')
try:
# this should issue a warning
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, callbacks =[checkpointer])
except:
print("Tests passed")
import sys
sys.exit(0)
raise Exception("Modelcheckpoint tests did not pass")
| mit |
Hezi-Resheff/location-based-behav | loc-vs-acc/location/tables.py | 1 | 4881 | import pandas as pd
import numpy as np
import os
from location import trajectory_processor
from settings import DATA_ROOT
def compare_behav_types(data_file, min_sampels=2000, r=1, hard_max=3):
    """Cross-tabulate ground-truth behaviour labels against FPT clusters.

    For every bird with at least ``min_sampels`` rows, compute first-passage
    time (radius ``r``, capped at ``hard_max``), cluster it into k=3 modes
    and pivot the counts of (behav label x cluster).

    :param data_file:   CSV file name under DATA_ROOT
    :param min_sampels: minimum rows per bird (sic -- name kept for callers)
    :param r:           first-passage radius
    :param hard_max:    hard cap on first-passage time
    :return: (panel, paneln) -- per-animal pivot tables, raw counts and
             column-normalised percentages. NOTE(review): pd.Panel and
             DataFrame.from_csv only exist in old pandas -- confirm version.
    """
    path = os.path.join(DATA_ROOT, data_file)
    animal_data = pd.DataFrame.from_csv(path, parse_dates=["stamp"])
    animal_data.behav = animal_data.behav.replace("\\N", -1).apply(int)  # Clean animal behav and add the unknown==-1 style
    animals = animal_data["bird_id"].unique()
    out = {}
    outn = {}  # normalized
    for animal in animals:
        data = animal_data.loc[animal_data.bird_id == animal].copy()
        print(animal)
        if len(data) < min_sampels:
            continue
        data = trajectory_processor(data, stamp=False).compute_first_passage(r, hard_max=hard_max).clean_day_end().cluster("FPT_{}".format(r), k=3)
        pivot = pd.pivot_table(data, values=["bird_id"], index=["behav"], columns=["cluster"], aggfunc=pd.DataFrame.count)
        pivotn = pivot.apply(lambda col: col/col.sum()*100, axis=0)  # normalized per column (cluster)
        out[animal] = pivot
        outn[animal] = pivotn
        print(pivot, pivotn)
    panel = pd.Panel.from_dict(out)
    paneln = pd.Panel.from_dict(outn)
    return panel, paneln
def marginals_etc(data_file, min_sampels=2000, r=1, hard_max=3):
    """Per-animal marginal statistics over FPT clusters and behaviour labels.

    For every bird with at least ``min_sampels`` rows: total time per FPT
    cluster, mean step distance per cluster and per behav label, and mean
    ODBA per cluster and per behav label.

    :param data_file:   CSV file name under DATA_ROOT
    :param min_sampels: minimum rows per bird (sic -- name kept for callers)
    :param r:           first-passage radius
    :param hard_max:    hard cap on first-passage time
    :return: five dicts keyed by bird_id:
             (time, distance_cluster, distance_behav, odba_cluster, odba_behav)
    """
    path = os.path.join(DATA_ROOT, data_file)
    animal_data = pd.DataFrame.from_csv(path, parse_dates=["stamp"])
    animal_data.behav = animal_data.behav.replace("\\N", -1).apply(int)  # Clean animal behav and add the unknown==-1 style
    # ODBA uses "\N" for missing values too; map those to NaN
    animal_data.ODBA = animal_data.ODBA.replace("\\N", np.NaN).apply(float)
    animals = animal_data["bird_id"].unique()
    time = {}
    distance_cluster = {}
    distance_behav = {}
    odba_cluster = {}
    odba_behav = {}
    for animal in animals:
        data = animal_data.loc[animal_data.bird_id == animal].copy()
        print(animal)
        if len(data) < min_sampels:
            continue
        data = trajectory_processor(data, stamp=False).compute_steps().compute_first_passage(r, hard_max=hard_max).clean_day_end().cluster("FPT_{}".format(r), k=3)
        time[animal] = data["time"].groupby(data["cluster"]).sum()
        distance_cluster[animal] = data["dist"].groupby(data["cluster"]).mean()
        distance_behav[animal] = data["dist"].groupby(data["behav"]).mean()
        odba_cluster[animal] = data["ODBA"].groupby(data["cluster"]).mean()
        odba_behav[animal] = data["ODBA"].groupby(data["behav"]).mean()
        print([d[animal] for d in [time, distance_cluster, distance_behav, odba_cluster, odba_behav]])
    return time, distance_cluster, distance_behav, odba_cluster, odba_behav
def data_with_fpt_mode(data_file, min_sampels=2000, r=1, hard_max=3):
    """ Add the FPT behavioral mode to the entire data """
    path = os.path.join(DATA_ROOT, data_file)
    animal_data = pd.DataFrame.from_csv(path, parse_dates=["stamp"])
    # "\N" marks unknown behaviour (-> -1) and missing ODBA (-> NaN)
    animal_data.behav = animal_data.behav.replace("\\N", -1).apply(int)
    animal_data.ODBA = animal_data.ODBA.replace("\\N", np.NaN).apply(float)
    fpt_column = "FPT_{}".format(r)
    frames = []
    # Process each bird independently, skipping birds with too little data.
    for animal in animal_data["bird_id"].unique():
        per_animal = animal_data.loc[animal_data.bird_id == animal].copy()
        print(animal)
        if len(per_animal) < min_sampels:
            continue
        processed = (trajectory_processor(per_animal, stamp=False)
                     .compute_steps()
                     .compute_first_passage(r, hard_max=hard_max)
                     .clean_day_end()
                     .cluster(fpt_column, k=3))
        frames.append(processed)
    # Stitch all birds back together with a fresh row index.
    return pd.concat(frames).reset_index(drop=True)
if __name__ == "__main__":
    data_file = "Storks_Africa__10_to_12_2012__with_behav__ALL.csv"
    opt = "add-fpt-modes"
    if opt == "compare-behav":
        # Compare behav types
        p, pn = compare_behav_types(data_file)
        p.to_pickle(os.path.join(DATA_ROOT, "out", "compare_behav_types__panel(r=1-max=3h).pkl"))
        pn.to_pickle(os.path.join(DATA_ROOT, "out", "compare_behav_types__panel__normalized(r=1-max=3h).pkl"))
    elif opt == "marginals":
        # Marginals: one CSV per summary table. An explicit name->table
        # list replaces the previous eval() over local variable names,
        # which was fragile (silent breakage on rename) and unsafe.
        time, distance_cluster, distance_behav, odba_cluster, odba_behav = marginals_etc(data_file)
        tables = [("time", time),
                  ("distance_cluster", distance_cluster),
                  ("distance_behav", distance_behav),
                  ("odba_cluster", odba_cluster),
                  ("odba_behav", odba_behav)]
        for name, table in tables:
            pd.DataFrame(table).to_csv(os.path.join(DATA_ROOT, "out", "marginals", "{}.csv".format(name)))
    elif opt == "add-fpt-modes":
        data_with_fpt_mode(data_file).to_csv(os.path.join(DATA_ROOT, "Storks_Africa__10_to_12_2012__with_behav__ALL__FPT.csv"))
    else:
        print("Nothing to do. Good night :)")
mrcslws/htmresearch | htmresearch/algorithms/sparse_net.py | 12 | 16191 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Implementation of Bruno Olshausen's sparse coding algorithm.
It relies on the formulation developed by Olshausen and Field (1996), slightly
modified so that it uses a Locally Competitive Algorithm (LCA) to compute the
coefficients, rather than a vanilla gradient descent, as proposed by Rozell
et al. (2008).
The algorithm expresses image patches on a basis of filter functions, with the
constraint that only a few of the coefficients on this basis (also called
activations) are non-zero. Thus, it tries to find the basis decomposition such
that the image projection is as close as possible to the original image,
with as few non-zero activations as possible.
The algorithm for solving the resulting objective function solves a dynamical
system by using thresholding functions to induce local competition between
dimensions.
"""
import random
from abc import ABCMeta, abstractmethod
import numpy as np
import matplotlib.pyplot as plt
EPSILON = 0.000001
class SparseNet(object):
"""
Base class for SparseNet implementation, which provides public methods for
training and encoding data, as well as methods for plotting the network's
basis and loss history.
The method to get data batches must be implemented in sub-classes specific to
each data type.
"""
__metaclass__ = ABCMeta
def __init__(self,
filterDim=64,
outputDim=64,
batchSize=100,
numLcaIterations=75,
learningRate=2.0,
decayCycle=100,
learningRateDecay=1.0,
lcaLearningRate=0.1,
thresholdDecay=0.95,
minThreshold=0.1,
thresholdType='soft',
verbosity=0,
showEvery=500,
seed=42):
"""
Initializes the SparseNet.
:param filterDim: (int) (Flattened) dimension of filters
:param outputDim: (int) Output dimension
:param batchSize: (int) Batch size for training
:param numLcaIterations: (int) Number of iterations in LCA
:param learningRate: (float) Learning rate
:param decayCycle: (int) Number of iterations between decays
:param learningRateDecay (float) Learning rate decay rate
:param lcaLearningRate (float) Learning rate in LCA
:param minThreshold: (float) Minimum activation threshold
during decay
:param verbosity: (int) Verbosity level
:param seed: (int) Seed for random number generators
"""
self.filterDim = filterDim
self.outputDim = outputDim
self.batchSize = batchSize
self._reset()
# training parameters
self.learningRate = learningRate
self.decayCycle = decayCycle
self.learningRateDecay = learningRateDecay
# LCA parameters
self.numLcaIterations = numLcaIterations
self.lcaLearningRate = lcaLearningRate
self.thresholdDecay = thresholdDecay
self.minThreshold = minThreshold
self.thresholdType = thresholdType
# debugging
self.verbosity = verbosity
self.showEvery = showEvery
self.seed = seed
if seed is not None:
np.random.seed(seed)
random.seed(seed)
def train(self, inputData, numIterations, reset=False):
"""
Trains the SparseNet, with the provided data.
The reset parameter can be set to False if the network should not be
reset before training (for example for continuing a previous started
training).
:param inputData: (array) Input data, of dimension (inputDim, numPoints)
:param numIterations: (int) Number of training iterations
:param reset: (bool) If set to True, reset basis and history
"""
if not isinstance(inputData, np.ndarray):
inputData = np.array(inputData)
if reset:
self._reset()
for _ in xrange(numIterations):
self._iteration += 1
batch = self._getDataBatch(inputData)
# check input dimension, change if necessary
if batch.shape[0] != self.filterDim:
raise ValueError("Batches and filter dimesions don't match!")
activations = self.encode(batch)
self._learn(batch, activations)
if self._iteration % self.decayCycle == 0:
self.learningRate *= self.learningRateDecay
if self.verbosity >= 1:
self.plotLoss()
self.plotBasis()
def encode(self, data, flatten=False):
"""
Encodes the provided input data, returning a sparse vector of activations.
It solves a dynamic system to find optimal activations, as proposed by
Rozell et al. (2008).
:param data: (array) Data to be encoded (single point or multiple)
:param flatten (bool) Whether or not the data needs to be flattened,
in the case of images for example. Does not
need to be enabled during training.
:return: (array) Array of sparse activations (dimOutput,
numPoints)
"""
if not isinstance(data, np.ndarray):
data = np.array(data)
# flatten if necessary
if flatten:
try:
data = np.reshape(data, (self.filterDim, data.shape[-1]))
except ValueError:
# only one data point
data = np.reshape(data, (self.filterDim, 1))
if data.shape[0] != self.filterDim:
raise ValueError("Data does not have the correct dimension!")
# if single data point, convert to 2-dimensional array for consistency
if len(data.shape) == 1:
data = data[:, np.newaxis]
projection = self.basis.T.dot(data)
representation = self.basis.T.dot(self.basis) - np.eye(self.outputDim)
states = np.zeros((self.outputDim, data.shape[1]))
threshold = 0.5 * np.max(np.abs(projection), axis=0)
activations = self._thresholdNonLinearity(states, threshold)
for _ in xrange(self.numLcaIterations):
# update dynamic system
states *= (1 - self.lcaLearningRate)
states += self.lcaLearningRate * (projection - representation.dot(activations))
activations = self._thresholdNonLinearity(states, threshold)
# decay threshold
threshold *= self.thresholdDecay
threshold[threshold < self.minThreshold] = self.minThreshold
return activations
def plotLoss(self, filename=None):
"""
Plots the loss history.
:param filename (string) Can be provided to save the figure
"""
plt.figure()
plt.plot(self.losses.keys(), self.losses.values())
plt.xlabel("Iteration")
plt.ylabel("Loss")
plt.title("Learning curve for {}".format(self))
if filename is not None:
plt.savefig(filename)
def plotBasis(self, filename=None):
"""
Plots the basis functions, reshaped in 2-dimensional arrays.
This representation makes the most sense for visual input.
:param: filename (string) Can be provided to save the figure
"""
if np.floor(np.sqrt(self.filterDim)) ** 2 != self.filterDim:
print "Basis visualization is not available if filterDim is not a square."
return
dim = int(np.sqrt(self.filterDim))
if np.floor(np.sqrt(self.outputDim)) ** 2 != self.outputDim:
outDimJ = np.sqrt(np.floor(self.outputDim / 2))
outDimI = np.floor(self.outputDim / outDimJ)
if outDimI > outDimJ:
outDimI, outDimJ = outDimJ, outDimI
else:
outDimI = np.floor(np.sqrt(self.outputDim))
outDimJ = outDimI
outDimI, outDimJ = int(outDimI), int(outDimJ)
basis = - np.ones((1 + outDimI * (dim + 1), 1 + outDimJ * (dim + 1)))
# populate array with basis values
k = 0
for i in xrange(outDimI):
for j in xrange(outDimJ):
colorLimit = np.max(np.abs(self.basis[:, k]))
mat = np.reshape(self.basis[:, k], (dim, dim)) / colorLimit
basis[1 + i * (dim + 1) : 1 + i * (dim + 1) + dim, \
1 + j * (dim + 1) : 1 + j * (dim + 1) + dim] = mat
k += 1
plt.figure()
plt.subplot(aspect="equal")
plt.pcolormesh(basis)
plt.axis([0, 1 + outDimJ * (dim + 1), 0, 1 + outDimI * (dim + 1)])
# remove ticks
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.title("Basis functions for {0}".format(self))
if filename is not None:
plt.savefig(filename)
def _reset(self):
"""
Reinitializes basis functions, iteration number and loss history.
"""
self.basis = np.random.randn(self.filterDim, self.outputDim)
self.basis /= np.sqrt(np.sum(self.basis ** 2, axis=0))
self._iteration = 0
self.losses = {}
def _learn(self, batch, activations):
"""
Learns a single iteration on the provided batch and activations.
:param batch: (array) Training batch, of dimension (filterDim,
batchSize)
:param activations:(array) Computed activations, of dimension (outputDim,
batchSize)
"""
batchResiduals = batch - self.basis.dot(activations)
loss = np.mean(np.sqrt(np.sum(batchResiduals ** 2, axis=0)))
self.losses[self._iteration] = loss
if self.verbosity >= 2:
if self._iteration % self.showEvery == 0:
print "At iteration {0}, loss is {1:.3f}".format(self._iteration, loss)
# update basis
gradBasis = batchResiduals.dot(activations.T) / self.batchSize
self.basis += self.learningRate * gradBasis
# normalize basis
self.basis /= np.sqrt(np.sum(self.basis ** 2, axis=0))
def _thresholdNonLinearity(self, input, threshold, thresholdType=None):
"""
Non linearity function, to transform the activations during training and
encoding.
:param input: (array) Activations
:param threshold: (array) Thresholds
:param thresholdType: (string) 'soft', 'absoluteHard' or 'relativeHard'
"""
if thresholdType == None:
thresholdType = self.thresholdType
activation = np.copy(input)
if thresholdType == 'soft':
return np.maximum(np.abs(activation) - threshold, 0.) * np.sign(activation)
if thresholdType == 'absoluteHard':
activation[np.abs(activation) < threshold] = 0.
return activation
if thresholdType == 'relativeHard':
activation[activation < threshold] = 0.
return activation
@abstractmethod
def _getDataBatch(self, inputData):
"""
Returns an array of dimensions (filterDim, batchSize), to be used as
batch for training data.
Must be implemented in sub-classes specific to different data types
:param: inputData: (array) Array of dimension (inputDim, numPoints)
:returns: (array) Batch of dimension (filterDim, batchSize)
"""
@classmethod
def read(cls, proto):
"""
Reads deserialized data from proto object
:param proto: (DynamicStructBuilder) Proto object
:return (SparseNet) SparseNet instance
"""
sparsenet = object.__new__(cls)
sparsenet.filterDim = proto.filterDim
sparsenet.outputDim = proto.outputDim
sparsenet.batchSize = proto.batchSize
lossHistoryProto = proto.losses
sparsenet.losses = {}
for i in xrange(len(lossHistoryProto)):
sparsenet.losses[lossHistoryProto[i].iteration] = lossHistoryProto[i].loss
sparsenet._iteration = proto.iteration
sparsenet.basis = np.reshape(proto.basis, newshape=(sparsenet.filterDim,
sparsenet.outputDim))
# training parameters
sparsenet.learningRate = proto.learningRate
sparsenet.decayCycle = proto.decayCycle
sparsenet.learningRateDecay = proto.learningRateDecay
# LCA parameters
sparsenet.numLcaIterations = proto.numLcaIterations
sparsenet.lcaLearningRate = proto.lcaLearningRate
sparsenet.thresholdDecay = proto.thresholdDecay
sparsenet.minThreshold = proto.minThreshold
sparsenet.thresholdType = proto.thresholdType
# debugging
sparsenet.verbosity = proto.verbosity
sparsenet.showEvery = proto.showEvery
sparsenet.seed = int(proto.seed)
if sparsenet.seed is not None:
np.random.seed(sparsenet.seed)
random.seed(sparsenet.seed)
return sparsenet
def write(self, proto):
"""
Writes serialized data to proto object
:param proto: (DynamicStructBuilder) Proto object
"""
proto.filterDim = self.filterDim
proto.outputDim = self.outputDim
proto.batchSize = self.batchSize
lossHistoryProto = proto.init("losses", len(self.losses))
i = 0
for iteration, loss in self.losses.iteritems():
iterationLossHistoryProto = lossHistoryProto[i]
iterationLossHistoryProto.iteration = iteration
iterationLossHistoryProto.loss = float(loss)
i += 1
proto.iteration = self._iteration
proto.basis = list(
self.basis.flatten().astype(type('float', (float,), {}))
)
# training parameters
proto.learningRate = self.learningRate
proto.decayCycle = self.decayCycle
proto.learningRateDecay = self.learningRateDecay
# LCA parameters
proto.numLcaIterations = self.numLcaIterations
proto.lcaLearningRate = self.lcaLearningRate
proto.thresholdDecay = self.thresholdDecay
proto.minThreshold = self.minThreshold
proto.thresholdType = self.thresholdType
# debugging
proto.verbosity = self.verbosity
proto.showEvery = self.showEvery
proto.seed = self.seed
def __eq__(self, other):
"""
:param other: (SparseNet) Other SparseNet to compare to
:return: (bool) True if both networks are equal
"""
if self.filterDim != other.filterDim:
return False
if self.outputDim != other.outputDim:
return False
if self._iteration != other._iteration:
return False
for iteration, loss in self.losses.iteritems():
if iteration not in other.losses:
return False
if abs(loss - other.losses[iteration]) > EPSILON:
return False
if np.mean(np.abs(self.basis - other.basis)) > EPSILON:
return False
if self.learningRate != other.learningRate:
return False
if self.decayCycle != other.decayCycle:
return False
if self.learningRateDecay != other.learningRateDecay:
return False
if self.numLcaIterations != other.numLcaIterations:
return False
if self.lcaLearningRate != other.lcaLearningRate:
return False
if self.thresholdDecay != other.thresholdDecay:
return False
if self.minThreshold != other.minThreshold:
return False
if self.thresholdType != other.thresholdType:
return False
if self.seed != other.seed:
return False
return True
def __ne__(self, other):
"""
:param other: (SparseNet) Other SparseNet to compare to
:return: (bool) True if both networks are not equal
"""
return not self == other
def __repr__(self):
"""
Custom representation method.
"""
className = self.__class__.__name__
return className + "({0}, {1})".format(self.filterDim, self.outputDim)
| agpl-3.0 |
ananthamurthy/eyeBlinkBehaviour | analysis/analyze_mouse_performance.py | 2 | 5547 | """analyze_dir.py:
Analyze a given directory. All trials are accumulated and plotted.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2016, Dilawar Singh "
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import os
import sys
import numpy as np
import dateutil
import dateutil.parser
import matplotlib
import matplotlib.pyplot as plt
from collections import defaultdict
import logging
import re
import analyze_trial as at
import session_type as st
import math
matplotlib.rcParams.update( {'font.size' : 10} )
try:
plt.style.use('classic')
except Exception as e:
pass
args_ = None
csplus, csminus = [], []
csplusIdx, csminusIdx = [], []
distraction = []
distractionIdx = []
probes = []
probesIdx = []
def plot_subplot( ax, data, idx, tVec, aN, bN, title ):
    """Draw one session's trials as a heat map on the given axis.

    :param ax:    matplotlib axis to label
    :param data:  per-trial traces; assumes an ndarray (data.min()/max()
                  are called below) -- TODO confirm against callers
    :param idx:   trial indices (used only for the y extent here)
    :param tVec:  time vector; tVec[aN]..tVec[bN] give the x extent
    :param title: axis title
    """
    csplusData = np.vstack( data )
    plt.imshow( csplusData, cmap = "jet"
            , extent = [tVec[aN], tVec[bN], len(idx), 0]
            , vmin = data.min(), vmax = data.max()
            , interpolation = 'none', aspect='auto'
            )
    # ax.set_xticks( range(0,len(idx),2), idx[::2] )
    ax.set_xlabel( 'Time (ms)' )
    ax.set_ylabel( '# Trial' )
    ax.set_title( title )
    ax.legend( )
    # ax.colorbar( )
def accept( subdir_name, reject_list ):
    """Return True unless any entry of *reject_list* occurs in *subdir_name*.

    Logs an INFO message for rejected directories.
    """
    rejected = next((token for token in reject_list if token in subdir_name), None)
    if rejected is None:
        return True
    print( '[INFO] Dir %s is rejected' % subdir_name )
    return False
def plot_area_under_curve( cspData, normalised = True ):
    """Scatter tone-AOC against puff-AOC for every session, one subplot each.

    :param cspData:    list of (time, sense, area) tuples, one per session;
                       `area` iterates (tone_area, puff_area) pairs per trial
                       -- TODO confirm against session_type.session_data
    :param normalised: if True, each axis is scaled by its session maximum

    Saves the figure under args_.dir. NOTE(review): indexing `area[0]`
    after `zip(*area)` relies on Python 2 zip() returning a list.
    """
    if normalised:
        outfile = os.path.join( args_.dir, 'area_under_tone_puff_normalised.png' )
    else:
        outfile = os.path.join( args_.dir, 'area_under_tone_puff_raw.png' )
    for i, (t, sense, area) in enumerate(cspData):
        # one subplot per session, laid out in two columns
        ax = plt.subplot( math.ceil( len(cspData)/ 2.0 ), 2, i + 1 )
        # transpose [(tone, puff), ...] into (tones, puffs)
        area = zip(*area)
        if not normalised:
            plt.scatter( area[0] , area[1] )
            ax.set_xlim( 0, 3000 )
            ax.set_ylim( 0, 3000 )
        else:
            plt.scatter( area[0] / np.max( area[0] ) , area[1] / np.max( area[1]))
        plt.xlabel( 'Tone AOC' )
        plt.ylabel( 'Puff AOC' )
    plt.savefig( outfile )
    print('[INFO] Saved tone/puff area scatter for all session to %s' % outfile)
def plot_performance( cspData ):
    """Plot per-session performance, defined as mean tone AOC / mean puff AOC.

    :param cspData: list of (time, sense, area) tuples, one per session.

    Saves performance.png under args_.dir. NOTE(review): `tone, puff = area`
    after `zip(*area)` relies on Python 2 zip() returning a sequence.
    """
    global args_
    outfile = os.path.join( args_.dir, 'performance.png' )
    sessions, performances = [], []
    for i, (t, sense, area) in enumerate( cspData ):
        sessions.append( i + 1 )
        # transpose per-trial (tone, puff) pairs into two series
        area = zip( *area )
        tone, puff = area
        performances.append( np.mean(tone) / np.mean( puff) )
    plt.plot( sessions, performances , '-*')
    plt.xlabel( '# Session ' )
    plt.ylabel( 'Performance = tone / puff ' )
    plt.savefig( outfile )
    print( '[INFO] Performance is save to %s' % outfile )
def plot_csp_data( cspData ):
    """Plot CS_P (CS-plus) trials from each session.

    Produces: per-session mean sensor traces (all_cs_p.png), raw and
    normalised tone/puff area scatters, and the overall performance plot.

    :param cspData: list of (time, sense, area) tuples, one per session.
    """
    global args_
    allSession = []
    allArea = []  # NOTE(review): collected nowhere -- appears to be unused
    for t, sense, area in cspData:
        # average the sensor traces over all trials of this session
        allSession.append( np.mean(sense, axis=0) )
    for i, sens in enumerate(allSession):
        plt.subplot( len(allSession), 1, i + 1 )
        plt.plot( sens, label = 'Session %s' % (i + 1) )
        plt.legend( )
    # plt.colorbar( )
    outfile = os.path.join( args_.dir, 'all_cs_p.png' )
    plt.savefig( outfile )
    print( '[INFO] Saved all CS_P to %s' % outfile )
    plt.figure( )
    plot_area_under_curve( cspData, False )
    plt.figure( )
    plot_area_under_curve( cspData, True )
    # Final performance.
    plt.figure( )
    plot_performance( cspData )
def rank_behaviour( session_type_dirs ):
    """Rank the behaviour of a given mouse.

    :param session_type_dirs: list of SessionType* directories that together
        contain all the data recorded for this mouse.

    Collects the CS_P (CS-plus) trials of every session and hands them to
    plot_csp_data, which produces the summary plots. (The unused local
    `areaData` was removed.)
    """
    cspData = []
    for sd in session_type_dirs:
        sessionData = st.session_data( sd )
        cspData.append( sessionData['CS_P'] )
    plot_csp_data( cspData )
def get_sessions( dir_name, **kwargs ):
    """Collect SessionType* sub-directories under dir_name and rank them.

    :param dir_name: root directory of one mouse's behaviour data.
    :param kwargs: `ignore_session_types` -- list of substrings; any
        sub-directory whose name contains one of them is skipped.
    :return: list of accepted SessionType* directory paths (previously the
        function returned None even though the caller bound the result).
    """
    ignoreSessionTypeList = kwargs.get( 'ignore_session_types', [] )
    # Compile the pattern once instead of on every directory visited;
    # the unused local `files` dict was removed.
    stPat = re.compile( r'SessionType\d+' )
    validSubDirs = []
    for d, sd, fs in os.walk( dir_name ):
        for sdd in sd:
            if stPat.search( sdd ):
                if accept( sdd, ignoreSessionTypeList ):
                    validSubDirs.append( os.path.join(d, sdd) )
    rank_behaviour( validSubDirs )
    return validSubDirs
def main( ):
    """Entry point: prepare the output directory and analyse all sessions."""
    global args_
    # default the plot output directory to <data dir>/_plots
    if not args_.output_dir:
        args_.output_dir = os.path.join(args_.dir, '_plots')
    if not os.path.isdir( args_.output_dir):
        os.makedirs( args_.output_dir )
    # SessionType12 is excluded from the analysis -- TODO confirm why
    sessions = get_sessions( args_.dir, ignore_session_types=[ 'SessionType12'] )
if __name__ == '__main__':
    import argparse
    # Argument parser.
    description = '''Scoring mouse performance'''
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--dir', '-d'
        , required = True
        , help = 'Directory to seach for behaviour data for a mouse'
        )
    parser.add_argument('--subplots', '-s'
        , action = 'store_true'
        , help = 'Each trial in subplot.'
        )
    parser.add_argument('--output_dir', '-o'
        , required = False
        , default = ''
        , help = 'Directory to save results.'
        )
    # Plain namespace object: parse_args writes the parsed attributes onto
    # it, and the rest of the module reads them via the global `args_`.
    class Args: pass
    args_ = Args()
    parser.parse_args(namespace=args_)
    main( )
| gpl-3.0 |
lthurlow/Boolean-Constrained-Routing | networkx-1.8.1/build/lib.linux-i686-2.7/networkx/readwrite/tests/test_gml.py | 35 | 3099 | #!/usr/bin/env python
import io
from nose.tools import *
from nose import SkipTest
import networkx
class TestGraph(object):
    """Round-trip tests for networkx's GML reader/writer."""

    @classmethod
    def setupClass(cls):
        # Skip the whole suite when no pyparsing implementation is importable;
        # networkx's GML parser requires it.
        global pyparsing
        try:
            import pyparsing
        except ImportError:
            try:
                # older matplotlib releases shipped a bundled pyparsing copy
                import matplotlib.pyparsing as pyparsing
            except:
                raise SkipTest('gml test: pyparsing not available.')

    def setUp(self):
        # Sample GML document shared by the parse/read round-trip tests below.
        # (GML is whitespace-insensitive, so leading indentation of these
        # lines does not affect parsing.)
        self.simple_data="""Creator me
graph [
comment "This is a sample graph"
directed 1
IsPlanar 1
pos [ x 0 y 1 ]
node [
id 1
label "Node 1"
pos [ x 1 y 1 ]
]
node [
id 2
pos [ x 1 y 2 ]
label "Node 2"
]
node [
id 3
label "Node 3"
pos [ x 1 y 3 ]
]
edge [
source 1
target 2
label "Edge from node 1 to node 2"
color [line "blue" thickness 3]
]
edge [
source 2
target 3
label "Edge from node 2 to node 3"
]
edge [
source 3
target 1 label
"Edge from node 3 to node 1"
]
]
"""

    def test_parse_gml(self):
        # relabel=True replaces numeric node ids with their 'label' attribute
        G=networkx.parse_gml(self.simple_data,relabel=True)
        assert_equals(sorted(G.nodes()),\
                      ['Node 1', 'Node 2', 'Node 3'])
        assert_equals( [e for e in sorted(G.edges())],\
                       [('Node 1', 'Node 2'),
                        ('Node 2', 'Node 3'),
                        ('Node 3', 'Node 1')])

        # edge attributes survive parsing, including the nested 'color' record
        assert_equals( [e for e in sorted(G.edges(data=True))],\
                       [('Node 1', 'Node 2',
                         {'color': {'line': 'blue', 'thickness': 3},
                          'label': 'Edge from node 1 to node 2'}),
                        ('Node 2', 'Node 3',
                         {'label': 'Edge from node 2 to node 3'}),
                        ('Node 3', 'Node 1',
                         {'label': 'Edge from node 3 to node 1'})])

    def test_read_gml(self):
        # Write the sample document to a temp file, read it back, and check
        # read_gml agrees with parse_gml on nodes and edges.
        import os,tempfile
        (fd,fname)=tempfile.mkstemp()
        fh=open(fname,'w')
        fh.write(self.simple_data)
        fh.close()
        Gin=networkx.read_gml(fname,relabel=True)
        G=networkx.parse_gml(self.simple_data,relabel=True)
        assert_equals( sorted(G.nodes(data=True)), sorted(Gin.nodes(data=True)))
        assert_equals( sorted(G.edges(data=True)), sorted(Gin.edges(data=True)))
        os.close(fd)
        os.unlink(fname)

    def test_relabel_duplicate(self):
        # Two nodes share the label "same": relabeling must raise.
        data="""
graph
[
label ""
directed 1
node
[
id 0
label "same"
]
node
[
id 1
label "same"
]
]
"""
        fh = io.BytesIO(data.encode('UTF-8'))
        fh.seek(0)
        assert_raises(networkx.NetworkXError,networkx.read_gml,fh,relabel=True)

    def test_bool(self):
        # Booleans are serialized as 0/1 integers by generate_gml.
        G=networkx.Graph()
        G.add_node(1,on=True)
        G.add_edge(1,2,on=False)
        data = '\n'.join(list(networkx.generate_gml(G)))
        answer ="""graph [
  node [
    id 0
    label 1
    on 1
  ]
  node [
    id 1
    label 2
  ]
  edge [
    source 0
    target 1
    on 0
  ]
]"""
        assert_equal(data,answer)
| mit |
mhue/scikit-learn | sklearn/qda.py | 140 | 7682 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <matthieu.perrot@gmail.com>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
    """
    Quadratic Discriminant Analysis (QDA)

    A classifier with a quadratic decision boundary, generated
    by fitting class conditional densities to the data
    and using Bayes' rule.

    The model fits a Gaussian density to each class.

    Read more in the :ref:`User Guide <lda_qda>`.

    Parameters
    ----------
    priors : array, optional, shape = [n_classes]
        Priors on classes

    reg_param : float, optional
        Regularizes the covariance estimate as
        ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``

    Attributes
    ----------
    covariances_ : list of array-like, shape = [n_features, n_features]
        Covariance matrices of each class.

    means_ : array-like, shape = [n_classes, n_features]
        Class means.

    priors_ : array-like, shape = [n_classes]
        Class priors (sum to 1).

    rotations_ : list of arrays
        For each class k an array of shape [n_features, n_k], with
        ``n_k = min(n_features, number of elements in class k)``
        It is the rotation of the Gaussian distribution, i.e. its
        principal axis.

    scalings_ : list of arrays
        For each class k an array of shape [n_k]. It contains the scaling
        of the Gaussian distributions along its principal axes, i.e. the
        variance in the rotated coordinate system.

    Examples
    --------
    >>> from sklearn.qda import QDA
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = QDA()
    >>> clf.fit(X, y)
    QDA(priors=None, reg_param=0.0)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.lda.LDA: Linear discriminant analysis
    """

    def __init__(self, priors=None, reg_param=0.):
        # NOTE(review): converting `priors` here deviates from the sklearn
        # convention of storing constructor params untouched -- confirm
        # before relying on get_params/set_params round-trips.
        self.priors = np.asarray(priors) if priors is not None else None
        self.reg_param = reg_param

    def fit(self, X, y, store_covariances=False, tol=1.0e-4):
        """
        Fit the QDA model according to the given training data and parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array, shape = [n_samples]
            Target values (integers)

        store_covariances : boolean
            If True the covariance matrices are computed and stored in the
            `self.covariances_` attribute.

        tol : float, optional, default 1.0e-4
            Threshold used for rank estimation.

        Returns
        -------
        self : object
            The fitted estimator.
        """
        X, y = check_X_y(X, y)
        self.classes_, y = np.unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            # empirical priors from the observed class frequencies
            self.priors_ = bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors

        cov = None
        if store_covariances:
            cov = []
        means = []
        scalings = []
        rotations = []
        # Estimate one Gaussian per class via an SVD of the centered data.
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            if len(Xg) == 1:
                raise ValueError('y has only 1 sample in class %s, covariance '
                                 'is ill defined.' % str(self.classes_[ind]))
            Xgc = Xg - meang
            # Xgc = U * S * V.T
            U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
            rank = np.sum(S > tol)
            if rank < n_features:
                warnings.warn("Variables are collinear")
            # per-axis variance, shrunk toward the identity by reg_param
            S2 = (S ** 2) / (len(Xg) - 1)
            S2 = ((1 - self.reg_param) * S2) + self.reg_param
            if store_covariances:
                # cov = V * (S^2 / (n-1)) * V.T
                cov.append(np.dot(S2 * Vt.T, Vt))
            scalings.append(S2)
            rotations.append(Vt.T)
        if store_covariances:
            self.covariances_ = cov
        self.means_ = np.asarray(means)
        self.scalings_ = scalings
        self.rotations_ = rotations
        return self

    def _decision_function(self, X):
        # Per-class log-posterior (up to an additive constant shared by all
        # classes) for each sample.
        check_is_fitted(self, 'classes_')

        X = check_array(X)
        norm2 = []
        for i in range(len(self.classes_)):
            R = self.rotations_[i]
            S = self.scalings_[i]
            Xm = X - self.means_[i]
            # project centered data onto principal axes and whiten by the
            # per-axis standard deviation
            X2 = np.dot(Xm, R * (S ** (-0.5)))
            norm2.append(np.sum(X2 ** 2, 1))
        norm2 = np.array(norm2).T   # shape = [len(X), n_classes]
        # u = log-determinant of each class covariance (sum of log variances)
        u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
        return (-0.5 * (norm2 + u) + np.log(self.priors_))

    def decision_function(self, X):
        """Apply decision function to an array of samples.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples (test vectors).

        Returns
        -------
        C : array, shape = [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,], giving the
            log likelihood ratio of the positive class.
        """
        dec_func = self._decision_function(X)
        # handle special case of two classes
        if len(self.classes_) == 2:
            return dec_func[:, 1] - dec_func[:, 0]
        return dec_func

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
        """
        # argmax of the per-class log-posterior, mapped back to class labels
        d = self._decision_function(X)
        y_pred = self.classes_.take(d.argmax(1))
        return y_pred

    def predict_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior probabilities of classification per class.
        """
        values = self._decision_function(X)
        # compute the likelihood of the underlying gaussian models
        # up to a multiplicative constant.
        # (subtracting the row max keeps exp() numerically stable)
        likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
        # compute posterior probabilities
        return likelihood / likelihood.sum(axis=1)[:, np.newaxis]

    def predict_log_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior log-probabilities of classification per class.
        """
        # XXX : can do better to avoid precision overflows
        probas_ = self.predict_proba(X)
        return np.log(probas_)
| bsd-3-clause |
calico/basenji | bin/basenji_sat_bed.py | 1 | 13595 | #!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import gc
import json
import os
import pdb
import pickle
from queue import Queue
import random
import sys
from threading import Thread
import h5py
import numpy as np
import pandas as pd
import pysam
import tensorflow as tf
# TF1 compatibility shim: this script relies on eager-style model execution,
# so enable eager mode when running under TensorFlow 1.x.
if tf.__version__[0] == '1':
  tf.compat.v1.enable_eager_execution()
from basenji import bed
from basenji import dna_io
from basenji import seqnn
from basenji import stream
'''
basenji_sat_bed.py
Perform an in silico saturation mutagenesis of sequences in a BED file.
'''
################################################################################
# main
################################################################################
def main():
  """Run in silico saturation mutagenesis over BED intervals and write
  per-position/per-nucleotide scores to <out_dir>/scores.h5."""
  usage = 'usage: %prog [options] <params_file> <model_file> <bed_file>'
  parser = OptionParser(usage)
  parser.add_option('-d', dest='mut_down',
      default=0, type='int',
      help='Nucleotides downstream of center sequence to mutate [Default: %default]')
  parser.add_option('-f', dest='genome_fasta',
      default=None,
      help='Genome FASTA for sequences [Default: %default]')
  parser.add_option('-l', dest='mut_len',
      default=0, type='int',
      help='Length of center sequence to mutate [Default: %default]')
  parser.add_option('-o', dest='out_dir',
      default='sat_mut', help='Output directory [Default: %default]')
  parser.add_option('--plots', dest='plots',
      default=False, action='store_true',
      help='Make heatmap plots [Default: %default]')
  parser.add_option('-p', dest='processes',
      default=None, type='int',
      help='Number of processes, passed by multi script')
  parser.add_option('--rc', dest='rc',
      default=False, action='store_true',
      help='Ensemble forward and reverse complement predictions [Default: %default]')
  parser.add_option('--shifts', dest='shifts',
      default='0',
      help='Ensemble prediction shifts [Default: %default]')
  parser.add_option('--stats', dest='sad_stats',
      default='sum',
      help='Comma-separated list of stats to save. [Default: %default]')
  parser.add_option('-t', dest='targets_file',
      default=None, type='str',
      help='File specifying target indexes and labels in table format')
  parser.add_option('-u', dest='mut_up',
      default=0, type='int',
      help='Nucleotides upstream of center sequence to mutate [Default: %default]')
  (options, args) = parser.parse_args()

  # Three invocation modes, distinguished by positional argument count.
  if len(args) == 3:
    # single worker
    params_file = args[0]
    model_file = args[1]
    bed_file = args[2]

  elif len(args) == 4:
    # master script
    options_pkl_file = args[0]
    params_file = args[1]
    model_file = args[2]
    bed_file = args[3]

    # load options
    options_pkl = open(options_pkl_file, 'rb')
    options = pickle.load(options_pkl)
    options_pkl.close()

  elif len(args) == 5:
    # multi worker
    options_pkl_file = args[0]
    params_file = args[1]
    model_file = args[2]
    bed_file = args[3]
    worker_index = int(args[4])

    # load options
    options_pkl = open(options_pkl_file, 'rb')
    options = pickle.load(options_pkl)
    options_pkl.close()

    # update output directory
    options.out_dir = '%s/job%d' % (options.out_dir, worker_index)

  else:
    parser.error('Must provide parameter and model files and BED file')

  if not os.path.isdir(options.out_dir):
    os.mkdir(options.out_dir)

  options.shifts = [int(shift) for shift in options.shifts.split(',')]
  options.sad_stats = [sad_stat.lower() for sad_stat in options.sad_stats.split(',')]

  # Derive the mutagenesis window: either explicit up/down extents, or a
  # window of mut_len centered on the sequence midpoint.
  if options.mut_up > 0 or options.mut_down > 0:
    options.mut_len = options.mut_up + options.mut_down
  else:
    assert(options.mut_len > 0)
    options.mut_up = options.mut_len // 2
    options.mut_down = options.mut_len - options.mut_up

  #################################################################
  # read parameters and targets

  # read model parameters
  with open(params_file) as params_open:
    params = json.load(params_open)
  params_model = params['model']
  params_train = params['train']

  # read targets
  if options.targets_file is None:
    target_slice = None
  else:
    targets_df = pd.read_table(options.targets_file, index_col=0)
    target_slice = targets_df.index

  #################################################################
  # setup model

  seqnn_model = seqnn.SeqNN(params_model)
  seqnn_model.restore(model_file)
  seqnn_model.build_slice(target_slice)
  seqnn_model.build_ensemble(options.rc, options.shifts)

  num_targets = seqnn_model.num_targets()

  #################################################################
  # sequence dataset

  # read sequences from BED
  seqs_dna, seqs_coords = bed.make_bed_seqs(
    bed_file, options.genome_fasta, params_model['seq_length'], stranded=True)

  # filter for worker SNPs
  if options.processes is not None:
    # NOTE(review): worker_index is only bound in the 5-argument (multi
    # worker) mode above; passing -p in the 3-argument mode would raise
    # NameError here -- confirm intended usage.
    worker_bounds = np.linspace(0, len(seqs_dna), options.processes+1, dtype='int')
    seqs_dna = seqs_dna[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
    seqs_coords = seqs_coords[worker_bounds[worker_index]:worker_bounds[worker_index+1]]

  num_seqs = len(seqs_dna)

  # determine mutation region limits
  seq_mid = params_model['seq_length'] // 2
  mut_start = seq_mid - options.mut_up
  mut_end = mut_start + options.mut_len

  # make sequence generator
  seqs_gen = satmut_gen(seqs_dna, mut_start, mut_end)

  #################################################################
  # setup output

  scores_h5_file = '%s/scores.h5' % options.out_dir
  if os.path.isfile(scores_h5_file):
    os.remove(scores_h5_file)
  scores_h5 = h5py.File(scores_h5_file, 'w')
  scores_h5.create_dataset('seqs', dtype='bool',
      shape=(num_seqs, options.mut_len, 4))
  for sad_stat in options.sad_stats:
    scores_h5.create_dataset(sad_stat, dtype='float16',
        shape=(num_seqs, options.mut_len, 4, num_targets))

  # store mutagenesis sequence coordinates
  # (dead code below intentionally kept as a string literal)
  """
  seqs_chr, seqs_start, _, seqs_strand = zip(*seqs_coords)
  seqs_chr = np.array(seqs_chr, dtype='S')
  seqs_start = np.array(seqs_start) + mut_start
  seqs_end = seqs_start + options.mut_len
  seqs_strand = np.array(seqs_strand, dtype='S')
  scores_h5.create_dataset('chrom', data=seqs_chr)
  scores_h5.create_dataset('start', data=seqs_start)
  scores_h5.create_dataset('end', data=seqs_end)
  scores_h5.create_dataset('strand', data=seqs_strand)
  """

  # store mutagenesis sequence coordinates
  # (strand-aware: the mutated window maps to opposite ends of the interval)
  scores_chr = []
  scores_start = []
  scores_end = []
  scores_strand = []
  for seq_chr, seq_start, seq_end, seq_strand in seqs_coords:
    scores_chr.append(seq_chr)
    scores_strand.append(seq_strand)
    if seq_strand == '+':
      score_start = seq_start + mut_start
      score_end = score_start + options.mut_len
    else:
      score_end = seq_end - mut_start
      score_start = score_end - options.mut_len
    scores_start.append(score_start)
    scores_end.append(score_end)

  scores_h5.create_dataset('chr', data=np.array(scores_chr, dtype='S'))
  scores_h5.create_dataset('start', data=np.array(scores_start))
  scores_h5.create_dataset('end', data=np.array(scores_end))
  scores_h5.create_dataset('strand', data=np.array(scores_strand, dtype='S'))

  # reference prediction + 3 alternative nucleotides per mutated position
  preds_per_seq = 1 + 3*options.mut_len

  # single writer thread serializes all HDF5 writes
  score_threads = []
  score_queue = Queue()
  for i in range(1):
    sw = ScoreWorker(score_queue, scores_h5, options.sad_stats,
                     mut_start, mut_end)
    sw.start()
    score_threads.append(sw)

  #################################################################
  # predict scores, write output

  # find center
  preds_length = seqnn_model.target_lengths[0]
  center_start = preds_length // 2
  if preds_length % 2 == 0:
    center_end = center_start + 2
  else:
    center_end = center_start + 1

  # initialize predictions stream
  preds_stream = stream.PredStreamGen(seqnn_model, seqs_gen, params_train['batch_size'])

  # predictions index
  pi = 0

  for si in range(num_seqs):
    print('Predicting %d' % si, flush=True)

    # collect sequence predictions
    seq_preds_sum = []
    seq_preds_center = []
    seq_preds_scd = []
    preds_mut0 = preds_stream[pi]
    for spi in range(preds_per_seq):
      preds_mut = preds_stream[pi]
      preds_sum = preds_mut.sum(axis=0)
      seq_preds_sum.append(preds_sum)
      if 'center' in options.sad_stats:
        preds_center = preds_mut[center_start:center_end,:].sum(axis=0)
        seq_preds_center.append(preds_center)
      if 'scd' in options.sad_stats:
        preds_scd = np.sqrt(((preds_mut-preds_mut0)**2).sum(axis=0))
        seq_preds_scd.append(preds_scd)
      pi += 1
    seq_preds_sum = np.array(seq_preds_sum)
    seq_preds_center = np.array(seq_preds_center)
    seq_preds_scd = np.array(seq_preds_scd)

    # wait for previous to finish
    score_queue.join()

    # queue sequence for scoring
    seq_pred_stats = (seq_preds_sum, seq_preds_center, seq_preds_scd)
    score_queue.put((seqs_dna[si], seq_pred_stats, si))

    # queue sequence for plotting
    if options.plots:
      # NOTE(review): plot_queue is never created anywhere in this file, so
      # enabling --plots raises NameError here -- confirm/restore PlotWorker
      # wiring before use.
      plot_queue.put((seqs_dna[si], seq_preds_sum, si))

    gc.collect()

  # finish queue
  print('Waiting for threads to finish.', flush=True)
  score_queue.join()

  # close output HDF5
  scores_h5.close()
def satmut_gen(seqs_dna, mut_start, mut_end):
  """Yield 1-hot coded sequences for saturation mutagenesis.

  For each input DNA string, the reference 1-hot coding is yielded first,
  followed by the three alternative-nucleotide codings at every position
  in [mut_start, mut_end).
  """
  for dna in seqs_dna:
    # reference sequence first
    ref_1hot = dna_io.dna_1hot(dna)
    yield ref_1hot

    # then each non-reference nucleotide at each mutated position
    for pos in range(mut_start, mut_end):
      for nt in range(4):
        if ref_1hot[pos, nt] != 0:
          continue  # skip the reference nucleotide
        mut_1hot = np.copy(ref_1hot)
        mut_1hot[pos, :] = 0
        mut_1hot[pos, nt] = 1
        yield mut_1hot
class PlotWorker(Thread):
  """Daemon thread that drains (seq_dna, seq_preds, si) tuples from a queue.

  Currently only logs each item; actual plotting is a stub.
  """

  def __init__(self, plot_queue, out_dir):
    super(PlotWorker, self).__init__()
    self.queue = plot_queue
    self.daemon = True
    self.out_dir = out_dir

  def run(self):
    while True:
      # pull the next work item, acknowledge it when done
      seq_dna, seq_preds, si = self.queue.get()
      print('Plotting %d' % si, flush=True)
      self.queue.task_done()
class ScoreWorker(Thread):
  """Daemon thread: pull per-sequence prediction stats off a queue, assemble
  per-position/per-nucleotide score matrices, and write them into the shared
  HDF5 file."""

  def __init__(self, score_queue, scores_h5, sad_stats, mut_start, mut_end):
    Thread.__init__(self)
    self.queue = score_queue
    self.daemon = True
    self.scores_h5 = scores_h5    # open h5py.File shared with the main thread
    self.sad_stats = sad_stats    # stats to store, e.g. ['sum','center','scd']
    self.mut_start = mut_start    # mutagenesis window start (sequence coords)
    self.mut_end = mut_end        # mutagenesis window end (exclusive)

  def run(self):
    while True:
      si = -1  # sentinel so the error message is safe if get() itself fails
      try:
        # unload predictions
        seq_dna, seq_pred_stats, si = self.queue.get()
        seq_preds_sum, seq_preds_center, seq_preds_scd = seq_pred_stats
        print('Writing %d' % si, flush=True)

        # seq_preds_sum is (1 + 3*mut_len) x (num_targets)
        num_preds, num_targets = seq_preds_sum.shape
        mut_len = self.mut_end - self.mut_start

        # one hot code mutagenized DNA
        seq_dna_mut = seq_dna[self.mut_start:self.mut_end]
        seq_1hot_mut = dna_io.dna_1hot(seq_dna_mut)

        # write to HDF5
        self.scores_h5['seqs'][si,:,:] = seq_1hot_mut

        for sad_stat in self.sad_stats:
          # initialize scores
          seq_scores = np.zeros((mut_len, 4, num_targets), dtype='float32')

          # summary stat
          if sad_stat == 'sum':
            seq_preds_stat = seq_preds_sum
          elif sad_stat == 'center':
            seq_preds_stat = seq_preds_center
          elif sad_stat == 'scd':
            seq_preds_stat = seq_preds_scd
          else:
            # was: print('... "%s"' % options.sad_stat); exit(1)
            # 'options' is not in scope in this thread (NameError) and
            # exit() in a worker thread only raised into the except below.
            raise ValueError('Unrecognized summary statistic "%s"' % sad_stat)

          # predictions index (starting at first mutagenesis)
          pi = 1

          # for each mutated position
          for mi in range(mut_len):
            # for each nucleotide
            for ni in range(4):
              if seq_1hot_mut[mi,ni]:
                # reference score
                seq_scores[mi,ni,:] = seq_preds_stat[0,:]
              else:
                # mutation score
                seq_scores[mi,ni,:] = seq_preds_stat[pi,:]
                pi += 1

          # normalize positions
          # NOTE(review): 'sqdiff' never appears in sad_stats (which are
          # 'sum'/'center'/'scd'); possibly 'scd' was intended -- confirm
          # before changing the condition.
          if sad_stat != 'sqdiff':
            seq_scores -= seq_scores.mean(axis=1, keepdims=True)

          # write to HDF5
          self.scores_h5[sad_stat][si,:,:,:] = seq_scores.astype('float16')

      except Exception:
        # was a bare except:, which also swallowed SystemExit and
        # KeyboardInterrupt, and raised NameError on 'si' when queue.get()
        # itself failed
        print('ERROR: Sequence %d failed' % si, file=sys.stderr, flush=True)

      # communicate finished task
      self.queue.task_done()
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
  main()  # parse CLI options and run the saturation mutagenesis pipeline
| apache-2.0 |
B3AU/waveTree | sklearn/linear_model/tests/test_sgd.py | 5 | 29311 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale
class SparseSGDClassifier(SGDClassifier):
    """SGDClassifier variant that converts every input matrix to CSR sparse
    format before delegating to the dense implementation, so the sparse code
    path is exercised by the shared test suite."""

    def fit(self, X, y, *args, **kw):
        return SGDClassifier.fit(self, sp.csr_matrix(X), y, *args, **kw)

    def partial_fit(self, X, y, *args, **kw):
        return SGDClassifier.partial_fit(self, sp.csr_matrix(X), y, *args, **kw)

    def decision_function(self, X, *args, **kw):
        return SGDClassifier.decision_function(self, sp.csr_matrix(X), *args, **kw)

    def predict_proba(self, X, *args, **kw):
        return SGDClassifier.predict_proba(self, sp.csr_matrix(X), *args, **kw)

    def predict_log_proba(self, X, *args, **kw):
        return SGDClassifier.predict_log_proba(self, sp.csr_matrix(X), *args, **kw)
class SparseSGDRegressor(SGDRegressor):
    """SGDRegressor variant that converts every input matrix to CSR sparse
    format before delegating to the dense implementation."""

    def fit(self, X, y, *args, **kw):
        return SGDRegressor.fit(self, sp.csr_matrix(X), y, *args, **kw)

    def partial_fit(self, X, y, *args, **kw):
        return SGDRegressor.partial_fit(self, sp.csr_matrix(X), y, *args, **kw)

    def decision_function(self, X, *args, **kw):
        return SGDRegressor.decision_function(self, sp.csr_matrix(X), *args, **kw)
##
## Test Data
##

# test sample 1: small linearly separable two-class set
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]

# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
               [1, 1], [0.75, 0.5], [1.5, 1.5],
               [-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]

# test sample 3: sparse one-hot style features, two classes
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
               [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
               [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
               [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])

# test sample 4 - two more or less redundent feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
               [1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
               [0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
               [0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])

# real dataset used by the iris-based tests
iris = datasets.load_iris()

# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
##
## Classification Test Case
##
class CommonTest(object):
    """Tests shared by the dense and sparse SGD estimator variants.

    Subclasses set the `factory` attribute to the estimator class under test.
    """

    def _test_warm_start(self, X, Y, lr):
        # Test that explicit warm restart...
        clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
                           learning_rate=lr)
        clf.fit(X, Y)

        clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
                            learning_rate=lr)
        clf2.fit(X, Y,
                 coef_init=clf.coef_.copy(),
                 intercept_init=clf.intercept_.copy())

        #... and implicit warm restart are equivalent.
        clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
                            warm_start=True, learning_rate=lr)
        clf3.fit(X, Y)

        assert_equal(clf3.t_, clf.t_)
        assert_array_almost_equal(clf3.coef_, clf.coef_)

        # refitting with a new alpha must continue from the previous state
        clf3.set_params(alpha=0.001)
        clf3.fit(X, Y)

        assert_equal(clf3.t_, clf2.t_)
        assert_array_almost_equal(clf3.coef_, clf2.coef_)

    def test_warm_start_constant(self):
        self._test_warm_start(X, Y, "constant")

    def test_warm_start_invscaling(self):
        self._test_warm_start(X, Y, "invscaling")

    def test_warm_start_optimal(self):
        self._test_warm_start(X, Y, "optimal")

    def test_warm_start_multiclass(self):
        self._test_warm_start(X2, Y2, "optimal")

    def test_multiple_fit(self):
        """Test multiple calls of fit w/ different shaped inputs."""
        clf = self.factory(alpha=0.01, n_iter=5,
                           shuffle=False)
        clf.fit(X, Y)
        assert_true(hasattr(clf, "coef_"))

        # Non-regression test: try fitting with a different label set.
        y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
        clf.fit(X[:, :-1], y)

    def test_input_format(self):
        """Input format tests. """
        clf = self.factory(alpha=0.01, n_iter=5,
                           shuffle=False)
        clf.fit(X, Y)
        Y_ = np.array(Y)[:, np.newaxis]

        # two-column targets must be rejected by the classifier/regressor
        Y_ = np.c_[Y_, Y_]
        assert_raises(ValueError, clf.fit, X, Y_)

    def test_clone(self):
        """Test whether clone works ok. """
        clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
        clf = clone(clf)
        clf.set_params(penalty='l2')
        clf.fit(X, Y)

        clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
        clf2.fit(X, Y)

        # a cloned-then-reconfigured estimator must match a fresh one
        assert_array_equal(clf.coef_, clf2.coef_)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory = SGDClassifier
def test_sgd(self):
"""Check that SGD gives any results :-)"""
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
#assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
"""Check whether expected ValueError on bad l1_ratio"""
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
"""Check whether expected ValueError on bad learning_rate"""
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
"""Check whether expected ValueError on bad eta0"""
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
"""Check whether expected ValueError on bad alpha"""
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
"""Test parameter validity check"""
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
"""Test parameter validity check"""
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
"""Checks coef_init not allowed as model argument (only fit)"""
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
"""Checks coef_init shape for the warm starts"""
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
"""Checks intercept_ shape for the warm starts"""
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
"""Checks intercept_ shape for the warm starts in binary case"""
self.factory().fit(X5, Y5, intercept_init=0)
def test_set_intercept_to_intercept(self):
"""Checks intercept_ shape consistency for the warm starts"""
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
"""Target must have at least two labels"""
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_sgd_multiclass(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_with_init_coef(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
"""Multi-class test case with multi-core support"""
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
"""Checks coef_init and intercept_init shape for for multi-class
problems"""
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
"""Check SGD.predict_proba"""
# hinge loss does not allow for conditional prob estimate
clf = self.factory(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_raises(NotImplementedError, clf.predict_proba, [3, 2])
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1/3.] * 3)
def test_sgd_l1(self):
    """Test L1 regularization"""
    # Deterministic shuffle of the X4/Y4 fixture; SGD results depend on
    # sample order, so the seed (13) pins the expected sparsity pattern.
    n = len(X4)
    rng = np.random.RandomState(13)
    idx = np.arange(n)
    rng.shuffle(idx)
    X = X4[idx, :]
    Y = Y4[idx]
    clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
                       n_iter=2000)
    clf.fit(X, Y)
    # L1 should zero out coef_[0, 1:-1] — presumably the middle features of
    # X4 are uninformative; verify against the fixture definition.
    assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
    pred = clf.predict(X)
    assert_array_equal(pred, Y)
    # test sparsify with dense inputs
    clf.sparsify()
    assert_true(sp.issparse(clf.coef_))
    pred = clf.predict(X)
    assert_array_equal(pred, Y)
    # pickle and unpickle with sparse coef_
    clf = pickle.loads(pickle.dumps(clf))
    assert_true(sp.issparse(clf.coef_))
    pred = clf.predict(X)
    assert_array_equal(pred, Y)
def test_class_weights(self):
    """Down-weighting class 1 must flip the prediction at the probe point."""
    samples = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                        [1.0, 1.0], [1.0, 0.0]])
    targets = [1, 1, 1, -1, -1]
    probe = [[0.2, -1.0]]
    # (class_weight, expected label): unweighted fit predicts 1; with class 1
    # nearly ignored the hyperplane rotates clock-wise and the label flips.
    cases = [(None, 1),
             ({1: 0.001}, -1)]
    for weight, expected in cases:
        model = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
                             class_weight=weight)
        model.fit(samples, targets)
        assert_array_equal(model.predict(probe), np.array([expected]))
def test_equal_class_weight(self):
    """Explicit 50/50 class weights should approx. match no class weights."""
    clf_plain = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
    clf_plain.fit([[1, 0], [1, 0], [0, 1], [0, 1]], [0, 0, 1, 1])

    clf_weighted = self.factory(alpha=0.1, n_iter=1000,
                                class_weight={0: 0.5, 1: 0.5})
    clf_weighted.fit([[1, 0], [0, 1]], [0, 1])

    # should be similar up to some epsilon due to learning rate schedule
    assert_almost_equal(clf_plain.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
    """ValueError due to not existing class label."""
    # class_weight mentions label 0, which does not occur in Y
    self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5}).fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
    """ValueError due to wrong class_weight argument type."""
    # a bare list is not an accepted class_weight specification
    self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5]).fit(X, Y)
def test_auto_weight(self):
    """Test class weights for imbalanced data"""
    # compute reference metrics on iris dataset that is quite balanced by
    # default
    X, y = iris.data, iris.target
    X = scale(X)

    # deterministic shuffle so SGD sees a fixed sample order
    idx = np.arange(X.shape[0])
    rng = np.random.RandomState(0)
    rng.shuffle(idx)
    X = X[idx]
    y = y[idx]
    clf = self.factory(alpha=0.0001, n_iter=1000,
                       class_weight=None).fit(X, y)
    assert_almost_equal(metrics.f1_score(y, clf.predict(X)), 0.96,
                        decimal=1)

    # make the same prediction using automated class_weight
    clf_auto = self.factory(alpha=0.0001, n_iter=1000,
                            class_weight="auto").fit(X, y)
    assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X)), 0.96,
                        decimal=1)

    # Make sure that in the balanced case it does not change anything
    # to use "auto"
    assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)

    # build an very very imbalanced dataset out of iris data
    X_0 = X[y == 0, :]
    y_0 = y[y == 0]
    X_imbalanced = np.vstack([X] + [X_0] * 10)
    y_imbalanced = np.concatenate([y] + [y_0] * 10)

    # fit a model on the imbalanced data without class weight info
    clf = self.factory(n_iter=1000, class_weight=None)
    clf.fit(X_imbalanced, y_imbalanced)
    y_pred = clf.predict(X)
    assert_less(metrics.f1_score(y, y_pred), 0.96)

    # fit a model with auto class_weight enabled
    clf = self.factory(n_iter=1000, class_weight="auto")
    clf.fit(X_imbalanced, y_imbalanced)
    y_pred = clf.predict(X)
    assert_greater(metrics.f1_score(y, y_pred), 0.96)

    # fit another using a fit parameter override
    # NOTE(review): this is byte-identical to the previous fit — no fit
    # parameter is actually overridden; looks like dead duplication.
    clf = self.factory(n_iter=1000, class_weight="auto")
    clf.fit(X_imbalanced, y_imbalanced)
    y_pred = clf.predict(X)
    assert_greater(metrics.f1_score(y, y_pred), 0.96)
def test_sample_weights(self):
    """Test weights on individual samples"""
    data = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                     [1.0, 1.0], [1.0, 0.0]])
    labels = [1, 1, 1, -1, -1]
    probe = [[0.2, -1.0]]

    model = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
    model.fit(data, labels)
    assert_array_equal(model.predict(probe), np.array([1]))

    # we give a small weights to class 1
    model.fit(data, labels, sample_weight=[0.001] * 3 + [1] * 2)
    # now the hyperplane should rotate clock-wise and
    # the prediction on this point should shift
    assert_array_equal(model.predict(probe), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
    """Test if ValueError is raised if sample_weight has wrong shape"""
    # sample_weight has 7 entries while X has fewer rows
    model = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
    model.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
    """First partial_fit call without `classes` must raise ValueError."""
    self.factory(alpha=0.01).partial_fit(X3, Y3)
def test_partial_fit_binary(self):
    """Incremental binary fit: shapes, in-place coef_, final predictions."""
    third = X.shape[0] // 3
    clf = self.factory(alpha=0.01)
    classes = np.unique(Y)

    clf.partial_fit(X[:third], Y[:third], classes=classes)
    # binary problem -> a single weight row and intercept
    assert_equal(clf.coef_.shape, (1, X.shape[1]))
    assert_equal(clf.intercept_.shape, (1,))
    assert_equal(clf.decision_function([0, 0]).shape, (1, ))
    id1 = id(clf.coef_.data)

    clf.partial_fit(X[third:], Y[third:])
    id2 = id(clf.coef_.data)
    # check that coef_ haven't been re-allocated
    # NOTE(review): assert_true(id1, id2) is vacuous — id1 is always truthy
    # and id2 is consumed as the failure message. The intended check was
    # presumably assert_equal(id1, id2); confirm before fixing, since
    # id(coef_.data) compares transient buffer objects.
    assert_true(id1, id2)

    y_pred = clf.predict(T)
    assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
    """Incremental multiclass fit: shapes and in-place coef_ updates."""
    third = X2.shape[0] // 3
    clf = self.factory(alpha=0.01)
    classes = np.unique(Y2)

    clf.partial_fit(X2[:third], Y2[:third], classes=classes)
    # three classes -> one weight row / intercept per class
    assert_equal(clf.coef_.shape, (3, X2.shape[1]))
    assert_equal(clf.intercept_.shape, (3,))
    assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
    id1 = id(clf.coef_.data)

    clf.partial_fit(X2[third:], Y2[third:])
    id2 = id(clf.coef_.data)
    # check that coef_ haven't been re-allocated
    # NOTE(review): assert_true(id1, id2) is vacuous (id2 is only the
    # message); presumably assert_equal(id1, id2) was intended.
    assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
    """fit(n_iter=2) must match two partial_fit passes for schedule `lr`."""
    for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
        clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
                           learning_rate=lr, shuffle=False)
        clf.fit(X_, Y_)
        y_pred = clf.decision_function(T_)
        t = clf.t_

        classes = np.unique(Y_)
        clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
                           shuffle=False)
        for i in range(2):
            clf.partial_fit(X_, Y_, classes=classes)
        y_pred2 = clf.decision_function(T_)

        # same number of weight updates and (nearly) the same scores
        assert_equal(clf.t_, t)
        assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
    # constant learning rate
    self._test_partial_fit_equal_fit("constant")

def test_partial_fit_equal_fit_optimal(self):
    # "optimal" learning rate schedule
    self._test_partial_fit_equal_fit("optimal")

def test_partial_fit_equal_fit_invscaling(self):
    # inverse-scaling learning rate schedule
    self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
    """Every regression loss should fit the toy problem perfectly."""
    configs = [
        dict(alpha=0.01, learning_rate="constant", eta0=0.1,
             loss="epsilon_insensitive"),
        dict(alpha=0.01, learning_rate="constant", eta0=0.1,
             loss="squared_epsilon_insensitive"),
        dict(alpha=0.01, loss="huber"),
        dict(alpha=0.01, learning_rate="constant", eta0=0.01,
             loss="squared_loss"),
    ]
    for kwargs in configs:
        model = self.factory(**kwargs)
        model.fit(X, Y)
        assert_equal(1.0, np.mean(model.predict(X) == Y))
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
    """Run exactly the same tests using the sparse representation variant"""
    # Only the factory changes; every test method is inherited.
    factory = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase):
    """Test suite for the dense representation variant of SGD"""
    factory = SGDRegressor

    def test_sgd(self):
        """Check that SGD gives any results."""
        clf = self.factory(alpha=0.1, n_iter=2,
                           fit_intercept=False)
        clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
        # symmetric toy data -> symmetric weights
        assert_equal(clf.coef_[0], clf.coef_[1])

    @raises(ValueError)
    def test_sgd_bad_penalty(self):
        """Check whether expected ValueError on bad penalty"""
        self.factory(penalty='foobar', l1_ratio=0.85)

    @raises(ValueError)
    def test_sgd_bad_loss(self):
        """Check whether expected ValueError on bad loss"""
        self.factory(loss="foobar")

    def test_sgd_least_squares_fit(self):
        """Squared loss fits a noiseless, then a noisy, linear target."""
        xmin, xmax = -5, 5
        n_samples = 100
        rng = np.random.RandomState(0)
        X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)

        # simple linear function without noise
        y = 0.5 * X.ravel()

        clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.99)

        # simple linear function with noise
        y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()

        clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.5)

    def test_sgd_epsilon_insensitive(self):
        """Epsilon-insensitive loss fits a noiseless/noisy linear target."""
        xmin, xmax = -5, 5
        n_samples = 100
        # BUGFIX: seed the noise like the sibling tests do instead of using
        # the unseeded global np.random, which made this test flaky.
        rng = np.random.RandomState(0)
        X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)

        # simple linear function without noise
        y = 0.5 * X.ravel()

        clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
                           alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_true(score > 0.99)

        # simple linear function with noise
        y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()

        clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
                           alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_true(score > 0.5)

    def test_sgd_huber_fit(self):
        """Huber loss fits a noiseless, then a noisy, linear target."""
        xmin, xmax = -5, 5
        n_samples = 100
        rng = np.random.RandomState(0)
        X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)

        # simple linear function without noise
        y = 0.5 * X.ravel()

        clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.99)

        # simple linear function with noise
        y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()

        clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.5)

    def test_elasticnet_convergence(self):
        """Check that the SGD output is consistent with coordinate descent"""
        n_samples, n_features = 1000, 5
        rng = np.random.RandomState(0)
        # BUGFIX: draw X from the seeded rng (was the unseeded np.random),
        # so the comparison against coordinate descent is deterministic.
        X = rng.randn(n_samples, n_features)
        # ground_truth linear model that generate y from X and to which the
        # models should converge if the regularizer would be set to 0.0
        ground_truth_coef = rng.randn(n_features)
        y = np.dot(X, ground_truth_coef)

        # XXX: alpha = 0.1 seems to cause convergence problems
        for alpha in [0.01, 0.001]:
            for l1_ratio in [0.5, 0.8, 1.0]:
                cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
                                             fit_intercept=False)
                cd.fit(X, y)
                sgd = self.factory(penalty='elasticnet', n_iter=50,
                                   alpha=alpha, l1_ratio=l1_ratio,
                                   fit_intercept=False)
                sgd.fit(X, y)
                err_msg = ("cd and sgd did not converge to comparable "
                           "results for alpha=%f and l1_ratio=%f"
                           % (alpha, l1_ratio))
                assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
                                    err_msg=err_msg)

    def test_partial_fit(self):
        """partial_fit should update coef_ in place across calls."""
        third = X.shape[0] // 3
        clf = self.factory(alpha=0.01)

        clf.partial_fit(X[:third], Y[:third])
        assert_equal(clf.coef_.shape, (X.shape[1], ))
        assert_equal(clf.intercept_.shape, (1,))
        assert_equal(clf.decision_function([0, 0]).shape, (1, ))
        id1 = id(clf.coef_.data)

        clf.partial_fit(X[third:], Y[third:])
        id2 = id(clf.coef_.data)
        # check that coef_ haven't been re-allocated
        # NOTE(review): assert_true(id1, id2) is vacuous (id2 is only the
        # message); presumably assert_equal(id1, id2) was intended.
        assert_true(id1, id2)

    def _test_partial_fit_equal_fit(self, lr):
        """fit(n_iter=2) must match two partial_fit passes for schedule lr."""
        clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
                           learning_rate=lr, shuffle=False)
        clf.fit(X, Y)
        y_pred = clf.predict(T)
        t = clf.t_

        clf = self.factory(alpha=0.01, eta0=0.01,
                           learning_rate=lr, shuffle=False)
        for i in range(2):
            clf.partial_fit(X, Y)
        y_pred2 = clf.predict(T)

        # same number of weight updates and (nearly) the same predictions
        assert_equal(clf.t_, t)
        assert_array_almost_equal(y_pred, y_pred2, decimal=2)

    def test_partial_fit_equal_fit_constant(self):
        self._test_partial_fit_equal_fit("constant")

    def test_partial_fit_equal_fit_optimal(self):
        self._test_partial_fit_equal_fit("optimal")

    def test_partial_fit_equal_fit_invscaling(self):
        self._test_partial_fit_equal_fit("invscaling")

    def test_loss_function_epsilon(self):
        """set_params(epsilon=...) must propagate into the huber loss."""
        clf = self.factory(epsilon=0.9)
        clf.set_params(epsilon=0.1)
        # assert_equal instead of a bare assert (stripped under python -O)
        assert_equal(clf.loss_functions['huber'][1], 0.1)
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
    """Run exactly the same tests using the sparse representation variant"""
    # Only the factory changes; every test method is inherited.
    factory = SparseSGDRegressor
| bsd-3-clause |
zorroblue/scikit-learn | sklearn/tests/test_cross_validation.py | 79 | 47914 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0, allow_nd=False):
        # a: dummy hyper-parameter (only used by score);
        # allow_nd: if True, fit/predict flatten >2D inputs instead of raising
        self.a = a
        self.allow_nd = allow_nd

    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            callback(self)

        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be d')
        # The remaining checks assert that fit_params were sliced to match X.
        if sample_weight is not None:
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): this reads the module-level `y`, not the `Y`
            # parameter — looks like a latent bug; confirm intent.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            # sparse_param is expected to be passed through unsliced
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self

    def predict(self, T):
        # "prediction" is simply the first feature of each sample
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        # deterministic score controlled by the `a` hyper-parameter
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
# Shared fixtures for the tests below.
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
# sparse column vector (10, 1) with a single non-zero entry at row 1
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                      shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
    """Assert a train/test split is disjoint and, optionally, complete."""
    # Use python sets to get more informative assertion failure messages
    train_set = set(train)
    test_set = set(test)

    # Train and test split should not overlap
    assert_equal(train_set.intersection(test_set), set())

    if n_samples is not None:
        # Check that the union of train an test split cover all the indices
        assert_equal(train_set.union(test_set), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    """Assert every sample appears at least once in some test fold of cv."""
    if expected_n_iter is not None:
        assert_equal(len(cv), expected_n_iter)
    else:
        expected_n_iter = len(cv)

    seen_test_samples = set()
    n_seen_splits = 0
    for train, test in cv:
        check_valid_split(train, test, n_samples=n_samples)
        n_seen_splits += 1
        seen_test_samples.update(test)

    # Check that the accumulated test samples cover the whole dataset
    assert_equal(n_seen_splits, expected_n_iter)
    if n_samples is not None:
        assert_equal(seen_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
    """Invalid (Stratified)KFold parameters must raise or warn."""
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.KFold, 3, 4)

    # Check that a warning is raised if the least populated class has too few
    # members.
    y = [3, 3, -1, -1, 3]

    cv = assert_warns_message(Warning, "The least populated class",
                              cval.StratifiedKFold, y, 3)

    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented at on each
    # side of the split at each split
    check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))

    # Check that errors are raised if all n_labels for individual
    # classes are less than n_folds.
    y = [3, 3, -1, -1, 2]

    assert_raises(ValueError, cval.StratifiedKFold, y, 3)

    # Error when number of folds is <= 1
    assert_raises(ValueError, cval.KFold, 2, 0)
    assert_raises(ValueError, cval.KFold, 2, 1)
    error_string = ("k-fold cross validation requires at least one"
                    " train / test split")
    assert_raise_message(ValueError, error_string,
                         cval.StratifiedKFold, y, 0)
    assert_raise_message(ValueError, error_string,
                         cval.StratifiedKFold, y, 1)

    # When n is not integer:
    assert_raises(ValueError, cval.KFold, 2.5, 2)

    # When n_folds is not integer:
    assert_raises(ValueError, cval.KFold, 5, 1.5)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
    """KFold test folds must cover all indices, equal-sized folds or not."""
    # (n_samples, n_folds): 300 splits evenly into 3, 17 does not
    for n, n_folds in ((300, 3), (17, 3)):
        kf = cval.KFold(n, n_folds)
        check_cv_coverage(kf, expected_n_iter=n_folds, n_samples=n)
def test_kfold_no_shuffle():
    # Manually check that KFold preserves the data ordering on toy datasets
    # Each case maps n_samples -> expected (train, test) pairs in order.
    cases = [
        (4, [([2, 3], [0, 1]),
             ([0, 1], [2, 3])]),
        (5, [([3, 4], [0, 1, 2]),
             ([0, 1, 2], [3, 4])]),
    ]
    for n, expected in cases:
        for (train, test), (want_train, want_test) in zip(cval.KFold(n, 2),
                                                          expected):
            assert_array_equal(test, want_test)
            assert_array_equal(train, want_train)
def test_stratified_kfold_no_shuffle():
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    cases = [
        ([1, 1, 0, 0],
         [([1, 3], [0, 2]),
          ([0, 2], [1, 3])]),
        ([1, 1, 1, 0, 0, 0, 0],
         [([2, 5, 6], [0, 1, 3, 4]),
          ([0, 1, 3, 4], [2, 5, 6])]),
    ]
    for labels, expected in cases:
        for (train, test), (want_train, want_test) in zip(
                cval.StratifiedKFold(labels, 2), expected):
            assert_array_equal(test, want_test)
            assert_array_equal(train, want_train)
def test_stratified_kfold_ratios():
    # Check that stratified kfold preserves label ratios in individual splits
    # Repeat with shuffling turned off and on
    n_samples = 1000
    labels = np.array([4] * int(0.10 * n_samples) +
                      [0] * int(0.89 * n_samples) +
                      [1] * int(0.01 * n_samples))
    expected_ratios = [(4, 0.10), (0, 0.89), (1, 0.01)]

    for shuffle in [False, True]:
        for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
            for label, ratio in expected_ratios:
                assert_almost_equal(
                    np.sum(labels[train] == label) / len(train), ratio, 2)
                assert_almost_equal(
                    np.sum(labels[test] == label) / len(test), ratio, 2)
def test_kfold_balance():
    # Check that KFold returns folds with balanced sizes
    for n in range(11, 17):
        kf = cval.KFold(n, 5)
        sizes = [len(test) for _, test in kf]
        # fold sizes differ by at most one and sum to n
        assert_true((np.max(sizes) - np.min(sizes)) <= 1)
        assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that KFold returns folds with balanced sizes (only when
    # stratification is possible)
    # Repeat with shuffling turned off and on
    labels = [0] * 3 + [1] * 14

    for shuffle in [False, True]:
        for i in range(11, 17):
            skf = cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
            sizes = [len(test) for _, test in skf]
            # fold sizes differ by at most one and sum to i
            assert_true((np.max(sizes) - np.min(sizes)) <= 1)
            assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
    # Check the indices are shuffled properly, and that all indices are
    # returned in the different test folds
    kf = cval.KFold(300, 3, shuffle=True, random_state=0)
    ind = np.arange(300)

    collected = []
    for train, test in kf:
        # no test fold may coincide element-wise with a contiguous third
        for lo, hi in ((0, 100), (100, 200), (200, 300)):
            assert_true(np.any(np.arange(lo, hi) != ind[test]))
        collected.append(ind[test].copy())

    all_folds = np.sort(np.concatenate(collected))
    assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
    # Check that shuffling is happening when requested, and for proper
    # sample coverage
    labels = [0] * 20 + [1] * 20
    kf0, kf1 = [list(cval.StratifiedKFold(labels, 5, shuffle=True,
                                          random_state=seed))
                for seed in (0, 1)]
    # two seeds must produce different test folds
    for (_, test0), (_, test1) in zip(kf0, kf1):
        assert_true(set(test0) != set(test1))
    check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    """Unshuffled KFold must expose sample dependency in the digits data."""
    # The digits samples are dependent: they are apparently grouped by authors
    # although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact be computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non
    # shuffling variant (around 0.86).

    digits = load_digits()
    X, y = digits.data[:800], digits.target[:800]
    model = SVC(C=10, gamma=0.005)
    n = len(y)

    cv = cval.KFold(n, 5, shuffle=False)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    # non-shuffled score should land in (0.85, 0.88)
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)

    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the authors
    # by yielding a seriously overestimated score:

    cv = cval.KFold(n, 5, shuffle=True, random_state=0)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)

    cv = cval.KFold(n, 5, shuffle=True, random_state=1)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)

    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold

    cv = cval.StratifiedKFold(y, 5)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
def _check_label_kfold_folds(labels, folds, n_folds, tolerance):
    """Shared assertions for a LabelKFold fold assignment.

    Checks that folds are approximately balanced, that each label lands in
    exactly one fold, and that no label straddles a train/test split.
    """
    ideal_n_labels_per_fold = len(labels) // n_folds

    # Check that folds have approximately the same size
    assert_equal(len(folds), len(labels))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_labels_per_fold))

    # Check that each label appears only in 1 fold
    for label in np.unique(labels):
        assert_equal(len(np.unique(folds[labels == label])), 1)

    # Check that no label is on both sides of the split
    for train, test in cval.LabelKFold(labels, n_folds=n_folds):
        assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)


def test_label_kfold():
    """LabelKFold on random integer labels and on string labels."""
    rng = np.random.RandomState(0)

    # Parameters of the test
    n_labels = 15
    n_samples = 1000
    n_folds = 5

    # Construct the test data
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    labels = rng.randint(0, n_labels, n_samples)
    folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
    labels = np.asarray(labels, dtype=object)
    _check_label_kfold_folds(labels, folds, n_folds, tolerance)

    # Construct the test data
    labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
              'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
              'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
              'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
              'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
              'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
              'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
    labels = np.asarray(labels, dtype=object)

    n_samples = len(labels)
    n_folds = 5
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
    _check_label_kfold_folds(labels, folds, n_folds, tolerance)

    # Should fail if there are more folds than labels
    labels = np.array([1, 1, 1, 2, 2])
    assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
    """All equivalent test_size spellings must produce identical splits."""
    base = [cval.ShuffleSplit(10, test_size=0.2, random_state=0),
            cval.ShuffleSplit(10, test_size=2, random_state=0),
            cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)]
    for typ in six.integer_types:
        typed = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
        for t1, t2, t3, t4 in zip(base[0], base[1], base[2], typed):
            # compare both the train (0) and test (1) sides of each split
            for side in (0, 1):
                assert_array_equal(t1[side], t2[side])
                assert_array_equal(t2[side], t3[side])
                assert_array_equal(t3[side], t4[side])
def test_stratified_shuffle_split_init():
    """Constructor validation for StratifiedShuffleSplit."""
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)

    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)

    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)

    # Train size or test size too small
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
    """Splits must keep class proportions, sizes, and disjointness."""
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50)
          ]

    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0)
        test_size = np.ceil(0.33 * len(y))
        train_size = len(y) - test_size
        for train, test in sss:
            # every class must appear on both sides of the split
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(np.unique(y[train],
                                   return_inverse=True)[1]) /
                       float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test],
                                  return_inverse=True)[1]) /
                      float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            # sizes add up and match the requested test fraction
            assert_equal(len(train) + len(test), y.size)
            assert_equal(len(train), train_size)
            assert_equal(len(test), test_size)
            # train and test must be disjoint
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with a
    # equal chance
    n_folds = 5
    n_iter = 1000

    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        # NOTE: closes over n_splits, which is assigned in the loop below
        # before this helper is called.
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            p = bf.pmf(count)
            assert_true(p > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")

    for n_samples in (6, 22):
        labels = np.array((n_samples // 2) * [0, 1])
        splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
                                             test_size=1. / n_folds,
                                             random_state=0)

        # count how often each index lands in train / test across iterations
        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits = 0
        for train, test in splits:
            n_splits += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits, n_iter)

        assert_equal(len(train), splits.n_train)
        assert_equal(len(test), splits.n_test)
        assert_equal(len(set(train).intersection(test)), 0)

        label_counts = np.unique(labels)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(splits.n_train + splits.n_test, len(labels))
        assert_equal(len(label_counts), 2)

        ex_test_p = float(splits.n_test) / n_samples
        ex_train_p = float(splits.n_train) / n_samples

        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
    # See https://github.com/scikit-learn/scikit-learn/issues/6121 for
    # the original bug report
    labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
    sss = cval.StratifiedShuffleSplit(labels, n_iter=1,
                                      test_size=0.5, random_state=0)
    train, test = next(iter(sss))
    # regression check: train and test must be disjoint
    assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by Kfold.
    folds = -1 * np.ones(10)
    kf_train = []
    kf_test = []
    # record the KFold splits while building the fold-assignment vector
    for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
        kf_train.append(train_ind)
        kf_test.append(test_ind)
        folds[test_ind] = i

    ps_train = []
    ps_test = []
    for train_ind, test_ind in cval.PredefinedSplit(folds):
        ps_train.append(train_ind)
        ps_test.append(test_ind)

    assert_array_equal(ps_train, kf_train)
    assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
    """LabelShuffleSplit must keep whole labels on one side of each split."""
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          ]
    for y in ys:
        n_iter = 6
        test_size = 1. / 3
        slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
                                     random_state=0)
        # Make sure the repr works
        repr(slo)
        # Test that the length is correct
        assert_equal(len(slo), n_iter)
        y_unique = np.unique(y)
        for train, test in slo:
            # First test: no train label is in the test set and vice versa
            y_train_unique = np.unique(y[train])
            y_test_unique = np.unique(y[test])
            assert_false(np.any(np.in1d(y[train], y_test_unique)))
            assert_false(np.any(np.in1d(y[test], y_train_unique)))
            # Second test: train and test add up to all the data
            assert_equal(y[train].size + y[test].size, y.size)
            # Third test: train and test are disjoint
            assert_array_equal(np.intersect1d(train, test), [])
            # Fourth test: # unique train and test labels are correct,
            # +- 1 for rounding error
            assert_true(abs(len(y_test_unique) -
                            round(test_size * len(y_unique))) <= 1)
            assert_true(abs(len(y_train_unique) -
                            round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
    """LeaveOneLabelOut / LeavePLabelOut must snapshot their labels.

    Mutating the labels array after constructing the CV object but
    before iterating must not change the generated splits.
    """
    labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    labels_mutable = labels.copy()
    cv_pairs = [
        (cval.LeaveOneLabelOut(labels),
         cval.LeaveOneLabelOut(labels_mutable)),
        (cval.LeavePLabelOut(labels, p=2),
         cval.LeavePLabelOut(labels_mutable, p=2)),
    ]
    # mutate after construction: splits must be unaffected
    labels_mutable[:] = 0
    for cv_ref, cv_mut in cv_pairs:
        for (train, test), (train_m, test_m) in zip(cv_ref, cv_mut):
            assert_array_equal(train, train_m)
            assert_array_equal(test, test_m)
def test_cross_val_score():
    """Smoke tests for cross_val_score on dense, sparse, list and 3d inputs."""
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cval.cross_val_score(clf, X, y)
        assert_array_equal(scores, clf.score(X, y))
        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
        scores = cval.cross_val_score(clf, X_sparse, y)
        assert_array_equal(scores, clf.score(X_sparse, y))
        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    scores = cval.cross_val_score(clf, X, y.tolist())
    # an unknown scoring string must raise a ValueError
    assert_raises(ValueError, cval.cross_val_score, clf, X, y,
                  scoring="sklearn")
    # test with 3d X: allowed only when the estimator accepts nd input
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cval.cross_val_score(clf, X_3d, y)
    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
    """cross_val_score must accept pandas inputs without converting them."""
    container_types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
    except ImportError:
        pass
    else:
        container_types.append((Series, DataFrame))
    for target_type, feature_type in container_types:
        # X dataframe, y series
        X_df = feature_type(X)
        y_ser = target_type(y)
        clf = CheckingClassifier(
            check_X=lambda df: isinstance(df, feature_type),
            check_y=lambda ser: isinstance(ser, target_type))
        cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    """Check that cross_val_score works with boolean-mask CV splits.

    Scores computed from index-based folds and from the equivalent
    boolean-mask folds must be identical.
    """
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    cv_indices = cval.KFold(len(y), 5)
    scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
    cv_indices = cval.KFold(len(y), 5)
    cv_masks = []
    for train, test in cv_indices:
        # dtype=bool: the np.bool alias was removed from modern numpy
        mask_train = np.zeros(len(y), dtype=bool)
        mask_test = np.zeros(len(y), dtype=bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # BUG FIX: the index arrays (train, test) were appended instead of
        # the boolean masks, so the mask code path was never exercised.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    """SVC with a precomputed linear kernel must match the linear kernel."""
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    linear_kernel = np.dot(X, X.T)
    score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
    svm = SVC(kernel="linear")
    score_linear = cval.cross_val_score(svm, X, y)
    assert_array_equal(score_precomputed, score_linear)
    # Error raised for non-square X (a precomputed kernel must be square)
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cval.cross_val_score, svm, X, y)
    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cval.cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    """fit_params of all kinds must be forwarded to the estimator's fit.

    Array-likes are sliced per fold; scalars and arbitrary objects are
    passed through unchanged (verified by the ``callback`` below).
    """
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()
    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)
    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    """A custom score function is called exactly once per fold."""
    clf = MockClassifier()
    recorded_calls = []

    def score_func(y_test, y_predict):
        recorded_calls.append((y_test, y_predict))
        return 1.0

    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cval.cross_val_score(clf, X, y, scoring=scoring)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    # default CV is 3-fold, so the scorer ran three times
    assert len(recorded_calls) == 3
def test_cross_val_score_errors():
    """cross_val_score must reject objects that are not estimators."""
    class BrokenEstimator:
        pass

    broken = BrokenEstimator()
    assert_raises(TypeError, cval.cross_val_score, broken, X)
def test_train_test_split_errors():
    """Invalid argument combinations to train_test_split must raise."""
    # no arrays at all
    assert_raises(ValueError, cval.train_test_split)
    # train_size > 1.0
    assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
    # test_size + train_size > 1.0 (floats and numpy floats)
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    # unsupported test_size type
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size="wrong_type")
    # absolute sizes exceeding the number of samples
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
                  train_size=4)
    # unknown keyword argument
    assert_raises(TypeError, cval.train_test_split, range(3),
                  some_argument=1.1)
    # inputs of inconsistent lengths
    assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
    """train_test_split: basic splitting, sparse/list inputs, nd arrays,
    and the ``stratify`` option."""
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = np.arange(10)
    # simple test
    split = cval.train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y (row i of X starts with 10 * i)
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)
    # conversion of lists to arrays (deprecated?)
    with warnings.catch_warnings(record=True):
        split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_array_equal(X_train, X_s_train.toarray())
    assert_array_equal(X_test, X_s_test.toarray())
    # don't convert lists to anything else by default
    split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))
    # allow nd-arrays (only the first axis is split)
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = cval.train_test_split(X_4d, y_3d)
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))
    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                       [2, 4, 2, 4, 6]):
        train, test = cval.train_test_split(y,
                                            test_size=test_size,
                                            stratify=y,
                                            random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    """Check that train_test_split preserves (mock) DataFrame types.

    NOTE(review): this helper lacks the ``test_`` prefix, so standard test
    runners do not collect it automatically -- confirm whether that is
    intended.
    """
    types = [MockDataFrame]
    try:
        from pandas import DataFrame
        types.append(DataFrame)
    except ImportError:
        pass
    for InputFeatureType in types:
        # X dataframe
        X_df = InputFeatureType(X)
        X_train, X_test = cval.train_test_split(X_df)
        assert_true(isinstance(X_train, InputFeatureType))
        assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
    """Check that train_test_split preserves MockDataFrame inputs.

    NOTE(review): missing ``test_`` prefix -- not collected by test
    runners; confirm whether that is intended.
    """
    # X mock dataframe
    X_df = MockDataFrame(X)
    X_train, X_test = cval.train_test_split(X_df)
    assert_true(isinstance(X_train, MockDataFrame))
    assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
    """Default, accuracy and weighted-F1 scores agree on balanced iris."""
    iris = load_iris()
    clf = SVC(kernel='linear')
    expected = [0.97, 1., 0.97, 0.97, 1.]
    # Default score (should be the accuracy score)
    scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
    assert_array_almost_equal(scores, expected, 2)
    # Explicit accuracy scoring must match the default estimator score.
    zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="accuracy", cv=5)
    assert_array_almost_equal(zo_scores, expected, 2)
    # Classes are balanced, so the weighted F1 equals the zero/one score.
    f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="f1_weighted", cv=5)
    assert_array_almost_equal(f1_scores, expected, 2)
def test_cross_val_score_with_score_func_regression():
    """Ridge regression with default, r2, neg-MSE and custom scorers."""
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()
    # Default score of the Ridge regression estimator
    scores = cval.cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # Mean squared error; this is a loss function, so "scores" are negative
    neg_mse_scores = cval.cross_val_score(reg, X, y, cv=5,
                                          scoring="neg_mean_squared_error")
    expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
    # Explained variance
    scoring = make_scorer(explained_variance_score)
    ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    """permutation_test_score: high score / low p-value on real labels,
    low score / high p-value on scrambled labels, and consistency between
    dense and sparse inputs and with a custom scorer."""
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = cval.StratifiedKFold(y, 2)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
        labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = cval.StratifiedKFold(y, 2)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
        scoring="accuracy", labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # test with custom scoring object
    def custom_score(y_true, y_pred):
        # accuracy minus error rate, in [-1, 1]
        return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
                / y_true.shape[0])
    scorer = make_scorer(custom_score)
    score, _, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
    assert_almost_equal(score, .93, 2)
    assert_almost_equal(pvalue, 0.01, 3)
    # set random y: the classifier cannot do better than chance
    y = np.mod(np.arange(len(y)), 3)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_less(score, 0.5)
    assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
    """Every CV generator must yield integer index arrays.

    Both the train and the test arrays must not be boolean masks, and
    they must be usable for fancy indexing into X and y.
    """
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    # explicitly passing indices value is deprecated
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    ss = cval.ShuffleSplit(2)
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            # BUG FIX: this assertion previously re-checked ``train``;
            # it must validate the ``test`` indices as well.
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
    """By default every CV generator yields integer index arrays.

    Both the train and the test arrays must not be boolean masks.
    """
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ss = cval.ShuffleSplit(2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            # BUG FIX: this assertion previously re-checked ``train``;
            # it must validate the ``test`` indices as well.
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
def test_shufflesplit_errors():
    """Invalid test_size / train_size combinations must raise ValueError."""
    bad_kwargs = [
        dict(test_size=2.0),
        dict(test_size=1.0),
        dict(test_size=0.1, train_size=0.95),
        dict(test_size=11),
        dict(test_size=10),
        dict(test_size=8, train_size=3),
        dict(train_size=1j),
        dict(test_size=None, train_size=None),
    ]
    for kwargs in bad_kwargs:
        assert_raises(ValueError, cval.ShuffleSplit, 10, **kwargs)
def test_shufflesplit_reproducible():
    """With a fixed random_state, two iterations yield identical splits."""
    ss = cval.ShuffleSplit(10, random_state=21)
    first_pass = [train for train, _ in ss]
    second_pass = [train for train, _ in ss]
    assert_array_equal(first_pass, second_pass)
def test_safe_split_with_precomputed_kernel():
    """_safe_split must slice a precomputed kernel on both axes.

    For a precomputed-kernel estimator, the train split is K[tr][:, tr]
    and the test split is K[te][:, tr], matching the kernels of the
    sliced raw data.
    """
    clf = SVC()
    clfp = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)
    cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
    tr, te = list(cv)[0]
    X_tr, y_tr = cval._safe_split(clf, X, y, tr)
    K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
    assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
    X_te, y_te = cval._safe_split(clf, X, y, te, tr)
    K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
    assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
    """Check that cross_val_score allows input data with NaNs.

    A mean-imputation step in the pipeline handles the missing values.
    """
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # BUG FIX: use floor division -- np.repeat requires an integer repeat
    # count; true division yields a float, which modern numpy rejects.
    y = np.repeat([0, 1], X.shape[0] // 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
    """Check that train_test_split allows input data with NaNs."""
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # BUG FIX: floor division -- np.repeat requires an integer repeat count.
    y = np.repeat([0, 1], X.shape[0] // 2)
    cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
    """Check that permutation_test_score allows input data with NaNs.

    A mean-imputation step in the pipeline handles the missing values.
    """
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # BUG FIX: floor division -- np.repeat requires an integer repeat count.
    y = np.repeat([0, 1], X.shape[0] // 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
    """check_cv picks StratifiedKFold only for binary/multiclass targets.

    Multilabel and multioutput targets must fall back to plain KFold.
    """
    X = np.ones((9, 2))
    cv = cval.check_cv(3, X, classifier=False)
    assert_true(isinstance(cv, cval.KFold))
    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = cval.check_cv(3, X, y_binary, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = cval.check_cv(3, X, y_multiclass, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    X = np.ones((5, 2))
    y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
    cv = cval.check_cv(3, X, y_multilabel, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = cval.check_cv(3, X, y_multioutput, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
    """cross_val_score with multilabel y and micro/macro/samples precision."""
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cval.cross_val_score(clf, X, y,
                                         scoring=scoring_samples, cv=5)
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
    """cross_val_predict matches a naive per-fold fit/predict loop and
    handles default CV, LeaveOneOut, sparse X, clusterers and a bad CV."""
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = cval.KFold(len(boston.target))
    est = Ridge()
    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv:
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)
    preds = cval.cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))
    cv = cval.LeaveOneOut(len(y))
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))
    # sparse input: zero out values below the median and convert
    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cval.cross_val_predict(est, Xsp, y)
    assert_array_almost_equal(len(preds), len(y))
    # unsupervised estimators (no y) are supported as well
    preds = cval.cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))
    def bad_cv():
        # test indices do not form a partition of the samples
        for i in range(4):
            yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
    assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
    """cross_val_predict on dense, sparse, list and 3d inputs."""
    clf = Ridge()
    # Smoke test
    predictions = cval.cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))
    # test with multioutput y
    with ignore_warnings(category=ConvergenceWarning):
        predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_equal(predictions.shape, (10, 2))
    predictions = cval.cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))
    # test with multioutput y
    with ignore_warnings(category=ConvergenceWarning):
        predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X: the nd structure must be passed through unchanged
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cval.cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    """cross_val_predict must accept pandas inputs without converting them."""
    container_types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
    except ImportError:
        pass
    else:
        container_types.append((Series, DataFrame))
    for target_type, feature_type in container_types:
        # X dataframe, y series
        X_df = feature_type(X)
        y_ser = target_type(y)
        clf = CheckingClassifier(
            check_X=lambda df: isinstance(df, feature_type),
            check_y=lambda ser: isinstance(ser, target_type))
        cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
    """Sparse matrices in fit_params must be sliced and passed through."""
    iris = load_iris()
    clf = MockClassifier()
    sparse_weight = coo_matrix(np.eye(iris.data.shape[0]))
    scores = cval.cross_val_score(
        clf, iris.data, iris.target,
        fit_params={'sparse_sample_weight': sparse_weight})
    # MockClassifier scores 1.0 on each of the 3 default folds
    assert_array_equal(scores, np.ones(3))
def test_check_is_partition():
    """_check_is_partition detects exact coverage of range(n)."""
    p = np.arange(100)
    assert_true(cval._check_is_partition(p, 100))
    # removing one element leaves a gap
    assert_false(cval._check_is_partition(np.delete(p, 23), 100))
    # duplicating an element (while keeping the length) is not a partition
    p[0] = 23
    assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
    # check that cross_val_predict gives same result for sparse and dense input
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          return_indicator=True,
                                          random_state=1)
    X_sparse = csr_matrix(X)
    y_sparse = csr_matrix(y)
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    preds = cval.cross_val_predict(classif, X, y, cv=10)
    preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
    # sparse input yields sparse predictions; densify before comparing
    preds_sparse = preds_sparse.toarray()
    assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
ratnania/pigasus | python/gallery/poisson_nonlin.py | 1 | 10940 | # -*- coding: UTF-8 -*-
#! /usr/bin/python
import sys
import numpy as np
from scipy.sparse.linalg import spsolve
from .poisson import *
from pigasus.fem.basicPDE import *
from numpy import abs
__all__ = ['poisson_picard', 'poisson_newton']
class poisson_picard(poisson):
    """
    A multidimensional nonlinear Poisson solver using the Picard
    (fixed point) algorithm: at each iteration the right hand side is
    evaluated at the current iterate and a *linear* Poisson problem is
    solved.
    >>> import caid.cad_geometry as cg
    >>> from caid.cad_geometry import line
    >>> import pylab as pl
    """
    def __init__(self, *args, **kwargs):
        """Creates a nonlinear poisson PDE solver based on Picard algorithm.
        geometry:
            The geometry must be an object cad_geometry.
        Returns:
            A PDE object.
        .. note::
            See also: :ref:`fem.gallery.poisson`.
        """
        poisson.__init__(self, *args, **kwargs)
    #-----------------------------------
    #-----------------------------------
    def initialize(self, u0=None):
        """Set the initial iterate: zero coefficients when ``u0`` is None,
        otherwise interpolate ``u0`` onto the discrete space."""
        U = self.unknown
        if u0 is None:
            U.set(np.zeros(U.size))
            return
        self.interpolate(u0, field=U)
    #-----------------------------------
    #-----------------------------------
    def assembly(self, f=None, update=False):
        # delegate assembly of the stiffness matrix and bc terms
        poisson.assembly(self, f=f, update=update)
    #-----------------------------------
    #-----------------------------------
    def solve(self, F, u0=None, maxiter=100, rtol=1.e-6, rtol2=1.e-6 \
              , verbose=False, update=False):
        """
        Solves the nonlinear poisson equation using the Picard algorithm.
        F:
            the rhs. it can be any function F(U, gradU, ..., x,y)
        u0:
            this is the initial value for u. Default: all B-splines coeff = 0
        maxiter:
            the maximum number of iterations for the Picard algorithm. Default 100
        rtol:
            the relative tolerance on the iterate increment. Default 1.e-6
        rtol2:
            the relative tolerance on the H1-norm increment. Default 1.e-6
        verbose:
            True => print the error for each iteration
        Returns:
            (list_Err, list_ErrH1) -- per-iteration L2 increment and
            H1-norm increment, as numpy arrays.
        """
        # assembly the stifness matrix and bc terms
        poisson.assembly(self, update=update)
        # project u0 onto the discrete vectorial space
        self.initialize(u0=u0)
        PDE = self
        V = PDE.space
        un = PDE.unknown
        rhs = self.rhs
        # the rhs callback is re-evaluated at the current iterate
        # every time the rhs is assembled
        rhs.func = F
        from time import time
        # sentinel entries so the first loop test passes; stripped
        # before returning
        list_Err = [1.e6]
        list_ErrH1 = [1.e6]
        # NOTE(review): un_values is never used afterwards
        un_values = un.get()
        normH1_old = np.dot(PDE.dot(un.get()), un.get())
        i = 0
        if verbose:
            tb = time()
        while (list_Err[-1] > rtol) and (list_ErrH1[-1] > rtol2) and (i < maxiter):
            U_old_values = un.get()
            # assembly the right hand side at the current iterate
            rhs.reset()
            self.update()
            # solve the linear problem and update the iterate
            poisson.solve(self, rhs)
            U_values = un.get()
            err = np.linalg.norm(U_values-U_old_values)
            list_Err.append(err)
            normH1 = np.dot(PDE.dot(un.get()), un.get())
            list_ErrH1.append(np.abs(normH1-normH1_old))
            normH1_old = normH1
            i += 1
            if verbose:
                print(i, ": "," |F(x)| = ", list_Err[-1]," |DF(x)| = ", list_ErrH1[-1])
        if verbose:
            te = time()
            print(">> Elapsed time ", te-tb)
        # drop the sentinel first entries
        list_Err = np.asarray(list_Err[1:])
        list_ErrH1 = np.asarray(list_ErrH1[1:])
        return list_Err, list_ErrH1
    #-----------------------------------
class poisson_newton(poisson):
    """
    A multidimensional nonlinear Poisson solver using the Newton
    algorithm: at each iteration the linearized problem
    ``Dn d = rhs - En(un)`` is solved for the increment ``d`` and the
    iterate is updated with ``un += d``.
    >>> import caid.cad_geometry as cg
    >>> from caid.cad_geometry import line
    >>> import pylab as pl
    """
    def __init__(self, *args, **kwargs):
        """Creates a nonlinear poisson PDE solver based on Newton algorithm.
        geometry:
            The geometry must be an object cad_geometry.
        Returns:
            A PDE object.
        .. note::
            See also: :ref:`fem.gallery.poisson`.
        """
        try:
            geometry = kwargs['geometry']
        except KeyError:
            # BUG FIX: was a bare ``except`` (which also swallows
            # KeyboardInterrupt / SystemExit). A missing geometry still
            # surfaces below, as before.
            pass
        # constant / zero / identity-stiffness coefficient functions
        # matching the dimension of the geometry
        dim = geometry.dim
        if dim == 1:
            func_one = lambda x: [1.]
            func_zero = lambda x: [0.]
            func_stiff = lambda x: [1.]
        if dim == 2:
            func_one = lambda x, y: [1.]
            func_zero = lambda x, y: [0.]
            func_stiff = lambda x, y: [1., 0.,
                                       0., 1.]
        if dim == 3:
            func_one = lambda x, y, z: [1.]
            func_zero = lambda x, y, z: [0.]
            func_stiff = lambda x, y, z: [1., 0., 0.,
                                          0., 1., 0.,
                                          0., 0., 1.]
        # testcase dictionary for the linearized (Jacobian) operator;
        # its zero mass term 'b' is replaced by dF at solve time
        tc_d = {}
        tc_d['A'] = func_stiff
        tc_d['b'] = func_zero
        # forward the optional boundary/metric settings (replaces four
        # copy-pasted try/except blocks)
        for key in ('AllDirichlet', 'bc_dirichlet', 'bc_neumann', 'Metric'):
            if key in kwargs:
                tc_d[key] = kwargs[key]
        poisson.__init__(self, *args, **kwargs)
        # Dn holds the linearized operator used at each Newton step
        self.Dn = basicPDE(geometry=geometry, testcase=tc_d)
    #-----------------------------------
    #-----------------------------------
    def free(self):
        """Release the internal PDE objects."""
        self.Dn.free()
        poisson.free(self)
    #-----------------------------------
    #-----------------------------------
    def initialize(self, u0=None):
        """Set the initial iterate: zero coefficients when ``u0`` is None,
        otherwise interpolate ``u0`` onto the discrete space."""
        U = self.unknown
        if u0 is None:
            U.set(np.zeros(U.size))
            return
        self.interpolate(u0, field=U)
    #-----------------------------------
    #-----------------------------------
    def solve(self, F, dF, u0=None, maxiter=100, rtol=1.e-6 \
              , verbose=False, update=False):
        """
        Solves the nonlinear poisson equation using the Newton algorithm.
        F:
            the rhs. it can be any function F(U, x, y)
        dF:
            the derivative of F with respect to U, used to build the
            Jacobian contribution.
        u0:
            this is the initial value for u. Default: all B-splines coeff = 0
        maxiter:
            the maximum number of iterations for the Newton algorithm. Default 100
        rtol:
            the relative tolerance. Default 1.e-6
        verbose:
            True => print the error for each iteration
        Returns:
            (list_Err, list_ErrH1) -- per-iteration increment norms as
            numpy arrays.
        """
        # assemble the stiffness matrix and bc terms
        poisson.assembly(self, update=update)
        self.Dn.assembly()
        # project u0 onto the discrete vectorial space
        self.initialize(u0=u0)
        En = self
        Dn = self.Dn
        if En.Dirichlet:
            U = En.unknown_dirichlet
        else:
            U = En.unknown
        # current iterate and Newton increment
        un = En.unknown
        dn = Dn.unknown
        rhs = En.rhs
        # NOTE(review): the callbacks below assume a 2D geometry (x, y)
        # -- confirm before using with 1D/3D geometries.
        def rhs_func(x, y):
            # rhs evaluated at the current iterate
            return F(U, x, y)
        rhs.set_func(rhs_func)
        def Mn_func(x, y):
            # Jacobian contribution evaluated at the current iterate
            return dF(U, x, y)
        Mn = Dn.mass
        Mn.set_func(Mn_func)
        from time import time
        dn.reset()
        # sentinel entries so the first loop test passes; stripped
        # before returning
        list_Err = [1.e6]
        list_ErrH1 = [1.e6]
        i = 0
        tb = time()
        while (list_Err[-1] > rtol) and (i < maxiter):
            # assemble the right hand side at the current iterate
            rhs.reset()
            En.update()
            Dn.assembly()
            # residual of the linearized problem
            g = rhs - En.dot(un)
            # solve for the increment and update the iterate
            Dn.solve(g)
            un += dn
            err = np.linalg.norm(dn.get())
            list_Err.append(err)
            err = np.dot(self.Dn.dot(dn.get()), dn.get())
            list_ErrH1.append(abs(err))
            i += 1
            if verbose:
                print(i, ": "," |F(x)| = ", list_Err[-1]," |DF(x)| = ", list_ErrH1[-1])
        if verbose:
            # BUG FIX: the elapsed time was printed unconditionally;
            # report it only in verbose mode, consistently with
            # poisson_picard.solve.
            te = time()
            print(">> Elapsed time ", te-tb)
        list_Err = np.asarray(list_Err[1:])
        list_ErrH1 = np.asarray(list_ErrH1[1:])
        return list_Err, list_ErrH1
    #-----------------------------------
if __name__ == '__main__':
    # Demo: solve -Laplace(u) = 4 exp(u) (the Bratu/Liouville problem)
    # on a disk, with both Picard and Newton, and compare to the exact
    # solution u(x, y) = -2 log(x^2 + y^2 + 0.5).
    from caid.cad_geometry import circle
    from matplotlib import pylab as plt
    sin = np.sin ; cos = np.cos ; exp = np.exp ; log = np.log ; sqrt = np.sqrt ; pi = np.pi
    nx = 15 ; ny = 15
    px = 2 ; py = 2
    geo = circle(radius=1./sqrt(2), n=[nx,ny], p=[px,py])
    # exact solution (as a one-entry list, per pigasus conventions)
    u_exact = lambda x,y : [- 2.0 * log ( x**2 + y**2 + 0.5 )]
    def F(U,x,y):
        # nonlinear rhs evaluated at the current iterate
        _U = U.evaluate()
        return [4. * exp(_U)]
    def dF (U,x, y):
        # derivative of F w.r.t. U (sign follows the weak-form convention
        # used by poisson_newton -- NOTE(review): confirm the sign)
        _U = U.evaluate()
        return[-4 * exp(_U)]
    AllDirichlet = True
    PDE_picard = poisson_picard(  geometry=geo \
                         , AllDirichlet=AllDirichlet )
    PDE_newton = poisson_newton(  geometry=geo \
                         , AllDirichlet=AllDirichlet )
    print(">>> Solving using Picard <<<")
    PDE = PDE_picard
    if PDE.Dirichlet:
        U = PDE.unknown_dirichlet
    else:
        U = PDE.unknown
    PDE_picard.solve(F, u0=None, maxiter=100, rtol=1.e-6, verbose=True)
    print(">>> Solving using Newton <<<")
    PDE = PDE_newton
    if PDE.Dirichlet:
        U = PDE.unknown_dirichlet
    else:
        U = PDE.unknown
    PDE_newton.solve(F, dF, u0=None, maxiter=100, rtol=1.e-6, verbose=True)
    print("norm using Picard  ", PDE_picard.norm(exact=u_exact))
    print("norm using Newton  ", PDE_newton.norm(exact=u_exact))
| mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/core/computation/pytables.py | 7 | 18930 | """ manage PyTables query interface via Expressions """
import ast
from functools import partial
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_list_like
import pandas.core.common as com
from pandas.compat import u, string_types, DeepChainMap
from pandas.core.base import StringMixin
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
from pandas.core.computation import expr, ops
from pandas.core.computation.ops import is_term, UndefinedVariableError
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.common import _ensure_decoded
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type
class Scope(expr.Scope):
    """Expression scope that additionally carries the table queryables."""
    __slots__ = 'queryables',

    def __init__(self, level, global_dict=None, local_dict=None,
                 queryables=None):
        super(Scope, self).__init__(level + 1, global_dict=global_dict,
                                    local_dict=local_dict)
        # fall back to an empty mapping when nothing was supplied
        if queryables:
            self.queryables = queryables
        else:
            self.queryables = dict()
class Term(ops.Term):
    """A named term in a PyTables query expression.

    On the left-hand side a term must name a queryable column; on the
    right-hand side it resolves through the environment (or stays a
    plain string when unresolvable).
    """

    def __new__(cls, name, env, side=None, encoding=None):
        # non-string names are literal values: build a Constant instead
        klass = Constant if not isinstance(name, string_types) else cls
        supr_new = StringMixin.__new__
        return supr_new(klass)

    def __init__(self, name, env, side=None, encoding=None):
        super(Term, self).__init__(name, env, side=side, encoding=encoding)

    def _resolve_name(self):
        # must be a queryables
        if self.side == 'left':
            if self.name not in self.env.queryables:
                raise NameError('name {0!r} is not defined'.format(self.name))
            return self.name
        # resolve the rhs (and allow it to be None)
        try:
            return self.env.resolve(self.name, is_local=False)
        except UndefinedVariableError:
            return self.name

    @property
    def value(self):
        # resolved value, populated by the ops.Term machinery
        return self._value
class Constant(Term):
    """A literal value appearing in a PyTables query expression."""

    def __init__(self, value, env, side=None, encoding=None):
        super(Constant, self).__init__(value, env, side=side,
                                       encoding=encoding)

    def _resolve_name(self):
        # a constant resolves to itself; no environment lookup
        return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
    def __init__(self, op, lhs, rhs, queryables, encoding):
        """Binary operation over PyTables query terms.

        queryables : mapping of column names to their table metadata
        encoding   : encoding used when stringifying values (may be None)
        """
        super(BinOp, self).__init__(op, lhs, rhs)
        self.queryables = queryables
        self.encoding = encoding
        # populated by evaluate(): a filter tuple and/or condition string
        self.filter = None
        self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if (isinstance(left, ConditionBinOp) and
isinstance(right, ConditionBinOp)):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if (isinstance(left, FilterBinOp) and
isinstance(right, FilterBinOp)):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(self.op, left, right, queryables=self.queryables,
encoding=self.encoding).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self):
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self):
""" return True if this is a valid column name for generation (e.g. an
actual column in the table) """
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs), 'kind', None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs), 'meta', None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs), 'metadata', None)
def generate(self, v):
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return "(%s %s %s)" % (self.lhs, self.op, val)
def convert_value(self, v):
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == u('datetime64') or kind == u('datetime'):
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = pd.Timestamp(v)
if v.tz is not None:
v = v.tz_convert('UTC')
return TermValue(v, v.value, kind)
elif kind == u('timedelta64') or kind == u('timedelta'):
v = _coerce_scalar_to_timedelta_type(v, unit='s').value
return TermValue(int(v), v, kind)
elif meta == u('category'):
metadata = com._values_from_object(self.metadata)
result = metadata.searchsorted(v, side='left')
# result returns 0 if v is first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, u('integer'))
elif kind == u('integer'):
v = int(float(v))
return TermValue(v, v, kind)
elif kind == u('float'):
v = float(v)
return TermValue(v, v, kind)
elif kind == u('bool'):
if isinstance(v, string_types):
v = not v.strip().lower() in [u('false'), u('f'), u('no'),
u('n'), u('none'), u('0'),
u('[]'), u('{}'), u('')]
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, string_types):
# string quoting
return TermValue(v, stringify(v), u('string'))
else:
raise TypeError(("Cannot compare {v} of type {typ}"
" to {kind} column").format(v=v, typ=type(v),
kind=kind))
def convert_values(self):
pass
class FilterBinOp(BinOp):
    """A binary op realized as a post-read filter: (column, op, Index)."""
    def __unicode__(self):
        return pprint_thing("[Filter : [{0}] -> "
                            "[{1}]".format(self.filter[0], self.filter[1]))
    def invert(self):
        """ invert the filter """
        if self.filter is not None:
            f = list(self.filter)
            # swap isin for ~isin (or vice versa)
            f[1] = self.generate_filter_op(invert=True)
            self.filter = tuple(f)
        return self
    def format(self):
        """ return the actual filter format """
        return [self.filter]
    def evaluate(self):
        """Build the (column, op, values) filter tuple, or return None
        when an in-kernel condition should be used instead."""
        if not self.is_valid:
            raise ValueError("query term is not valid [%s]" % self)
        rhs = self.conform(self.rhs)
        values = [TermValue(v, v, self.kind) for v in rhs]
        if self.is_in_table:
            # if too many values to create the expression, use a filter instead
            if self.op in ['==', '!='] and len(values) > self._max_selectors:
                filter_op = self.generate_filter_op()
                self.filter = (
                    self.lhs,
                    filter_op,
                    pd.Index([v.value for v in values]))
                return self
            return None
        # equality conditions
        if self.op in ['==', '!=']:
            filter_op = self.generate_filter_op()
            self.filter = (
                self.lhs,
                filter_op,
                pd.Index([v.value for v in values]))
        else:
            raise TypeError(
                "passing a filterable condition to a non-table indexer [%s]" %
                self)
        return self
    def generate_filter_op(self, invert=False):
        """Return the axis/vals callable implementing (possibly inverted)
        membership for this op."""
        if (self.op == '!=' and not invert) or (self.op == '==' and invert):
            return lambda axis, vals: ~axis.isin(vals)
        else:
            return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
    """Two filters joined by a boolean op; cannot be collapsed further."""
    def format(self):
        raise NotImplementedError("unable to collapse Joint Filters")
    def evaluate(self):
        # already specialized; nothing more to do
        return self
class ConditionBinOp(BinOp):
    """A binary op realized as an in-kernel numexpr condition string."""
    def __unicode__(self):
        return pprint_thing("[Condition : [{0}]]".format(self.condition))
    def invert(self):
        """ invert the condition """
        # if self.condition is not None:
        #     self.condition = "~(%s)" % self.condition
        # return self
        raise NotImplementedError("cannot use an invert condition when "
                                  "passing to numexpr")
    def format(self):
        """ return the actual ne format """
        return self.condition
    def evaluate(self):
        """Build the numexpr condition string, or return None when the
        expression must instead be handled as a post-read filter."""
        if not self.is_valid:
            raise ValueError("query term is not valid [%s]" % self)
        # convert values if we are in the table
        if not self.is_in_table:
            return None
        rhs = self.conform(self.rhs)
        values = [self.convert_value(v) for v in rhs]
        # equality conditions
        if self.op in ['==', '!=']:
            # too many values to create the expression?
            if len(values) <= self._max_selectors:
                vs = [self.generate(v) for v in values]
                self.condition = "(%s)" % ' | '.join(vs)
            # use a filter after reading
            else:
                return None
        else:
            self.condition = self.generate(values[0])
        return self
class JointConditionBinOp(ConditionBinOp):
    """Two conditions joined by a boolean op (both sides already built)."""
    def evaluate(self):
        # splice the two child condition strings with this op
        self.condition = "(%s %s %s)" % (
            self.lhs.condition,
            self.op,
            self.rhs.condition)
        return self
class UnaryOp(ops.UnaryOp):
    """Unary op node; only invert ('~') is supported for pytables."""
    def prune(self, klass):
        """Prune the operand and return its inverted specialized form,
        or None when the operand could not be specialized."""
        if self.op != '~':
            raise NotImplementedError("UnaryOp only support invert type ops")
        operand = self.operand
        operand = operand.prune(klass)
        if operand is not None:
            if issubclass(klass, ConditionBinOp):
                if operand.condition is not None:
                    return operand.invert()
            elif issubclass(klass, FilterBinOp):
                if operand.filter is not None:
                    return operand.invert()
        return None
_op_classes = {'unary': UnaryOp}
class ExprVisitor(BaseExprVisitor):
    """AST visitor that lowers a query expression into pytables
    Term/Constant/BinOp/UnaryOp nodes."""
    const_type = Constant
    term_type = Term

    def __init__(self, env, engine, parser, **kwargs):
        super(ExprVisitor, self).__init__(env, engine, parser)
        for bin_op in self.binary_ops:
            # bind bin_op as a default argument so each generated visitor
            # captures its own operator (avoids the late-binding pitfall)
            setattr(self, 'visit_{0}'.format(self.binary_op_nodes_map[bin_op]),
                    lambda node, bin_op=bin_op: partial(BinOp, bin_op,
                                                        **kwargs))

    def visit_UnaryOp(self, node, **kwargs):
        if isinstance(node.op, (ast.Not, ast.Invert)):
            return UnaryOp('~', self.visit(node.operand))
        elif isinstance(node.op, ast.USub):
            # fold unary minus into a negated constant
            return self.const_type(-self.visit(node.operand).value, self.env)
        elif isinstance(node.op, ast.UAdd):
            raise NotImplementedError('Unary addition not supported')

    def visit_Index(self, node, **kwargs):
        return self.visit(node.value).value

    def visit_Assign(self, node, **kwargs):
        # treat 'a = b' as the comparison 'a == b'
        cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0],
                           comparators=[node.value])
        return self.visit(cmpr)

    def visit_Subscript(self, node, **kwargs):
        # only allow simple subscripts
        value = self.visit(node.value)
        slobj = self.visit(node.slice)
        try:
            value = value.value
        except AttributeError:
            # narrowed from a bare except: only the missing-.value case
            # (plain python objects) should be silently ignored here
            pass
        try:
            return self.const_type(value[slobj], self.env)
        except TypeError:
            raise ValueError("cannot subscript {0!r} with "
                             "{1!r}".format(value, slobj))

    def visit_Attribute(self, node, **kwargs):
        attr = node.attr
        value = node.value
        ctx = node.ctx.__class__
        if ctx == ast.Load:
            # resolve the value
            resolved = self.visit(value)
            # try to get the value to see if we are another expression
            try:
                resolved = resolved.value
            except (AttributeError):
                pass
            try:
                return self.term_type(getattr(resolved, attr), self.env)
            except AttributeError:
                # something like datetime.datetime where scope is overriden
                if isinstance(value, ast.Name) and value.id == attr:
                    return resolved
        raise ValueError("Invalid Attribute context {0}".format(ctx.__name__))

    def translate_In(self, op):
        # 'x in y' is rewritten as an equality test for pytables
        return ast.Eq() if isinstance(op, ast.In) else op

    def _rewrite_membership_op(self, node, left, right):
        return self.visit(node.op), node.op, left, right
def _validate_where(w):
    """
    Validate that the where statement is of the right type.
    The type may either be String, Expr, or list-like of Exprs.
    Parameters
    ----------
    w : String term expression, Expr, or list-like of Exprs.
    Returns
    -------
    where : The original where clause if the check was successful.
    Raises
    ------
    TypeError : An invalid data type was passed in for w (e.g. dict).
    """
    acceptable = isinstance(w, (Expr, string_types)) or is_list_like(w)
    if not acceptable:
        raise TypeError("where must be passed as a string, Expr, "
                        "or list-like of Exprs")
    return w
class Expr(expr.Expr):
    """ hold a pytables like expression, comprised of possibly multiple 'terms'
    Parameters
    ----------
    where : string term expression, Expr, or list-like of Exprs
    queryables : a "kinds" map (dict of column name -> kind), or None if column
        is non-indexable
    encoding : an encoding that will encode the query terms
    Returns
    -------
    an Expr object
    Examples
    --------
    'index>=date'
    "columns=['A', 'D']"
    'columns=A'
    'columns==A'
    "~(columns=['A','B'])"
    'index>df.index[3] & string="bar"'
    '(index>df.index[3] & index<=df.index[6]) | string="bar"'
    "ts>=Timestamp('2012-02-01')"
    "major_axis>=20130101"
    """
    def __init__(self, where, queryables=None, encoding=None, scope_level=0):
        where = _validate_where(where)
        self.encoding = encoding
        self.condition = None
        self.filter = None
        self.terms = None
        self._visitor = None
        # capture the environment if needed
        local_dict = DeepChainMap()
        if isinstance(where, Expr):
            # reuse the wrapped expression's scope and raw string
            local_dict = where.env.scope
            where = where.expr
        elif isinstance(where, (list, tuple)):
            for idx, w in enumerate(where):
                if isinstance(w, Expr):
                    local_dict = w.env.scope
                else:
                    w = _validate_where(w)
                    where[idx] = w
            # multiple expressions are AND-ed together
            where = ' & ' .join(["(%s)" % w for w in where])  # noqa
        self.expr = where
        self.env = Scope(scope_level + 1, local_dict=local_dict)
        if queryables is not None and isinstance(self.expr, string_types):
            self.env.queryables.update(queryables)
            self._visitor = ExprVisitor(self.env, queryables=queryables,
                                        parser='pytables', engine='pytables',
                                        encoding=encoding)
            # parse eagerly so errors surface at construction time
            self.terms = self.parse()
    def __unicode__(self):
        if self.terms is not None:
            return pprint_thing(self.terms)
        return pprint_thing(self.expr)
    def evaluate(self):
        """ create and return the numexpr condition and filter """
        try:
            self.condition = self.terms.prune(ConditionBinOp)
        except AttributeError:
            raise ValueError("cannot process expression [{0}], [{1}] is not a "
                             "valid condition".format(self.expr, self))
        try:
            self.filter = self.terms.prune(FilterBinOp)
        except AttributeError:
            raise ValueError("cannot process expression [{0}], [{1}] is not a "
                             "valid filter".format(self.expr, self))
        return self.condition, self.filter
class TermValue(object):
    """Container for one term value used to build a condition/filter.

    Keeps the raw value, its pytables-converted form and its kind.
    """

    def __init__(self, value, converted, kind):
        self.value = value
        self.converted = converted
        self.kind = kind

    def tostring(self, encoding):
        """Render the converted value: quote unencoded strings, use
        repr() for floats so they round-trip, pass others through."""
        if self.kind == u'string':
            return self.converted if encoding is not None \
                else '"%s"' % self.converted
        if self.kind == u'float':
            # python 2 str(float) is not always round-trippable, so repr()
            return repr(self.converted)
        return self.converted
def maybe_expression(s):
    """ loose checking if s is a pytables-acceptable expression """
    if not isinstance(s, string_types):
        return False
    operators = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)
    # an expression must contain at least one operator to be plausible
    return any(op in s for op in operators)
| mit |
larsmans/seqlearn | seqlearn/_decode/tests/test_decode.py | 5 | 2198 | from nose.tools import assert_greater
from numpy.testing import assert_array_equal
import numpy as np
from sklearn.metrics import accuracy_score
from seqlearn._decode import bestfirst, viterbi
def test_wikipedia_example():
    """Viterbi and best-first decoding agree on the classic Wikipedia HMM."""
    # HMM example taken from Wikipedia. Samples can be "normal", "cold" or
    # "dizzy" (represented as one-hot feature vectors). States are "Healthy"
    # and "Fever". ['normal', 'cold', 'dizzy'] has optimal state sequence
    # ['Healthy', 'Healthy', 'Fever'].
    start = np.log([.6, .4])
    final = np.log([.5, .5])  # not given, so assume uniform probabilities
    trans = np.log([[.7, .3],
                    [.4, .6]])
    # log emission weights: rows = states, cols = observations
    w = np.log([[.5, .4, .1],
                [.1, .3, .6]])
    X = np.array([[1, 0, 0],
                  [0, 1, 0],
                  [0, 0, 1]])
    # per-position state scores (one-hot X picks each symbol's log-prob)
    score = np.dot(X, w.T)
    assert_array_equal(bestfirst(score, None, trans, start, final), [0, 0, 1])
    assert_array_equal(viterbi(score, None, trans, start, final), [0, 0, 1])
def test_dna():
    """Viterbi recovers the textbook DNA state path; best-first is close."""
    # HMM example taken from Borodovsky and Ekisheva (2006), Problems and
    # Solutions in Biological Sequence Analysis, p. 80.
    # Four (one-hot) features T, C, A and G, two states H and L
    # (high and low C+G content).
    start = np.log([.5, .5])
    final = start
    trans = np.log([[.5, .5],
                    [.4, .6]])
    # XXX in a binary problem, w of shape (n_features,) should be enough
    w = np.log([[.2, .3, .2, .3],
                [.3, .2, .3, .2]])
    X = np.array([[0, 0, 0, 1],  # G
                  [0, 0, 0, 1],  # G
                  [0, 1, 0, 0],  # C
                  [0, 0, 1, 0],  # A
                  [0, 1, 0, 0],  # C
                  [1, 0, 0, 0],  # T
                  [0, 0, 0, 1],  # G
                  [0, 0, 1, 0],  # A
                  [0, 0, 1, 0]])  # A
    score = np.dot(X, w.T)
    # HHHLLLLLL
    y_true = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1])
    assert_array_equal(viterbi(score, None, trans, start, final), y_true)
    # For this problem, Viterbi actually is better than best-first.
    bf = bestfirst(score, None, trans, start, final)
    assert_greater(accuracy_score(y_true, bf), .75)
| mit |
TonySheh/losslessh264 | plot_prior_misses.py | 40 | 1124 | # Run h264dec on a single file compiled with PRIOR_STATS and then run this script
# Outputs timeseries plot at /tmp/misses.pdf
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import os
def temporal_misses(key):
    """Plot the temporal distribution of misses for one prior.

    Buckets the miss log for ``key`` into roughly ``numbins`` equal-size
    time bins and plots each bin's share of the total misses (percent).
    Returns the matplotlib line handle for the legend.

    NOTE(review): reads the module-global ``data`` dict; assumes the log
    for ``key`` has at least ``numbins`` entries -- TODO confirm.
    """
    values = data[key]
    numbins = 100
    binsize = len(values) // numbins
    bins = [[]]
    for v in values:
        # start a new bin once the current one is full
        if len(bins[-1]) >= binsize:
            bins.append([])
        bins[-1].append(v)
    x = range(len(bins))
    total_misses = float(sum(values))
    # percentage of all misses that fall within each temporal bin
    y = [100 * float(sum(b)) / total_misses for b in bins]
    return plt.plot(x, y, label=key)[0]
# Collect every '*misses.log*' dump that h264dec wrote into /tmp.
# Use list/dict comprehensions instead of py2-only filter()/map():
# under Python 3 a map object stored in `data` would break the
# len()/sum() calls in temporal_misses().
paths = [s for s in os.listdir('/tmp/') if 'misses.log' in s]
# prior name -> list of per-event miss flags (a '0' in the log is a miss)
data = {p.split('_misses.')[0]: [c == '0' for c in open('/tmp/' + p).read()]
        for p in paths}
handles = []
plt.figure(figsize=(20, 10))
keys = list(data.keys())
for k in keys:
    handles.append(temporal_misses(k))
plt.axis((0, 100, 0, 2))
plt.xlabel('temporal %')
plt.ylabel('% total misses')
plt.legend(handles, keys, bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
# write the finished figure to /tmp/misses.pdf
out = PdfPages('/tmp/misses.pdf')
out.savefig()
out.close()
| bsd-2-clause |
robbymeals/scikit-learn | sklearn/calibration.py | 137 | 18876 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
    """Probability calibration with isotonic regression or sigmoid.
    With this class, the base_estimator is fit on the train set of the
    cross-validation generator and the test set is used for calibration.
    The probabilities for each of the folds are then averaged
    for prediction. In case that cv="prefit" is passed to __init__,
    it is assumed that base_estimator has been
    fitted already and all data is used for calibration. Note that
    data for fitting the classifier and for calibrating it must be disjoint.
    Read more in the :ref:`User Guide <calibration>`.
    Parameters
    ----------
    base_estimator : instance BaseEstimator
        The classifier whose output decision function needs to be calibrated
        to offer more accurate predict_proba outputs. If cv=prefit, the
        classifier must have been fit already on data.
    method : 'sigmoid' | 'isotonic'
        The method to use for calibration. Can be 'sigmoid' which
        corresponds to Platt's method or 'isotonic' which is a
        non-parameteric approach. It is not advised to use isotonic calibration
        with too few calibration samples (<<1000) since it tends to overfit.
        Use sigmoids (Platt's calibration) in this case.
    cv : integer or cross-validation generator or "prefit", optional
        If an integer is passed, it is the number of folds (default 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects.
        If "prefit" is passed, it is assumed that base_estimator has been
        fitted already and all data is used for calibration.
    Attributes
    ----------
    classes_ : array, shape (n_classes)
        The class labels.
    calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
        The list of calibrated classifiers, one for each cross-validation
        fold, which has been fitted on all but the validation fold and
        calibrated on the validation fold.
    References
    ----------
    .. [1] Obtaining calibrated probability estimates from decision trees
           and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
    .. [2] Transforming Classifier Scores into Accurate Multiclass
           Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
    .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
           Regularized Likelihood Methods, J. Platt, (1999)
    .. [4] Predicting Good Probabilities with Supervised Learning,
           A. Niculescu-Mizil & R. Caruana, ICML 2005
    """
    def __init__(self, base_estimator=None, method='sigmoid', cv=3):
        self.base_estimator = base_estimator
        self.method = method
        self.cv = cv
    def fit(self, X, y, sample_weight=None):
        """Fit the calibrated model
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,)
            Target values.
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted.
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
                         force_all_finite=False)
        X, y = indexable(X, y)
        lb = LabelBinarizer().fit(y)
        self.classes_ = lb.classes_
        # Check that we each cross-validation fold can have at least one
        # example per class
        n_folds = self.cv if isinstance(self.cv, int) \
            else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
        if n_folds and \
                np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
            raise ValueError("Requesting %d-fold cross-validation but provided"
                             " less than %d examples for at least one class."
                             % (n_folds, n_folds))
        self.calibrated_classifiers_ = []
        if self.base_estimator is None:
            # we want all classifiers that don't expose a random_state
            # to be deterministic (and we don't want to expose this one).
            base_estimator = LinearSVC(random_state=0)
        else:
            base_estimator = self.base_estimator
        if self.cv == "prefit":
            # base_estimator is already fitted: calibrate on all of X, y
            calibrated_classifier = _CalibratedClassifier(
                base_estimator, method=self.method)
            if sample_weight is not None:
                calibrated_classifier.fit(X, y, sample_weight)
            else:
                calibrated_classifier.fit(X, y)
            self.calibrated_classifiers_.append(calibrated_classifier)
        else:
            cv = check_cv(self.cv, X, y, classifier=True)
            arg_names = inspect.getargspec(base_estimator.fit)[0]
            estimator_name = type(base_estimator).__name__
            if (sample_weight is not None
                    and "sample_weight" not in arg_names):
                # estimator.fit cannot take weights; only the calibrator will
                warnings.warn("%s does not support sample_weight. Samples"
                              " weights are only used for the calibration"
                              " itself." % estimator_name)
                base_estimator_sample_weight = None
            else:
                base_estimator_sample_weight = sample_weight
            for train, test in cv:
                # fit a clone on the train split, calibrate on the test split
                this_estimator = clone(base_estimator)
                if base_estimator_sample_weight is not None:
                    this_estimator.fit(
                        X[train], y[train],
                        sample_weight=base_estimator_sample_weight[train])
                else:
                    this_estimator.fit(X[train], y[train])
                calibrated_classifier = _CalibratedClassifier(
                    this_estimator, method=self.method)
                if sample_weight is not None:
                    calibrated_classifier.fit(X[test], y[test],
                                              sample_weight[test])
                else:
                    calibrated_classifier.fit(X[test], y[test])
                self.calibrated_classifiers_.append(calibrated_classifier)
        return self
    def predict_proba(self, X):
        """Posterior probabilities of classification
        This function returns posterior probabilities of classification
        according to each class on an array of test vectors X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The samples.
        Returns
        -------
        C : array, shape (n_samples, n_classes)
            The predicted probas.
        """
        check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
        X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
                        force_all_finite=False)
        # Compute the arithmetic mean of the predictions of the calibrated
        # classfiers
        mean_proba = np.zeros((X.shape[0], len(self.classes_)))
        for calibrated_classifier in self.calibrated_classifiers_:
            proba = calibrated_classifier.predict_proba(X)
            mean_proba += proba
        mean_proba /= len(self.calibrated_classifiers_)
        return mean_proba
    def predict(self, X):
        """Predict the target of new samples. Can be different from the
        prediction of the uncalibrated classifier.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The samples.
        Returns
        -------
        C : array, shape (n_samples,)
            The predicted class.
        """
        check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
        return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
class _CalibratedClassifier(object):
    """Probability calibration with isotonic regression or sigmoid.
    It assumes that base_estimator has already been fit, and trains the
    calibration on the input set of the fit function. Note that this class
    should not be used as an estimator directly. Use CalibratedClassifierCV
    with cv="prefit" instead.
    Parameters
    ----------
    base_estimator : instance BaseEstimator
        The classifier whose output decision function needs to be calibrated
        to offer more accurate predict_proba outputs. No default value since
        it has to be an already fitted estimator.
    method : 'sigmoid' | 'isotonic'
        The method to use for calibration. Can be 'sigmoid' which
        corresponds to Platt's method or 'isotonic' which is a
        non-parameteric approach based on isotonic regression.
    References
    ----------
    .. [1] Obtaining calibrated probability estimates from decision trees
           and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
    .. [2] Transforming Classifier Scores into Accurate Multiclass
           Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
    .. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
           Regularized Likelihood Methods, J. Platt, (1999)
    .. [4] Predicting Good Probabilities with Supervised Learning,
           A. Niculescu-Mizil & R. Caruana, ICML 2005
    """
    def __init__(self, base_estimator, method='sigmoid'):
        self.base_estimator = base_estimator
        self.method = method
    def _preproc(self, X):
        # Extract per-class scores from the base estimator, preferring
        # decision_function over predict_proba; returns (scores, col indices)
        n_classes = len(self.classes_)
        if hasattr(self.base_estimator, "decision_function"):
            df = self.base_estimator.decision_function(X)
            if df.ndim == 1:
                df = df[:, np.newaxis]
        elif hasattr(self.base_estimator, "predict_proba"):
            df = self.base_estimator.predict_proba(X)
            if n_classes == 2:
                # binary case: keep only the positive-class column
                df = df[:, 1:]
        else:
            raise RuntimeError('classifier has no decision_function or '
                               'predict_proba method.')
        idx_pos_class = np.arange(df.shape[1])
        return df, idx_pos_class
    def fit(self, X, y, sample_weight=None):
        """Calibrate the fitted model
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,)
            Target values.
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted.
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        lb = LabelBinarizer()
        Y = lb.fit_transform(y)
        self.classes_ = lb.classes_
        df, idx_pos_class = self._preproc(X)
        self.calibrators_ = []
        # one calibrator per (one-vs-rest) output column
        for k, this_df in zip(idx_pos_class, df.T):
            if self.method == 'isotonic':
                calibrator = IsotonicRegression(out_of_bounds='clip')
            elif self.method == 'sigmoid':
                calibrator = _SigmoidCalibration()
            else:
                raise ValueError('method should be "sigmoid" or '
                                 '"isotonic". Got %s.' % self.method)
            calibrator.fit(this_df, Y[:, k], sample_weight)
            self.calibrators_.append(calibrator)
        return self
    def predict_proba(self, X):
        """Posterior probabilities of classification
        This function returns posterior probabilities of classification
        according to each class on an array of test vectors X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            The samples.
        Returns
        -------
        C : array, shape (n_samples, n_classes)
            The predicted probas. Can be exact zeros.
        """
        n_classes = len(self.classes_)
        proba = np.zeros((X.shape[0], n_classes))
        df, idx_pos_class = self._preproc(X)
        for k, this_df, calibrator in \
                zip(idx_pos_class, df.T, self.calibrators_):
            if n_classes == 2:
                # binary: the single calibrated column is the positive class
                k += 1
            proba[:, k] = calibrator.predict(this_df)
        # Normalize the probabilities
        if n_classes == 2:
            proba[:, 0] = 1. - proba[:, 1]
        else:
            proba /= np.sum(proba, axis=1)[:, np.newaxis]
        # XXX : for some reason all probas can be 0
        proba[np.isnan(proba)] = 1. / n_classes
        # Deal with cases where the predicted probability minimally exceeds 1.0
        proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
        return proba
def _sigmoid_calibration(df, y, sample_weight=None):
    """Probability Calibration with sigmoid method (Platt 2000)
    Parameters
    ----------
    df : ndarray, shape (n_samples,)
        The decision function or predict proba for the samples.
    y : ndarray, shape (n_samples,)
        The targets.
    sample_weight : array-like, shape = [n_samples] or None
        Sample weights. If None, then samples are equally weighted.
    Returns
    -------
    a : float
        The slope.
    b : float
        The intercept.
    References
    ----------
    Platt, "Probabilistic Outputs for Support Vector Machines"
    """
    df = column_or_1d(df)
    y = column_or_1d(y)
    F = df  # F follows Platt's notations
    # np.float was a deprecated alias for the builtin float (i.e. float64)
    # and was removed in NumPy 1.24; use the explicit dtype instead.
    tiny = np.finfo(np.float64).tiny  # to avoid division by 0 warning
    # Bayesian priors (see Platt end of section 2.2)
    prior0 = float(np.sum(y <= 0))
    prior1 = y.shape[0] - prior0
    T = np.zeros(y.shape)
    T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
    T[y <= 0] = 1. / (prior0 + 2.)
    T1 = 1. - T

    def objective(AB):
        # From Platt (beginning of Section 2.2): negative log-likelihood
        # of the smoothed targets under the sigmoid P = 1/(1 + e^(A*F + B))
        E = np.exp(AB[0] * F + AB[1])
        P = 1. / (1. + E)
        l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
        if sample_weight is not None:
            return (sample_weight * l).sum()
        else:
            return l.sum()

    def grad(AB):
        # gradient of the objective function w.r.t. (A, B)
        E = np.exp(AB[0] * F + AB[1])
        P = 1. / (1. + E)
        TEP_minus_T1P = P * (T * E - T1)
        if sample_weight is not None:
            TEP_minus_T1P *= sample_weight
        dA = np.dot(TEP_minus_T1P, F)
        dB = np.sum(TEP_minus_T1P)
        return np.array([dA, dB])

    AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
    AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
    return AB_[0], AB_[1]
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
    """Sigmoid regression model.
    Attributes
    ----------
    a_ : float
        The slope.
    b_ : float
        The intercept.
    """
    def fit(self, X, y, sample_weight=None):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape (n_samples,)
            Training data.
        y : array-like, shape (n_samples,)
            Training target.
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted.
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X = column_or_1d(X)
        y = column_or_1d(y)
        X, y = indexable(X, y)
        # delegate the actual Platt-scaling optimization
        self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
        return self
    def predict(self, T):
        """Predict new data by linear interpolation.
        Parameters
        ----------
        T : array-like, shape (n_samples,)
            Data to predict from.
        Returns
        -------
        T_ : array, shape (n_samples,)
            The predicted data.
        """
        T = column_or_1d(T)
        # apply the fitted sigmoid 1 / (1 + e^(a*T + b))
        return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
    """Compute true and predicted probabilities for a calibration curve.
    Read more in the :ref:`User Guide <calibration>`.
    Parameters
    ----------
    y_true : array, shape (n_samples,)
        True targets.
    y_prob : array, shape (n_samples,)
        Probabilities of the positive class.
    normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the bin [0, 1], i.e. is not
        a proper probability. If True, the smallest value in y_prob is mapped
        onto 0 and the largest one onto 1.
    n_bins : int
        Number of bins. A bigger number requires more data.
    Returns
    -------
    prob_true : array, shape (n_bins,)
        The true probability in each bin (fraction of positives).
    prob_pred : array, shape (n_bins,)
        The mean predicted probability in each bin.
    References
    ----------
    Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
    Probabilities With Supervised Learning, in Proceedings of the 22nd
    International Conference on Machine Learning (ICML).
    See section 4 (Qualitative Analysis of Predictions).
    """
    y_true = column_or_1d(y_true)
    y_prob = column_or_1d(y_prob)
    if normalize:  # Normalize predicted values into interval [0, 1]
        y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
    elif y_prob.min() < 0 or y_prob.max() > 1:
        raise ValueError("y_prob has values outside [0, 1] and normalize is "
                         "set to False.")
    y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
    # 1e-8 padding so y_prob == 1.0 lands in the last bin
    bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
    binids = np.digitize(y_prob, bins) - 1
    # per-bin sums of predictions, positives, and counts
    bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
    bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
    bin_total = np.bincount(binids, minlength=len(bins))
    # drop empty bins to avoid division by zero
    nonzero = bin_total != 0
    prob_true = (bin_true[nonzero] / bin_total[nonzero])
    prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
    return prob_true, prob_pred
| bsd-3-clause |
mxjl620/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[nodata] = -9999
return M
def _load_csv(F):
    """Load csv file.

    Parameters
    ----------
    F : file object
        CSV file open in byte mode.

    Returns
    -------
    rec : np.ndarray
        Record array representing the data, with field names taken from
        the file's header line.
    """
    header_line = F.readline()
    if not PY2:
        # Numpy recarray wants Python 3 str field names, not bytes.
        header_line = header_line.decode('ascii')
    names = header_line.strip().split(',')

    rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
    rec.dtype.names = names
    return rec
def construct_grids(batch):
    """Construct the map grid from the batch object.

    Parameters
    ----------
    batch : Batch object
        The object returned by :func:`fetch_species_distributions`.

    Returns
    -------
    (xgrid, ygrid) : 1-D arrays
        The grid corresponding to the values in batch.coverages.
    """
    cell = batch.grid_size

    # Coordinates of the lower-left corner cell.
    x_lo = batch.x_left_lower_corner + cell
    y_lo = batch.y_left_lower_corner + cell

    # One coordinate per grid cell along each axis.
    xgrid = np.arange(x_lo, x_lo + (batch.Nx * cell), cell)
    ygrid = np.arange(y_lo, y_lo + (batch.Ny * cell), cell)
    return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
                                download_if_missing=True):
    """Loader for species distribution dataset from Phillips et. al. (2006)

    Read more in the :ref:`User Guide <datasets>`.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing: optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    --------
    The data is returned as a Bunch object with the following attributes:

    coverages : array, shape = [14, 1592, 1212]
        These represent the 14 features measured at each point of the map grid.
        The latitude/longitude values for the grid are discussed below.
        Missing data is represented by the value -9999.

    train : record array, shape = (1623,)
        The training points for the data. Each point has three fields:

        - train['species'] is the species name
        - train['dd long'] is the longitude, in degrees
        - train['dd lat'] is the latitude, in degrees

    test : record array, shape = (619,)
        The test points for the data. Same format as the training data.

    Nx, Ny : integers
        The number of longitudes (x) and latitudes (y) in the grid

    x_left_lower_corner, y_left_lower_corner : floats
        The (x,y) position of the lower-left corner, in degrees

    grid_size : float
        The spacing between points of the grid, in degrees

    Notes
    ------

    This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et. al. (2006).

    The two species are:

    - `"Bradypus variegatus"
      <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
      the Brown-throated Sloth.

    - `"Microryzomys minutus"
      <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in
      Colombia, Ecuador, Peru, and Venezuela.

    References
    ----------

    * `"Maximum entropy modeling of species geographic distributions"
      <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
      S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
      190:231-259, 2006.

    Notes
    -----

    * See examples/applications/plot_species_distribution_modeling.py
      for an example of using this dataset with scikit-learn
    """
    data_home = get_data_home(data_home)
    if not exists(data_home):
        makedirs(data_home)

    # Define parameters for the data files. These should not be changed
    # unless the data model changes. They will be saved in the npz file
    # with the downloaded data.
    extra_params = dict(x_left_lower_corner=-94.8,
                        Nx=1212,
                        y_left_lower_corner=-56.05,
                        Ny=1592,
                        grid_size=0.05)
    dtype = np.int16

    if not exists(join(data_home, DATA_ARCHIVE_NAME)):
        # `download_if_missing` was documented (raise IOError) but was
        # previously ignored; honor the contract for offline callers.
        if not download_if_missing:
            raise IOError("Data not found and `download_if_missing` is False")

        print('Downloading species data from %s to %s' % (SAMPLES_URL,
                                                          data_home))
        X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))

        for f in X.files:
            fhandle = BytesIO(X[f])
            if 'train' in f:
                train = _load_csv(fhandle)
            if 'test' in f:
                test = _load_csv(fhandle)

        print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
                                                           data_home))
        X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))

        coverages = []
        for f in X.files:
            fhandle = BytesIO(X[f])
            print(' - converting', f)
            coverages.append(_load_coverage(fhandle))
        coverages = np.asarray(coverages, dtype=dtype)

        bunch = Bunch(coverages=coverages,
                      test=test,
                      train=train,
                      **extra_params)
        # Cache everything in one compressed joblib archive so later calls
        # skip the download entirely.
        joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
    else:
        bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))

    return bunch
| bsd-3-clause |
robin-lai/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset: two "Gaussian quantile" blobs; the second blob's labels
# are flipped (-y2 + 1) so the two classes interleave non-linearly.
X1, y1 = make_gaussian_quantiles(cov=2.,
                                 n_samples=200, n_features=2,
                                 n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
                                 n_samples=300, n_features=2,
                                 n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))

# Create and fit an AdaBoosted decision tree (depth-1 stumps, SAMME).
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
                         algorithm="SAMME",
                         n_estimators=200)

bdt.fit(X, y)

plot_colors = "br"
plot_step = 0.02
class_names = "AB"

plt.figure(figsize=(10, 5))

# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                     np.arange(y_min, y_max, plot_step))

# Evaluate the fitted classifier on every point of the grid.
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")

# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
    idx = np.where(y == i)
    plt.scatter(X[idx, 0], X[idx, 1],
                c=c, cmap=plt.cm.Paired,
                label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')

# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
    plt.hist(twoclass_output[y == i],
             bins=10,
             range=plot_range,
             facecolor=c,
             label='Class %s' % n,
             alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))  # extra headroom so the legend fits
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')

plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
CodingCat/mxnet | example/kaggle-ndsb1/gen_img_list.py | 42 | 7000 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import csv
import os
import sys
import random
import numpy as np
import argparse
# Command-line interface.
parser = argparse.ArgumentParser(description='generate train/test image list files form input directory. If training it will also split into tr and va sets.')
parser.add_argument('--image-folder', type=str, default="data/train/",
                    help='the input data directory')
parser.add_argument('--out-folder', type=str, default="data/",
                    help='the output folder')
parser.add_argument('--out-file', type=str, default="train.lst",
                    help='the output lst file')
parser.add_argument('--train', action='store_true',
                    help='if we are generating training list and hence we have to loop over subdirectories')
## These options are only used if we are doing training lst
parser.add_argument('--percent-val', type=float, default=0.25,
                    help='the percentage of training list to use as validation')
parser.add_argument('--stratified', action='store_true',
                    help='if True it will split train lst into tr and va sets using stratified sampling')
args = parser.parse_args()

random.seed(888)  # fixed seed so the shuffle/split is reproducible

# Tab-separated .lst writers (mxnet im2rec list format).
fo_name=os.path.join(args.out_folder+args.out_file)
fo = csv.writer(open(fo_name, "w"), delimiter='\t', lineterminator='\n')
if args.train:
    tr_fo_name=os.path.join(args.out_folder+"tr.lst")
    va_fo_name=os.path.join(args.out_folder+"va.lst")
    tr_fo = csv.writer(open(tr_fo_name, "w"), delimiter='\t', lineterminator='\n')
    va_fo = csv.writer(open(va_fo_name, "w"), delimiter='\t', lineterminator='\n')
#check sampleSubmission.csv from kaggle website to view submission format
head = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,pr
otist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
# make image list
# Each entry is an (index, class_label, path) triple.  NOTE: this script is
# Python 2 (`xrange`).
img_lst = []
cnt = 0
if args.train:
    # Training layout: one subdirectory per class, named after `head`.
    for i in xrange(len(head)):
        path = args.image_folder + head[i]
        lst = os.listdir(args.image_folder + head[i])
        for img in lst:
            img_lst.append((cnt, i, path + '/' + img))
            cnt += 1
else:
    # Test layout: a single flat folder; dummy label 0.
    lst = os.listdir(args.image_folder)
    for img in lst:
        img_lst.append((cnt, 0, args.image_folder + img))
        cnt += 1

# shuffle
random.shuffle(img_lst)

#write
for item in img_lst:
    fo.writerow(item)

## If training, split into train and validation lists (tr.lst and va.lst)
## Optional stratified sampling
if args.train:
    img_lst=np.array(img_lst)
    if args.stratified:
        from sklearn.cross_validation import StratifiedShuffleSplit
        ## Stratified sampling to generate train and validation sets
        labels_train=img_lst[:,1]
        # unique_train, counts_train = np.unique(labels_train, return_counts=True) # To have a look at the frecuency distribution
        sss = StratifiedShuffleSplit(labels_train, 1, test_size=args.percent_val, random_state=0)
        for tr_idx, va_idx in sss:
            print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
    else:
        # Plain split: first (1 - percent_val) of the already-shuffled list
        # becomes train, the remainder validation.
        (nRows, nCols) = img_lst.shape
        splitat=int(round(nRows*(1-args.percent_val),0))
        tr_idx=range(0,splitat)
        va_idx=range(splitat,nRows)
        print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
    tr_lst=img_lst[tr_idx,:].tolist()
    va_lst=img_lst[va_idx,:].tolist()
    for item in tr_lst:
        tr_fo.writerow(item)
    for item in va_lst:
        va_fo.writerow(item)
| apache-2.0 |
linii/ling229-final | topic_modeling/pos_tag_selftext.py | 1 | 1060 | #!/usr/bin/python
import sys
import pickle
import pandas as pan
from nltk import pos_tag, word_tokenize
def add_pos_tags_to_english_docs(docs):
    """POS-tag every document in *docs*.

    Non-string entries (e.g. NaN placeholders from pandas) yield an empty
    list.  A ``*`` is written to stdout per tagged document as a progress
    indicator.  (Python 2: entries are expected to be UTF-8 byte strings.)
    """
    tagged_docs = []
    for raw in docs:
        # Anything that is not a plain (byte) string gets an empty result.
        if type(raw) is not str:
            tagged_docs.append([])
            continue
        sys.stdout.write("*")
        tokens = word_tokenize(raw.decode('utf-8'))
        tagged_docs.append(pos_tag(tokens))
        sys.stdout.flush()
    sys.stdout.write("\n")
    return tagged_docs
if __name__ == '__main__':
    # Usage: pos_tag_selftext.py <n_rows> <input_csv> <output_prefix>
    n = int(sys.argv[1])
    english_filename = sys.argv[2]
    outfile_name = sys.argv[3]

    print("Reading data file.")
    # Read the first n "selftext" entries and re-encode them as UTF-8 bytes.
    english_docs = list(pan.read_csv(english_filename, nrows=n, encoding='utf-8')["selftext"])
    english_docs = [english_docs[i].encode('utf-8') for i in range(len(english_docs))]

    print("Starting tagging.")
    english_sents_tagged = add_pos_tags_to_english_docs(english_docs)

    # Pickle the tagged docs; the row count is appended to the file name.
    # NOTE(review): text-mode "w" works for pickle on Python 2 only.
    with open(outfile_name + str(n), "w") as outfile:
        pickle.dump(english_sents_tagged, outfile)
creyesp/RF_Estimation | Clustering/helpers/processClusters/drawClustersGrill.py | 4 | 8022 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# drawClustersGrill.py
# Mónica Otero
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# Code use to draw all ellipses from different clusters
import sys
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../..','LIB'))
import argparse
import numpy as np
import scipy.ndimage
from sklearn.decomposition import PCA
from sklearn import metrics
from sklearn import preprocessing
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import mlab as mlab
import math
from matplotlib.patches import Ellipse
from pylab import figure, show, savefig
# Command-line options.
parser = argparse.ArgumentParser(prog='drawClustersGrill.py',
    description='Draw the ellipses for all the clusters',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sourceFile',
    help='Source File',
    type=str, required=True)
parser.add_argument('--outputFolder',
    help='Output File',
    type=str, required=True)
parser.add_argument('--xPixels',
    help='Number of xblocks',
    type=int, default=31, required=False)
parser.add_argument('--yPixels',
    help='Number of yblocks',
    type=int, default=31, required=False)
parser.add_argument('--blockSize',
    help='Block Size',
    type=int, default=50, required=False)
args = parser.parse_args()

#Source file of the clusters and all the information
sourceFile = args.sourceFile

#Output file where the graphics are going to be placed
outputFolder = args.outputFolder

xPixels = args.xPixels
yPixels = args.yPixels
blockSize= args.blockSize

# One colour per cluster id (indexed by cluster number below).
clustersColours = ['green', 'red', 'blue', 'orange','yellow','indigo',\
    '#ff006f','#00e8ff','#fcfa00', '#ff0000', '#820c2c', \
    '#ff006f', '#af00ff','#0200ff','#008dff','#00e8ff', \
    '#0c820e','#28ea04','#ea8404','#c8628f','#6283ff', \
    '#5b6756','#0c8248','k','#820cff','#932c11', \
    '#002c11','#829ca7']
## Loading and reading data
# NOTE(review): usecols skips file column 26, yet the index comment below
# labels col 26 as clusterId while the code reads indata[:,6] as the cluster
# id and indata[:,8] as ON_OFF — verify the column mapping against the
# producer script (Random_Spikes_Selection.py).
indata = np.loadtxt(sourceFile, delimiter=',', usecols=[20,21,22,23,24,25,27,28,29])
#indataLabels = np.loadtxt(sourceFile,usecols=[12],dtype=str)
print "> Reading input data: ", np.shape(indata)

# Random_Spikes_Selection.py
#0-19 Timestamps
# 20 aRadius
# 21 bRadius
# 22 angle
# 23 xCoordinate
# 24 yCoordinate
# 25 area
# 26 clusterId
# 27 peakTime
# 28 ON_OFF

# Split the cells by their ON/OFF flag (last loaded column).
spk_OFF = []
spk_ON = []
for i in range(len(indata[:,0])):
    if( int(indata[i,8]) == 1):
        spk_OFF.append(indata[i,:])
    if( int(indata[i,8]) == 0):
        spk_ON.append(indata[i,:])

spk_OFF = np.array(spk_OFF)
spk_ON = np.array(spk_ON)
nOFF = np.size(spk_OFF[:,1])
nON = np.size(spk_ON[:,1])
print "> Number of ON cells: ", nON, "(", 1.0*nON/(nON+nOFF), "%)"
print "> Number of OFF cells: ", nOFF, "(", 1.0*nOFF/(nON+nOFF), "%)"

# Separating data by grouped clusters
nclusters = np.max(indata[:,6])
print "> Number of clusters: ", nclusters + 1
# OFF cells.  `resFig`/`bx` accumulates every ellipse filled in its cluster
# colour (the "general grill"); per-cluster `fig`/`ax` highlights one
# cluster as coloured outlines with the rest in faint gray.
resFig= figure()
bx = resFig.add_subplot(111, aspect='equal')
for cluster in range(int(nclusters)+1):
    print cluster
    fig = figure()
    ax = fig.add_subplot(111, aspect='equal')
    for unit in range(nOFF):
        # Ellipse parameters come straight from the fitted RF columns.
        eWidth = float(spk_OFF[unit,0])
        eHeight = float(spk_OFF[unit,1])
        eAngle = float(spk_OFF[unit,2])
        eXY = [float(spk_OFF[unit,3]), float(spk_OFF[unit,4])]
        resE = Ellipse(xy=eXY, width=eWidth, height=eHeight, angle=eAngle)
        bx.add_artist(resE)
        resE.set_alpha(0.2)
        resE.set_facecolor(clustersColours[int(spk_OFF[unit,6])])
        bx.set_xlabel('Retina piece Xsize ('+r'$\mu$'+'m)')
        bx.set_ylabel('Retina piece Ysize ('+r'$\mu$'+'m)')
        if(int(spk_OFF[unit,6])==cluster):
            # Unit belongs to the highlighted cluster: coloured outline.
            e = Ellipse(xy=eXY, width=eWidth, height=eHeight, angle=eAngle, fill=False)
            ax.add_artist(e)
            e.set_alpha(0.6)
            e.set_edgecolor(clustersColours[int(spk_OFF[unit,6])])
        else:
            # All other clusters drawn faintly in gray for context.
            e = Ellipse(xy=eXY, width=eWidth, height=eHeight, angle=eAngle, fill=False)
            ax.add_artist(e)
            e.set_edgecolor('gray')
            e.set_alpha(0.2)
    '''Xaxis = ax.xaxis
    Yaxis = ax.yaxis
    lengthX=len(Xaxis.get_ticklocs())
    lengthY=len(Yaxis.get_ticklocs())
    coefX=Xaxis.get_ticklocs()
    coefY=Yaxis.get_ticklocs()
    xlabels=coefX
    ylabels=coefY
    for value in range(lengthX):
        xlabels[value]=int((xPixels/lengthX)*coefX[value]*(xPixels/lengthX)*blockSize)
    for value in range(lengthY):
        ylabels[value]=int((yPixels/lengthY)*coefY[value]*(yPixels/lengthY)*blockSize)'''
    ax.set_xlim(0, xPixels)
    ax.set_ylim(0, yPixels)
    # Hard-coded tick labels in micrometres (replaces the commented-out
    # computed labelling above).
    xlabels= ['0','250', '500', '750', '1000','1250', '1500']
    ylabels= ['0','250', '500', '750', '1000','1250', '1500']
    ax.set_xticklabels(xlabels)
    ax.set_yticklabels(ylabels)
    ax.set_xlabel('Retina piece Xsize ('+r'$\mu$'+'m)')
    ax.set_ylabel('Retina piece Ysize ('+r'$\mu$'+'m)')
    fig.savefig(outputFolder + '/OFF_Grill_' + str(cluster)+ '.pdf', dpi=None, bbox_inches='tight', format='pdf')
xlabels= ['0','250', '500', '750', '1000','1250', '1500']
ylabels= ['0','250', '500', '750', '1000','1250', '1500']
bx.set_xlim(0, xPixels)
bx.set_ylim(0, yPixels)
bx.set_xticklabels(xlabels)
bx.set_yticklabels(ylabels)
bx.set_xlabel('Retina piece Xsize ('+r'$\mu$'+'m)')
bx.set_ylabel('Retina piece Ysize ('+r'$\mu$'+'m)')
resFig.savefig(outputFolder + '/GeneralGrill_OFF.pdf', dpi=None, bbox_inches='tight',format='pdf')
plt.close(resFig)
plt.close(fig)

# Same two-level plotting, repeated for the ON cells.
resFig= figure()
bx = resFig.add_subplot(111, aspect='equal')
for cluster in range(int(nclusters)+1):
    print cluster
    fig = figure()
    ax = fig.add_subplot(111, aspect='equal')
    for unit in range(nON):
        eWidth = float(spk_ON[unit,0])
        eHeight = float(spk_ON[unit,1])
        eAngle = float(spk_ON[unit,2])
        eXY = [float(spk_ON[unit,3]), float(spk_ON[unit,4])]
        resE = Ellipse(xy=eXY, width=eWidth, height=eHeight, angle=eAngle)
        bx.add_artist(resE)
        resE.set_alpha(0.2)
        resE.set_facecolor(clustersColours[int(spk_ON[unit,6])])
        if(spk_ON[unit,6]==cluster):
            e = Ellipse(xy=eXY, width=eWidth, height=eHeight, angle=eAngle, fill=False)
            ax.add_artist(e)
            e.set_edgecolor(clustersColours[int(spk_ON[unit,6])])
            e.set_alpha(0.6)
        else:
            e = Ellipse(xy=eXY, width=eWidth, height=eHeight, angle=eAngle, fill=False)
            ax.add_artist(e)
            e.set_edgecolor('gray')
            e.set_alpha(0.2)
    '''Xaxis = ax.xaxis
    Yaxis = ax.yaxis
    lengthX=len(Xaxis.get_ticklocs())
    print lengthX
    lengthY=len(Yaxis.get_ticklocs())
    coefX=Xaxis.get_ticklocs()
    coefY=Yaxis.get_ticklocs()
    for value in range(lengthX):
        xlabels[value]=int((xPixels/lengthX)*coefX[value]*(xPixels/lengthX)*blockSize)
    for value in range(lengthY):
        ylabels[value]=int((yPixels/lengthY)*coefY[value]*(yPixels/lengthY)*blockSize)'''
    ax.set_xlim(0, xPixels)
    ax.set_ylim(0, yPixels)
    xlabels= ['0','250', '500', '750', '1000','1250', '1500']
    ylabels= ['0','250', '500', '750', '1000','1250', '1500']
    ax.set_xticklabels(xlabels)
    ax.set_yticklabels(ylabels)
    ax.set_xlabel('Retina piece Xsize ('+r'$\mu$'+'m)')
    ax.set_ylabel('Retina piece Ysize ('+r'$\mu$'+'m)')
    fig.savefig(outputFolder + '/ON_Grill_' + str(cluster)+ '.pdf', dpi=None, bbox_inches='tight',format='pdf')
xlabels= ['0','250', '500', '750', '1000','1250', '1500']
ylabels= ['0','250', '500', '750', '1000','1250', '1500']
bx.set_xlim(0, xPixels)
bx.set_ylim(0, yPixels)
bx.set_xticklabels(xlabels)
bx.set_yticklabels(ylabels)
bx.set_xlabel('Retina piece Xsize ('+r'$\mu$'+'m)')
bx.set_ylabel('Retina piece Ysize ('+r'$\mu$'+'m)')
resFig.savefig(outputFolder + '/GeneralGrill_ON.pdf', dpi=None, bbox_inches='tight', format='pdf')
| gpl-2.0 |
mblondel/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
# Load the diabetes regression dataset.
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target

X /= X.std(axis=0)  # Standardize data (easier to set the l1_ratio parameter)

# Compute paths

eps = 5e-3  # the smaller it is the longer is the path

print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)

print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
    X, y, eps, positive=True, fit_intercept=False)

print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
    X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)

print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
    X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)

# Display results

# Figure 1: lasso (solid) vs. elastic net (dashed) coefficient paths.
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')

plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')

# Figure 2: unconstrained vs. positivity-constrained lasso.
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
              linestyle='--')

plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')

# Figure 3: unconstrained vs. positivity-constrained elastic net.
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
              linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
           loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
michaelneuder/image_quality_analysis | bin/nets/wip/ms_ssim_nets/iqa_tools.py | 2 | 12596 | #!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import pandas as pd
import numpy as np
import time
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def normalize_input(train_data, test_data):
    '''
    input: two image sets (1 training, 1 test)
    returns: both sets standardized per-feature using the *training*
             mean and standard deviation (no test-set leakage).
    '''
    train_mean = np.mean(train_data, axis=0)
    train_std = np.std(train_data, axis=0)
    standardized_train = (train_data - train_mean) / train_std
    standardized_test = (test_data - train_mean) / train_std
    return standardized_train, standardized_test
def get_epoch(x, y, n):
    '''
    input: set of images (x), set of targets (y), size of batch (n)
    returns: dict with key being the minibatch number and the value being a
             length 2 list with the features in first index and targets in
             the second.  Indices are shuffled, so batches are random but
             non-overlapping; a smaller remainder batch holds the leftovers.
    '''
    input_size = x.shape[0]
    number_batches = input_size // n
    extra_examples = input_size % n

    batches = {}
    batch_indices = np.arange(input_size)
    np.random.shuffle(batch_indices)

    for i in range(number_batches):
        temp_indices = batch_indices[n*i:n*(i+1)]
        temp_x = [x[j] for j in temp_indices]
        temp_y = [y[j] for j in temp_indices]
        batches[i] = [np.asarray(temp_x), np.asarray(temp_y)]

    # Leftover examples form one final, smaller batch.  Keyed by
    # `number_batches` rather than `i + 1` so inputs smaller than a single
    # batch (number_batches == 0, where `i` was never bound) no longer
    # raise NameError; for larger inputs the key is identical.
    if extra_examples != 0:
        extra_indices = batch_indices[input_size - extra_examples:input_size]
        temp_x = [x[k] for k in extra_indices]
        temp_y = [y[k] for k in extra_indices]
        batches[number_batches] = [np.asarray(temp_x), np.asarray(temp_y)]
    return batches
def calculate_ssim(window_orig, window_recon):
    '''
    input: two 11x11 windows of orig and recon image.
    returns: ssim score (single value) for the patches
    '''
    k_1, k_2, L = 0.01, 0.03, 255
    if window_orig.shape != (11,11) or window_recon.shape != (11,11):
        raise ValueError('please check window size for SSIM calculation!')
    orig_data, recon_data = window_orig.flatten(), window_recon.flatten()

    mean_x, mean_y = np.mean(orig_data), np.mean(recon_data)
    var_x, var_y = np.var(orig_data), np.var(recon_data)
    covar = np.cov(orig_data, recon_data)[0][1]

    # Standard SSIM stabilizers: c_1 = (k_1*L)^2 pairs with the mean
    # (luminance) term, c_2 = (k_2*L)^2 with the variance/covariance term.
    # Previously the two were swapped, disagreeing with both the SSIM
    # definition and calculate_luminance/calculate_contrast in this module.
    c_1, c_2 = (k_1*L)**2, (k_2*L)**2
    num = (2*mean_x*mean_y+c_1)*(2*covar+c_2)
    den = (mean_x**2+mean_y**2+c_1)*(var_x+var_y+c_2)
    return num/den
def calculate_ssim_image(image_orig, image_recon):
    '''
    input: orig and recon image
    returns: 1-D array of per-window ssim scores with no zero-padding.
    '''
    ssim_res = []
    filter_dim = 11; image_dim = image_orig.shape[0];
    number_windows = image_dim - filter_dim + 1
    for i in range(number_windows):
        for j in range(number_windows):
            orig_window = image_orig[i:i+11, j:j+11]
            recon_window = image_recon[i:i+11, j:j+11]
            # Was `calculate_ssim_patch`, a name that does not exist in
            # this module (NameError); the patch scorer is calculate_ssim.
            temp = calculate_ssim(orig_window, recon_window)
            ssim_res.append(temp)
    return np.asarray(ssim_res)
def calculate_contrast(window_orig, window_recon):
    '''
    input: orig and recon patch of image.
    returns: single contrast score for patches
    '''
    L, k_2 = 255, 0.03
    c_2 = (L * k_2) ** 2

    sigma_sq_recon = np.var(window_recon.flatten())
    sigma_sq_orig = np.var(window_orig.flatten())

    numerator = 2 * np.sqrt(sigma_sq_recon) * np.sqrt(sigma_sq_orig) + c_2
    denominator = sigma_sq_recon + sigma_sq_orig + c_2
    return numerator / denominator
def calculate_structure(window_orig, window_recon):
    '''
    input: orig and recon patch of image.
    returns: single structure score for patches
    '''
    L, k_2 = 255, 0.03
    # c_3 is the conventional c_2 / 2 stabilizer for the structure term.
    c_3 = ((L * k_2) ** 2) / 2

    orig_flat = window_orig.flatten()
    recon_flat = window_recon.flatten()
    covar = np.cov(orig_flat, recon_flat)[0][1]

    return (covar + c_3) / (np.std(recon_flat) * np.std(orig_flat) + c_3)
def calculate_contrast_image(orig_im, recon_im):
    '''
    input: orig and recon image.
    returns: contrast scores pixelwise for the image.
    '''
    # The window size was previously read from a global `filter_dim` that
    # is never defined at module scope (NameError at call time); define it
    # locally, as calculate_ssim_image does.
    filter_dim = 11
    contrast_res = []
    number_windows = orig_im.shape[0] - filter_dim + 1
    for i in range(number_windows):
        for j in range(number_windows):
            orig_window = orig_im[i:i+11, j:j+11]
            recon_window = recon_im[i:i+11, j:j+11]
            temp = calculate_contrast(orig_window, recon_window)
            contrast_res.append(temp)
    return np.reshape(contrast_res, (number_windows, number_windows))
def calculate_structure_image(orig_im, recon_im):
    '''
    input: orig and recon image.
    returns: structure scores pixelwise for the image.
    '''
    # Same fix as calculate_contrast_image: `filter_dim` was an undefined
    # module-level global (NameError); define the window size locally.
    filter_dim = 11
    structure_res = []
    number_windows = orig_im.shape[0] - filter_dim + 1
    for i in range(number_windows):
        for j in range(number_windows):
            orig_window = orig_im[i:i+11, j:j+11]
            recon_window = recon_im[i:i+11, j:j+11]
            temp = calculate_structure(orig_window, recon_window)
            structure_res.append(temp)
    return np.reshape(structure_res, (number_windows, number_windows))
def down_sample(orig_im, recon_im, pool_size=2):
    '''
    input: orig, recon, size of the (square) mean pool.  pool_size now
           defaults to 2, matching the repeated 2x down-sampling done by
           calculate_msssim_image (which called this without the argument).
    return: a tuple of original and reconstructed images after down sampling
    '''
    reduce_im_orig, reduce_im_recon = [], []
    # Trailing rows/columns that don't fill a whole pool are dropped.
    number_pools = int(orig_im.shape[0] / pool_size)
    for i in range(number_pools):
        for j in range(number_pools):
            rows = slice(i * pool_size, (i + 1) * pool_size)
            cols = slice(j * pool_size, (j + 1) * pool_size)
            reduce_im_orig.append(np.mean(orig_im[rows, cols]))
            reduce_im_recon.append(np.mean(recon_im[rows, cols]))
    shape = (number_pools, number_pools)
    return np.reshape(reduce_im_orig, shape), np.reshape(reduce_im_recon, shape)
def calculate_luminance(window_orig, window_recon):
    '''
    input: patch of recon and orig image
    returns: luminance score for the patches
    '''
    L, k_1 = 255, 0.01
    c_1 = (L * k_1) ** 2

    mu_recon = np.mean(window_recon.flatten())
    mu_orig = np.mean(window_orig.flatten())

    numerator = 2 * mu_recon * mu_orig + c_1
    denominator = mu_recon ** 2 + mu_orig ** 2 + c_1
    return numerator / denominator
def calculate_luminance_image(orig_im, recon_im):
    '''
    Compute the luminance component of SSIM for every sliding-window position.

    input: orig and recon images (2-d arrays of equal, square size).
    returns: (number_windows, number_windows) array of pixelwise luminance
        scores with no zero padding.

    Relies on the module-level ``filter_dim`` (SSIM window size).
    '''
    luminance_res = []
    number_windows = orig_im.shape[0] - filter_dim + 1
    for i in range(number_windows):
        for j in range(number_windows):
            # Use filter_dim for the window extent instead of a hard-coded 11
            # so the slicing stays consistent with number_windows above.
            orig_window = orig_im[i:i + filter_dim, j:j + filter_dim]
            recon_window = recon_im[i:i + filter_dim, j:j + filter_dim]
            luminance_res.append(calculate_luminance(orig_window, recon_window))
    return np.reshape(luminance_res, (number_windows, number_windows))
def calculate_msssim_image(orig, recon):
    '''
    Multi-scale SSIM map for a pair of images.

    input: orig and recon images (2-d arrays of equal size).
    returns: elementwise product of contrast and structure maps at three
        scales with the luminance map at the coarsest scale.
    '''
    contrast1, structure1 = calculate_contrast_image(orig, recon), calculate_structure_image(orig, recon)
    # BUG FIX: down_sample() requires a pool size; the previous two-argument
    # calls raised TypeError.  2 is the conventional MS-SSIM downsampling
    # factor between scales.
    orig_ds1, recon_ds1 = down_sample(orig, recon, 2)
    contrast2, structure2 = calculate_contrast_image(orig_ds1, recon_ds1), calculate_structure_image(orig_ds1, recon_ds1)
    orig_ds2, recon_ds2 = down_sample(orig_ds1, recon_ds1, 2)
    contrast3, structure3 = calculate_contrast_image(orig_ds2, recon_ds2), calculate_structure_image(orig_ds2, recon_ds2)
    luminance = calculate_luminance_image(orig_ds2, recon_ds2)
    # NOTE(review): each scale produces a smaller map, so this elementwise
    # product only works if the maps broadcast against each other -- confirm
    # the intended combination (standard MS-SSIM averages each map to a
    # scalar before multiplying).
    return contrast1 * contrast2 * contrast3 * structure1 * structure2 * structure3 * luminance
def load_data(local = False, path = ''):
    '''
    Load the training/testing image pairs and their SSIM targets.

    input: boolean for if the files are local. if they are local then data
        path must be specified; otherwise the sample data is fetched from
        GitHub over HTTP.
    returns: tuple (train_features, train_target, test_features, test_target)
        with features shaped (num_images, 96, 96, 4) and targets shaped
        (num_images, 86, 86, 1).
    raises: ValueError when local is True but no path was given.
    '''
    if local and (path == ''):
        raise ValueError('please specify a data path')
    if local:
        data_path = path
    else:
        data_path = 'https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/'
    image_dim, result_dim = 96, 86
    # 4 input channels (orig, recon, orig^2, recon^2) -> 1 output channel.
    # (Removed a duplicate assignment that was immediately overwritten, and
    # unused locals for the hidden-layer sizes and filter dims.)
    input_layer, output_layer = 4, 1
    # train data --- 500 images, 96x96 pixels
    orig_500 = pd.read_csv('{}orig_500.txt'.format(data_path), header=None, delim_whitespace = True)
    recon_500 = pd.read_csv('{}recon_500.txt'.format(data_path), header=None, delim_whitespace = True)
    # test data --- 140 images, 96x96 pixels
    orig_140 = pd.read_csv('{}orig_140.txt'.format(data_path), header=None, delim_whitespace = True)
    recon_140 = pd.read_csv('{}recon_140.txt'.format(data_path), header=None, delim_whitespace = True)
    # targets
    ssim_500 = pd.read_csv('{}ssim_500_nogauss.csv'.format(data_path), header=None)
    ssim_140 = pd.read_csv('{}ssim_140_nogauss.csv'.format(data_path), header=None)
    # getting 4 input channels for train and test --- (orig, recon, orig squared, recon squared)
    original_images_train = orig_500.values
    original_images_train_sq = orig_500.values**2
    reconstructed_images_train = recon_500.values
    reconstructed_images_train_sq = recon_500.values**2
    original_images_test = orig_140.values
    original_images_test_sq = orig_140.values**2
    reconstructed_images_test = recon_140.values
    reconstructed_images_test_sq = recon_140.values**2
    # stack inputs
    training_input = np.dstack((original_images_train, reconstructed_images_train, original_images_train_sq, reconstructed_images_train_sq))
    testing_input = np.dstack((original_images_test, reconstructed_images_test, original_images_test_sq, reconstructed_images_test_sq))
    # normalize inputs (module-level helper defined elsewhere in this file)
    training_input_normalized, testing_input_normalized = normalize_input(training_input, testing_input)
    # target values
    training_target = ssim_500.values
    testing_target = ssim_140.values
    # get size of training and testing set
    train_size = original_images_train.shape[0]
    test_size = original_images_test.shape[0]
    # reshaping features to (num images, 96x96, 4 channels)
    train_features = np.reshape(training_input_normalized, [train_size,image_dim,image_dim,input_layer])
    test_features = np.reshape(testing_input_normalized, [test_size,image_dim,image_dim,input_layer])
    # reshaping target to --- (num images, 86x86, 1)
    train_target = np.reshape(training_target, [train_size, result_dim, result_dim, output_layer])
    test_target = np.reshape(testing_target, [test_size, result_dim, result_dim, output_layer])
    return train_features, train_target, test_features, test_target
def plot_sample(train_features, train_target):
    '''
    Display three randomly chosen (original, reconstructed, ssim) triples
    from the training data on a 3x3 grid.
    '''
    plt.figure(figsize=(12, 12))
    grid = gridspec.GridSpec(3, 3)
    grid.update(wspace=0, hspace=0.03)
    for row in range(3):
        # Pick a random training image (500 images in the training set).
        sample = np.random.randint(500)
        axes = [plt.subplot(grid[3 * row + col]) for col in range(3)]
        for ax in axes:
            ax.set_xticklabels([])
            ax.set_yticklabels([])
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
        if row == 0:
            for ax, title in zip(axes, ('original', 'reconstructed', 'ssim')):
                ax.set_title(title, size=20)
        axes[0].imshow(train_features[sample, :, :, 0], cmap='gray')
        axes[1].imshow(train_features[sample, :, :, 1], cmap='gray')
        axes[2].imshow(train_target[sample, :, :, 0], cmap='plasma')
    plt.show()
    return
def convolve_inner_layers(x, W, b):
    '''
    inner layers of network --- relu activation

    Stride-1 VALID convolution with weights W, bias add, then ReLU.
    (The docstring previously said "tanh activation", but the code applies
    tf.nn.relu.)
    '''
    y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='VALID')
    y = tf.nn.bias_add(y, b)
    return tf.nn.relu(y)
def convolve_ouput_layer(x, W, b):
    '''
    output layer of network --- linear activation

    Stride-1 VALID convolution plus bias; no non-linearity is applied.
    (The "ouput" typo in the name is kept because callers use this name.)
    '''
    y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='VALID')
    y = tf.nn.bias_add(y, b)
    return y
def conv_net(x, W, b):
    '''
    entire conv net. each layer feed to following layer as well as output layer

    W and b are dicts keyed 'weights1'..'weights4', 'weights_out' and
    'bias1'..'bias4', 'bias_out'.  All four hidden activations are
    concatenated along the channel axis and fed to the linear output layer.
    '''
    conv1 = convolve_inner_layers(x, W['weights1'], b['bias1'])
    conv2 = convolve_inner_layers(conv1, W['weights2'], b['bias2'])
    conv3 = convolve_inner_layers(conv2, W['weights3'], b['bias3'])
    conv4 = convolve_inner_layers(conv3, W['weights4'], b['bias4'])
    # Skip-style concatenation: every hidden layer contributes to the output.
    # NOTE(review): this assumes conv1..conv4 share spatial dims -- true only
    # if the hidden filters are 1x1 after the first layer; confirm with the
    # weight shapes defined elsewhere.
    output_feed = tf.concat([conv1, conv2, conv3, conv4],3)
    output = convolve_ouput_layer(output_feed, W['weights_out'], b['bias_out'])
    return output
| mit |
hadim/spindle_tracker | spindle_tracker/tracker/solver/by_frame_solver.py | 1 | 9174 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import logging
log = logging.getLogger(__name__)
import numpy as np
from ...utils import print_progress
from ..matrix import CostMatrix
from ..cost_function import AbstractCostFunction
from ..cost_function.brownian import BrownianLinkCostFunction
from ..cost_function.diagonal import DiagonalCostFunction
from ..cost_function.directed import BasicDirectedLinkCostFunction
from . import AbstractSolver
__all__ = []
class ByFrameSolver(AbstractSolver):
    """Linear-assignment tracker that links detected objects frame by frame.

    Each consecutive pair of time points is solved as one assignment problem
    built from three cost functions: 'link' (object-to-object), 'birth'
    (new segment starts) and 'death' (segment ends).

    Parameters
    ----------
    trajs : :class:`pandas.DataFrame`
        Must have a (t_stamp, label) MultiIndex and at least the 't' column
        plus the coordinate columns (checked in ``__init__``).
    cost_functions : dict
        Keys 'link', 'birth' and 'death', each an
        :class:`AbstractCostFunction` instance.
    """
    def __init__(self, trajs, cost_functions, coords=['x', 'y', 'z']):
        super(self.__class__, self).__init__(trajs)
        # Current source/target time stamps; updated by one_frame().
        self.t_in = 0
        self.t_out = 0
        self.coords = coords
        # Fail fast if the trajectories table lacks the expected structure.
        self.trajs.check_trajs_df_structure(index=['t_stamp', 'label'],
                                            columns=['t'] + coords)
        self.link_cf = cost_functions['link']
        self.check_cost_function_type(self.link_cf, AbstractCostFunction)
        self.birth_cf = cost_functions['birth']
        self.check_cost_function_type(self.birth_cf, AbstractCostFunction)
        self.death_cf = cost_functions['death']
        self.check_cost_function_type(self.death_cf, AbstractCostFunction)
        # Running maximum of accepted link costs; used to adapt the
        # birth/death costs as tracking proceeds (see _update_max_assign_cost).
        self.max_assigned_cost = self.death_cf.context['cost']

    @classmethod
    def for_brownian_motion(cls, trajs,
                            max_speed,
                            penalty=1.05,
                            coords=['x', 'y', 'z']):
        """
        Parameters
        ----------
        trajs : :class:`spindle_tracker.trajectories.Trajectories`
        max_speed : float
            Maximum objects velocity
        penalty : float
        coords : list
            Which columns to choose in trajs when computing distances.

        Examples
        --------
        >>> true_trajs = Trajectories(data.brownian_trajectories_generator())
        >>>
        >>> # Remove labels
        >>> true_trajs.relabel(np.arange(len(true_trajs)))
        >>>
        >>> solver = ByFrameSolver.for_brownian_motion(true_trajs, max_speed=5, penalty=2.)
        >>> new_trajs = solver.track()
        2014:INFO:by_frame_solver: Initiating frame by frame tracking.
        2014:INFO:by_frame_solver: Frame by frame tracking done. 5 segments found (500 before).
        """
        # Birth/death baseline: squared max displacement, scaled by penalty.
        # NOTE(review): np.float is removed in NumPy >= 1.24; plain float()
        # is the equivalent modern spelling.
        guessed_cost = np.float(max_speed ** 2) * penalty
        diag_context = {'cost': guessed_cost}
        diag_params = {'penalty': penalty, 'coords': coords}
        link_cost_func = BrownianLinkCostFunction(parameters={'max_speed': max_speed,
                                                              'coords': coords})
        birth_cost_func = DiagonalCostFunction(context=diag_context,
                                               parameters=diag_params)
        death_cost_func = DiagonalCostFunction(context=diag_context,
                                               parameters=diag_params)
        cost_functions = {'link': link_cost_func,
                          'birth': birth_cost_func,
                          'death': death_cost_func}
        return cls(trajs, cost_functions, coords=coords)

    @classmethod
    def for_directed_motion(cls, trajs,
                            max_speed,
                            penalty=1.05,
                            past_traj_time=10,
                            smooth_factor=0,
                            interpolation_order=1,
                            coords=['x', 'y', 'z']):
        """Link objects according to their distance found in trajectories frame by frame.

        Parameters
        ----------
        trajs : :class:`spindle_tracker.trajectories.Trajectories`
        max_speed : float
            Maximum objects velocity
        penalty : float
        past_traj_time : float
            Time during which the tracker can make a gap close. Above this time all gap
            close event will discarded.
        smooth_factor : float
            Smoothing condition used in :func:`scipy.interpolate.splrep`
        interpolation_order : int
            The order of the spline fit. See :func:`scipy.interpolate.splrep`
        coords : list
            Which columns to choose in trajs when computing distances.
        """
        parameters = {'max_speed': max_speed,
                      'past_traj_time': past_traj_time,
                      'smooth_factor': smooth_factor,
                      'interpolation_order': interpolation_order,
                      'coords': coords}
        # NOTE(review): 20 is an empirical baseline cost for births/deaths in
        # the directed case -- confirm against the cost function's scale.
        guessed_cost = 20 * penalty
        diag_context = {'cost': guessed_cost}
        diag_params = {'penalty': penalty}
        # The directed link cost needs the full trajectories to extrapolate
        # past motion, hence the extra context.
        link_context = {'trajs': trajs}
        link_cost_func = BasicDirectedLinkCostFunction(parameters=parameters,
                                                       context=link_context)
        birth_cost_func = DiagonalCostFunction(context=diag_context,
                                               parameters=diag_params)
        death_cost_func = DiagonalCostFunction(context=diag_context,
                                               parameters=diag_params)
        cost_functions = {'link': link_cost_func,
                          'birth': birth_cost_func,
                          'death': death_cost_func}
        return cls(trajs, cost_functions, coords=coords)

    @property
    def blocks_structure(self):
        # 2x2 block layout of the LAP cost matrix:
        # [[link, death], [birth, None]].
        return [[self.link_cf.mat, self.death_cf.mat],
                [self.birth_cf.mat, None]]

    @property
    def pos_in(self):
        # Objects present at the current source frame.
        return self.trajs.loc[self.t_in]

    @property
    def pos_out(self):
        # Objects present at the current target frame.
        return self.trajs.loc[self.t_out]

    def track(self, progress_bar=False, progress_bar_out=None):
        """Run the frame-by-frame linking over all consecutive frame pairs.

        Parameters
        ----------
        progress_bar : bool
            Display progress bar
        progress_bar_out : OutStream
            For testing purpose only

        Returns
        -------
        self.trajs : :class:`pandas.DataFrame`
        """
        log.info('Initiating frame by frame tracking.')
        old_labels = self.trajs.index.get_level_values('label').values
        # 'new_label' accumulates the relinked labels; floats so that
        # fresh labels (max + 1.) can be appended uniformly.
        self.trajs['new_label'] = old_labels.astype(np.float)
        ts_in = self.trajs.t_stamps[:-1]
        ts_out = self.trajs.t_stamps[1:]
        n_labels_before = len(self.trajs.labels)
        n = len(ts_in)
        for i, (t_in, t_out) in enumerate(zip(ts_in, ts_out)):
            if progress_bar:
                progress = i / n * 100
                message = "t_in : {} | t_out {}".format(t_in, t_out)
                print_progress(progress, message=message, out=progress_bar_out)
            self.one_frame(t_in, t_out)
        if progress_bar:
            # -1 clears the progress bar.
            print_progress(-1)
        self.relabel_trajs()
        n_labels_after = len(self.trajs.labels)
        mess = 'Frame by frame tracking done. {} segments found ({} before).'
        log.info(mess.format(n_labels_after, n_labels_before))
        return self.trajs

    def one_frame(self, t_in, t_out):
        """Build and solve the assignment problem for one frame pair.

        Parameters
        ----------
        t_in : int
        t_out : int
        """
        self.t_in = t_in
        self.t_out = t_out
        pos_in = self.pos_in
        pos_out = self.pos_out
        # Refresh each cost function's context, recompute its block, then
        # assemble and solve the LAP matrix.
        self.link_cf.context['pos_in'] = pos_in
        self.link_cf.context['pos_out'] = pos_out
        self.link_cf.get_block()
        self.birth_cf.context['objects'] = pos_out
        self.birth_cf.get_block()
        self.death_cf.context['objects'] = pos_in
        self.death_cf.get_block()
        self.cm = CostMatrix(self.blocks_structure)
        self.cm.solve()
        self.assign()

    def assign(self):
        """Propagate labels from t_in to t_out using the solved cost matrix.

        Output indices below ``last_in_link`` are links (label carried over);
        the rest are births (a fresh label is created).
        """
        row_shapes, col_shapes = self.cm.get_shapes()
        last_in_link = row_shapes[0]
        last_out_link = col_shapes[0]
        new_labels_in = self.trajs.loc[self.t_in]['new_label'].values
        # NOTE(review): np.arange yields an int array while new_label values
        # are floats -- assignment below truncates; appears harmless since
        # labels are integral, but worth confirming.
        new_labels_out = np.arange(last_out_link)
        for idx_out, idx_in in enumerate(self.cm.out_links[:last_out_link]):
            if idx_in >= last_in_link:
                # new segment
                new_label = self.trajs['new_label'].max() + 1.
            else:
                # assignment
                new_label = new_labels_in[idx_in]
                self._update_max_assign_cost(self.cm.mat[idx_in, idx_out])
            new_labels_out[idx_out] = new_label
        self.trajs.loc[self.t_out, 'new_label'] = new_labels_out
        # The line below looks much slower than the two lines above
        # self.trajs.loc[self.t_out, 'new_label'].iloc[idx_out] = new_label

    def _update_max_assign_cost(self, cost):
        """Raise birth/death costs whenever a more expensive link is accepted,
        keeping them proportionally above the worst link seen so far.
        """
        if cost > self.max_assigned_cost:
            self.max_assigned_cost = cost
            new_b_cost = self.max_assigned_cost * self.birth_cf.parameters['penalty']
            new_d_cost = self.max_assigned_cost * self.death_cf.parameters['penalty']
            self.birth_cf.context['cost'] = new_b_cost
            self.death_cf.context['cost'] = new_d_cost
| bsd-3-clause |
pastephens/pysal | pysal/contrib/pdio/dbf.py | 7 | 6661 | """miscellaneous file manipulation utilities
"""
import numpy as np
import pysal as ps
import pandas as pd
def check_dups(li):
    """checks duplicates in list of ID values

    ID values must be read in as a list

    __author__ = "Luc Anselin <luc.anselin@asu.edu> "

    Arguments
    ---------
    li      : list of ID values

    Returns
    -------
    a list with the duplicate IDs (each reported once, in first-seen order)
    """
    # Counter gives a single O(n) pass; the previous list.count approach
    # rescanned the list for every element (O(n^2)).
    from collections import Counter
    return [value for value, count in Counter(li).items() if count > 1]
def dbfdups(dbfpath,idvar):
    """checks duplicates in a dBase file

    ID variable must be specified correctly

    __author__ = "Luc Anselin <luc.anselin@asu.edu> "

    Arguments
    ---------
    dbfpath  : file path to dBase file
    idvar    : ID variable in dBase file

    Returns
    -------
    a list with the duplicate IDs (each reported once, in first-seen order)
    """
    db = ps.open(dbfpath,'r')
    li = db.by_col(idvar)
    # Close the file handle (it was previously left open), and use Counter
    # for an O(n) duplicate scan instead of the O(n^2) list.count approach.
    db.close()
    from collections import Counter
    return [value for value, count in Counter(li).items() if count > 1]
def df2dbf(df, dbf_path, my_specs=None):
    '''
    Convert a pandas.DataFrame into a dbf.

    __author__ = "Dani Arribas-Bel <darribas@asu.edu>, Luc Anselin <luc.anselin@asu.edu>"
    ...

    Arguments
    ---------
    df          : DataFrame
                  Pandas dataframe object to be entirely written out to a dbf
    dbf_path    : str
                  Path to the output dbf. It is also returned by the function
    my_specs    : list
                  List with the field_specs to use for each column.
                  Defaults to None and applies the following scheme:
                    * int: ('N', 14, 0) - for all ints
                    * float: ('N', 14, 14) - for all floats
                    * str: ('C', 14, 0) - for string, object and category
                      with all variants for different type sizes

    Note: use of dtypes.name may not be fully robust, but preferred apprach of using
    isinstance seems too clumsy
    '''
    if my_specs:
        specs = my_specs
    else:
        # The triple-quoted string below is a disabled legacy implementation
        # kept as-is (it is a no-op expression statement, not executed logic).
        """
        type2spec = {int: ('N', 20, 0),
                     np.int64: ('N', 20, 0),
                     np.int32: ('N', 20, 0),
                     np.int16: ('N', 20, 0),
                     np.int8: ('N', 20, 0),
                     float: ('N', 36, 15),
                     np.float64: ('N', 36, 15),
                     np.float32: ('N', 36, 15),
                     str: ('C', 14, 0)
                     }
        types = [type(df[i].iloc[0]) for i in df.columns]
        """
        # new approach using dtypes.name to avoid numpy name issue in type
        # Maps pandas dtype names to dbf field specs (type, width, precision).
        type2spec = {'int': ('N', 20, 0),
                     'int8': ('N', 20, 0),
                     'int16': ('N', 20, 0),
                     'int32': ('N', 20, 0),
                     'int64': ('N', 20, 0),
                     'float': ('N', 36, 15),
                     'float32': ('N', 36, 15),
                     'float64': ('N', 36, 15),
                     'str': ('C', 14, 0),
                     'object': ('C', 14, 0),
                     'category': ('C', 14, 0)
                     }
        types = [df[i].dtypes.name for i in df.columns]
        specs = [type2spec[t] for t in types]
    db = ps.open(dbf_path, 'w')
    db.header = list(df.columns)
    db.field_spec = specs
    # Write one record per row; iterating the transpose yields rows as Series.
    # NOTE(review): DataFrame.iteritems was removed in pandas 2.0 (use .items).
    for i, row in df.T.iteritems():
        db.write(row)
    db.close()
    return dbf_path
def dbf2df(dbf_path, index=None, cols=False, incl_index=False):
    '''
    Read a dbf file as a pandas.DataFrame, optionally selecting the index
    variable and which columns are to be loaded.

    __author__ = "Dani Arribas-Bel <darribas@asu.edu> "
    ...

    Arguments
    ---------
    dbf_path    : str
                  Path to the DBF file to be read
    index       : str
                  Name of the column to be used as the index of the DataFrame
    cols        : list
                  List with the names of the columns to be read into the
                  DataFrame. Defaults to False, which reads the whole dbf
    incl_index  : Boolean
                  If True index is included in the DataFrame as a
                  column too. Defaults to False

    Returns
    -------
    df          : DataFrame
                  pandas.DataFrame object created
    '''
    db = ps.open(dbf_path)
    if cols:
        if incl_index:
            # Note: this mutates the caller's `cols` list in place.
            cols.append(index)
        vars_to_read = cols
    else:
        vars_to_read = db.header
    data = dict([(var, db.by_col(var)) for var in vars_to_read])
    if index:
        # Re-read the index column as the DataFrame index.
        index = db.by_col(index)
        db.close()
        return pd.DataFrame(data, index=index, columns=vars_to_read)
    else:
        db.close()
        return pd.DataFrame(data,columns=vars_to_read)
def dbfjoin(dbf1_path,dbf2_path,out_path,joinkey1,joinkey2):
    '''
    Wrapper function to merge two dbf files into a new dbf file.

    __author__ = "Luc Anselin <luc.anselin@asu.edu> "

    Uses dbf2df and df2dbf to read and write the dbf files into a pandas
    DataFrame. Uses all default settings for dbf2df and df2dbf (see docs
    for specifics).
    ...

    Arguments
    ---------
    dbf1_path  : str
                 Path to the first (left) dbf file
    dbf2_path  : str
                 Path to the second (right) dbf file
    out_path   : str
                 Path to the output dbf file (returned by the function)
    joinkey1   : str
                 Variable name for the key in the first dbf. Must be specified.
                 Key must take unique values.
    joinkey2   : str
                 Variable name for the key in the second dbf. Must be specified.
                 Key must take unique values.

    Returns
    -------
    dbfpath    : path to output file
    '''
    left_frame = dbf2df(dbf1_path, index=joinkey1)
    right_frame = dbf2df(dbf2_path, index=joinkey2)
    merged = pd.merge(left_frame, right_frame,
                      left_on=joinkey1, right_on=joinkey2, sort=False)
    return df2dbf(merged, out_path)
def dta2dbf(dta_path,dbf_path):
    """
    Wrapper function to convert a stata dta file into a dbf file.

    __author__ = "Luc Anselin <luc.anselin@asu.edu> "

    Uses df2dbf to write the dbf files from a pandas
    DataFrame. Uses all default settings for df2dbf (see docs
    for specifics).
    ...

    Arguments
    ---------
    dta_path    : str
                  Path to the Stata dta file
    dbf_path    : str
                  Path to the output dbf file

    Returns
    -------
    dbf_path    : path to output file
    """
    frame = pd.read_stata(dta_path)
    return df2dbf(frame, dbf_path)
| bsd-3-clause |
nrhine1/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
    """Create a decorator for methods that are delegated to a sub-estimator

    This enables ducktyping by hasattr returning True according to the
    sub-estimator.

    Parameters
    ----------
    delegate : str
        Name of the attribute (on the decorated method's instance) holding
        the sub-estimator that must provide the method.

    >>> from sklearn.utils.metaestimators import if_delegate_has_method
    >>>
    >>>
    >>> class MetaEst(object):
    ...     def __init__(self, sub_est):
    ...         self.sub_est = sub_est
    ...
    ...     @if_delegate_has_method(delegate='sub_est')
    ...     def predict(self, X):
    ...         return self.sub_est.predict(X)
    ...
    >>> class HasPredict(object):
    ...     def predict(self, X):
    ...         return X.sum(axis=1)
    ...
    >>> class HasNoPredict(object):
    ...     pass
    ...
    >>> hasattr(MetaEst(HasPredict()), 'predict')
    True
    >>> hasattr(MetaEst(HasNoPredict()), 'predict')
    False
    """
    # The descriptor checks '<delegate>.<method name>' on attribute access and
    # raises AttributeError when the delegate lacks the method, which is what
    # makes hasattr() reflect the sub-estimator's capabilities.
    return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
eaplatanios/tensorflow | tensorflow/python/estimator/canned/baseline_test.py | 11 | 54918 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for baseline.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator.canned import baseline
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model.
BIAS_NAME = 'baseline/bias'
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  """Returns an op asserting |expected - actual| / |expected| < rtol elementwise."""
  with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
    expected = ops.convert_to_tensor(expected, name='expected')
    actual = ops.convert_to_tensor(actual, name='actual')
    # Relative difference.  Note the division by |expected|: an expected
    # value of 0 would yield inf/nan, so callers pass non-zero expectations.
    rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
    rtol = ops.convert_to_tensor(rtol, name='rtol')
    return check_ops.assert_less(
        rdiff,
        rtol,
        data=('Condition expected =~ actual did not hold element-wise:'
              'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
              'rtol = ', rtol,),
        name=scope)
def save_variables_to_ckpt(model_dir):
  """Initializes all global variables and saves them to model_dir/model.ckpt.

  Used by the tests to hand-craft a checkpoint that the estimator will then
  warm-start from.
  """
  init_all_op = [variables.global_variables_initializer()]
  with tf_session.Session() as sess:
    sess.run(init_all_op)
    saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def queue_parsed_features(feature_map):
  """Pipes a dict of feature tensors through a FIFO queue.

  Enqueues all tensors together and returns a dict mapping the same keys to
  the dequeued tensors, so parsed examples flow through the input pipeline
  as a unit.
  """
  tensors_to_enqueue = []
  keys = []
  for key, tensor in six.iteritems(feature_map):
    keys.append(key)
    tensors_to_enqueue.append(tensor)
  queue_dtypes = [x.dtype for x in tensors_to_enqueue]
  input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
  # Register a runner so the queue is fed when queue runners are started.
  queue_runner.add_queue_runner(
      queue_runner.QueueRunner(input_queue,
                               [input_queue.enqueue(tensors_to_enqueue)]))
  dequeued_tensors = input_queue.dequeue()
  # Reassemble the dict in the original key order.
  return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
def sorted_key_dict(unsorted_dict):
  """Returns a copy of the dict whose keys iterate in sorted order."""
  return dict(sorted(unsorted_dict.items(), key=lambda kv: kv[0]))
def sigmoid(x):
  """Logistic function 1 / (1 + e^-x), evaluated with numpy."""
  return np.reciprocal(1.0 + np.exp(-x))
def _baseline_regressor_fn(*args, **kwargs):
  # Indirection point so this test suite can be re-pointed at an alternative
  # BaselineRegressor-compatible constructor.
  return baseline.BaselineRegressor(*args, **kwargs)
def _baseline_classifier_fn(*args, **kwargs):
  # Indirection point so this test suite can be re-pointed at an alternative
  # BaselineClassifier-compatible constructor.
  return baseline.BaselineClassifier(*args, **kwargs)
# Tests for Baseline Regressor.
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaselineRegressorEvaluationTest(test.TestCase):
  """Checks BaselineRegressor.evaluate() against hand-written checkpoints.

  Each test writes a checkpoint containing only the bias (and global step),
  then asserts the exact loss metrics the estimator reports.
  """

  def setUp(self):
    # Fresh model directory per test for the hand-crafted checkpoint.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def test_evaluation_for_simple_data(self):
    with ops.Graph().as_default():
      variables.Variable([13.0], name=BIAS_NAME)
      variables.Variable(
          100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
    eval_metrics = baseline_regressor.evaluate(
        input_fn=lambda: ({'age': ((1,),)}, ((10.,),)), steps=1)

    # Logit is bias = 13, while label is 10. Loss is 3**2 = 9.
    self.assertDictEqual({
        metric_keys.MetricKeys.LOSS: 9.,
        metric_keys.MetricKeys.LOSS_MEAN: 9.,
        ops.GraphKeys.GLOBAL_STEP: 100
    }, eval_metrics)

  def test_evaluation_batch(self):
    """Tests evaluation for batch_size==2."""
    with ops.Graph().as_default():
      variables.Variable([13.0], name=BIAS_NAME)
      variables.Variable(
          100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
    eval_metrics = baseline_regressor.evaluate(
        input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)

    # Logit is bias = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the sum over batch = 9 + 9 = 18
    # Average loss is the average over batch = 9
    self.assertDictEqual({
        metric_keys.MetricKeys.LOSS: 18.,
        metric_keys.MetricKeys.LOSS_MEAN: 9.,
        ops.GraphKeys.GLOBAL_STEP: 100
    }, eval_metrics)

  def test_evaluation_weights(self):
    """Tests evaluation with weights."""
    with ops.Graph().as_default():
      variables.Variable([13.0], name=BIAS_NAME)
      variables.Variable(
          100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    def _input_fn():
      # Second example carries weight 2.
      features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
      labels = ((10.,), (10.,))
      return features, labels

    baseline_regressor = _baseline_regressor_fn(
        weight_column='weights',
        model_dir=self._model_dir)
    eval_metrics = baseline_regressor.evaluate(input_fn=_input_fn, steps=1)

    # Logit is bias = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the weighted sum over batch = 9 + 2*9 = 27
    # average loss is the weighted average = 9 + 2*9 / (1 + 2) = 9
    self.assertDictEqual({
        metric_keys.MetricKeys.LOSS: 27.,
        metric_keys.MetricKeys.LOSS_MEAN: 9.,
        ops.GraphKeys.GLOBAL_STEP: 100
    }, eval_metrics)

  def test_evaluation_for_multi_dimensions(self):
    label_dim = 2
    with ops.Graph().as_default():
      variables.Variable([46.0, 58.0], name=BIAS_NAME)
      variables.Variable(100, name='global_step', dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_regressor = _baseline_regressor_fn(
        label_dimension=label_dim,
        model_dir=self._model_dir)
    input_fn = numpy_io.numpy_input_fn(
        x={
            'age': np.array([[2., 4., 5.]]),
        },
        y=np.array([[46., 58.]]),
        batch_size=1,
        num_epochs=None,
        shuffle=False)
    eval_metrics = baseline_regressor.evaluate(input_fn=input_fn, steps=1)

    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
         ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())

    # Logit is bias which is [46, 58]
    self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaselineRegressorPredictTest(test.TestCase):
  """Checks BaselineRegressor.predict() values and shapes.

  A baseline model ignores the features entirely, so every prediction is
  just the bias from the hand-written checkpoint.
  """

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def test_1d(self):
    """Tests predict when all variables are one-dimensional."""
    with ops.Graph().as_default():
      variables.Variable([.2], name=BIAS_NAME)
      variables.Variable(100, name='global_step', dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': np.array([[2.]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = baseline_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # Prediction = bias = .2; the feature value 2. is ignored by a baseline
    # model.  (An older comment here, "x * weight + bias = 20.2", was a
    # leftover from the linear-regressor tests.)
    self.assertAllClose([[.2]], predicted_scores)

  def testMultiDim(self):
    """Tests predict when all variables are multi-dimensional."""
    batch_size = 2
    label_dimension = 3
    with ops.Graph().as_default():
      variables.Variable(  # shape=[label_dimension]
          [.2, .4, .6], name=BIAS_NAME)
      variables.Variable(100, name='global_step', dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    baseline_regressor = _baseline_regressor_fn(
        label_dimension=label_dimension,
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        # x shape=[batch_size, x_dim]
        x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predictions = baseline_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # score = bias, shape=[batch_size, label_dimension]
    self.assertAllClose([[0.2, 0.4, 0.6], [0.2, 0.4, 0.6]],
                        predicted_scores)
class BaselineRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
# EVALUTE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['predictions'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
# Pandas DataFrame natually supports 1 dim data only.
label_dimension = 1
input_dimension = label_dimension
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_input_fn_from_parse_example(self):
  """Tests complete flow with input_fn constructed from parse_example."""
  label_dimension = 2
  input_dimension = label_dimension
  batch_size = 10
  prediction_length = batch_size
  data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
  data = data.reshape(batch_size, label_dimension)

  # Serialize each row of `data` as a tf.Example carrying both the feature
  # 'x' and the label 'y' (here y == x, so the model learns the identity).
  serialized_examples = []
  for datum in data:
    example = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'x':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=datum)),
            'y':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=datum[:label_dimension])),
        }))
    serialized_examples.append(example.SerializeToString())

  feature_spec = {
      'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
      'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
  }

  def _train_input_fn():
    # Unlimited epochs: training is terminated by the `steps` argument.
    feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
    features = queue_parsed_features(feature_map)
    labels = features.pop('y')
    return features, labels

  def _eval_input_fn():
    # Single epoch so evaluation terminates on its own.
    feature_map = parsing_ops.parse_example(
        input_lib.limit_epochs(serialized_examples, num_epochs=1),
        feature_spec)
    features = queue_parsed_features(feature_map)
    labels = features.pop('y')
    return features, labels

  def _predict_input_fn():
    feature_map = parsing_ops.parse_example(
        input_lib.limit_epochs(serialized_examples, num_epochs=1),
        feature_spec)
    features = queue_parsed_features(feature_map)
    # Drop the label; prediction consumes features only.
    features.pop('y')
    return features, None

  self._test_complete_flow(
      train_input_fn=_train_input_fn,
      eval_input_fn=_eval_input_fn,
      predict_input_fn=_predict_input_fn,
      input_dimension=input_dimension,
      label_dimension=label_dimension,
      prediction_length=prediction_length)
class BaselineRegressorTrainingTest(test.TestCase):
  """Tests training behavior of BaselineRegressor (bias-only model)."""

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _mock_optimizer(self, expected_loss=None):
    """Returns a mock optimizer whose minimize() verifies vars and loss.

    If `expected_loss` is given, an assert op comparing it to the actual
    loss tensor is added as a control dependency of the train op.
    """
    expected_var_names = [
        '%s:0' % BIAS_NAME
    ]

    def _minimize(loss, global_step=None, var_list=None):
      # Only the bias variable should be trainable for a baseline model.
      trainable_vars = var_list or ops.get_collection(
          ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertItemsEqual(expected_var_names,
                            [var.name for var in trainable_vars])

      # Verify loss. We can't check the value directly, so we add an assert op.
      self.assertEquals(0, loss.shape.ndims)
      if expected_loss is None:
        if global_step is not None:
          return distribute_lib.increment_var(global_step)
        return control_flow_ops.no_op()
      assert_loss = assert_close(
          math_ops.to_float(expected_loss, name='expected'),
          loss,
          name='assert_loss')
      with ops.control_dependencies((assert_loss,)):
        if global_step is not None:
          return distribute_lib.increment_var(global_step)
        return control_flow_ops.no_op()

    mock_optimizer = test.mock.NonCallableMock(
        spec=optimizer.Optimizer,
        wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
    mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)

    # NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
    # So, return mock_optimizer itself for deepcopy.
    mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
    return mock_optimizer

  def _assert_checkpoint(self,
                         label_dimension,
                         expected_global_step,
                         expected_bias=None):
    """Checks checkpoint variable shapes, global step and (optionally) bias."""
    shapes = {
        name: shape
        for (name, shape) in checkpoint_utils.list_variables(self._model_dir)
    }

    self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
    self.assertEqual(expected_global_step,
                     checkpoint_utils.load_variable(self._model_dir,
                                                    ops.GraphKeys.GLOBAL_STEP))

    self.assertEqual([label_dimension], shapes[BIAS_NAME])
    if expected_bias is not None:
      self.assertEqual(expected_bias,
                       checkpoint_utils.load_variable(self._model_dir,
                                                      BIAS_NAME))

  def testFromScratchWithDefaultOptimizer(self):
    # Create BaselineRegressor.
    label = 5.
    age = 17
    baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)

    # Train for a few steps, and validate final checkpoint.
    num_steps = 10
    baseline_regressor.train(
        input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
    self._assert_checkpoint(label_dimension=1, expected_global_step=num_steps)

  def testTrainWithOneDimLabel(self):
    label_dimension = 1
    batch_size = 20
    est = _baseline_regressor_fn(
        label_dimension=label_dimension,
        model_dir=self._model_dir)
    # Rank-1 labels (shape (batch_size,)) must be accepted.
    data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
    self.assertEqual((batch_size,), data_rank_1.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1},
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(label_dimension=1, expected_global_step=200)

  def testTrainWithOneDimWeight(self):
    label_dimension = 1
    batch_size = 20
    est = _baseline_regressor_fn(
        label_dimension=label_dimension,
        weight_column='w',
        model_dir=self._model_dir)

    # Rank-1 weights (shape (batch_size,)) must be accepted.
    data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
    self.assertEqual((batch_size,), data_rank_1.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1,
           'w': data_rank_1},
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(label_dimension=1, expected_global_step=200)

  def testFromScratch(self):
    # Create BaselineRegressor.
    label = 5.
    age = 17
    # The bias starts at zero, so:
    # loss = (logits - label)^2 = (0 - 5.)^2 = 25.
    mock_optimizer = self._mock_optimizer(expected_loss=25.)
    baseline_regressor = _baseline_regressor_fn(
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    baseline_regressor.train(
        input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        label_dimension=1,
        expected_global_step=num_steps,
        expected_bias=[0.])

  def testFromCheckpoint(self):
    # Create initial checkpoint.
    bias = 7.0
    initial_global_step = 100
    with ops.Graph().as_default():
      variables.Variable([bias], name=BIAS_NAME)
      variables.Variable(
          initial_global_step,
          name=ops.GraphKeys.GLOBAL_STEP,
          dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # logits = bias = 7.
    # loss = (logits - label)^2 = (7 - 5)^2 = 4
    mock_optimizer = self._mock_optimizer(expected_loss=4.)
    baseline_regressor = _baseline_regressor_fn(
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    baseline_regressor.train(
        input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        label_dimension=1,
        expected_global_step=initial_global_step + num_steps,
        expected_bias=[bias])

  def testFromCheckpointMultiBatch(self):
    # Create initial checkpoint.
    bias = 5.0
    initial_global_step = 100
    with ops.Graph().as_default():
      variables.Variable([bias], name=BIAS_NAME)
      variables.Variable(
          initial_global_step,
          name=ops.GraphKeys.GLOBAL_STEP,
          dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # logits = bias
    # logits[0] = 5.
    # logits[1] = 5.
    # loss = sum(logits - label)^2 = (5 - 5)^2 + (5 - 3)^2 = 4
    mock_optimizer = self._mock_optimizer(expected_loss=4.)
    baseline_regressor = _baseline_regressor_fn(
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    baseline_regressor.train(
        input_fn=lambda: ({'age': ((17,), (15,))}, ((5.,), (3.,))),
        steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        label_dimension=1,
        expected_global_step=initial_global_step + num_steps,
        expected_bias=bias)
# Tests for Baseline Classifier.
class BaselineClassifierTrainingTest(test.TestCase):
  """Tests training behavior of BaselineClassifier (bias-only model)."""

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _mock_optimizer(self, expected_loss=None):
    """Returns a mock optimizer whose minimize() verifies vars and loss.

    If `expected_loss` is given, an assert op comparing it to the actual
    loss tensor is added as a control dependency of the train op.
    """
    expected_var_names = [
        '%s:0' % BIAS_NAME
    ]

    def _minimize(loss, global_step):
      # Only the bias variable should be trainable for a baseline model.
      trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertItemsEqual(
          expected_var_names,
          [var.name for var in trainable_vars])

      # Verify loss. We can't check the value directly, so we add an assert op.
      self.assertEquals(0, loss.shape.ndims)
      if expected_loss is None:
        return distribute_lib.increment_var(global_step)
      assert_loss = assert_close(
          math_ops.to_float(expected_loss, name='expected'),
          loss,
          name='assert_loss')
      with ops.control_dependencies((assert_loss,)):
        return distribute_lib.increment_var(global_step)

    mock_optimizer = test.mock.NonCallableMock(
        spec=optimizer.Optimizer,
        wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
    mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)

    # NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
    # So, return mock_optimizer itself for deepcopy.
    mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
    return mock_optimizer

  def _assert_checkpoint(
      self, n_classes, expected_global_step, expected_bias=None):
    """Checks checkpoint variable shapes, global step and (optionally) bias."""
    # Binary classification uses a single logit; multi-class uses n_classes.
    logits_dimension = n_classes if n_classes > 2 else 1

    shapes = {
        name: shape for (name, shape) in
        checkpoint_utils.list_variables(self._model_dir)
    }

    self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
    self.assertEqual(
        expected_global_step,
        checkpoint_utils.load_variable(
            self._model_dir, ops.GraphKeys.GLOBAL_STEP))

    self.assertEqual([logits_dimension], shapes[BIAS_NAME])
    if expected_bias is not None:
      self.assertAllEqual(expected_bias,
                          checkpoint_utils.load_variable(
                              self._model_dir, BIAS_NAME))

  def _testFromScratchWithDefaultOptimizer(self, n_classes):
    label = 0
    age = 17
    est = baseline.BaselineClassifier(
        n_classes=n_classes,
        model_dir=self._model_dir)

    # Train for a few steps, and validate final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
    self._assert_checkpoint(n_classes, num_steps)

  def testBinaryClassesFromScratchWithDefaultOptimizer(self):
    self._testFromScratchWithDefaultOptimizer(n_classes=2)

  def testMultiClassesFromScratchWithDefaultOptimizer(self):
    self._testFromScratchWithDefaultOptimizer(n_classes=4)

  def _testTrainWithTwoDimsLabel(self, n_classes):
    batch_size = 20

    est = baseline.BaselineClassifier(
        n_classes=n_classes,
        model_dir=self._model_dir)
    # Rank-2 labels (shape (batch, 1)) must be accepted.
    data_rank_1 = np.array([0, 1])
    data_rank_2 = np.array([[0], [1]])
    self.assertEqual((2,), data_rank_1.shape)
    self.assertEqual((2, 1), data_rank_2.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1},
        y=data_rank_2,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)

  def testBinaryClassesTrainWithTwoDimsLabel(self):
    self._testTrainWithTwoDimsLabel(n_classes=2)

  def testMultiClassesTrainWithTwoDimsLabel(self):
    self._testTrainWithTwoDimsLabel(n_classes=4)

  def _testTrainWithOneDimLabel(self, n_classes):
    batch_size = 20

    est = baseline.BaselineClassifier(
        n_classes=n_classes,
        model_dir=self._model_dir)
    # Rank-1 labels (shape (batch,)) must be accepted.
    data_rank_1 = np.array([0, 1])
    self.assertEqual((2,), data_rank_1.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1},
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)

  def testBinaryClassesTrainWithOneDimLabel(self):
    self._testTrainWithOneDimLabel(n_classes=2)

  def testMultiClassesTrainWithOneDimLabel(self):
    self._testTrainWithOneDimLabel(n_classes=4)

  def _testTrainWithTwoDimsWeight(self, n_classes):
    batch_size = 20

    est = baseline.BaselineClassifier(
        weight_column='w',
        n_classes=n_classes,
        model_dir=self._model_dir)
    # Rank-2 weights (shape (batch, 1)) must be accepted.
    data_rank_1 = np.array([0, 1])
    data_rank_2 = np.array([[0], [1]])
    self.assertEqual((2,), data_rank_1.shape)
    self.assertEqual((2, 1), data_rank_2.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1, 'w': data_rank_2}, y=data_rank_1,
        batch_size=batch_size, num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)

  def testBinaryClassesTrainWithTwoDimsWeight(self):
    self._testTrainWithTwoDimsWeight(n_classes=2)

  def testMultiClassesTrainWithTwoDimsWeight(self):
    self._testTrainWithTwoDimsWeight(n_classes=4)

  def _testTrainWithOneDimWeight(self, n_classes):
    batch_size = 20

    est = baseline.BaselineClassifier(
        weight_column='w',
        n_classes=n_classes,
        model_dir=self._model_dir)
    # Rank-1 weights (shape (batch,)) must be accepted.
    data_rank_1 = np.array([0, 1])
    self.assertEqual((2,), data_rank_1.shape)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1, 'w': data_rank_1}, y=data_rank_1,
        batch_size=batch_size, num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)

  def testBinaryClassesTrainWithOneDimWeight(self):
    self._testTrainWithOneDimWeight(n_classes=2)

  def testMultiClassesTrainWithOneDimWeight(self):
    self._testTrainWithOneDimWeight(n_classes=4)

  def _testFromScratch(self, n_classes):
    label = 1
    age = 17
    # For binary classifier:
    #   loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
    #   all zero initially) and label = 1 so,
    #   loss = 1 * -log ( sigmoid(logits) ) = 0.69315
    # For multi class classifier:
    #   loss = cross_entropy(logits, label) where logits are all 0s (weights are
    #   all zero initially) and label = 1 so,
    #   loss = 1 * -log ( 1.0 / n_classes )
    # For this particular test case, as logits are same, the formula
    # 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases.
    mock_optimizer = self._mock_optimizer(
        expected_loss=-1 * math.log(1.0/n_classes))

    est = baseline.BaselineClassifier(
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        n_classes,
        expected_global_step=num_steps,
        expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)

  def testBinaryClassesFromScratch(self):
    self._testFromScratch(n_classes=2)

  def testMultiClassesFromScratch(self):
    self._testFromScratch(n_classes=4)

  def _testFromCheckpoint(self, n_classes):
    # Create initial checkpoint.
    label = 1
    age = 17
    bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
    initial_global_step = 100
    with ops.Graph().as_default():
      variables.Variable(bias, name=BIAS_NAME)
      variables.Variable(
          initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
          dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # For binary classifier:
    #   logits = bias = -1.
    #   loss = sigmoid_cross_entropy(logits, label)
    #   so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
    # For multi class classifier:
    #   loss = cross_entropy(logits, label)
    #   where logits = bias and label = 1
    #   so, loss = 1 * -log ( softmax(logits)[1] )
    if n_classes == 2:
      expected_loss = 1.3133
    else:
      logits = bias
      logits_exp = np.exp(logits)
      softmax = logits_exp / logits_exp.sum()
      expected_loss = -1 * math.log(softmax[label])

    mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)

    est = baseline.BaselineClassifier(
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        n_classes,
        expected_global_step=initial_global_step + num_steps,
        expected_bias=bias)

  def testBinaryClassesFromCheckpoint(self):
    self._testFromCheckpoint(n_classes=2)

  def testMultiClassesFromCheckpoint(self):
    self._testFromCheckpoint(n_classes=4)

  def _testFromCheckpointFloatLabels(self, n_classes):
    """Tests float labels for binary classification."""
    # Create initial checkpoint.
    if n_classes > 2:
      # Float (soft) labels only make sense for the binary case.
      return
    label = 0.8
    age = 17
    bias = [-1.0]
    initial_global_step = 100
    with ops.Graph().as_default():
      variables.Variable(bias, name=BIAS_NAME)
      variables.Variable(
          initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
          dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # logits = bias = -1.
    # loss = sigmoid_cross_entropy(logits, label)
    # => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
    mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)

    est = baseline.BaselineClassifier(
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)

  def testBinaryClassesFromCheckpointFloatLabels(self):
    self._testFromCheckpointFloatLabels(n_classes=2)

  def testMultiClassesFromCheckpointFloatLabels(self):
    self._testFromCheckpointFloatLabels(n_classes=4)

  def _testFromCheckpointMultiBatch(self, n_classes):
    # Create initial checkpoint.
    label = [1, 0]
    age = [17, 18.5]
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    # NOTE(review): the comment above appears stale — `bias` below is set to
    # -1.0 values, not 2.0 * range(n_classes); verify against history.
    bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
    initial_global_step = 100
    with ops.Graph().as_default():
      variables.Variable(bias, name=BIAS_NAME)
      variables.Variable(
          initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
          dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # For binary classifier:
    #   logits = bias
    #   logits[0] = -1.
    #   logits[1] = -1.
    #   loss = sigmoid_cross_entropy(logits, label)
    #   so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
    #   loss[1] = (1 - 0) * -log ( 1- sigmoid(-1) ) = 0.3132
    # For multi class classifier:
    #   loss = cross_entropy(logits, label)
    #   where logits = bias and label = [1, 0]
    #   so, loss = 1 * -log ( softmax(logits)[label] )
    if n_classes == 2:
      expected_loss = (1.3133 + 0.3132)
    else:
      # Expand logits since batch_size=2
      logits = bias * np.ones(shape=(2, 1))
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      expected_loss = expected_loss_0 + expected_loss_1

    mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)

    est = baseline.BaselineClassifier(
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({'age': (age)}, (label)),
        steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        n_classes,
        expected_global_step=initial_global_step + num_steps,
        expected_bias=bias)

  def testBinaryClassesFromCheckpointMultiBatch(self):
    self._testFromCheckpointMultiBatch(n_classes=2)

  def testMultiClassesFromCheckpointMultiBatch(self):
    self._testFromCheckpointMultiBatch(n_classes=4)
class BaselineClassifierEvaluationTest(test.TestCase):
  """Tests evaluation metrics of BaselineClassifier."""

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _test_evaluation_for_simple_data(self, n_classes):
    label = 1
    age = 1.
    # Pre-seed a checkpoint with a known bias so metrics are predictable.
    bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes

    with ops.Graph().as_default():
      variables.Variable(bias, name=BIAS_NAME)
      variables.Variable(
          100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = _baseline_classifier_fn(
        n_classes=n_classes,
        model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=1)

    if n_classes == 2:
      # Binary classes: loss = -log(sigmoid(-1)) = 1.3133
      # Prediction = sigmoid(-1) = 0.2689
      expected_metrics = {
          metric_keys.MetricKeys.LOSS: 1.3133,
          ops.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: 1.3133,
          metric_keys.MetricKeys.ACCURACY: 0.,
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
          metric_keys.MetricKeys.LABEL_MEAN: 1.,
          metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
          metric_keys.MetricKeys.AUC: 0.,
          metric_keys.MetricKeys.AUC_PR: 1.,
      }
    else:
      # Multi classes: loss = 1 * -log ( softmax(logits)[label] )
      logits = bias
      logits_exp = np.exp(logits)
      softmax = logits_exp / logits_exp.sum()
      expected_loss = -1 * math.log(softmax[label])

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          ops.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
          metric_keys.MetricKeys.ACCURACY: 0.,
      }

    self.assertAllClose(sorted_key_dict(expected_metrics),
                        sorted_key_dict(eval_metrics), rtol=1e-3)

  def test_binary_classes_evaluation_for_simple_data(self):
    self._test_evaluation_for_simple_data(n_classes=2)

  def test_multi_classes_evaluation_for_simple_data(self):
    self._test_evaluation_for_simple_data(n_classes=4)

  def _test_evaluation_batch(self, n_classes):
    """Tests evaluation for batch_size==2."""
    label = [1, 0]
    age = [17., 18.]
    bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
    initial_global_step = 100

    with ops.Graph().as_default():
      variables.Variable(bias, name=BIAS_NAME)
      variables.Variable(
          initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
          dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = _baseline_classifier_fn(
        n_classes=n_classes,
        model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({'age': (age)}, (label)), steps=1)

    if n_classes == 2:
      # Logits are (-1., -1.) labels are (1, 0).
      # Loss is
      #   loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
      #   loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
      # Prediction = sigmoid(-1) = 0.2689
      expected_loss = 1.3133 + 0.3132

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          ops.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
          metric_keys.MetricKeys.ACCURACY: 0.5,
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
          metric_keys.MetricKeys.LABEL_MEAN: 0.5,
          metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
          metric_keys.MetricKeys.AUC: 0.5,
          metric_keys.MetricKeys.AUC_PR: 0.75,
      }
    else:
      # Expand logits since batch_size=2
      logits = bias * np.ones(shape=(2, 1))
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      expected_loss = expected_loss_0 + expected_loss_1

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          ops.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
          metric_keys.MetricKeys.ACCURACY: 0.5,
      }

    self.assertAllClose(sorted_key_dict(expected_metrics),
                        sorted_key_dict(eval_metrics), rtol=1e-3)

  def test_binary_classes_evaluation_batch(self):
    self._test_evaluation_batch(n_classes=2)

  def test_multi_classes_evaluation_batch(self):
    self._test_evaluation_batch(n_classes=4)

  def _test_evaluation_weights(self, n_classes):
    """Tests evaluation with weights."""
    label = [1, 0]
    age = [17., 18.]
    weights = [1., 2.]
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    # NOTE(review): the comment above appears stale — `bias` below is set to
    # -1.0 values, not 2.0 * range(n_classes); verify against history.
    bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
    initial_global_step = 100

    with ops.Graph().as_default():
      variables.Variable(bias, name=BIAS_NAME)
      variables.Variable(
          initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
          dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = _baseline_classifier_fn(
        n_classes=n_classes,
        weight_column='w',
        model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({'age': (age), 'w': (weights)}, (label)), steps=1)

    if n_classes == 2:
      # Logits are (-1., -1.) labels are (1, 0).
      # Loss is
      #   loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
      #   loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
      #   weights = [1., 2.]
      expected_loss = 1.3133 * 1. + 0.3132 * 2.
      loss_mean = expected_loss / (1.0 + 2.0)
      label_mean = np.average(label, weights=weights)
      logits = [-1, -1]
      logistics = sigmoid(np.array(logits))
      predictions_mean = np.average(logistics, weights=weights)

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          ops.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
          metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
          metric_keys.MetricKeys.LABEL_MEAN: label_mean,
          metric_keys.MetricKeys.ACCURACY_BASELINE: (
              max(label_mean, 1-label_mean)),
          metric_keys.MetricKeys.AUC: 0.5,
          metric_keys.MetricKeys.AUC_PR: 2. / (1. + 2.),
      }
    else:
      # Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
      # Expand logits since batch_size=2
      logits = bias * np.ones(shape=(2, 1))
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      loss_mean = np.average([expected_loss_0, expected_loss_1],
                             weights=weights)
      expected_loss = loss_mean * np.sum(weights)

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          ops.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
          metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
      }

    self.assertAllClose(sorted_key_dict(expected_metrics),
                        sorted_key_dict(eval_metrics), rtol=1e-3)

  def test_binary_classes_evaluation_weights(self):
    self._test_evaluation_weights(n_classes=2)

  def test_multi_classes_evaluation_weights(self):
    self._test_evaluation_weights(n_classes=4)
class BaselineClassifierPredictTest(test.TestCase):
  """Tests prediction output of BaselineClassifier."""

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
    """Tests predict when all variables are one-dimensional."""
    age = 1.

    # Pre-seed a checkpoint with a known bias so predictions are predictable.
    bias = [10.0] if n_classes == 2 else [10.0] * n_classes

    with ops.Graph().as_default():
      variables.Variable(bias, name=BIAS_NAME)
      variables.Variable(100, name='global_step', dtype=dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = _baseline_classifier_fn(
        label_vocabulary=label_vocabulary,
        n_classes=n_classes,
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={'age': np.array([[age]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = list(est.predict(input_fn=predict_input_fn))

    if n_classes == 2:
      # Binary head emits a single logit; probabilities come from the
      # implicit two-class softmax over [0, logit].
      scalar_logits = bias[0]
      two_classes_logits = [0, scalar_logits]
      two_classes_logits_exp = np.exp(two_classes_logits)
      softmax = two_classes_logits_exp / two_classes_logits_exp.sum()

      expected_predictions = {
          'class_ids': [1],
          'classes': [label_output_fn(1)],
          'logistic': [sigmoid(np.array(scalar_logits))],
          'logits': [scalar_logits],
          'probabilities': softmax,
      }
    else:
      onedim_logits = np.array(bias)
      class_ids = onedim_logits.argmax()
      logits_exp = np.exp(onedim_logits)
      softmax = logits_exp / logits_exp.sum()
      expected_predictions = {
          'class_ids': [class_ids],
          'classes': [label_output_fn(class_ids)],
          'logits': onedim_logits,
          'probabilities': softmax,
      }

    self.assertEqual(1, len(predictions))
    # assertAllClose cannot handle byte type.
    self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
    expected_predictions.pop('classes')
    predictions[0].pop('classes')
    self.assertAllClose(sorted_key_dict(expected_predictions),
                        sorted_key_dict(predictions[0]))

  def testBinaryClassesWithoutLabelVocabulary(self):
    n_classes = 2
    self._testPredictions(n_classes,
                          label_vocabulary=None,
                          label_output_fn=lambda x: ('%s' % x).encode())

  def testBinaryClassesWithLabelVocabulary(self):
    n_classes = 2
    self._testPredictions(
        n_classes,
        label_vocabulary=['class_vocab_{}'.format(i)
                          for i in range(n_classes)],
        label_output_fn=lambda x: ('class_vocab_%s' % x).encode())

  def testMultiClassesWithoutLabelVocabulary(self):
    n_classes = 4
    self._testPredictions(
        n_classes,
        label_vocabulary=None,
        label_output_fn=lambda x: ('%s' % x).encode())

  def testMultiClassesWithLabelVocabulary(self):
    n_classes = 4
    self._testPredictions(
        n_classes,
        label_vocabulary=['class_vocab_{}'.format(i)
                          for i in range(n_classes)],
        label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
class BaselineClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
predict_input_fn, input_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
# EVALUTE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['classes'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, 1), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def _test_numpy_input_fn(self, n_classes):
"""Tests complete flow with numpy_input_fn."""
input_dimension = 4
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=2)
def test_multi_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=4)
def _test_pandas_input_fn(self, n_classes):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
# Pandas DataFrame natually supports 1 dim data only.
input_dimension = 1
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
target = np.array([1, 0, 1, 0], dtype=np.int32)
x = pd.DataFrame({'x': data})
y = pd.Series(target)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=2)
def test_multi_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=4)
def _test_input_fn_from_parse_example(self, n_classes):
    """Tests complete flow with input_fn constructed from parse_example."""
    input_dimension = 2
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
    data = data.reshape(batch_size, input_dimension)
    target = np.array([1] * batch_size, dtype=np.int64)

    # Serialize every (x, y) row as a tf.Example proto so the input_fns can
    # exercise the parse_example path.
    serialized_examples = []
    for x, y in zip(data, target):
        example = example_pb2.Example(features=feature_pb2.Features(
            feature={
                'x':
                    feature_pb2.Feature(float_list=feature_pb2.FloatList(
                        value=x)),
                'y':
                    feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                        value=[y])),
            }))
        serialized_examples.append(example.SerializeToString())

    feature_spec = {
        'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
        'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
    }

    def _train_input_fn():
        # Unlimited epochs for training; labels come from the 'y' feature.
        feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
        features = queue_parsed_features(feature_map)
        labels = features.pop('y')
        return features, labels

    def _eval_input_fn():
        # Exactly one epoch over the serialized examples.
        feature_map = parsing_ops.parse_example(
            input_lib.limit_epochs(serialized_examples, num_epochs=1),
            feature_spec)
        features = queue_parsed_features(feature_map)
        labels = features.pop('y')
        return features, labels

    def _predict_input_fn():
        # Same as eval, but the label column is dropped entirely.
        feature_map = parsing_ops.parse_example(
            input_lib.limit_epochs(serialized_examples, num_epochs=1),
            feature_spec)
        features = queue_parsed_features(feature_map)
        features.pop('y')
        return features, None

    self._test_complete_flow(
        n_classes=n_classes,
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=input_dimension,
        prediction_length=prediction_length)
def test_binary_classes_input_fn_from_parse_example(self):
    # End-to-end flow through parse_example-based input_fns, binary labels.
    self._test_input_fn_from_parse_example(n_classes=2)
def test_multi_classes_input_fn_from_parse_example(self):
    # Same flow as the binary case, but with 4 output classes.
    self._test_input_fn_from_parse_example(n_classes=4)
# Tests for Baseline logit_fn.
class BaselineLogitFnTest(test.TestCase):
    # Unit test for the logit_fn built by baseline._baseline_logit_fn_builder.

    def test_basic_logit_correctness(self):
        """baseline_logit_fn simply returns the bias variable."""
        with ops.Graph().as_default():
            logit_fn = baseline._baseline_logit_fn_builder(num_outputs=2)
            logits = logit_fn(features={'age': [[23.], [31.]]})
            # reach into the 'baseline' scope to grab the bias variable
            with variable_scope.variable_scope('baseline', reuse=True):
                bias_var = variable_scope.get_variable('bias')
            with tf_session.Session() as sess:
                sess.run([variables.global_variables_initializer()])
                # bias initializes to zeros, so logits start at zero
                self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())
                # after assigning the bias, every example's logits echo it
                sess.run(bias_var.assign([10., 5.]))
                self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())
if __name__ == '__main__':
    # Delegate to the TensorFlow test runner.
    test.main()
| apache-2.0 |
rwuilbercq/Hive | Example3_EvolveAPainting.py | 1 | 5456 | #!/usr/bin/env python
# ---- MODULE DOCSTRING
__doc__ = """
(C) Hive, Romain Wuilbercq, 2017
_
/_/_ .'''.
=O(_)))) ...' `.
\_\ `. .'''X
`..'
.---. .---..-./`) ,---. ,---. .-''-.
| | |_ _|\ .-.')| / | | .'_ _ \
| | ( ' )/ `-' \| | | .'/ ( ` ) '
| '-(_{;}_)`-'`"`| | _ | |. (_ o _) |
| (_,_) .---. | _( )_ || (_,_)___|
| _ _--. | | | \ (_ o._) /' \ .---.
|( ' ) | | | | \ (_,_) / \ `-' /
(_{;}_)| | | | \ / \ /
'(_,_) '---' '---' `---` `'-..-'
The Artificial Bee Colony (ABC) algorithm is based on the
intelligent foraging behaviour of honey bee swarm, and was first proposed
by Karaboga in 2005.
Description:
-----------
This example shows how to evolve a famous painting using polygons.
The location of a number of polygons and RGB colors are evolved by an Artificial
Bee Colony algorithm to replicate a famous painting from Henri Matisse.
This example is inspired by a blog post written by Roger Alsing.
Reference:
---------
http://rogeralsing.com/2008/12/07/genetic-programming-evolution-of-mona-lisa/
Dependencies:
------------
- PIL
- sklearn-image
- numpy
- matplotlib
"""
# ---- IMPORT MODULES
# import internal modules
from Hive import Hive
# import external modules
import numpy as np
from sklearn.metrics import mean_squared_error as mse
# Guarded third-party imports: fail fast with a clear message when an
# optional dependency is missing.  Catch ImportError specifically -- the
# original bare ``except:`` would also swallow KeyboardInterrupt/SystemExit.
try:
    from PIL import ImageChops, Image
except ImportError:
    raise ImportError("PIL module not found.")

try:
    import matplotlib.path as mpath
    import matplotlib.pyplot as plt
    import matplotlib as mpl
except ImportError:
    raise ImportError("matplotlib module not found.")

try:
    from skimage import color
except ImportError:
    # fixed message: the package is named scikit-image, not "sklearn-image"
    raise ImportError("scikit-image module not found.")
# ---- PROCESS IMAGE

# loads original image -- the reference painting the swarm tries to reproduce
source_image = Image.open("assets/matisse.jpg")
xsize, ysize = source_image.size

# post-processes source image as a np.ndarray (presumably H x W x 3 uint8 for
# an RGB jpeg -- confirm if the asset ever changes)
SOURCE_IMAGE = np.array(source_image)

# defines size of image [pixels/inch]; used to size the matplotlib canvas
dpi = 80

# converts image to gray scale
# NOTE(review): source_image_gray is computed but never used below --
# presumably kept for an alternative grayscale fitness; confirm before removing
source_image_gray = color.rgb2gray(SOURCE_IMAGE)

# ---- DEFINE BLANK CANVAS

# define image polygons parameters: number of polygons, vertices per polygon,
# and colour components (RGB) per polygon
nb_polygons, nb_pts_per_polygon, nb_rgb = 8, 4, 3
def polygon(x, y, up=1):
    """Build a closed polygon path from coordinate sequences.

    Parameters
    ----------
    x, y : sequences of float
        Vertex coordinates; the first ``nb_pts_per_polygon`` entries are
        used and the first vertex is repeated to close the ring.
    up : int, optional
        Unused; kept only for backward compatibility with existing callers.

    Returns
    -------
    matplotlib.path.Path
        The closed polygon.
    """
    # Generalized from the original's four hard-coded (x[i], y[i]) pairs:
    # take the first nb_pts_per_polygon vertices and close the ring by
    # repeating the first point.
    vertices = list(zip(x[:nb_pts_per_polygon], y[:nb_pts_per_polygon]))
    vertices.append(vertices[0])

    # One MOVETO to start the ring, then straight line segments.
    codes = [mpath.Path.MOVETO] + (len(vertices) - 1) * [mpath.Path.LINETO]
    return mpath.Path(vertices, codes)
def create_image(vector):
    """Render a candidate solution vector into an RGB image array.

    The vector encodes ``nb_polygons`` polygons; each polygon contributes
    ``nb_pts_per_polygon`` x-coordinates, the same number of y-coordinates,
    and ``nb_rgb`` colour components, all in [0, 1].

    Parameters
    ----------
    vector : sequence of float
        Flat parameter vector of length
        ``nb_polygons * (2 * nb_pts_per_polygon + nb_rgb)``.

    Returns
    -------
    np.ndarray
        The rendered image, shape (height, width, 3), dtype uint8.
        The matplotlib figure is left open; the caller closes it.
    """
    # converts vector input to numpy.ndarray for vectorised slicing/scaling
    vector = np.array(vector)

    # "Unroll" the flat vector into per-polygon shapes and colours.  All
    # offsets are derived from the module constants instead of the original
    # hard-coded 4s, so changing nb_pts_per_polygon keeps this consistent.
    stride = 2 * nb_pts_per_polygon + nb_rgb
    shapes = []
    colors = []
    for ix in range(nb_polygons):
        base = ix * stride
        xs = vector[base: base + nb_pts_per_polygon]
        ys = vector[base + nb_pts_per_polygon: base + 2 * nb_pts_per_polygon]
        # local name 'rgb' avoids shadowing the skimage 'color' module
        # imported at the top of the file (the original reused 'color')
        rgb = vector[base + 2 * nb_pts_per_polygon: base + stride]
        # coordinates are normalised; scale up to source-image pixels
        shapes.append(polygon(xs * xsize, ys * ysize))
        colors.append(list(rgb))

    # creates a figure of the same pixel dimensions as the source image
    fig, ax = plt.subplots(figsize=(xsize / dpi, ysize / dpi), dpi=dpi)
    ax.set_rasterization_zorder(1)

    # creates a collection of polygonal shapes with the evolved colours
    set_of_shapes = mpl.collections.PathCollection(shapes,
                                                   facecolor=colors,
                                                   linewidth=0)

    # draws the collection on a frameless, axis-free canvas
    ax.add_collection(set_of_shapes)
    ax.set_frame_on(False)
    ax.axis('off')
    ax.autoscale_view()

    # rasterize the figure
    fig.tight_layout(pad=0)
    fig.canvas.draw()

    # copy the RGB buffer out as a numpy array; np.frombuffer replaces the
    # long-deprecated np.fromstring
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    image = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))

    return image
# ---- CREATE EVALUATOR
def compare_images_mse(source_image, new_image):
    """Return the per-pixel mean squared error between two images.

    Squared differences are summed over every element (all colour channels)
    and normalised by the number of pixels (height * width) only -- so for
    RGB inputs the result is the MSE summed over the 3 channels.  Note this
    is a plain MSE, not a *root* mean square as the old docstring claimed.

    Parameters
    ----------
    source_image, new_image : np.ndarray
        Arrays of identical shape.

    Returns
    -------
    float
        Non-negative error; 0.0 means the images are identical.
    """
    # cast to float first so uint8 inputs do not wrap around on subtraction
    err = np.sum((source_image.astype("float") - new_image.astype("float")) ** 2)
    err /= float(source_image.shape[0] * source_image.shape[1])
    return err
def evaluator(vector):
    """Fitness function for the bee colony: lower is better.

    Renders `vector` into an image and scores it against the module-level
    SOURCE_IMAGE using compare_images_mse.
    """
    # creates an image from the candidate vector
    image = create_image(vector)
    # closes current figure -- create_image leaves it open; without this the
    # optimiser would leak one matplotlib figure per evaluation
    plt.close()
    # compare new image with source image
    return compare_images_mse(SOURCE_IMAGE, image)
# ---- SOLVE TEST CASE
def run():
    """Run the ABC optimisation and save the best evolved image to disk."""
    # model dimensionality: one scalar per coordinate / colour component
    ndim = int(nb_polygons * (2 * nb_pts_per_polygon + nb_rgb))

    # creates model -- every parameter is constrained to [0, 1]
    model = Hive.BeeHive(lower     = [0]*ndim  ,
                         upper     = [1]*ndim  ,
                         fun       = evaluator ,
                         numb_bees = 20        ,
                         max_itrs  = 1000      ,
                         verbose   = True      ,)

    # runs model
    model.run()

    # saves an image of the end result -- create_image leaves its rendered
    # figure current, so savefig captures it
    solution = create_image(model.solution)
    plt.savefig('solutionMatisse.png', bbox_inches='tight')
if __name__ == "__main__":
    # Script entry point.
    run()
# ---- END
| mit |
foreversand/QSTK | Bin/Data_CSV.py | 5 | 3301 | #File to read the data from mysql and push into CSV.
# Python imports
import datetime as dt
import csv
import copy
import os
import pickle
# 3rd party imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# QSTK imports
from QSTK.qstkutil import qsdateutil as du
import QSTK.qstkutil.DataEvolved as de
def get_data(ls_symbols, ls_keys):
    '''
    @summary: Gets a data chunk for backtesting from the MySQL data store
    @param ls_symbols: symbols to use
    @param ls_keys: data columns to fetch (e.g. 'close', 'volume')
    @note: the date range is hard-coded below (2005-01-01 .. 2012-08-31)
    @return: dict mapping each key in ls_keys to its data frame
    '''
    print "Getting Data from MySQL"
    # Modify dates to ensure enough data for all features
    dt_start = dt.datetime(2005,1,1)
    dt_end = dt.datetime(2012, 8, 31)

    # NYSE trading days in range, timestamped at 16:00 (market close)
    ldt_timestamps = du.getNYSEdays( dt_start, dt_end, dt.timedelta(hours=16) )

    c_da = de.DataAccess('mysql')
    ldf_data = c_da.get_data(ldt_timestamps, ls_symbols, ls_keys)

    # one data frame per requested key, keyed by that key
    d_data = dict(zip(ls_keys, ldf_data))

    return d_data
def read_symbols(s_symbols_file):
    """Read one symbol per line from a text file.

    Parameters
    ----------
    s_symbols_file : str
        Path to a newline-separated list of symbols.

    Returns
    -------
    list of str
        Symbols in file order.  Unlike the original, the final line is read
        correctly even when the file lacks a trailing newline (the old
        ``f[:-1]`` always chopped the last character of every line), and the
        file handle is closed deterministically (the original also shadowed
        the Python 2 builtin ``file``).
    """
    ls_symbols = []
    with open(s_symbols_file, 'r') as f_in:
        for line in f_in:
            # strip only the line terminator, never a real character
            if line.endswith('\n'):
                line = line[:-1]
            ls_symbols.append(line)
    return ls_symbols
def csv_sym(sym, d_data, ls_keys, s_directory):
bool_first_iter = True
for key in ls_keys:
if bool_first_iter == True:
df_sym = d_data[key].reindex(columns = [sym])
df_sym = df_sym.rename(columns = {sym : key})
bool_first_iter = False
else:
df_temp = d_data[key].reindex(columns = [sym])
df_temp = df_temp.rename(columns = {sym : key})
df_sym = df_sym.join(df_temp, how= 'outer')
symfilename = sym.split('-')[0]
sym_file = open(s_directory + symfilename + '.csv', 'w')
sym_file.write("Date,Open,High,Low,Close,Volume,Adj Close \n")
ldt_timestamps = list(df_sym.index)
ldt_timestamps.reverse()
for date in ldt_timestamps:
date_to_csv = '{:%Y-%m-%d}'.format(date)
string_to_csv = date_to_csv
for key in ls_keys:
string_to_csv = string_to_csv + ',' + str(df_sym[key][date])
string_to_csv = string_to_csv + '\n'
sym_file.write(string_to_csv)
def main(s_directory, s_symbols_file):
    """Fetch data for a hard-coded delisted-symbol list and dump one CSV each.

    NOTE(review): s_symbols_file is effectively ignored -- the read_symbols
    call is commented out and the symbol list below is hard-coded.
    """
    #ls_symbols = read_symbols(s_symbols_file)
    ls_symbols = ['ACS-201002','BDK-201003','BJS-201004','BSC-201108','CCT-201111','EQ-200907','JAVA-201002','NCC-200901','NOVL-201104','PBG-201003','PTV-201011','ROH-200904','SGP-200911','SII-201008','WB-200901','WYE-200910','XTO-201006']
    ls_keys = ['actual_open', 'actual_high', 'actual_low', 'actual_close', 'volume', 'close']
    d_data = get_data(ls_symbols, ls_keys)
    # print d_data
    print "Creating CSV files now"
    for sym in ls_symbols:
        print sym
        csv_sym(sym,d_data, ls_keys, s_directory)
    print "Created all CSV files"
if __name__ == '__main__' :
    # Script entry point.
    # NOTE(review): s_directory is assigned twice -- only the $QSDATA-based
    # value survives; the 'MLTData/' line is dead.  Only s_symbols_file3 is
    # used (and main ignores it anyway; see main's docstring).
    s_directory = 'MLTData/'
    s_directory = os.environ['QSDATA'] + '/Yahoo/'
    s_symbols_file1 = 'MLTData/sp5002012.txt'
    s_symbols_file2 = 'MLTData/index.txt'
    s_symbols_file3 = 'MLTData/sp5002008.txt'
    main(s_directory, s_symbols_file3)
samzhang111/scikit-learn | sklearn/metrics/base.py | 22 | 4802 |
"""
Common code for all metrics
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils import check_array, check_consistent_length
from ..utils.multiclass import type_of_target
from ..exceptions import UndefinedMetricWarning as UndefinedMetricWarning_
from ..utils import deprecated
class UndefinedMetricWarning(UndefinedMetricWarning_):
    # Backwards-compatibility shim: subclasses the warning that moved to
    # sklearn.exceptions, so filters/handlers keyed on either name still
    # match.
    pass
# Wrap the shim with `deprecated` so using it from this old location emits a
# DeprecationWarning pointing users at sklearn.exceptions.
UndefinedMetricWarning = deprecated("UndefinedMetricWarning has been moved "
                                    "into the sklearn.exceptions module. "
                                    "It will not be available here from "
                                    "version 0.19")(UndefinedMetricWarning)
def _average_binary_score(binary_metric, y_true, y_score, average,
                          sample_weight=None):
    """Average a binary metric for multilabel classification

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    binary_metric : callable, returns shape [n_classes]
        The binary metric function to use.

    Returns
    -------
    score : float or array of shape [n_classes]
        If not ``None``, average the score, else return the score for each
        classes.

    """
    average_options = (None, 'micro', 'macro', 'weighted', 'samples')
    if average not in average_options:
        raise ValueError('average has to be one of {0}'
                         ''.format(average_options))

    y_type = type_of_target(y_true)
    if y_type not in ("binary", "multilabel-indicator"):
        raise ValueError("{0} format is not supported".format(y_type))

    if y_type == "binary":
        # plain binary task: delegate directly, there is nothing to average
        return binary_metric(y_true, y_score, sample_weight=sample_weight)

    check_consistent_length(y_true, y_score, sample_weight)
    y_true = check_array(y_true)
    y_score = check_array(y_score)

    # axis 1 holds the classes unless we average over samples (then axis 0)
    not_average_axis = 1
    score_weight = sample_weight
    average_weight = None

    if average == "micro":
        if score_weight is not None:
            # every element of the indicator matrix counts as one "sample",
            # so the per-sample weight is repeated once per class
            score_weight = np.repeat(score_weight, y_true.shape[1])
        y_true = y_true.ravel()
        y_score = y_score.ravel()

    elif average == 'weighted':
        if score_weight is not None:
            # class support, weighted by the per-sample weights
            average_weight = np.sum(np.multiply(
                y_true, np.reshape(score_weight, (-1, 1))), axis=0)
        else:
            average_weight = np.sum(y_true, axis=0)
        # no positive labels anywhere: the weighted average is undefined
        if average_weight.sum() == 0:
            return 0

    elif average == 'samples':
        # swap average_weight <-> score_weight
        average_weight = score_weight
        score_weight = None
        not_average_axis = 0

    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))

    if y_score.ndim == 1:
        y_score = y_score.reshape((-1, 1))

    # evaluate the binary metric per class column (or per sample row when
    # average == 'samples')
    n_classes = y_score.shape[not_average_axis]
    score = np.zeros((n_classes,))
    for c in range(n_classes):
        y_true_c = y_true.take([c], axis=not_average_axis).ravel()
        y_score_c = y_score.take([c], axis=not_average_axis).ravel()
        score[c] = binary_metric(y_true_c, y_score_c,
                                 sample_weight=score_weight)

    # Average the results
    if average is not None:
        return np.average(score, weights=average_weight)
    else:
        return score
| bsd-3-clause |
hainm/statsmodels | statsmodels/iolib/foreign.py | 25 | 43125 | """
Input/Output tools for working with binary data.
The Stata input tools were originally written by Joe Presbrey as part of PyDTA.
You can find more information here http://presbrey.mit.edu/PyDTA
See also
---------
numpy.lib.io
"""
from statsmodels.compat.python import (zip, lzip, lmap, lrange, string_types, long, lfilter,
asbytes, asstr, range)
from struct import unpack, calcsize, pack
from struct import error as struct_error
import datetime
import sys
import numpy as np
from numpy.lib._iotools import _is_string_like, easy_dtype
import statsmodels.tools.data as data_util
from pandas import isnull
def is_py3():
    """Return True when running under Python 3, False otherwise.

    The original spelled this as an ``if``/``return True``/``return False``
    chain; the comparison already yields the bool directly.
    """
    import sys
    return sys.version_info[0] == 3
# Evaluated once at import time; True on Python 3 interpreters.
PY3 = is_py3()

# The Stata SIF (Stata Internal Format) date display formats handled below.
_date_formats = ["%tc", "%tC", "%td", "%tw", "%tm", "%tq", "%th", "%ty"]
def _datetime_to_stata_elapsed(date, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
date : datetime.datetime
The date to convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
if not isinstance(date, datetime.datetime):
raise ValueError("date should be datetime.datetime format")
stata_epoch = datetime.datetime(1960, 1, 1)
if fmt in ["%tc", "tc"]:
delta = date - stata_epoch
return (delta.days * 86400000 + delta.seconds*1000 +
delta.microseconds/1000)
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.", UserWarning)
return date
elif fmt in ["%td", "td"]:
return (date- stata_epoch).days
elif fmt in ["%tw", "tw"]:
return (52*(date.year-stata_epoch.year) +
(date - datetime.datetime(date.year, 1, 1)).days / 7)
elif fmt in ["%tm", "tm"]:
return (12 * (date.year - stata_epoch.year) + date.month - 1)
elif fmt in ["%tq", "tq"]:
return 4*(date.year-stata_epoch.year) + int((date.month - 1)/3)
elif fmt in ["%th", "th"]:
return 2 * (date.year - stata_epoch.year) + int(date.month > 6)
elif fmt in ["%ty", "ty"]:
return date.year
else:
raise ValueError("fmt %s not understood" % fmt)
def _stata_elapsed_date_to_datetime(date, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
date : int
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
Examples
--------
>>> _stata_elapsed_date_to_datetime(52, "%tw") datetime.datetime(1961, 1, 1, 0, 0)
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1 yearly
date - ty
years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
#NOTE: we could run into overflow / loss of precision situations here
# casting to int, but I'm not sure what to do. datetime won't deal with
# numpy types and numpy datetime isn't mature enough / we can't rely on
# pandas version > 0.7.1
#TODO: IIRC relative delta doesn't play well with np.datetime?
date = int(date)
stata_epoch = datetime.datetime(1960, 1, 1)
if fmt in ["%tc", "tc"]:
from dateutil.relativedelta import relativedelta
return stata_epoch + relativedelta(microseconds=date*1000)
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Encountered %tC format. Leaving in Stata Internal Format.",
UserWarning)
return date
elif fmt in ["%td", "td"]:
return stata_epoch + datetime.timedelta(int(date))
elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
year = datetime.datetime(stata_epoch.year + date // 52, 1, 1)
day_delta = (date % 52 ) * 7
return year + datetime.timedelta(int(day_delta))
elif fmt in ["%tm", "tm"]:
year = stata_epoch.year + date // 12
month_delta = (date % 12 ) + 1
return datetime.datetime(year, month_delta, 1)
elif fmt in ["%tq", "tq"]:
year = stata_epoch.year + date // 4
month_delta = (date % 4) * 3 + 1
return datetime.datetime(year, month_delta, 1)
elif fmt in ["%th", "th"]:
year = stata_epoch.year + date // 2
month_delta = (date % 2) * 6 + 1
return datetime.datetime(year, month_delta, 1)
elif fmt in ["%ty", "ty"]:
if date > 0:
return datetime.datetime(date, 1, 1)
else: # don't do negative years bc can't mix dtypes in column
raise ValueError("Year 0 and before not implemented")
else:
raise ValueError("Date fmt %s not understood" % fmt)
### Helper classes for StataReader ###
class _StataMissingValue(object):
    """
    An observation's missing value.

    Parameters
    -----------
    offset : int or float
        Upper bound of the valid range for the column's type; the distance
        of `value` above it selects which missing-value code applies.
    value : int or float
        The raw value read from the file.

    Attributes
    ----------
    string : str
        Stata's display form: '.' or '.a' .. '.z'.
    value : int or float
        The raw binary value.

    Notes
    -----
    More information: <http://www.stata.com/help.cgi?missing>
    """

    def __init__(self, offset, value):
        self._value = value
        if isinstance(value, (int, long)):
            # '.' is the generic missing value; '.a'..'.z' are the 26
            # extended ones.  The original compared with ``value-offset is
            # 1``, which only worked because CPython caches small ints;
            # ``==`` is the correct comparison.
            if value - offset == 1:
                self._str = '.'
            else:
                self._str = '.' + chr(value - offset + 96)
        else:
            # floats carry no extended missing-value code here
            self._str = '.'

    string = property(lambda self: self._str, doc="The Stata representation of \
the missing value: '.', '.a'..'.z'")
    value = property(lambda self: self._value, doc='The binary representation \
of the missing value.')

    def __str__(self):
        """Return the Stata representation of the missing value."""
        return self._str
class _StataVariable(object):
    """
    A dataset variable.  Not intended for public use.

    Parameters
    ----------
    variable_data : tuple
        The per-variable metadata tuple assembled by StataReader.variables:
        (index, type, name, sort order, format, value format, label).

    Attributes
    -----------
    format : str
        Stata variable format. See notes for more information.
    index : int
        Zero-index column index of variable.
    label : str
        Data Label
    name : str
        Variable name
    type : str
        Stata data type. See notes for more information.
    value_format : str
        Value format.

    Notes
    -----
    More information: http://www.stata.com/help.cgi?format
    """
    def __init__(self, variable_data):
        self._data = variable_data

    def __int__(self):
        return self.index

    def __str__(self):
        return self.name

    # read-only views onto the underlying metadata tuple
    index = property(lambda self: self._data[0], doc='the variable\'s index \
within an observation')
    type = property(lambda self: self._data[1], doc='the data type of \
variable\n\nPossible types are:\n{1..244:string, b:byte, h:int, l:long, \
f:float, d:double)')
    name = property(lambda self: self._data[2], doc='the name of the variable')
    format = property(lambda self: self._data[4], doc='the variable\'s Stata \
format')
    value_format = property(lambda self: self._data[5], doc='the variable\'s \
value format')
    label = property(lambda self: self._data[6], doc='the variable\'s label')
    # mirror the property docs onto the dunder helpers
    __int__.__doc__ = index.__doc__
    __str__.__doc__ = name.__doc__
class StataReader(object):
    """
    Stata .dta file reader.

    Provides methods to return the metadata of a Stata .dta file and
    a generator for the data itself.

    Parameters
    ----------
    file : file-like
        A file-like object representing a Stata .dta file.
    missing_values : bool
        If missing_values is True, parse missing_values and return a
        Missing Values object instead of None.
    encoding : string, optional
        Used for Python 3 only. Encoding to use when reading the .dta file.
        Defaults to `locale.getpreferredencoding`

    See also
    --------
    statsmodels.lib.io.genfromdta

    Notes
    -----
    This is known only to work on file formats 113 (Stata 8/9), 114
    (Stata 10/11), and 115 (Stata 12). Needs to be tested on older versions.
    Known not to work on format 104, 108. If you have the documentation for
    older formats, please contact the developers.

    For more information about the .dta format see
    http://www.stata.com/help.cgi?dta
    http://www.stata.com/help.cgi?dta_113
    """

    # parsed header metadata (filled in by _parse_header)
    _header = {}
    # byte offset of the first observation record
    _data_location = 0
    # per-column record widths in bytes (computed lazily by _col_size)
    _col_sizes = ()
    # True when any column is a fixed-width string
    _has_string_data = False
    # when True, out-of-range values become _StataMissingValue objects
    _missing_values = False

    #type code
    #--------------------
    #str1        1 = 0x01
    #str2        2 = 0x02
    #...
    #str244    244 = 0xf4
    #byte      251 = 0xfb  (sic)
    #int       252 = 0xfc
    #long      253 = 0xfd
    #float     254 = 0xfe
    #double    255 = 0xff
    #--------------------
    #NOTE: the byte type seems to be reserved for categorical variables
    # with a label, but the underlying variable is -127 to 100
    # we're going to drop the label and cast to int

    # type code -> numpy dtype (string widths map to 'a<n>' codes)
    DTYPE_MAP = dict(lzip(lrange(1,245), ['a' + str(i) for i in range(1,245)]) + \
                    [(251, np.int16),(252, np.int32),(253, int),
                        (254, np.float32), (255, np.float64)])
    # type code -> struct format char; codes < 251 are string widths
    TYPE_MAP = lrange(251)+list('bhlfd')
    #NOTE: technically, some of these are wrong. there are more numbers
    # that can be represented. it's the 27 ABOVE and BELOW the max listed
    # numeric data type in [U] 12.2.2 of the 11.2 manual
    MISSING_VALUES = { 'b': (-127,100), 'h': (-32767, 32740), 'l':
            (-2147483647, 2147483620), 'f': (-1.701e+38, +1.701e+38), 'd':
            (-1.798e+308, +8.988e+307) }

    def __init__(self, fname, missing_values=False, encoding=None):
        if encoding == None:
            # fall back to the platform's preferred text encoding
            import locale
            self._encoding = locale.getpreferredencoding()
        else:
            self._encoding = encoding
        self._missing_values = missing_values
        self._parse_header(fname)

    def file_headers(self):
        """
        Returns all .dta file headers.

        out: dict
            Has keys typlist, data_label, lbllist, varlist, nvar, filetype,
            ds_format, nobs, fmtlist, vlblist, time_stamp, srtlist, byteorder
        """
        return self._header

    def file_format(self):
        """
        Returns the file format.

        Returns
        -------
        out : int

        Notes
        -----
        Format 113: Stata 8/9
        Format 114: Stata 10/11
        Format 115: Stata 12
        """
        return self._header['ds_format']

    def file_label(self):
        """
        Returns the dataset's label.

        Returns
        -------
        out : string
        """
        return self._header['data_label']

    def file_timestamp(self):
        """
        Returns the date and time Stata recorded on last file save.

        Returns
        -------
        out : str
        """
        return self._header['time_stamp']

    def variables(self):
        """
        Returns a list of the dataset's StataVariables objects.
        """
        # zip the parallel per-variable header lists into one tuple each
        return lmap(_StataVariable, zip(lrange(self._header['nvar']),
            self._header['typlist'], self._header['varlist'],
            self._header['srtlist'],
            self._header['fmtlist'], self._header['lbllist'],
            self._header['vlblist']))

    def dataset(self, as_dict=False):
        """
        Returns a Python generator object for iterating over the dataset.

        Parameters
        ----------
        as_dict : bool, optional
            If as_dict is True, yield each row of observations as a dict.
            If False, yields each row of observations as a list.

        Returns
        -------
        Generator object for iterating over the dataset.  Yields each row of
        observations as a list by default.

        Notes
        -----
        If missing_values is True during instantiation of StataReader then
        observations with _StataMissingValue(s) are not filtered and should
        be handled by your application.
        """
        try:
            # rewind to the first record; non-seekable streams are assumed
            # to already be positioned there
            self._file.seek(self._data_location)
        except Exception:
            pass

        if as_dict:
            vars = lmap(str, self.variables())
            for i in range(len(self)):
                yield dict(zip(vars, self._next()))
        else:
            for i in range(self._header['nobs']):
                yield self._next()

    ### Python special methods

    def __len__(self):
        """
        Return the number of observations in the dataset.

        This value is taken directly from the header and includes observations
        with missing values.
        """
        return self._header['nobs']

    def __getitem__(self, k):
        """
        Seek to an observation indexed k in the file and return it, ordered
        by Stata's output to the .dta file.

        k is zero-indexed.  Prefer using R.data() for performance.
        """
        if not (isinstance(k, (int, long))) or k < 0 or k > len(self)-1:
            raise IndexError(k)
        # records are fixed width, so the k-th row's offset is arithmetic
        loc = self._data_location + sum(self._col_size()) * k
        if self._file.tell() != loc:
            self._file.seek(loc)
        return self._next()

    ### Private methods

    def _null_terminate(self, s, encoding):
        # Strip everything from the first NUL byte onward; bytes without a
        # NUL pass through unchanged (hence the broad except on .index).
        if PY3:  # have bytes not strings, so must decode
            null_byte = asbytes('\x00')
            try:
                s = s.lstrip(null_byte)[:s.index(null_byte)]
            except:
                pass
            return s.decode(encoding)
        else:
            null_byte = asbytes('\x00')
            try:
                return s.lstrip(null_byte)[:s.index(null_byte)]
            except:
                return s

    def _parse_header(self, file_object):
        # Reads the .dta header sequentially; the read order below follows
        # the on-disk layout and must not be changed.
        self._file = file_object
        encoding = self._encoding

        # parse headers
        self._header['ds_format'] = unpack('b', self._file.read(1))[0]
        if self._header['ds_format'] not in [113, 114, 115]:
            raise ValueError("Only file formats >= 113 (Stata >= 9)"
                             " are supported. Got format %s. Please report "
                             "if you think this error is incorrect." %
                             self._header['ds_format'])
        # 0x1 means high/big-endian; anything else is little-endian
        byteorder = self._header['byteorder'] = unpack('b',
                self._file.read(1))[0]==0x1 and '>' or '<'
        self._header['filetype'] = unpack('b', self._file.read(1))[0]
        self._file.read(1)  # unused padding byte
        nvar = self._header['nvar'] = unpack(byteorder+'h',
                self._file.read(2))[0]
        self._header['nobs'] = unpack(byteorder+'i', self._file.read(4))[0]
        self._header['data_label'] = self._null_terminate(self._file.read(81),
                encoding)
        self._header['time_stamp'] = self._null_terminate(self._file.read(18),
                encoding)

        # parse descriptors (one entry per variable)
        typlist =[ord(self._file.read(1)) for i in range(nvar)]
        self._header['typlist'] = [self.TYPE_MAP[typ] for typ in typlist]
        self._header['dtyplist'] = [self.DTYPE_MAP[typ] for typ in typlist]
        self._header['varlist'] = [self._null_terminate(self._file.read(33),
                encoding) for i in range(nvar)]
        self._header['srtlist'] = unpack(byteorder+('h'*(nvar+1)),
                self._file.read(2*(nvar+1)))[:-1]
        # format strings widened from 12 to 49 bytes after format 113
        if self._header['ds_format'] <= 113:
            self._header['fmtlist'] = \
                    [self._null_terminate(self._file.read(12), encoding) \
                    for i in range(nvar)]
        else:
            self._header['fmtlist'] = \
                    [self._null_terminate(self._file.read(49), encoding) \
                    for i in range(nvar)]
        self._header['lbllist'] = [self._null_terminate(self._file.read(33),
                encoding) for i in range(nvar)]
        self._header['vlblist'] = [self._null_terminate(self._file.read(81),
                encoding) for i in range(nvar)]

        # ignore expansion fields
        # When reading, read five bytes; the last four bytes now tell you the
        # size of the next read, which you discard.  You then continue like
        # this until you read 5 bytes of zeros.
        while True:
            data_type = unpack(byteorder+'b', self._file.read(1))[0]
            data_len = unpack(byteorder+'i', self._file.read(4))[0]
            if data_type == 0:
                break
            self._file.read(data_len)

        # other state vars
        self._data_location = self._file.tell()
        self._has_string_data = len(lfilter(lambda x: isinstance(x, int),
            self._header['typlist'])) > 0
        self._col_size()

    def _calcsize(self, fmt):
        # string columns carry their byte width as an int; otherwise ask
        # struct for the size of the numeric format
        return isinstance(fmt, int) and fmt or \
                calcsize(self._header['byteorder']+fmt)

    def _col_size(self, k = None):
        """Calculate size of a data record."""
        if len(self._col_sizes) == 0:
            self._col_sizes = lmap(lambda x: self._calcsize(x),
                    self._header['typlist'])
        if k == None:
            return self._col_sizes
        else:
            return self._col_sizes[k]

    def _unpack(self, fmt, byt):
        d = unpack(self._header['byteorder']+fmt, byt)[0]
        # values outside the per-type valid range encode missing data
        if fmt[-1] in self.MISSING_VALUES:
            nmin, nmax = self.MISSING_VALUES[fmt[-1]]
            if d < nmin or d > nmax:
                if self._missing_values:
                    return _StataMissingValue(nmax, d)
                else:
                    return None
        return d

    def _next(self):
        # Read one observation record at the current file position.
        typlist = self._header['typlist']
        if self._has_string_data:
            data = [None]*self._header['nvar']
            for i in range(len(data)):
                if isinstance(typlist[i], int):
                    # an int type code is the fixed string width in bytes
                    data[i] = self._null_terminate(self._file.read(typlist[i]),
                            self._encoding)
                else:
                    data[i] = self._unpack(typlist[i],
                            self._file.read(self._col_size(i)))
            return data
        else:
            # all-numeric rows: unpack every column in order
            return lmap(lambda i: self._unpack(typlist[i],
                self._file.read(self._col_size(i))),
                lrange(self._header['nvar']))
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
#if 'b' not in fname.mode:
return fname
if PY3:
return open(fname, "wb", encoding=encoding)
else:
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _dtype_to_stata_type(dtype):
"""
Converts dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
251 - chr(251) - for int8 and int16, byte
252 - chr(252) - for int32, int
253 - chr(253) - for int64, long
254 - chr(254) - for float32, float
255 - chr(255) - double, double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
#TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we do?
return chr(244)
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int64:
return chr(253)
elif dtype == np.int32:
return chr(252)
elif dtype == np.int8 or dtype == np.int16: # ok to assume bytes?
return chr(251)
else: # pragma : no cover
raise ValueError("Data type %s not currently understood. "
"Please report an error to the developers." % dtype)
def _dtype_to_default_stata_fmt(dtype):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
string -> "%DDs" where DD is the length of the string
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%9.0g"
int16 -> "%9.0g"
int8 -> "%8.0g"
"""
#TODO: expand this to handle a default datetime format?
if dtype.type == np.string_:
return "%" + str(dtype.itemsize) + "s"
elif dtype.type == np.object_:
return "%244s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int64:
return "%9.0g"
elif dtype == np.int32:
return "%8.0g"
elif dtype == np.int8 or dtype == np.int16: # ok to assume bytes?
return "%8.0g"
else: # pragma : no cover
raise ValueError("Data type %s not currently understood. "
"Please report an error to the developers." % dtype)
def _pad_bytes(name, length):
"""
Takes a char string and pads it wih null bytes until it's length chars
"""
return name + "\x00" * (length - len(name))
def _default_names(nvar):
"""
Returns default Stata names v1, v2, ... vnvar
"""
return ["v%d" % i for i in range(1,nvar+1)]
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise ValueError("fmt %s not understood" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key) : convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convery_dates key is not in varlist "
"and is not an int")
new_dict.update({key : convert_dates[key]})
return new_dict
_type_converters = {253 : np.long, 252 : int}
class StataWriter(object):
    """
    A class for writing Stata binary dta files from array-like objects

    Parameters
    ----------
    fname : file path or buffer
        Where to save the dta file.
    data : array-like
        Array-like input to save. Pandas objects are also accepted.
    convert_dates : dict
        Dictionary mapping column of datetime types to the stata internal
        format that you want to use for the dates. Options are
        'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a
        number or a name.
    encoding : str
        Default is latin-1. Note that Stata does not support unicode.
    byteorder : str
        Can be ">", "<", "little", or "big". The default is None which uses
        `sys.byteorder`

    Returns
    -------
    writer : StataWriter instance
        The StataWriter instance has a write_file method, which will
        write the file to the given `fname`.

    Examples
    --------
    >>> writer = StataWriter('./data_file.dta', data)
    >>> writer.write_file()

    Or with dates

    >>> writer = StataWriter('./date_data_file.dta', date, {2 : 'tw'})
    >>> writer.write_file()
    """
    #type code
    #--------------------
    #str1        1 = 0x01
    #str2        2 = 0x02
    #...
    #str244    244 = 0xf4
    #byte      251 = 0xfb  (sic)
    #int       252 = 0xfc
    #long      253 = 0xfd
    #float     254 = 0xfe
    #double    255 = 0xff
    #--------------------
    #NOTE: the byte type seems to be reserved for categorical variables
    # with a label, but the underlying variable is -127 to 100
    # we're going to drop the label and cast to int
    DTYPE_MAP = dict(lzip(lrange(1,245), ['a' + str(i) for i in range(1,245)]) + \
                    [(251, np.int16),(252, np.int32),(253, int),
                     (254, np.float32), (255, np.float64)])
    TYPE_MAP = lrange(251)+list('bhlfd')
    MISSING_VALUES = { 'b': 101,
                       'h': 32741,
                       'l' : 2147483621,
                       'f': 1.7014118346046923e+38,
                       'd': 8.98846567431158e+307}

    def __init__(self, fname, data, convert_dates=None, encoding="latin-1",
                 byteorder=None):
        self._convert_dates = convert_dates
        # attach nobs, nvars, data, varlist, typlist
        if data_util._is_using_pandas(data, None):
            self._prepare_pandas(data)
        elif data_util._is_array_like(data, None):
            data = np.asarray(data)
            if data_util._is_structured_ndarray(data):
                self._prepare_structured_array(data)
            else:
                if convert_dates is not None:
                    raise ValueError("Not able to convert dates in a plain"
                                     " ndarray.")
                self._prepare_ndarray(data)
        else: # pragma : no cover
            raise ValueError("Type %s for data not understood" % type(data))

        if byteorder is None:
            byteorder = sys.byteorder
        self._byteorder = _set_endianness(byteorder)
        self._encoding = encoding
        self._file = _open_file_binary_write(fname, encoding)

    def _write(self, to_write):
        """
        Helper to call asbytes before writing to file for Python 3 compat.
        """
        self._file.write(asbytes(to_write))

    def _prepare_structured_array(self, data):
        """
        Attach nobs/nvar/varlist/typlist/fmtlist metadata for a structured
        ndarray, converting any requested date columns to SIF doubles.
        """
        self.nobs = len(data)
        self.nvar = len(data.dtype)
        self.data = data
        self.datarows = iter(data)
        dtype = data.dtype
        descr = dtype.descr
        if dtype.names is None:
            # BUG FIX: previously ``_default_names(nvar)`` with an undefined
            # local ``nvar``, which raised NameError for unnamed dtypes.
            varlist = _default_names(self.nvar)
        else:
            varlist = dtype.names

        # check for datetime and change the type
        convert_dates = self._convert_dates
        if convert_dates is not None:
            convert_dates = _maybe_convert_to_int_keys(convert_dates,
                                                       varlist)
            self._convert_dates = convert_dates
            for key in convert_dates:
                descr[key] = (
                    descr[key][0],
                    _convert_datetime_to_stata_type(convert_dates[key])
                )
            dtype = np.dtype(descr)

        self.varlist = varlist
        self.typlist = [_dtype_to_stata_type(dtype[i])
                        for i in range(self.nvar)]
        self.fmtlist = [_dtype_to_default_stata_fmt(dtype[i])
                        for i in range(self.nvar)]
        # set the given format for the datetime cols
        if convert_dates is not None:
            for key in convert_dates:
                self.fmtlist[key] = convert_dates[key]

    def _prepare_ndarray(self, data):
        """
        Attach metadata for a plain homogeneous ndarray; 1d input is
        treated as a single column.
        """
        if data.ndim == 1:
            data = data[:,None]
        self.nobs, self.nvar = data.shape
        self.data = data
        self.datarows = iter(data)
        #TODO: this should be user settable
        dtype = data.dtype
        self.varlist = _default_names(self.nvar)
        self.typlist = [_dtype_to_stata_type(dtype) for i in range(self.nvar)]
        self.fmtlist = [_dtype_to_default_stata_fmt(dtype)
                        for i in range(self.nvar)]

    def _prepare_pandas(self, data):
        """
        Attach metadata for a pandas DataFrame; the index is materialized
        as a regular column via reset_index().
        """
        #NOTE: we might need a different API / class for pandas objects so
        # we can set different semantics - handle this with a PR to pandas.io
        class DataFrameRowIter(object):
            def __init__(self, data):
                self.data = data

            def __iter__(self):
                # iterates the closed-over, reset-index frame below
                for i, row in data.iterrows():
                    yield row

        data = data.reset_index()
        self.datarows = DataFrameRowIter(data)
        self.nobs, self.nvar = data.shape
        self.data = data
        self.varlist = data.columns.tolist()
        dtypes = data.dtypes
        convert_dates = self._convert_dates
        if convert_dates is not None:
            convert_dates = _maybe_convert_to_int_keys(convert_dates,
                                                       self.varlist)
            self._convert_dates = convert_dates
            for key in convert_dates:
                new_type = _convert_datetime_to_stata_type(convert_dates[key])
                dtypes[key] = np.dtype(new_type)
        self.typlist = [_dtype_to_stata_type(dt) for dt in dtypes]
        self.fmtlist = [_dtype_to_default_stata_fmt(dt) for dt in dtypes]
        # set the given format for the datetime cols
        if convert_dates is not None:
            for key in convert_dates:
                self.fmtlist[key] = convert_dates[key]

    def write_file(self):
        """Write the header, descriptors, labels and data to self._file."""
        self._write_header()
        self._write_descriptors()
        self._write_variable_labels()
        # write 5 zeros for expansion fields
        self._write(_pad_bytes("", 5))
        if self._convert_dates is None:
            self._write_data_nodates()
        else:
            self._write_data_dates()
        #self._write_value_labels()

    def _write_header(self, data_label=None, time_stamp=None):
        """Write the dta format-114 file header."""
        byteorder = self._byteorder
        # ds_format - just use 114
        self._write(pack("b", 114))
        # byteorder
        self._write(byteorder == ">" and "\x01" or "\x02")
        # filetype
        self._write("\x01")
        # unused
        self._write("\x00")
        # number of vars, 2 bytes
        self._write(pack(byteorder+"h", self.nvar)[:2])
        # number of obs, 4 bytes
        self._write(pack(byteorder+"i", self.nobs)[:4])
        # data label 81 bytes, char, null terminated
        if data_label is None:
            self._write(self._null_terminate(_pad_bytes("", 80),
                                             self._encoding))
        else:
            self._write(self._null_terminate(_pad_bytes(data_label[:80],
                                             80), self._encoding))
        # time stamp, 18 bytes, char, null terminated
        # format dd Mon yyyy hh:mm
        if time_stamp is None:
            time_stamp = datetime.datetime.now()
        # BUG FIX: was ``isinstance(time_stamp, datetime)`` — the *module*,
        # not the class — which made this check raise TypeError.
        elif not isinstance(time_stamp, datetime.datetime):
            raise ValueError("time_stamp should be datetime type")
        self._write(self._null_terminate(
                            time_stamp.strftime("%d %b %Y %H:%M"),
                            self._encoding))

    def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
                           fmtlist=None, lbllist=None):
        """Write the typlist, varlist, srtlist, fmtlist and lbllist."""
        nvar = self.nvar
        # typlist, length nvar, format byte array
        for typ in self.typlist:
            self._write(typ)

        # varlist, length 33*nvar, char array, null terminated
        for name in self.varlist:
            name = self._null_terminate(name, self._encoding)
            name = _pad_bytes(asstr(name[:32]), 33)
            self._write(name)

        # srtlist, 2*(nvar+1), int array, encoded by byteorder
        srtlist = _pad_bytes("", (2*(nvar+1)))
        self._write(srtlist)

        # fmtlist, 49*nvar, char array
        for fmt in self.fmtlist:
            self._write(_pad_bytes(fmt, 49))

        # lbllist, 33*nvar, char array
        #NOTE: this is where you could get fancy with pandas categorical type
        for i in range(nvar):
            self._write(_pad_bytes("", 33))

    def _write_variable_labels(self, labels=None):
        """Write 81 bytes per variable of (currently always empty) labels."""
        nvar = self.nvar
        if labels is None:
            for i in range(nvar):
                self._write(_pad_bytes("", 81))

    def _write_data_nodates(self):
        """Write all data rows; no date conversion was requested."""
        data = self.datarows
        byteorder = self._byteorder
        TYPE_MAP = self.TYPE_MAP
        typlist = self.typlist
        for row in data:
            #row = row.squeeze().tolist() # needed for structured arrays
            for i,var in enumerate(row):
                typ = ord(typlist[i])
                if typ <= 244: # we've got a string
                    if len(var) < typ:
                        # NOTE(review): this pads to len(var)+1 rather than
                        # to the declared field width ``typ``; the dta spec
                        # uses fixed-width str fields — confirm.
                        var = _pad_bytes(asstr(var), len(var) + 1)
                    self._write(var)
                else:
                    try:
                        self._write(pack(byteorder+TYPE_MAP[typ], var))
                    except struct_error:
                        # have to be strict about type pack won't do any
                        # kind of casting
                        self._write(pack(byteorder+TYPE_MAP[typ],
                                    _type_converters[typ](var)))

    def _write_data_dates(self):
        """Write all data rows, converting flagged columns to SIF dates."""
        convert_dates = self._convert_dates
        data = self.datarows
        byteorder = self._byteorder
        TYPE_MAP = self.TYPE_MAP
        MISSING_VALUES = self.MISSING_VALUES
        typlist = self.typlist
        for row in data:
            #row = row.squeeze().tolist() # needed for structured arrays
            for i,var in enumerate(row):
                typ = ord(typlist[i])
                #NOTE: If anyone finds this terribly slow, there is
                # a vectorized way to convert dates, see genfromdta for going
                # from int to datetime and reverse it. will copy data though
                if i in convert_dates:
                    var = _datetime_to_stata_elapsed(var, self.fmtlist[i])
                if typ <= 244: # we've got a string
                    if isnull(var):
                        var = "" # missing string
                    if len(var) < typ:
                        # NOTE(review): see _write_data_nodates — pads to
                        # len(var)+1 instead of the field width ``typ``.
                        var = _pad_bytes(var, len(var) + 1)
                    self._write(var)
                else:
                    if isnull(var): # this only matters for floats
                        # BUG FIX: MISSING_VALUES is keyed by the type
                        # *letter* ('b','h','l','f','d'), not the numeric
                        # code, so the letter must be looked up via TYPE_MAP
                        # first (the old code raised KeyError here).
                        var = MISSING_VALUES[TYPE_MAP[typ]]
                    self._write(pack(byteorder+TYPE_MAP[typ], var))

    def _null_terminate(self, s, encoding):
        """Append a NUL terminator; on Python 3 also encode to bytes."""
        null_byte = '\x00'
        if PY3:
            s += null_byte
            return s.encode(encoding)
        else:
            s += null_byte
            return s
def genfromdta(fname, missing_flt=-999., encoding=None, pandas=False,
               convert_dates=True):
    """
    Returns an ndarray or DataFrame from a Stata .dta file.

    Parameters
    ----------
    fname : str or filehandle
        Stata .dta file.
    missing_flt : numeric
        The numeric value to replace missing values with. Will be used for
        any numeric value.
    encoding : string, optional
        Used for Python 3 only. Encoding to use when reading the .dta file.
        Defaults to `locale.getpreferredencoding`
    pandas : bool
        Optionally return a DataFrame instead of an ndarray
    convert_dates : bool
        If convert_dates is True, then Stata formatted dates will be converted
        to datetime types according to the variable's format.
    """
    if isinstance(fname, string_types):
        # NOTE(review): this file object is never explicitly closed; it is
        # only released when the reader is garbage collected — confirm
        # whether StataReader takes ownership.
        fhd = StataReader(open(fname, 'rb'), missing_values=False,
                          encoding=encoding)
    elif not hasattr(fname, 'read'):
        raise TypeError("The input should be a string or a filehandle. "\
                "(got %s instead)" % type(fname))
    else:
        fhd = StataReader(fname, missing_values=False, encoding=encoding)
#    validate_names = np.lib._iotools.NameValidator(excludelist=excludelist,
#                                    deletechars=deletechars,
#                                    case_sensitive=case_sensitive)

    #TODO: This needs to handle the byteorder?
    header = fhd.file_headers()
    types = header['dtyplist']
    nobs = header['nobs']
    varnames = header['varlist']
    fmtlist = header['fmtlist']
    dataname = header['data_label']
    labels = header['vlblist'] # labels are thrown away unless DataArray
                               # type is used
    stata_dta = fhd.dataset()

    # build the result directly as a structured array (a dead float
    # pre-allocation that was immediately overwritten has been removed)
    dt = np.dtype(lzip(varnames, types))
    data = np.zeros((nobs), dtype=dt) # init final array

    for rownum,line in enumerate(stata_dta):
        # doesn't handle missing value objects, just casts
        # None will only work without missing value object.
        if None in line:
            for i,val in enumerate(line):
                #NOTE: This will only be scalar types because missing strings
                # are empty not None in Stata
                if val is None:
                    line[i] = missing_flt
        data[rownum] = tuple(line)

    if pandas:
        from pandas import DataFrame
        data = DataFrame.from_records(data)
        if convert_dates:
            cols = np.where(lmap(lambda x : x in _date_formats, fmtlist))[0]
            for col in cols:
                i = col
                col = data.columns[col]
                data[col] = data[col].apply(_stata_elapsed_date_to_datetime,
                                            args=(fmtlist[i],))
    elif convert_dates:
        # make the dtype for the datetime types
        cols = np.where(lmap(lambda x : x in _date_formats, fmtlist))[0]
        dtype = data.dtype.descr
        # use a loop name other than ``dt`` so the structured dtype above is
        # not shadowed
        dtype = [(descr_i[0], object) if i in cols else descr_i
                 for i, descr_i in enumerate(dtype)]
        data = data.astype(dtype) # have to copy
        for col in cols:
            def convert(x):
                return _stata_elapsed_date_to_datetime(x, fmtlist[col])
            data[data.dtype.names[col]] = lmap(convert,
                                               data[data.dtype.names[col]])
    return data
def savetxt(fname, X, names=None, fmt='%.18e', delimiter=' '):
    """
    Save an array to a text file.

    This is just a copy of numpy.savetxt patched to support structured arrays
    or a header of names. Does not include py3 support now in savetxt.

    Parameters
    ----------
    fname : filename or file handle
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format. `loadtxt` understands gzipped files
        transparently.
    X : array_like
        Data to be saved to a text file.
    names : list, optional
        If given names will be the column header in the text file. If None and
        X is a structured or recarray then the names are taken from
        X.dtype.names.
    fmt : str or sequence of strs
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored.
    delimiter : str
        Character separating columns.

    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into a ``.npz`` compressed archive

    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):

    flags:
        ``-`` : left justify

        ``+`` : Forces to preceed result with + or -.

        ``0`` : Left pad the number with zeros instead of space (see width).

    width:
        Minimum number of characters to be printed. The value is not truncated
        if it has more characters.

    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.

    specifiers:
        ``c`` : character

        ``d`` or ``i`` : signed decimal integer

        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.

        ``f`` : decimal floating point

        ``g,G`` : use the shorter of ``e,E`` or ``f``

        ``o`` : signed octal

        ``s`` : string of characters

        ``u`` : unsigned decimal integer

        ``x,X`` : unsigned hexadecimal integer

    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.

    References
    ----------
    .. [1] `Format Specification Mini-Language
           <http://docs.python.org/library/string.html#
           format-specification-mini-language>`_, Python Documentation.

    Examples
    --------
    >>> savetxt('test.out', x, delimiter=',') # x is an array
    >>> savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
    >>> savetxt('test.out', x, fmt='%1.4e') # use exponential notation
    """
    own_fh = False  # True when we opened the handle and must close it
    if _is_string_like(fname):
        own_fh = True
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname, 'wb')
        else:
            # BUG FIX: previously used the Python-2-only ``file()`` builtin;
            # ``open()`` behaves identically and also exists on Python 3.
            fh = open(fname, 'w')
    elif hasattr(fname, 'seek'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')

    try:
        X = np.asarray(X)

        # Handle 1-dimensional arrays
        if X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1

            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.descr)
        else:
            ncol = X.shape[1]

        # `fmt` can be a string with multiple insertion points or a list of
        # formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
        if isinstance(fmt, (list, tuple)):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
            line_format = delimiter.join(fmt)
        elif isinstance(fmt, string_types):
            if fmt.count('%') == 1:
                fmt = [fmt, ]*ncol
                line_format = delimiter.join(fmt)
            elif fmt.count('%') != ncol:
                raise AttributeError('fmt has wrong number of %% formats. %s'
                                     % fmt)
            else:
                # renamed from ``format`` to avoid shadowing the builtin
                line_format = fmt

        # handle names
        if names is None and X.dtype.names:
            names = X.dtype.names
        if names is not None:
            fh.write(delimiter.join(names) + '\n')

        for row in X:
            fh.write(line_format % tuple(row) + '\n')
    finally:
        # BUG FIX: the handle was previously never closed.  Close (and
        # thereby flush) only handles we opened; caller-supplied handles
        # are left open for the caller to manage.
        if own_fh:
            fh.close()
if __name__ == "__main__":
    # Ad-hoc smoke test: parse the bundled macrodata Stata file, located
    # relative to this module's own directory.
    import os
    curdir = os.path.dirname(os.path.abspath(__file__))
    res1 = genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta')
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/transforms.py | 1 | 77073 | """
matplotlib includes a framework for arbitrary geometric
transformations that is used to determine the final position of all
elements drawn on the canvas.
Transforms are composed into trees of :class:`TransformNode` objects
whose actual value depends on their children. When the contents of
children change, their parents are automatically invalidated. The
next time an invalidated transform is accessed, it is recomputed to
reflect those changes. This invalidation/caching approach prevents
unnecessary recomputations of transforms, and contributes to better
interactive performance.
For example, here is a graph of the transform tree used to plot data
to the graph:
.. image:: ../_static/transforms.png
The framework can be used for both affine and non-affine
transformations. However, for speed, we want to use the backend
renderers to perform affine transformations whenever possible.
Therefore, it is possible to perform just the affine or non-affine
part of a transformation on a set of data. The affine is always
assumed to occur after the non-affine. For any transform::
full transform == non-affine part + affine part
The backends are not expected to handle non-affine transformations
themselves.
"""
import numpy as np
from numpy import ma
from matplotlib._path import affine_transform
from numpy.linalg import inv
from weakref import WeakKeyDictionary
import warnings
try:
set
except NameError:
from sets import Set as set
import cbook
from path import Path
from _path import count_bboxes_overlapping_bbox, update_path_extents
# Module-wide debug switch: enables tree dumping and extra sanity warnings.
DEBUG = False
if DEBUG:
    # NOTE(review): redundant — ``warnings`` is already imported
    # unconditionally above; harmless, but could be dropped.
    import warnings

# Shorthand alias used throughout this module.
MaskedArray = ma.MaskedArray
class TransformNode(object):
    """
    :class:`TransformNode` is the base class for anything that
    participates in the transform tree and needs to invalidate its
    parents or be invalidated.  This includes classes that are not
    really transforms, such as bounding boxes, since some transforms
    depend on bounding boxes to compute their values.
    """
    _gid = 0

    # Invalidation may affect only the affine part.  If the
    # invalidation was "affine-only", the _invalid member is set to
    # INVALID_AFFINE_ONLY
    INVALID_NON_AFFINE = 1
    INVALID_AFFINE = 2
    INVALID = INVALID_NON_AFFINE | INVALID_AFFINE

    # Some metadata about the transform, used to determine whether an
    # invalidation is affine-only
    is_affine = False
    is_bbox = False

    # If pass_through is True, all ancestors will always be
    # invalidated, even if 'self' is already invalid.
    pass_through = False

    def __init__(self):
        """
        Creates a new :class:`TransformNode`.
        """
        # Parents are stored in a WeakKeyDictionary, so that if the
        # parents are deleted, references from the children won't keep
        # them alive.
        self._parents = WeakKeyDictionary()

        # TransformNodes start out as invalid until their values are
        # computed for the first time.
        self._invalid = 1

    def __copy__(self, *args):
        # Copying would silently detach the node from the invalidation
        # tree, so it is forbidden outright.
        raise NotImplementedError(
            "TransformNode instances can not be copied. " +
            "Consider using frozen() instead.")
    __deepcopy__ = __copy__

    def invalidate(self):
        """
        Invalidate this :class:`TransformNode` and all of its
        ancestors.  Should be called any time the transform changes.
        """
        # If we are an affine transform being changed, we can set the
        # flag to INVALID_AFFINE_ONLY
        value = (self.is_affine) and self.INVALID_AFFINE or self.INVALID

        # Shortcut: If self is already invalid, that means its parents
        # are as well, so we don't need to do anything.
        if self._invalid == value:
            return

        if not len(self._parents):
            self._invalid = value
            return

        # Invalidate all ancestors of self using pseudo-recursion.
        stack = [self]
        while len(stack):
            root = stack.pop()
            # Stop at subtrees that have already been invalidated
            if root._invalid != value or root.pass_through:
                root._invalid = self.INVALID
                stack.extend(root._parents.keys())

    def set_children(self, *children):
        """
        Set the children of the transform, to let the invalidation
        system know which transforms can invalidate this transform.
        Should be called from the constructor of any transforms that
        depend on other transforms.
        """
        for child in children:
            child._parents[self] = None

    if DEBUG:
        _set_children = set_children

        def set_children(self, *children):
            # Debug variant: also remember the children so the tree can be
            # dumped by write_graphviz() below.
            self._set_children(*children)
            self._children = children
        set_children.__doc__ = _set_children.__doc__

    def frozen(self):
        """
        Returns a frozen copy of this transform node.  The frozen copy
        will not update when its children change.  Useful for storing
        a previously known state of a transform where
        ``copy.deepcopy()`` might normally be used.
        """
        return self

    if DEBUG:
        def write_graphviz(self, fobj, highlight=[]):
            """
            For debugging purposes.

            Writes the transform tree rooted at 'self' to a graphviz "dot"
            format file.  This file can be run through the "dot" utility
            to produce a graph of the transform tree.

            Affine transforms are marked in blue.  Bounding boxes are
            marked in yellow.

            *fobj*: A Python file-like object
            """
            seen = set()

            def recurse(root):
                if root in seen:
                    return
                seen.add(root)
                props = {}
                label = root.__class__.__name__
                if root._invalid:
                    label = '[%s]' % label
                if root in highlight:
                    props['style'] = 'bold'
                props['shape'] = 'box'
                props['label'] = '"%s"' % label
                props = ' '.join(['%s=%s' % (key, val) for key, val in props.items()])

                fobj.write('%s [%s];\n' %
                           (hash(root), props))

                if hasattr(root, '_children'):
                    for child in root._children:
                        name = '?'
                        for key, val in root.__dict__.items():
                            if val is child:
                                name = key
                                break
                        fobj.write('%s -> %s [label="%s", fontsize=10];\n' % (
                                hash(root),
                                hash(child),
                                name))
                        recurse(child)

            fobj.write("digraph G {\n")
            recurse(self)
            fobj.write("}\n")
    else:
        def write_graphviz(self, fobj, highlight=[]):
            # No-op outside of debug builds.
            return
class BboxBase(TransformNode):
    """
    This is the base class of all bounding boxes, and provides
    read-only access to its data.  A mutable bounding box is provided
    by the :class:`Bbox` class.

    The canonical representation is as two points, with no
    restrictions on their ordering.  Convenience properties are
    provided to get the left, bottom, right and top edges and width
    and height, but these are not stored explicitly.
    """
    is_bbox = True
    is_affine = True

    #* Redundant: Removed for performance
    #
    # def __init__(self):
    #     TransformNode.__init__(self)

    if DEBUG:
        def _check(points):
            # Debug helper: warn about masked or degenerate bbox data.
            if ma.isMaskedArray(points):
                warnings.warn("Bbox bounds are a masked array.")
            points = np.asarray(points)
            if (points[1,0] - points[0,0] == 0 or
                points[1,1] - points[0,1] == 0):
                warnings.warn("Singular Bbox.")
        _check = staticmethod(_check)
    def frozen(self):
        # Return a static snapshot: a Bbox that will not update when the
        # children of this node change.
        return Bbox(self.get_points().copy())
    # NOTE(review): this copies the *class* docstring of TransformNode onto
    # frozen(); ``TransformNode.frozen.__doc__`` was probably intended —
    # confirm before changing.
    frozen.__doc__ = TransformNode.__doc__

    def __array__(self, *args, **kwargs):
        # Allow np.asarray(bbox) to yield the 2x2 points array directly.
        return self.get_points()
def is_unit(self):
"""
Returns True if the :class:`Bbox` is the unit bounding box
from (0, 0) to (1, 1).
"""
return list(self.get_points().flatten()) == [0., 0., 1., 1.]
    # ------------------------------------------------------------------
    # Coordinate accessors.  x0/y0/x1/y1 and p0/p1 expose the *stored*
    # (possibly unordered) corner values; xmin/ymin/xmax/ymax and min/max
    # expose the sorted edges/corners.
    # ------------------------------------------------------------------
    def _get_x0(self):
        return self.get_points()[0, 0]
    x0 = property(_get_x0, None, None, """
         (property) :attr:`x0` is the first of the pair of *x* coordinates that
         define the bounding box. :attr:`x0` is not guaranteed to be
         less than :attr:`x1`. If you require that, use :attr:`xmin`.""")

    def _get_y0(self):
        return self.get_points()[0, 1]
    y0 = property(_get_y0, None, None, """
         (property) :attr:`y0` is the first of the pair of *y* coordinates that
         define the bounding box. :attr:`y0` is not guaranteed to be
         less than :attr:`y1`. If you require that, use :attr:`ymin`.""")

    def _get_x1(self):
        return self.get_points()[1, 0]
    x1 = property(_get_x1, None, None, """
         (property) :attr:`x1` is the second of the pair of *x* coordinates that
         define the bounding box. :attr:`x1` is not guaranteed to be
         greater than :attr:`x0`. If you require that, use :attr:`xmax`.""")

    def _get_y1(self):
        return self.get_points()[1, 1]
    y1 = property(_get_y1, None, None, """
         (property) :attr:`y1` is the second of the pair of *y* coordinates that
         define the bounding box. :attr:`y1` is not guaranteed to be
         greater than :attr:`y0`. If you require that, use :attr:`ymax`.""")

    def _get_p0(self):
        return self.get_points()[0]
    p0 = property(_get_p0, None, None, """
         (property) :attr:`p0` is the first pair of (*x*, *y*) coordinates that
         define the bounding box. It is not guaranteed to be the bottom-left
         corner. For that, use :attr:`min`.""")

    def _get_p1(self):
        return self.get_points()[1]
    p1 = property(_get_p1, None, None, """
         (property) :attr:`p1` is the second pair of (*x*, *y*) coordinates that
         define the bounding box. It is not guaranteed to be the top-right
         corner. For that, use :attr:`max`.""")

    def _get_xmin(self):
        return min(self.get_points()[:, 0])
    xmin = property(_get_xmin, None, None, """
         (property) :attr:`xmin` is the left edge of the bounding box.""")

    def _get_ymin(self):
        return min(self.get_points()[:, 1])
    ymin = property(_get_ymin, None, None, """
         (property) :attr:`ymin` is the bottom edge of the bounding box.""")

    def _get_xmax(self):
        return max(self.get_points()[:, 0])
    xmax = property(_get_xmax, None, None, """
         (property) :attr:`xmax` is the right edge of the bounding box.""")

    def _get_ymax(self):
        return max(self.get_points()[:, 1])
    ymax = property(_get_ymax, None, None, """
         (property) :attr:`ymax` is the top edge of the bounding box.""")

    # NOTE: these two intentionally shadow the builtins ``min``/``max``
    # inside this class namespace; kept for API compatibility.
    def _get_min(self):
        return [min(self.get_points()[:, 0]),
                min(self.get_points()[:, 1])]
    min = property(_get_min, None, None, """
         (property) :attr:`min` is the bottom-left corner of the bounding
         box.""")

    def _get_max(self):
        return [max(self.get_points()[:, 0]),
                max(self.get_points()[:, 1])]
    max = property(_get_max, None, None, """
         (property) :attr:`max` is the top-right corner of the bounding box.""")

    def _get_intervalx(self):
        return self.get_points()[:, 0]
    intervalx = property(_get_intervalx, None, None, """
         (property) :attr:`intervalx` is the pair of *x* coordinates that define
         the bounding box. It is not guaranteed to be sorted from left to
         right.""")

    def _get_intervaly(self):
        return self.get_points()[:, 1]
    intervaly = property(_get_intervaly, None, None, """
         (property) :attr:`intervaly` is the pair of *y* coordinates that define
         the bounding box. It is not guaranteed to be sorted from bottom to
         top.""")

    # Derived geometry; width/height/size may be negative for unordered
    # corner points.
    def _get_width(self):
        points = self.get_points()
        return points[1, 0] - points[0, 0]
    width = property(_get_width, None, None, """
         (property) The width of the bounding box. It may be negative if
         :attr:`x1` < :attr:`x0`.""")

    def _get_height(self):
        points = self.get_points()
        return points[1, 1] - points[0, 1]
    height = property(_get_height, None, None, """
         (property) The height of the bounding box. It may be negative if
         :attr:`y1` < :attr:`y0`.""")

    def _get_size(self):
        points = self.get_points()
        return points[1] - points[0]
    size = property(_get_size, None, None, """
         (property) The width and height of the bounding box. May be negative,
         in the same way as :attr:`width` and :attr:`height`.""")

    def _get_bounds(self):
        x0, y0, x1, y1 = self.get_points().flatten()
        return (x0, y0, x1 - x0, y1 - y0)
    bounds = property(_get_bounds, None, None, """
         (property) Returns (:attr:`x0`, :attr:`y0`, :attr:`width`,
         :attr:`height`).""")

    def _get_extents(self):
        return self.get_points().flatten().copy()
    extents = property(_get_extents, None, None, """
         (property) Returns (:attr:`x0`, :attr:`y0`, :attr:`x1`, :attr:`y1`).""")
def get_points(self):
return NotImplementedError()
def containsx(self, x):
"""
Returns True if *x* is between or equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x >= x0 and x <= x1))
or (x >= x1 and x <= x0))
def containsy(self, y):
"""
Returns True if *y* is between or equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
and (y >= y0 and y <= y1))
or (y >= y1 and y <= y0))
def contains(self, x, y):
"""
Returns *True* if (*x*, *y*) is a coordinate inside the
bounding box or on its edge.
"""
return self.containsx(x) and self.containsy(y)
def overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 < ax1) or
(by2 < ay1) or
(bx1 > ax2) or
(by1 > ay2))
def fully_containsx(self, x):
"""
Returns True if *x* is between but not equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x > x0 and x < x1))
or (x > x1 and x < x0))
def fully_containsy(self, y):
"""
Returns True if *y* is between but not equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
and (x > y0 and x < y1))
or (x > y1 and x < y0))
def fully_contains(self, x, y):
"""
Returns True if (*x*, *y*) is a coordinate inside the bounding
box, but not on its edge.
"""
return self.fully_containsx(x) \
and self.fully_containsy(y)
def fully_overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*, but not on its edge alone.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 <= ax1) or
(by2 <= ay1) or
(bx1 >= ax2) or
(by1 >= ay2))
def transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the given transform.
"""
return Bbox(transform.transform(self.get_points()))
def inverse_transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the inverse of the given transform.
"""
return Bbox(transform.inverted().transform(self.get_points()))
coefs = {'C': (0.5, 0.5),
'SW': (0,0),
'S': (0.5, 0),
'SE': (1.0, 0),
'E': (1.0, 0.5),
'NE': (1.0, 1.0),
'N': (0.5, 1.0),
'NW': (0, 1.0),
'W': (0, 0.5)}
    def anchored(self, c, container = None):
        """
        Return a copy of the :class:`Bbox`, shifted to position *c*
        within a container.

        *c*: may be either:

          * a sequence (*cx*, *cy*) where *cx* and *cy* range from 0
            to 1, where 0 is left or bottom and 1 is right or top

          * a string:
            - 'C' for centered
            - 'S' for bottom-center
            - 'SE' for bottom-right
            - 'E' for right
            - etc.

        Optional argument *container* is the box within which the
        :class:`Bbox` is positioned; it defaults to the initial
        :class:`Bbox`.
        """
        if container is None:
            container = self
        l, b, w, h = container.bounds
        if isinstance(c, str):
            cx, cy = self.coefs[c]
        else:
            cx, cy = c
        L, B, W, H = self.bounds
        # Translate so the (cx, cy) anchor point of this box lands on the
        # corresponding anchor point of the container.
        return Bbox(self._points +
                    [(l + cx * (w-W)) - L,
                     (b + cy * (h-H)) - B])
def shrunk(self, mx, my):
"""
Return a copy of the :class:`Bbox`, shrunk by the factor *mx*
in the *x* direction and the factor *my* in the *y* direction.
The lower left corner of the box remains unchanged. Normally
*mx* and *my* will be less than 1, but this is not enforced.
"""
w, h = self.size
return Bbox([self._points[0],
self._points[0] + [mx * w, my * h]])
    def shrunk_to_aspect(self, box_aspect, container = None, fig_aspect = 1.0):
        """
        Return a copy of the :class:`Bbox`, shrunk so that it is as
        large as it can be while having the desired aspect ratio,
        *box_aspect*.  If the box coordinates are relative---that
        is, fractions of a larger box such as a figure---then the
        physical aspect ratio of that figure is specified with
        *fig_aspect*, so that *box_aspect* can also be given as a
        ratio of the absolute dimensions, not the relative dimensions.
        """
        assert box_aspect > 0 and fig_aspect > 0
        if container is None:
            container = self
        w, h = container.size
        # Try the full width first; if the implied height does not fit
        # in the container, use the full height instead.
        H = w * box_aspect/fig_aspect
        if H <= h:
            W = w
        else:
            W = h * fig_aspect/box_aspect
            H = h
        return Bbox([self._points[0],
                     self._points[0] + (W, H)])
def splitx(self, *args):
"""
e.g., ``bbox.splitx(f1, f2, ...)``
Returns a list of new :class:`Bbox` objects formed by
splitting the original one with vertical lines at fractional
positions *f1*, *f2*, ...
"""
boxes = []
xf = [0] + list(args) + [1]
x0, y0, x1, y1 = self._get_extents()
w = x1 - x0
for xf0, xf1 in zip(xf[:-1], xf[1:]):
boxes.append(Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]]))
return boxes
    def splity(self, *args):
        """
        e.g., ``bbox.splity(f1, f2, ...)``
        Returns a list of new :class:`Bbox` objects formed by
        splitting the original one with horizontal lines at fractional
        positions *f1*, *f2*, ...
        """
        boxes = []
        # Include the implicit 0 and 1 endpoints so N cuts yield N+1 boxes.
        yf = [0] + list(args) + [1]
        x0, y0, x1, y1 = self._get_extents()
        h = y1 - y0
        for yf0, yf1 in zip(yf[:-1], yf[1:]):
            boxes.append(Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]]))
        return boxes
def count_contains(self, vertices):
"""
Count the number of vertices contained in the :class:`Bbox`.
*vertices* is a Nx2 Numpy array.
"""
if len(vertices) == 0:
return 0
vertices = np.asarray(vertices)
x0, y0, x1, y1 = self._get_extents()
dx0 = np.sign(vertices[:, 0] - x0)
dy0 = np.sign(vertices[:, 1] - y0)
dx1 = np.sign(vertices[:, 0] - x1)
dy1 = np.sign(vertices[:, 1] - y1)
inside = (abs(dx0 + dx1) + abs(dy0 + dy1)) <= 2
return np.sum(inside)
    def count_overlaps(self, bboxes):
        """
        Count the number of bounding boxes that overlap this one.
        bboxes is a sequence of :class:`BboxBase` objects
        """
        # Delegated to a module-level helper -- presumably a C-accelerated
        # routine imported elsewhere in this file; verify against imports.
        return count_bboxes_overlapping_bbox(self, bboxes)
def expanded(self, sw, sh):
"""
Return a new :class:`Bbox` which is this :class:`Bbox`
expanded around its center by the given factors *sw* and
*sh*.
"""
width = self.width
height = self.height
deltaw = (sw * width - width) / 2.0
deltah = (sh * height - height) / 2.0
a = np.array([[-deltaw, -deltah], [deltaw, deltah]])
return Bbox(self._points + a)
def padded(self, p):
"""
Return a new :class:`Bbox` that is padded on all four sides by
the given value.
"""
points = self.get_points()
return Bbox(points + [[-p, -p], [p, p]])
def translated(self, tx, ty):
"""
Return a copy of the :class:`Bbox`, statically translated by
*tx* and *ty*.
"""
return Bbox(self._points + (tx, ty))
def corners(self):
"""
Return an array of points which are the four corners of this
rectangle. For example, if this :class:`Bbox` is defined by
the points (*a*, *b*) and (*c*, *d*), :meth:`corners` returns
(*a*, *b*), (*a*, *d*), (*c*, *b*) and (*c*, *d*).
"""
l, b, r, t = self.get_points().flatten()
return np.array([[l, b], [l, t], [r, b], [r, t]])
def rotated(self, radians):
"""
Return a new bounding box that bounds a rotated version of
this bounding box by the given radians. The new bounding box
is still aligned with the axes, of course.
"""
corners = self.corners()
corners_rotated = Affine2D().rotate(radians).transform(corners)
bbox = Bbox.unit()
bbox.update_from_data_xy(corners_rotated, ignore=True)
return bbox
@staticmethod
def union(bboxes):
"""
Return a :class:`Bbox` that contains all of the given bboxes.
"""
assert(len(bboxes))
if len(bboxes) == 1:
return bboxes[0]
x0 = np.inf
y0 = np.inf
x1 = -np.inf
y1 = -np.inf
for bbox in bboxes:
points = bbox.get_points()
xs = points[:, 0]
ys = points[:, 1]
x0 = min(x0, np.min(xs))
y0 = min(y0, np.min(ys))
x1 = max(x1, np.max(xs))
y1 = max(y1, np.max(ys))
return Bbox.from_extents(x0, y0, x1, y1)
class Bbox(BboxBase):
    """
    A mutable bounding box.

    Stores its state as a 2x2 points array ``[[x0, y0], [x1, y1]]``
    and invalidates dependent transform nodes whenever it is mutated.
    """
    def __init__(self, points):
        """
        *points*: a 2x2 numpy array of the form [[x0, y0], [x1, y1]]
        If you need to create a :class:`Bbox` object from another form
        of data, consider the static methods :meth:`unit`,
        :meth:`from_bounds` and :meth:`from_extents`.
        """
        BboxBase.__init__(self)
        self._points = np.asarray(points, np.float_)
        # Per-axis minimum positive values, updated by update_from_path();
        # presumably consumed by log-scale autoscaling -- TODO confirm
        # against callers.
        self._minpos = np.array([0.0000001, 0.0000001])
        # When True, the next update_from_data* call replaces the current
        # bounds instead of extending them; see ignore().
        self._ignore = True
        # it is helpful in some contexts to know if the bbox is a
        # default or has been mutated; we store the orig points to
        # support the mutated methods
        self._points_orig = self._points.copy()
    if DEBUG:
        # Debug builds wrap __init__ and invalidate with a validity
        # check of the points array (_check is inherited).
        ___init__ = __init__
        def __init__(self, points):
            self._check(points)
            self.___init__(points)
        def invalidate(self):
            self._check(self._points)
            TransformNode.invalidate(self)
    # Template for the unit box; unit() always hands out a copy so the
    # shared template is never mutated.
    _unit_values = np.array([[0.0, 0.0], [1.0, 1.0]], np.float_)
    @staticmethod
    def unit():
        """
        (staticmethod) Create a new unit :class:`Bbox` from (0, 0) to
        (1, 1).
        """
        return Bbox(Bbox._unit_values.copy())
    @staticmethod
    def from_bounds(x0, y0, width, height):
        """
        (staticmethod) Create a new :class:`Bbox` from *x0*, *y0*,
        *width* and *height*.
        *width* and *height* may be negative.
        """
        return Bbox.from_extents(x0, y0, x0 + width, y0 + height)
    @staticmethod
    def from_extents(*args):
        """
        (staticmethod) Create a new Bbox from *left*, *bottom*,
        *right* and *top*.
        The *y*-axis increases upwards.
        """
        points = np.array(args, dtype=np.float_).reshape(2, 2)
        return Bbox(points)
    def __repr__(self):
        return 'Bbox(%s)' % repr(self._points)
    __str__ = __repr__
    def ignore(self, value):
        """
        Set whether the existing bounds of the box should be ignored
        by subsequent calls to :meth:`update_from_data` or
        :meth:`update_from_data_xy`.
        *value*:
           - When True, subsequent calls to :meth:`update_from_data`
             will ignore the existing bounds of the :class:`Bbox`.
           - When False, subsequent calls to :meth:`update_from_data`
             will include the existing bounds of the :class:`Bbox`.
        """
        self._ignore = value
    def update_from_data(self, x, y, ignore=None):
        """
        Update the bounds of the :class:`Bbox` based on the passed in
        data.  After updating, the bounds will have positive *width*
        and *height*; *x0* and *y0* will be the minimal values.
        *x*: a numpy array of *x*-values
        *y*: a numpy array of *y*-values
        *ignore*:
           - when True, ignore the existing bounds of the :class:`Bbox`.
           - when False, include the existing bounds of the :class:`Bbox`.
           - when None, use the last value passed to :meth:`ignore`.
        """
        warnings.warn(
            "update_from_data requires a memory copy -- please replace with update_from_data_xy")
        # Stack the two 1-D arrays into an Nx2 array and delegate.
        xy = np.hstack((x.reshape((len(x), 1)), y.reshape((len(y), 1))))
        return self.update_from_data_xy(xy, ignore)
    def update_from_path(self, path, ignore=None, updatex=True, updatey=True):
        """
        Update the bounds of the :class:`Bbox` based on the passed in
        data.  After updating, the bounds will have positive *width*
        and *height*; *x0* and *y0* will be the minimal values.
        *path*: a :class:`~matplotlib.path.Path` instance
        *ignore*:
           - when True, ignore the existing bounds of the :class:`Bbox`.
           - when False, include the existing bounds of the :class:`Bbox`.
           - when None, use the last value passed to :meth:`ignore`.
        *updatex*: when True, update the x values
        *updatey*: when True, update the y values
        """
        if ignore is None:
            ignore = self._ignore
        # An empty path cannot change the extents.
        if path.vertices.size == 0:
            return
        points, minpos, changed = update_path_extents(
            path, None, self._points, self._minpos, ignore)
        if changed:
            self.invalidate()
            # Only copy back the axes the caller asked to update.
            if updatex:
                self._points[:,0] = points[:,0]
                self._minpos[0] = minpos[0]
            if updatey:
                self._points[:,1] = points[:,1]
                self._minpos[1] = minpos[1]
    def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):
        """
        Update the bounds of the :class:`Bbox` based on the passed in
        data.  After updating, the bounds will have positive *width*
        and *height*; *x0* and *y0* will be the minimal values.
        *xy*: a numpy array of 2D points
        *ignore*:
           - when True, ignore the existing bounds of the :class:`Bbox`.
           - when False, include the existing bounds of the :class:`Bbox`.
           - when None, use the last value passed to :meth:`ignore`.
        *updatex*: when True, update the x values
        *updatey*: when True, update the y values
        """
        if len(xy) == 0:
            return
        # Wrap the points in a Path and reuse the path-based update.
        path = Path(xy)
        self.update_from_path(path, ignore=ignore,
                                    updatex=updatex, updatey=updatey)
    # Mutable coordinate properties: each setter writes into the points
    # array in place and invalidates dependent transform nodes.
    def _set_x0(self, val):
        self._points[0, 0] = val
        self.invalidate()
    x0 = property(BboxBase._get_x0, _set_x0)
    def _set_y0(self, val):
        self._points[0, 1] = val
        self.invalidate()
    y0 = property(BboxBase._get_y0, _set_y0)
    def _set_x1(self, val):
        self._points[1, 0] = val
        self.invalidate()
    x1 = property(BboxBase._get_x1, _set_x1)
    def _set_y1(self, val):
        self._points[1, 1] = val
        self.invalidate()
    y1 = property(BboxBase._get_y1, _set_y1)
    def _set_p0(self, val):
        self._points[0] = val
        self.invalidate()
    p0 = property(BboxBase._get_p0, _set_p0)
    def _set_p1(self, val):
        self._points[1] = val
        self.invalidate()
    p1 = property(BboxBase._get_p1, _set_p1)
    def _set_intervalx(self, interval):
        self._points[:, 0] = interval
        self.invalidate()
    intervalx = property(BboxBase._get_intervalx, _set_intervalx)
    def _set_intervaly(self, interval):
        self._points[:, 1] = interval
        self.invalidate()
    intervaly = property(BboxBase._get_intervaly, _set_intervaly)
    def _set_bounds(self, bounds):
        # Accepts (left, bottom, width, height); only invalidates when
        # the points actually change.
        l, b, w, h = bounds
        points = np.array([[l, b], [l+w, b+h]], np.float_)
        if np.any(self._points != points):
            self._points = points
            self.invalidate()
    bounds = property(BboxBase._get_bounds, _set_bounds)
    def _get_minpos(self):
        return self._minpos
    minpos = property(_get_minpos)
    def _get_minposx(self):
        return self._minpos[0]
    minposx = property(_get_minposx)
    def _get_minposy(self):
        return self._minpos[1]
    minposy = property(_get_minposy)
    def get_points(self):
        """
        Get the points of the bounding box directly as a numpy array
        of the form: [[x0, y0], [x1, y1]].
        """
        self._invalid = 0
        return self._points
    def set_points(self, points):
        """
        Set the points of the bounding box directly from a numpy array
        of the form: [[x0, y0], [x1, y1]].  No error checking is
        performed, as this method is mainly for internal use.
        """
        if np.any(self._points != points):
            self._points = points
            self.invalidate()
    def set(self, other):
        """
        Set this bounding box from the "frozen" bounds of another
        :class:`Bbox`.
        """
        if np.any(self._points != other.get_points()):
            self._points = other.get_points()
            self.invalidate()
    def mutated(self):
        'return whether the bbox has changed since init'
        return self.mutatedx() or self.mutatedy()
    def mutatedx(self):
        'return whether the x-limits have changed since init'
        return (self._points[0,0]!=self._points_orig[0,0] or
                self._points[1,0]!=self._points_orig[1,0])
    def mutatedy(self):
        'return whether the y-limits have changed since init'
        return (self._points[0,1]!=self._points_orig[0,1] or
                self._points[1,1]!=self._points_orig[1,1])
class TransformedBbox(BboxBase):
    """
    A :class:`Bbox` that is automatically transformed by a given
    transform.  When either the child bounding box or transform
    changes, the bounds of this bbox will update accordingly.
    """
    def __init__(self, bbox, transform):
        """
        *bbox*: a child :class:`Bbox`
        *transform*: a 2D :class:`Transform`
        """
        assert bbox.is_bbox
        assert isinstance(transform, Transform)
        assert transform.input_dims == 2
        assert transform.output_dims == 2
        BboxBase.__init__(self)
        self._bbox = bbox
        self._transform = transform
        # Register as a dependent of both so their invalidation
        # propagates to this bbox.
        self.set_children(bbox, transform)
        # Cached transformed points; recomputed lazily in get_points().
        self._points = None
    def __repr__(self):
        return "TransformedBbox(%s, %s)" % (self._bbox, self._transform)
    __str__ = __repr__
    def get_points(self):
        # Recompute the transformed corner points only when marked invalid.
        if self._invalid:
            points = self._transform.transform(self._bbox.get_points())
            # Replace any masked entries with 0.0 so a plain ndarray
            # is returned.
            points = np.ma.filled(points, 0.0)
            self._points = points
            self._invalid = 0
        return self._points
    get_points.__doc__ = Bbox.get_points.__doc__
    if DEBUG:
        # Debug wrapper validating the computed points.
        _get_points = get_points
        def get_points(self):
            points = self._get_points()
            self._check(points)
            return points
class Transform(TransformNode):
    """
    The base class of all :class:`TransformNode` instances that
    actually perform a transformation.

    All non-affine transformations should be subclasses of this class.
    New affine transformations should be subclasses of
    :class:`Affine2D`.

    Subclasses of this class should override the following members (at
    minimum):

      - :attr:`input_dims`
      - :attr:`output_dims`
      - :meth:`transform`
      - :attr:`is_separable`
      - :attr:`has_inverse`
      - :meth:`inverted` (if :meth:`has_inverse` can return True)

    If the transform needs to do something non-standard with
    :class:`matplotlib.path.Path` objects, such as adding curves
    where there were once line segments, it should override:

      - :meth:`transform_path`
    """
    # The number of input and output dimensions for this transform.
    # These must be overridden (with integers) in the subclass.
    input_dims = None
    output_dims = None
    # True if this transform has a corresponding inverse transform.
    has_inverse = False
    # True if this transform is separable in the x- and y- dimensions.
    is_separable = False
    #* Redundant: Removed for performance
    #
    # def __init__(self):
    #     TransformNode.__init__(self)
    def __add__(self, other):
        """
        Composes two transforms together such that *self* is followed
        by *other*.
        """
        if isinstance(other, Transform):
            return composite_transform_factory(self, other)
        raise TypeError(
            "Can not add Transform to object of type '%s'" % type(other))
    def __radd__(self, other):
        """
        Composes two transforms together such that *self* is followed
        by *other*.
        """
        if isinstance(other, Transform):
            return composite_transform_factory(other, self)
        raise TypeError(
            "Can not add Transform to object of type '%s'" % type(other))
    def __array__(self, *args, **kwargs):
        """
        Used by C/C++ -based backends to get at the array matrix data.
        """
        raise NotImplementedError
    def transform(self, values):
        """
        Performs the transformation on the given array of values.
        Accepts a numpy array of shape (N x :attr:`input_dims`) and
        returns a numpy array of shape (N x :attr:`output_dims`).
        """
        raise NotImplementedError()
    def transform_affine(self, values):
        """
        Performs only the affine part of this transformation on the
        given array of values.
        ``transform(values)`` is always equivalent to
        ``transform_affine(transform_non_affine(values))``.
        In non-affine transformations, this is generally a no-op.  In
        affine transformations, this is equivalent to
        ``transform(values)``.
        Accepts a numpy array of shape (N x :attr:`input_dims`) and
        returns a numpy array of shape (N x :attr:`output_dims`).
        """
        return values
    def transform_non_affine(self, values):
        """
        Performs only the non-affine part of the transformation.
        ``transform(values)`` is always equivalent to
        ``transform_affine(transform_non_affine(values))``.
        In non-affine transformations, this is generally equivalent to
        ``transform(values)``.  In affine transformations, this is
        always a no-op.
        Accepts a numpy array of shape (N x :attr:`input_dims`) and
        returns a numpy array of shape (N x :attr:`output_dims`).
        """
        return self.transform(values)
    def get_affine(self):
        """
        Get the affine part of this transform.
        """
        return IdentityTransform()
    def transform_point(self, point):
        """
        A convenience function that returns the transformed copy of a
        single point.
        The point is given as a sequence of length :attr:`input_dims`.
        The transformed point is returned as a sequence of length
        :attr:`output_dims`.
        """
        assert len(point) == self.input_dims
        return self.transform(np.asarray([point]))[0]
    def transform_path(self, path):
        """
        Returns a transformed copy of path.
        *path*: a :class:`~matplotlib.path.Path` instance.
        In some cases, this transform may insert curves into the path
        that began as line segments.
        """
        return Path(self.transform(path.vertices), path.codes,
                    path._interpolation_steps)
    def transform_path_affine(self, path):
        """
        Returns a copy of path, transformed only by the affine part of
        this transform.
        *path*: a :class:`~matplotlib.path.Path` instance.
        ``transform_path(path)`` is equivalent to
        ``transform_path_affine(transform_path_non_affine(values))``.
        """
        return path
    def transform_path_non_affine(self, path):
        """
        Returns a copy of path, transformed only by the non-affine
        part of this transform.
        *path*: a :class:`~matplotlib.path.Path` instance.
        ``transform_path(path)`` is equivalent to
        ``transform_path_affine(transform_path_non_affine(values))``.
        """
        return Path(self.transform_non_affine(path.vertices), path.codes,
                    path._interpolation_steps)
    def transform_angles(self, angles, pts, radians=False, pushoff=1e-5):
        """
        Performs transformation on a set of angles anchored at
        specific locations.
        The *angles* must be a column vector (i.e., numpy array).
        The *pts* must be a two-column numpy array of x,y positions
        (angle transforms currently only work in 2D).  This array must
        have the same number of rows as *angles*.
        *radians* indicates whether or not input angles are given in
        radians (True) or degrees (False; the default).
        *pushoff* is the distance to move away from *pts* for
        determining transformed angles (see discussion of method
        below).
        The transformed angles are returned in an array with the same
        size as *angles*.
        The generic version of this method uses a very generic
        algorithm that transforms *pts*, as well as locations very
        close to *pts*, to find the angle in the transformed system.
        """
        # Must be 2D.  (Fixed: the Python-2-only ``<>`` operator was
        # replaced with the equivalent ``!=``.)
        if self.input_dims != 2 or self.output_dims != 2:
            raise NotImplementedError('Only defined in 2D')
        # pts must be array with 2 columns for x,y
        assert pts.shape[1] == 2
        # angles must be a column vector and have same number of
        # rows as pts
        assert np.prod(angles.shape) == angles.shape[0] == pts.shape[0]
        # Convert to radians if desired
        if not radians:
            angles = angles / 180.0 * np.pi
        # Move a short distance away
        pts2 = pts + pushoff * np.c_[ np.cos(angles), np.sin(angles) ]
        # Transform both sets of points
        tpts = self.transform( pts )
        tpts2 = self.transform( pts2 )
        # Calculate transformed angles
        d = tpts2 - tpts
        a = np.arctan2( d[:,1], d[:,0] )
        # Convert back to degrees if desired
        if not radians:
            a = a * 180.0 / np.pi
        return a
    def inverted(self):
        """
        Return the corresponding inverse transformation.
        The return value of this method should be treated as
        temporary.  An update to *self* does not cause a corresponding
        update to its inverted copy.
        ``x === self.inverted().transform(self.transform(x))``
        """
        raise NotImplementedError()
class TransformWrapper(Transform):
    """
    A helper class that holds a single child transform and acts
    equivalently to it.
    This is useful if a node of the transform tree must be replaced at
    run time with a transform of a different type.  This class allows
    that replacement to correctly trigger invalidation.
    Note that :class:`TransformWrapper` instances must have the same
    input and output dimensions during their entire lifetime, so the
    child transform may only be replaced with another child transform
    of the same dimensions.
    """
    pass_through = True
    is_affine = False
    def __init__(self, child):
        """
        *child*: A class:`Transform` instance.  This child may later
        be replaced with :meth:`set`.
        """
        assert isinstance(child, Transform)
        Transform.__init__(self)
        # Dimensions are fixed for the wrapper's lifetime (see class
        # docstring); copied from the initial child.
        self.input_dims = child.input_dims
        self.output_dims = child.output_dims
        self._set(child)
        self._invalid = 0
    def __repr__(self):
        return "TransformWrapper(%r)" % self._child
    __str__ = __repr__
    def frozen(self):
        return self._child.frozen()
    frozen.__doc__ = Transform.frozen.__doc__
    def _set(self, child):
        # Forward all transformation entry points straight to the child
        # by binding its bound methods as instance attributes; this
        # avoids a per-call indirection through wrapper methods.
        self._child = child
        self.set_children(child)
        self.transform = child.transform
        self.transform_affine = child.transform_affine
        self.transform_non_affine = child.transform_non_affine
        self.transform_path = child.transform_path
        self.transform_path_affine = child.transform_path_affine
        self.transform_path_non_affine = child.transform_path_non_affine
        self.get_affine = child.get_affine
        self.inverted = child.inverted
    def set(self, child):
        """
        Replace the current child of this transform with another one.
        The new child must have the same number of input and output
        dimensions as the current child.
        """
        assert child.input_dims == self.input_dims
        assert child.output_dims == self.output_dims
        self._set(child)
        # Propagate invalidation to dependents, then mark this node
        # itself as clean again.
        self._invalid = 0
        self.invalidate()
        self._invalid = 0
    def _get_is_separable(self):
        return self._child.is_separable
    is_separable = property(_get_is_separable)
    def _get_has_inverse(self):
        return self._child.has_inverse
    has_inverse = property(_get_has_inverse)
class AffineBase(Transform):
    """
    The base class of all affine transformations of any number of
    dimensions.
    """
    is_affine = True
    def __init__(self):
        Transform.__init__(self)
        # Cached inverse transform; subclasses build it lazily.
        self._inverted = None
    def __array__(self, *args, **kwargs):
        # Let numpy (and C backends) view this transform as its matrix.
        return self.get_matrix()
    @staticmethod
    def _concat(a, b):
        """
        Concatenates two transformation matrices (represented as numpy
        arrays) together.
        """
        # Matrix product in application order: *a* is applied first.
        return np.dot(b, a)
    def get_matrix(self):
        """
        Get the underlying transformation matrix as a numpy array.
        """
        raise NotImplementedError()
    def transform_non_affine(self, points):
        # For a purely affine transform the non-affine part is the
        # identity.
        return points
    transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
    def transform_path_affine(self, path):
        return self.transform_path(path)
    transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__
    def transform_path_non_affine(self, path):
        return path
    transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
    def get_affine(self):
        return self
    get_affine.__doc__ = Transform.get_affine.__doc__
class Affine2DBase(AffineBase):
    """
    The base class of all 2D affine transformations.
    2D affine transformations are performed using a 3x3 numpy array::
        a c e
        b d f
        0 0 1
    This class provides the read-only interface.  For a mutable 2D
    affine transformation, use :class:`Affine2D`.
    Subclasses of this class will generally only need to override a
    constructor and :meth:`get_matrix` that generates a custom 3x3 matrix.
    """
    input_dims = 2
    output_dims = 2
    #* Redundant: Removed for performance
    #
    # def __init__(self):
    #     Affine2DBase.__init__(self)
    def frozen(self):
        # A frozen copy is simply an Affine2D around a copy of the matrix.
        return Affine2D(self.get_matrix().copy())
    frozen.__doc__ = AffineBase.frozen.__doc__
    def _get_is_separable(self):
        # Separable iff there is no shear (off-diagonal terms are zero).
        mtx = self.get_matrix()
        return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
    is_separable = property(_get_is_separable)
    def __array__(self, *args, **kwargs):
        return self.get_matrix()
    def to_values(self):
        """
        Return the values of the matrix as a sequence (a,b,c,d,e,f)
        """
        mtx = self.get_matrix()
        return tuple(mtx[:2].swapaxes(0, 1).flatten())
    @staticmethod
    def matrix_from_values(a, b, c, d, e, f):
        """
        (staticmethod) Create a new transformation matrix as a 3x3
        numpy array of the form::
            a c e
            b d f
            0 0 1
        """
        return np.array([[a, c, e], [b, d, f], [0.0, 0.0, 1.0]], np.float_)
    def transform(self, points):
        mtx = self.get_matrix()
        if isinstance(points, MaskedArray):
            # Transform the raw data and re-apply the original mask.
            tpoints = affine_transform(points.data, mtx)
            return ma.MaskedArray(tpoints, mask=ma.getmask(points))
        return affine_transform(points, mtx)
    def transform_point(self, point):
        mtx = self.get_matrix()
        return affine_transform(point, mtx)
    transform_point.__doc__ = AffineBase.transform_point.__doc__
    if DEBUG:
        _transform = transform
        def transform(self, points):
            # The major speed trap here is just converting to the
            # points to an array in the first place.  If we can use
            # more arrays upstream, that should help here.
            if (not ma.isMaskedArray(points) and
                not isinstance(points, np.ndarray)):
                # Fixed: the warning referenced the undefined name
                # ``values`` (NameError whenever it fired); it now
                # reports the type of ``points``.
                warnings.warn(
                    ('A non-numpy array of type %s was passed in for ' +
                     'transformation.  Please correct this.')
                    % type(points))
            return self._transform(points)
        transform.__doc__ = AffineBase.transform.__doc__
    # For an affine transform the affine part IS the whole transform.
    transform_affine = transform
    transform_affine.__doc__ = AffineBase.transform_affine.__doc__
    def inverted(self):
        # Cache the inverse; rebuild only when the matrix has changed.
        if self._inverted is None or self._invalid:
            mtx = self.get_matrix()
            self._inverted = Affine2D(inv(mtx))
            self._invalid = 0
        return self._inverted
    inverted.__doc__ = AffineBase.inverted.__doc__
class Affine2D(Affine2DBase):
    """
    A mutable 2D affine transformation.
    """
    def __init__(self, matrix = None):
        """
        Initialize an Affine transform from a 3x3 numpy float array::
          a c e
          b d f
          0 0 1
        If *matrix* is None, initialize with the identity transform.
        """
        Affine2DBase.__init__(self)
        if matrix is None:
            matrix = np.identity(3)
        elif DEBUG:
            # NOTE(review): the asarray conversion and shape assertion
            # only run in DEBUG builds; in normal builds a non-ndarray
            # argument is stored as-is, so callers are expected to pass
            # a 3x3 float ndarray -- confirm against call sites.
            matrix = np.asarray(matrix, np.float_)
            assert matrix.shape == (3, 3)
        self._mtx = matrix
        self._invalid = 0
    def __repr__(self):
        return "Affine2D(%s)" % repr(self._mtx)
    __str__ = __repr__
    def __cmp__(self, other):
        # Python 2 comparison: "equal" (0) only when *other* is an
        # Affine2D with an element-wise identical matrix; otherwise -1.
        if (isinstance(other, Affine2D) and
            (self.get_matrix() == other.get_matrix()).all()):
            return 0
        return -1
    @staticmethod
    def from_values(a, b, c, d, e, f):
        """
        (staticmethod) Create a new Affine2D instance from the given
        values::
          a c e
          b d f
          0 0 1
        """
        return Affine2D(
            np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], np.float_)
            .reshape((3,3)))
    def get_matrix(self):
        """
        Get the underlying transformation matrix as a 3x3 numpy array::
          a c e
          b d f
          0 0 1
        """
        self._invalid = 0
        return self._mtx
    def set_matrix(self, mtx):
        """
        Set the underlying transformation matrix from a 3x3 numpy array::
          a c e
          b d f
          0 0 1
        """
        self._mtx = mtx
        self.invalidate()
    def set(self, other):
        """
        Set this transformation from the frozen copy of another
        :class:`Affine2DBase` object.
        """
        assert isinstance(other, Affine2DBase)
        self._mtx = other.get_matrix()
        self.invalidate()
    @staticmethod
    def identity():
        """
        (staticmethod) Return a new :class:`Affine2D` object that is
        the identity transform.
        Unless this transform will be mutated later on, consider using
        the faster :class:`IdentityTransform` class instead.
        """
        return Affine2D(np.identity(3))
    def clear(self):
        """
        Reset the underlying matrix to the identity transform.
        """
        self._mtx = np.identity(3)
        self.invalidate()
        return self
    def rotate(self, theta):
        """
        Add a rotation (in radians) to this transform in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        a = np.cos(theta)
        b = np.sin(theta)
        rotate_mtx = np.array(
            [[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]],
            np.float_)
        # Left-multiply: the rotation is applied after the existing
        # transform.
        self._mtx = np.dot(rotate_mtx, self._mtx)
        self.invalidate()
        return self
    def rotate_deg(self, degrees):
        """
        Add a rotation (in degrees) to this transform in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        return self.rotate(degrees*np.pi/180.)
    def rotate_around(self, x, y, theta):
        """
        Add a rotation (in radians) around the point (x, y) in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        # Translate the pivot to the origin, rotate, translate back.
        return self.translate(-x, -y).rotate(theta).translate(x, y)
    def rotate_deg_around(self, x, y, degrees):
        """
        Add a rotation (in degrees) around the point (x, y) in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)
    def translate(self, tx, ty):
        """
        Adds a translation in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        translate_mtx = np.array(
            [[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]],
            np.float_)
        self._mtx = np.dot(translate_mtx, self._mtx)
        self.invalidate()
        return self
    def scale(self, sx, sy=None):
        """
        Adds a scale in place.
        If *sy* is None, the same scale is applied in both the *x*- and
        *y*-directions.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        if sy is None:
            sy = sx
        scale_mtx = np.array(
            [[sx, 0.0, 0.0], [0.0, sy, 0.0], [0.0, 0.0, 1.0]],
            np.float_)
        self._mtx = np.dot(scale_mtx, self._mtx)
        self.invalidate()
        return self
    def _get_is_separable(self):
        # Separable iff there is no shear (off-diagonal terms are zero).
        mtx = self.get_matrix()
        return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
    is_separable = property(_get_is_separable)
class IdentityTransform(Affine2DBase):
    """
    A special class that does one thing, the identity transform, in a
    fast way.
    """
    # Shared identity matrix; never mutated, so one instance suffices
    # for the whole class.
    _mtx = np.identity(3)
    def frozen(self):
        # Already immutable, so the frozen copy is this object itself.
        return self
    frozen.__doc__ = Affine2DBase.frozen.__doc__
    def __repr__(self):
        return "IdentityTransform()"
    __str__ = __repr__
    def get_matrix(self):
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
    def transform(self, points):
        # Identity: return the input untouched (no copy is made).
        return points
    transform.__doc__ = Affine2DBase.transform.__doc__
    transform_affine = transform
    transform_affine.__doc__ = Affine2DBase.transform_affine.__doc__
    transform_non_affine = transform
    transform_non_affine.__doc__ = Affine2DBase.transform_non_affine.__doc__
    def transform_path(self, path):
        return path
    transform_path.__doc__ = Affine2DBase.transform_path.__doc__
    transform_path_affine = transform_path
    transform_path_affine.__doc__ = Affine2DBase.transform_path_affine.__doc__
    transform_path_non_affine = transform_path
    transform_path_non_affine.__doc__ = Affine2DBase.transform_path_non_affine.__doc__
    def get_affine(self):
        return self
    get_affine.__doc__ = Affine2DBase.get_affine.__doc__
    # The inverse of the identity is the identity itself.
    inverted = get_affine
    inverted.__doc__ = Affine2DBase.inverted.__doc__
class BlendedGenericTransform(Transform):
    """
    A "blended" transform uses one transform for the *x*-direction, and
    another transform for the *y*-direction.
    This "generic" version can handle any given child transform in the
    *x*- and *y*-directions.
    """
    input_dims = 2
    output_dims = 2
    is_separable = True
    pass_through = True
    def __init__(self, x_transform, y_transform):
        """
        Create a new "blended" transform using *x_transform* to
        transform the *x*-axis and *y_transform* to transform the
        *y*-axis.
        You will generally not call this constructor directly but use
        the :func:`blended_transform_factory` function instead, which
        can determine automatically which kind of blended transform to
        create.
        """
        # Here we ask: "Does it blend?"
        Transform.__init__(self)
        self._x = x_transform
        self._y = y_transform
        self.set_children(x_transform, y_transform)
        # Cached affine part; rebuilt lazily in get_affine().
        self._affine = None
    def _get_is_affine(self):
        return self._x.is_affine and self._y.is_affine
    is_affine = property(_get_is_affine)
    def frozen(self):
        return blended_transform_factory(self._x.frozen(), self._y.frozen())
    frozen.__doc__ = Transform.frozen.__doc__
    def __repr__(self):
        return "BlendedGenericTransform(%s,%s)" % (self._x, self._y)
    __str__ = __repr__
    def transform(self, points):
        x = self._x
        y = self._y
        # Shortcut: same 2D child for both axes -- transform once.
        if x is y and x.input_dims == 2:
            return x.transform(points)
        # Take the x column from the x-child (a 2D child transforms the
        # whole array and we slice; a 1D child gets just the x column).
        if x.input_dims == 2:
            x_points = x.transform(points)[:, 0:1]
        else:
            x_points = x.transform(points[:, 0])
            x_points = x_points.reshape((len(x_points), 1))
        # Likewise for the y column.
        if y.input_dims == 2:
            y_points = y.transform(points)[:, 1:]
        else:
            y_points = y.transform(points[:, 1])
            y_points = y_points.reshape((len(y_points), 1))
        # Preserve maskedness when either column carries a mask.
        if isinstance(x_points, MaskedArray) or isinstance(y_points, MaskedArray):
            return ma.concatenate((x_points, y_points), 1)
        else:
            return np.concatenate((x_points, y_points), 1)
    transform.__doc__ = Transform.transform.__doc__
    def transform_affine(self, points):
        return self.get_affine().transform(points)
    transform_affine.__doc__ = Transform.transform_affine.__doc__
    def transform_non_affine(self, points):
        if self._x.is_affine and self._y.is_affine:
            return points
        return self.transform(points)
    transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
    def inverted(self):
        return BlendedGenericTransform(self._x.inverted(), self._y.inverted())
    inverted.__doc__ = Transform.inverted.__doc__
    def get_affine(self):
        if self._invalid or self._affine is None:
            if self._x.is_affine and self._y.is_affine:
                if self._x == self._y:
                    self._affine = self._x.get_affine()
                else:
                    x_mtx = self._x.get_affine().get_matrix()
                    y_mtx = self._y.get_affine().get_matrix()
                    # This works because we already know the transforms are
                    # separable, though normally one would want to set b and
                    # c to zero.
                    mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
                    self._affine = Affine2D(mtx)
            else:
                self._affine = IdentityTransform()
            self._invalid = 0
        return self._affine
    get_affine.__doc__ = Transform.get_affine.__doc__
class BlendedAffine2D(Affine2DBase):
    """
    A "blended" transform uses one transform for the *x*-direction, and
    another transform for the *y*-direction.
    This version is an optimization for the case where both child
    transforms are of type :class:`Affine2DBase`.
    """
    is_separable = True
    def __init__(self, x_transform, y_transform):
        """
        Create a new "blended" transform using *x_transform* to
        transform the *x*-axis and *y_transform* to transform the
        *y*-axis.
        Both *x_transform* and *y_transform* must be 2D affine
        transforms.
        You will generally not call this constructor directly but use
        the :func:`blended_transform_factory` function instead, which
        can determine automatically which kind of blended transform to
        create.
        """
        assert x_transform.is_affine
        assert y_transform.is_affine
        assert x_transform.is_separable
        assert y_transform.is_separable
        # NOTE(review): both base initializers are invoked here
        # (Transform.__init__ then Affine2DBase.__init__); looks
        # intentional but worth confirming against the base classes.
        Transform.__init__(self)
        self._x = x_transform
        self._y = y_transform
        self.set_children(x_transform, y_transform)
        Affine2DBase.__init__(self)
        # Cached blended matrix; rebuilt lazily in get_matrix().
        self._mtx = None
    def __repr__(self):
        return "BlendedAffine2D(%s,%s)" % (self._x, self._y)
    __str__ = __repr__
    def get_matrix(self):
        if self._invalid:
            if self._x == self._y:
                self._mtx = self._x.get_matrix()
            else:
                x_mtx = self._x.get_matrix()
                y_mtx = self._y.get_matrix()
                # This works because we already know the transforms are
                # separable, though normally one would want to set b and
                # c to zero.
                self._mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
            # Drop the cached inverse; it refers to the old matrix.
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def blended_transform_factory(x_transform, y_transform):
    """
    Create a new "blended" transform using *x_transform* to transform
    the *x*-axis and *y_transform* to transform the *y*-axis.

    A faster version of the blended transform is returned for the case
    where both child transforms are affine.
    """
    both_affine = (isinstance(x_transform, Affine2DBase)
                   and isinstance(y_transform, Affine2DBase))
    # Pick the optimized all-affine implementation when possible.
    cls = BlendedAffine2D if both_affine else BlendedGenericTransform
    return cls(x_transform, y_transform)
class CompositeGenericTransform(Transform):
    """
    A composite transform formed by applying transform *a* then
    transform *b*.
    This "generic" version can handle any two arbitrary
    transformations.
    """
    pass_through = True
    def __init__(self, a, b):
        """
        Create a new composite transform that is the result of
        applying transform *a* then transform *b*.
        You will generally not call this constructor directly but use
        the :func:`composite_transform_factory` function instead,
        which can automatically choose the best kind of composite
        transform instance to create.
        """
        assert a.output_dims == b.input_dims
        self.input_dims = a.input_dims
        self.output_dims = b.output_dims
        Transform.__init__(self)
        self._a = a
        self._b = b
        self.set_children(a, b)
    def frozen(self):
        # Clear the invalid flag first so the frozen copy is built from
        # up-to-date children.
        self._invalid = 0
        frozen = composite_transform_factory(self._a.frozen(), self._b.frozen())
        if not isinstance(frozen, CompositeGenericTransform):
            return frozen.frozen()
        return frozen
    frozen.__doc__ = Transform.frozen.__doc__
    def _get_is_affine(self):
        return self._a.is_affine and self._b.is_affine
    is_affine = property(_get_is_affine)
    def _get_is_separable(self):
        return self._a.is_separable and self._b.is_separable
    is_separable = property(_get_is_separable)
    def __repr__(self):
        return "CompositeGenericTransform(%s, %s)" % (self._a, self._b)
    __str__ = __repr__
    def transform(self, points):
        # Apply a first, then b.
        return self._b.transform(
            self._a.transform(points))
    transform.__doc__ = Transform.transform.__doc__
    def transform_affine(self, points):
        return self.get_affine().transform(points)
    transform_affine.__doc__ = Transform.transform_affine.__doc__
    def transform_non_affine(self, points):
        # A fully affine composite has no non-affine part.
        if self._a.is_affine and self._b.is_affine:
            return points
        return self._b.transform_non_affine(
            self._a.transform(points))
    transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
    def transform_path(self, path):
        return self._b.transform_path(
            self._a.transform_path(path))
    transform_path.__doc__ = Transform.transform_path.__doc__
    def transform_path_affine(self, path):
        return self._b.transform_path_affine(
            self._a.transform_path(path))
    transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__
    def transform_path_non_affine(self, path):
        if self._a.is_affine and self._b.is_affine:
            return path
        return self._b.transform_path_non_affine(
            self._a.transform_path(path))
    transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
    def get_affine(self):
        # Matrices compose right-to-left: b's matrix multiplies a's.
        if self._a.is_affine and self._b.is_affine:
            return Affine2D(np.dot(self._b.get_affine().get_matrix(),
                                self._a.get_affine().get_matrix()))
        else:
            return self._b.get_affine()
    get_affine.__doc__ = Transform.get_affine.__doc__
    def inverted(self):
        # (b o a)^-1 == a^-1 o b^-1, hence the reversed order.
        return CompositeGenericTransform(self._b.inverted(), self._a.inverted())
    inverted.__doc__ = Transform.inverted.__doc__
class CompositeAffine2D(Affine2DBase):
    """
    A composite transform formed by applying transform *a* then transform *b*.
    This version is an optimization that handles the case where both *a*
    and *b* are 2D affines.
    """
    def __init__(self, a, b):
        """
        Create a new composite transform that is the result of
        applying transform *a* then transform *b*.
        Both *a* and *b* must be instances of :class:`Affine2DBase`.
        You will generally not call this constructor directly but use
        the :func:`composite_transform_factory` function instead,
        which can automatically choose the best kind of composite
        transform instance to create.
        """
        assert a.output_dims == b.input_dims
        self.input_dims = a.input_dims
        self.output_dims = b.output_dims
        assert a.is_affine
        assert b.is_affine
        Affine2DBase.__init__(self)
        self._a = a
        self._b = b
        self.set_children(a, b)
        # Composite matrix is computed lazily in get_matrix().
        self._mtx = None
    def __repr__(self):
        return "CompositeAffine2D(%s, %s)" % (self._a, self._b)
    __str__ = __repr__
    def get_matrix(self):
        if self._invalid:
            # Right-to-left matrix composition: a is applied first.
            self._mtx = np.dot(
                self._b.get_matrix(),
                self._a.get_matrix())
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def composite_transform_factory(a, b):
    """
    Create a new composite transform that is the result of applying
    transform a then transform b.

    Shortcut versions of the composite transform are provided for the
    case where both child transforms are affine, or one or the other
    is the identity transform.

    Composite transforms may also be created using the '+' operator,
    e.g.::

      c = a + b
    """
    # Composing with the identity is a no-op: return the other operand.
    if isinstance(a, IdentityTransform):
        return b
    if isinstance(b, IdentityTransform):
        return a
    # Two affines collapse into a single matrix product.
    if isinstance(a, AffineBase) and isinstance(b, AffineBase):
        return CompositeAffine2D(a, b)
    return CompositeGenericTransform(a, b)
class BboxTransform(Affine2DBase):
    """
    :class:`BboxTransform` linearly transforms points from one
    :class:`Bbox` to another :class:`Bbox`.
    """
    is_separable = True
    def __init__(self, boxin, boxout):
        """
        Create a new :class:`BboxTransform` that linearly transforms
        points from *boxin* to *boxout*.
        """
        assert boxin.is_bbox
        assert boxout.is_bbox
        Affine2DBase.__init__(self)
        self._boxin = boxin
        self._boxout = boxout
        self.set_children(boxin, boxout)
        # Matrix and its inverse are cached; rebuilt on invalidation.
        self._mtx = None
        self._inverted = None
    def __repr__(self):
        return "BboxTransform(%s, %s)" % (self._boxin, self._boxout)
    __str__ = __repr__
    def get_matrix(self):
        if self._invalid:
            inl, inb, inw, inh = self._boxin.bounds
            outl, outb, outw, outh = self._boxout.bounds
            x_scale = outw / inw
            y_scale = outh / inh
            if DEBUG and (x_scale == 0 or y_scale == 0):
                raise ValueError("Transforming from or to a singular bounding box.")
            # Scale boxin's extent onto boxout's, then translate boxin's
            # origin onto boxout's origin.
            self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale+outl)],
                                  [0.0 , y_scale, (-inb*y_scale+outb)],
                                  [0.0 , 0.0 , 1.0 ]],
                                 np.float_)
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformTo(Affine2DBase):
    """
    :class:`BboxTransformTo` is a transformation that linearly
    transforms points from the unit bounding box to a given
    :class:`Bbox`.
    """
    is_separable = True
    def __init__(self, boxout):
        """
        Create a new :class:`BboxTransformTo` that linearly transforms
        points from the unit bounding box to *boxout*.
        """
        assert boxout.is_bbox
        Affine2DBase.__init__(self)
        self._boxout = boxout
        self.set_children(boxout)
        # Matrix (and inverse) computed lazily by get_matrix().
        self._mtx = None
        self._inverted = None
    def __repr__(self):
        return "BboxTransformTo(%s)" % (self._boxout)
    __str__ = __repr__
    def get_matrix(self):
        if self._invalid:
            outl, outb, outw, outh = self._boxout.bounds
            if DEBUG and (outw == 0 or outh == 0):
                raise ValueError("Transforming to a singular bounding box.")
            # Scale the unit square to the box size, then translate to
            # the box origin.
            self._mtx = np.array([[outw, 0.0, outl],
                                  [ 0.0, outh, outb],
                                  [ 0.0, 0.0, 1.0]],
                                 np.float_)
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformToMaxOnly(BboxTransformTo):
    """
    :class:`BboxTransformToMaxOnly` is a transformation that linearly
    transforms points from the unit bounding box to a given
    :class:`Bbox` with a fixed upper left of (0, 0).
    """
    def __repr__(self):
        return "BboxTransformToMaxOnly(%s)" % (self._boxout)
    __str__ = __repr__
    def get_matrix(self):
        if self._invalid:
            xmax, ymax = self._boxout.max
            if DEBUG and (xmax == 0 or ymax == 0):
                raise ValueError("Transforming to a singular bounding box.")
            # Pure scaling: no translation terms, so the origin stays
            # pinned at (0, 0).
            self._mtx = np.array([[xmax, 0.0, 0.0],
                                  [ 0.0, ymax, 0.0],
                                  [ 0.0, 0.0, 1.0]],
                                 np.float_)
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformFrom(Affine2DBase):
    """
    :class:`BboxTransformFrom` linearly transforms points from a given
    :class:`Bbox` to the unit bounding box.
    """
    is_separable = True
    def __init__(self, boxin):
        # Inverse companion to BboxTransformTo: maps *boxin* onto the
        # unit square.
        assert boxin.is_bbox
        Affine2DBase.__init__(self)
        self._boxin = boxin
        self.set_children(boxin)
        # Matrix (and inverse) computed lazily by get_matrix().
        self._mtx = None
        self._inverted = None
    def __repr__(self):
        return "BboxTransformFrom(%s)" % (self._boxin)
    __str__ = __repr__
    def get_matrix(self):
        if self._invalid:
            inl, inb, inw, inh = self._boxin.bounds
            if DEBUG and (inw == 0 or inh == 0):
                raise ValueError("Transforming from a singular bounding box.")
            x_scale = 1.0 / inw
            y_scale = 1.0 / inh
            self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale)],
                                  [0.0 , y_scale, (-inb*y_scale)],
                                  [0.0 , 0.0 , 1.0 ]],
                                 np.float_)
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class ScaledTranslation(Affine2DBase):
    """
    A transformation that translates by *xt* and *yt*, after *xt* and *yt*
    have been transformed by the given transform *scale_trans*.
    """
    def __init__(self, xt, yt, scale_trans):
        Affine2DBase.__init__(self)
        # Store the raw offset; scale_trans is applied lazily in
        # get_matrix so later changes to it are picked up.
        self._t = (xt, yt)
        self._scale_trans = scale_trans
        self.set_children(scale_trans)
        self._mtx = None
        self._inverted = None
    def __repr__(self):
        return "ScaledTranslation(%s)" % (self._t,)
    __str__ = __repr__
    def get_matrix(self):
        if self._invalid:
            # Pure translation matrix built from the transformed offset.
            xt, yt = self._scale_trans.transform_point(self._t)
            self._mtx = np.array([[1.0, 0.0, xt],
                                  [0.0, 1.0, yt],
                                  [0.0, 0.0, 1.0]],
                                 np.float_)
            self._invalid = 0
            self._inverted = None
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class TransformedPath(TransformNode):
    """
    A :class:`TransformedPath` caches a non-affine transformed copy of
    the :class:`~matplotlib.path.Path`. This cached copy is
    automatically updated when the non-affine part of the transform
    changes.
    """
    def __init__(self, path, transform):
        """
        Create a new :class:`TransformedPath` from the given
        :class:`~matplotlib.path.Path` and :class:`Transform`.
        """
        assert isinstance(transform, Transform)
        TransformNode.__init__(self)
        self._path = path
        self._transform = transform
        self.set_children(transform)
        # Caches for the non-affine-transformed path/points; filled
        # lazily by _revalidate().
        self._transformed_path = None
        self._transformed_points = None
    def _revalidate(self):
        # Recompute the cached copies when the non-affine part of the
        # transform has been invalidated, or when nothing is cached yet.
        if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
            or self._transformed_path is None):
            self._transformed_path = \
                self._transform.transform_path_non_affine(self._path)
            self._transformed_points = \
                Path(self._transform.transform_non_affine(self._path.vertices),
                     None, self._path._interpolation_steps)
        self._invalid = 0
    def get_transformed_points_and_affine(self):
        """
        Return a copy of the child path, with the non-affine part of
        the transform already applied, along with the affine part of
        the path necessary to complete the transformation. Unlike
        :meth:`get_transformed_path_and_affine`, no interpolation will
        be performed.
        """
        self._revalidate()
        return self._transformed_points, self.get_affine()
    def get_transformed_path_and_affine(self):
        """
        Return a copy of the child path, with the non-affine part of
        the transform already applied, along with the affine part of
        the path necessary to complete the transformation.
        """
        self._revalidate()
        return self._transformed_path, self.get_affine()
    def get_fully_transformed_path(self):
        """
        Return a fully-transformed copy of the child path.
        """
        # NOTE(review): duplicates the caching logic of _revalidate()
        # but does not refresh _transformed_points.
        if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
            or self._transformed_path is None):
            self._transformed_path = \
                self._transform.transform_path_non_affine(self._path)
        self._invalid = 0
        return self._transform.transform_path_affine(self._transformed_path)
    def get_affine(self):
        return self._transform.get_affine()
def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):
    """
    Ensure the endpoints of a range are finite and not too close together.

    An interval counts as "too close" when its length is at most *tiny*
    times the larger endpoint magnitude; each endpoint is then pushed
    outward by *expander* (relative to its own magnitude, or absolutely
    when both endpoints are zero).

    If *increasing* is True and vmin > vmax, the endpoints are swapped,
    regardless of whether they are too close.

    If either endpoint is inf, -inf or nan, ``(-expander, expander)``
    is returned.
    """
    if not (np.isfinite(vmin) and np.isfinite(vmax)):
        return -expander, expander
    swapped = vmax < vmin
    if swapped:
        vmin, vmax = vmax, vmin
    if vmax - vmin <= max(abs(vmin), abs(vmax)) * tiny:
        if vmin == 0.0:
            vmin, vmax = -expander, expander
        else:
            vmin -= expander * abs(vmin)
            vmax += expander * abs(vmax)
    # Restore the original (descending) order when requested.
    if swapped and not increasing:
        vmin, vmax = vmax, vmin
    return vmin, vmax
def interval_contains(interval, val):
    """Return True if *val* lies within the closed interval (a, b),
    whose endpoints may be given in either order."""
    a, b = interval
    if a > b:
        a, b = b, a
    return a <= val <= b
def interval_contains_open(interval, val):
    """Return True if *val* lies strictly inside the open interval
    (a, b), whose endpoints may be given in either order."""
    a, b = interval
    if a > b:
        a, b = b, a
    return a < val < b
def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):
    '''
    Return a new transform with an added offset.

    args:
      trans is any transform
    kwargs:
      fig is the current figure; it can be None if units are 'dots'
      x, y give the offset
      units is 'inches', 'points' or 'dots'

    raises ValueError for an unknown *units* string, or when *fig* is
    None but required ('inches'/'points').
    '''
    # Validate *units* first: previously an invalid units string with
    # fig=None raised the misleading "fig kwarg is needed" error.
    if units not in ('dots', 'points', 'inches'):
        raise ValueError('units must be dots, points, or inches')
    if units == 'dots':
        return trans + Affine2D().translate(x, y)
    if fig is None:
        raise ValueError('For units of inches or points a fig kwarg is needed')
    if units == 'points':
        # Convert points to inches (72 points per inch).
        x /= 72.0
        y /= 72.0
    return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
| gpl-2.0 |
wangyum/mxnet | example/dec/dec.py | 24 | 7846 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import sys
import os
# code to automatically download dataset
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path = [os.path.join(curr_path, "../autoencoder")] + sys.path
import mxnet as mx
import numpy as np
import data
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import model
from autoencoder import AutoEncoderModel
from solver import Solver, Monitor
import logging
def cluster_acc(Y_pred, Y):
    """Compute clustering accuracy for predicted labels *Y_pred* against
    ground-truth labels *Y* by finding the best one-to-one mapping of
    cluster ids to class ids (Hungarian algorithm).

    Returns (accuracy, w) where w is the contingency matrix.
    """
    # sklearn.utils.linear_assignment_ was deprecated and removed;
    # scipy.optimize.linear_sum_assignment is the drop-in replacement
    # (scipy is already a dependency of this script).
    from scipy.optimize import linear_sum_assignment
    assert Y_pred.size == Y.size
    D = max(Y_pred.max(), Y.max())+1
    w = np.zeros((D,D), dtype=np.int64)
    for i in range(Y_pred.size):
        w[Y_pred[i], int(Y[i])] += 1
    # Maximize matched counts == minimize (w.max() - w) as a cost.
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    return w[row_ind, col_ind].sum()*1.0/Y_pred.size, w
class DECModel(model.MXModel):
    # Deep Embedded Clustering: a pretrained autoencoder provides the
    # embedding; cluster assignments are then refined by minimizing
    # KL(P || Q) through the custom DECLoss operator below.
    class DECLoss(mx.operator.NumpyOp):
        # Custom numpy operator computing the soft assignment Q from
        # embeddings and cluster centres, and its gradient w.r.t. both.
        def __init__(self, num_centers, alpha):
            super(DECModel.DECLoss, self).__init__(need_top_grad=False)
            self.num_centers = num_centers
            self.alpha = alpha
        def forward(self, in_data, out_data):
            z = in_data[0]
            mu = in_data[1]
            q = out_data[0]
            # Student's t-kernel similarity between embeddings z and
            # centres mu; normalized per row into the soft assignment q.
            self.mask = 1.0/(1.0+cdist(z, mu)**2/self.alpha)
            q[:] = self.mask**((self.alpha+1.0)/2.0)
            q[:] = (q.T/q.sum(axis=1)).T
        def backward(self, out_grad, in_data, out_data, in_grad):
            q = out_data[0]
            z = in_data[0]
            mu = in_data[1]
            p = in_data[2]
            dz = in_grad[0]
            dmu = in_grad[1]
            # Gradient of KL(p || q) w.r.t. embeddings and centres,
            # reusing the kernel values cached in forward().
            self.mask *= (self.alpha+1.0)/self.alpha*(p-q)
            dz[:] = (z.T*self.mask.sum(axis=1)).T - self.mask.dot(mu)
            dmu[:] = (mu.T*self.mask.sum(axis=0)).T - self.mask.T.dot(z)
        def infer_shape(self, in_shape):
            assert len(in_shape) == 3
            assert len(in_shape[0]) == 2
            input_shape = in_shape[0]
            label_shape = (input_shape[0], self.num_centers)
            mu_shape = (self.num_centers, input_shape[1])
            out_shape = (input_shape[0], self.num_centers)
            return [input_shape, mu_shape, label_shape], [out_shape]
        def list_arguments(self):
            return ['data', 'mu', 'label']
    def setup(self, X, num_centers, alpha, save_to='dec_model'):
        # Pretrain (or load) the autoencoder, then attach the DEC loss
        # on top of its encoder.
        # NOTE(review): under Python 3 this is float division, which
        # would break the slicing below; confirm the intended
        # interpreter (should be '//' for py3).
        sep = X.shape[0]*9/10
        X_train = X[:sep]
        X_val = X[sep:]
        ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,10], pt_dropout=0.2)
        if not os.path.exists(save_to+'_pt.arg'):
            ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
                                        lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
            ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
                              lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
            ae_model.save(save_to+'_pt.arg')
            logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
            logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
        else:
            ae_model.load(save_to+'_pt.arg')
        self.ae_model = ae_model
        self.dec_op = DECModel.DECLoss(num_centers, alpha)
        label = mx.sym.Variable('label')
        self.feature = self.ae_model.encoder
        self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
        self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
        self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
        self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
        # Biases get a doubled learning-rate multiplier.
        self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
        self.num_centers = num_centers
    def cluster(self, X, y=None, update_interval=None):
        # Iteratively refine cluster assignments; returns clustering
        # accuracy when ground-truth labels *y* are given, else -1.
        N = X.shape[0]
        if not update_interval:
            update_interval = N
        batch_size = 256
        test_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
                                      last_batch_handle='pad')
        args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
        z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
        # Initialize cluster centres with k-means on the embeddings.
        kmeans = KMeans(self.num_centers, n_init=20)
        kmeans.fit(z)
        args['dec_mu'][:] = kmeans.cluster_centers_
        solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)
        def ce(label, pred):
            # KL-divergence style metric between target P and softmax Q.
            return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
        solver.set_metric(mx.metric.CustomMetric(ce))
        label_buff = np.zeros((X.shape[0], self.num_centers))
        train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
                                       shuffle=False, last_batch_handle='roll_over')
        self.y_pred = np.zeros((X.shape[0]))
        def refresh(i):
            # Every update_interval iterations: recompute the target
            # distribution P and stop (return True) once assignments
            # change for fewer than 0.1% of the samples.
            if i%update_interval == 0:
                z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
                p = np.zeros((z.shape[0], self.num_centers))
                self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
                y_pred = p.argmax(axis=1)
                print(np.std(np.bincount(y_pred)), np.bincount(y_pred))
                # NOTE(review): the next line dereferences y before the
                # `if y is not None` guard below, so it crashes when
                # y is None; also np.int is removed in NumPy >= 1.24.
                print(np.std(np.bincount(y.astype(np.int))), np.bincount(y.astype(np.int)))
                if y is not None:
                    print(cluster_acc(y_pred, y)[0])
                # Sharpen q into the target distribution p (squared,
                # frequency-normalized), written into the label buffer.
                weight = 1.0/p.sum(axis=0)
                weight *= self.num_centers/weight.sum()
                p = (p**2)*weight
                train_iter.data_list[1][:] = (p.T/p.sum(axis=1)).T
                print(np.sum(y_pred != self.y_pred), 0.001*y_pred.shape[0])
                if np.sum(y_pred != self.y_pred) < 0.001*y_pred.shape[0]:
                    self.y_pred = y_pred
                    return True
                self.y_pred = y_pred
        solver.set_iter_start_callback(refresh)
        solver.set_monitor(Monitor(50))
        solver.solve(self.xpu, self.loss, args, self.args_grad, None,
                     train_iter, 0, 1000000000, {}, False)
        self.end_args = args
        if y is not None:
            return cluster_acc(self.y_pred, y)[0]
        else:
            return -1
def mnist_exp(xpu):
    # Run DEC on MNIST for a range of target-update intervals
    # (10, 20, 40, ... 2560) and report the best clustering accuracy.
    X, Y = data.get_mnist()
    dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
    acc = []
    for i in [10*(2**j) for j in range(9)]:
        acc.append(dec_model.cluster(X, Y, i))
        logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d'%(acc[-1], i))
    logging.info(str(acc))
    logging.info('Best Clustering ACC: %f at update_interval: %d'%(np.max(acc), 10*(2**np.argmax(acc))))
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Runs the MNIST experiment on GPU 0 (mx.gpu(0)).
    mnist_exp(mx.gpu(0))
| apache-2.0 |
Hiyorimi/scikit-image | doc/source/conf.py | 5 | 12320 | # -*- coding: utf-8 -*-
#
# skimage documentation build configuration file, created by
# sphinx-quickstart on Sat Aug 22 13:00:30 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import skimage
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# Make the local sphinx extensions (doc/ext) importable.
curpath = os.path.dirname(__file__)
sys.path.append(os.path.join(curpath, '..', 'ext'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.imgmath',
              'numpydoc',
              'sphinx.ext.autosummary',
              'sphinx.ext.intersphinx',
              'sphinx.ext.linkcode',
              'sphinx_gallery.gen_gallery'
              ]
autosummary_generate = True
#------------------------------------------------------------------------
# Sphinx-gallery configuration
#------------------------------------------------------------------------
sphinx_gallery_conf = {
    'doc_module' : 'skimage',
    # path to your examples scripts
    'examples_dirs' : '../examples',
    # path where to save gallery generated examples
    'gallery_dirs' : 'auto_examples',
    'mod_example_dir': 'api',
    'reference_url' : {
        'skimage': None,
        'matplotlib': 'http://matplotlib.org',
        'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
        'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',}
    }
# Determine if the matplotlib has a recent enough version of the
# plot_directive, otherwise use the local fork.
try:
    from matplotlib.sphinxext import plot_directive
except ImportError:
    use_matplotlib_plot_directive = False
else:
    # Older plot_directive modules have no __version__ attribute.
    try:
        use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
    except AttributeError:
        use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
    extensions.append('matplotlib.sphinxext.plot_directive')
else:
    # Fall back to the bundled fork in doc/ext (added to sys.path above).
    extensions.append('plot_directive')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'skimage'
copyright = '2013, the scikit-image team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Parse the version string directly out of skimage/__init__.py.
with open('../../skimage/__init__.py') as f:
    setup_lines = f.readlines()
version = 'vUndefined'
for l in setup_lines:
    if l.startswith('__version__'):
        version = l.split("'")[1]
        break
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-image'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'skimage v%s docs' % version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    '**': ['navigation.html',
           'localtoc.html',
           'versions.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikitimagedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('contents', 'scikit-image.tex', u'The scikit-image Documentation',
     u'scikit-image development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{enumitem}
\setlistdepth{100}
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
numpydoc_show_class_members = False
numpydoc_class_members_toctree = False
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_basedir = os.path.join(curpath, "plots")
plot_pre_code = """
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
import matplotlib
matplotlib.rcParams.update({
'font.size': 14,
'axes.titlesize': 12,
'axes.labelsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 10,
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
})
"""
plot_include_source = True
plot_formats = [('png', 100), ('pdf', 100)]
plot2rst_index_name = 'README'
plot2rst_rcparams = {'image.cmap' : 'gray',
'image.interpolation' : 'none'}
# -----------------------------------------------------------------------------
# intersphinx
# -----------------------------------------------------------------------------
_python_version_str = '{0.major}.{0.minor}'.format(sys.version_info)
_python_doc_base = 'http://docs.python.org/' + _python_version_str
intersphinx_mapping = {
'python': (_python_doc_base, None),
'numpy': ('http://docs.scipy.org/doc/numpy',
(None, './_intersphinx/numpy-objects.inv')),
'scipy': ('http://docs.scipy.org/doc/scipy/reference',
(None, './_intersphinx/scipy-objects.inv')),
'sklearn': ('http://scikit-learn.org/stable',
(None, './_intersphinx/sklearn-objects.inv')),
'matplotlib': ('http://matplotlib.org/',
(None, 'http://matplotlib.org/objects.inv'))
}
# ----------------------------------------------------------------------------
# Source code links
# ----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
# Function courtesy of NumPy to return URLs containing line numbers
def linkcode_resolve(domain, info):
    """
    Determine the URL corresponding to Python object

    Returns a GitHub source link (with a line anchor when available)
    for the object described by *info*, or None when it cannot be
    resolved.
    """
    if domain != 'py':
        return None
    modname = info['module']
    fullname = info['fullname']
    submod = sys.modules.get(modname)
    if submod is None:
        return None
    obj = submod
    for part in fullname.split('.'):
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.
        try:
            obj = getattr(obj, part)
        except AttributeError:
            return None
    # getsourcefile raises TypeError for builtins and other objects
    # without Python source.
    try:
        fn = inspect.getsourcefile(obj)
    except TypeError:
        fn = None
    if not fn:
        return None
    # findsource raises OSError when the source file cannot be read.
    try:
        source, lineno = inspect.findsource(obj)
    except (OSError, TypeError):
        lineno = None
    if lineno:
        linespec = "#L%d" % (lineno + 1)
    else:
        linespec = ""
    fn = relpath(fn, start=dirname(skimage.__file__))
    if 'dev' in skimage.__version__:
        return ("http://github.com/scikit-image/scikit-image/blob/"
                "master/skimage/%s%s" % (fn, linespec))
    else:
        return ("http://github.com/scikit-image/scikit-image/blob/"
                "v%s/skimage/%s%s" % (skimage.__version__, fn, linespec))
| bsd-3-clause |
ilayn/scipy | scipy/ndimage/filters.py | 12 | 55835 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections.abc import Iterable
import warnings
import numpy
import operator
from numpy.core.multiarray import normalize_axis_index
from . import _ni_support
from . import _nd_image
from . import _ni_docstrings
# Public API of this module; star-imports and the reference docs rely on it.
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
           'prewitt', 'sobel', 'generic_laplace', 'laplace',
           'gaussian_laplace', 'generic_gradient_magnitude',
           'gaussian_gradient_magnitude', 'correlate', 'convolve',
           'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
           'maximum_filter1d', 'minimum_filter', 'maximum_filter',
           'rank_filter', 'median_filter', 'percentile_filter',
           'generic_filter1d', 'generic_filter']
def _invalid_origin(origin, lenw):
return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2)
def _complex_via_real_components(func, input, weights, output, cval, **kwargs):
    """Complex convolution via a linear combination of real convolutions.

    `func` is applied to the real/imaginary components of `input` and
    `weights`, writing into the matching components of `output`, which is
    returned.  Extra keyword arguments are forwarded to `func`.
    """
    complex_input = input.dtype.kind == 'c'
    complex_weights = weights.dtype.kind == 'c'
    if complex_input and complex_weights:
        # Uses (a + bj)(c + dj) = (ac - bd) + (ad + bc)j component-wise.
        # real component of the output
        func(input.real, weights.real, output=output.real,
             cval=numpy.real(cval), **kwargs)
        output.real -= func(input.imag, weights.imag, output=None,
                            cval=numpy.imag(cval), **kwargs)
        # imaginary component of the output
        func(input.real, weights.imag, output=output.imag,
             cval=numpy.real(cval), **kwargs)
        output.imag += func(input.imag, weights.real, output=None,
                            cval=numpy.imag(cval), **kwargs)
    elif complex_input:
        # Real weights: filter the real and imaginary planes independently.
        func(input.real, weights, output=output.real, cval=numpy.real(cval),
             **kwargs)
        func(input.imag, weights, output=output.imag, cval=numpy.imag(cval),
             **kwargs)
    else:
        # Real input with complex weights: a complex pad value is undefined.
        if numpy.iscomplexobj(cval):
            raise ValueError("Cannot provide a complex-valued cval when the "
                             "input is real.")
        func(input, weights.real, output=output.real, cval=cval, **kwargs)
        func(input, weights.imag, output=output.imag, cval=cval, **kwargs)
    return output
@_ni_docstrings.docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
                cval=0.0, origin=0):
    """Calculate a 1-D correlation along the given axis.
    The lines of the array along the given axis are correlated with the
    given weights.
    Parameters
    ----------
    %(input)s
    weights : array
        1-D sequence of numbers.
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s
    Examples
    --------
    >>> from scipy.ndimage import correlate1d
    >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
    array([ 8, 26, 8, 12, 7, 28, 36, 9])
    """
    input = numpy.asarray(input)
    weights = numpy.asarray(weights)
    complex_input = input.dtype.kind == 'c'
    complex_weights = weights.dtype.kind == 'c'
    if complex_input or complex_weights:
        # Complex case is decomposed into real correlations of components.
        if complex_weights:
            # As for numpy.correlate: conjugate the weights, not the input.
            weights = weights.conj()
            weights = weights.astype(numpy.complex128, copy=False)
        kwargs = dict(axis=axis, mode=mode, origin=origin)
        output = _ni_support._get_output(output, input, complex_output=True)
        return _complex_via_real_components(correlate1d, input, weights,
                                            output, cval, **kwargs)
    output = _ni_support._get_output(output, input)
    weights = numpy.asarray(weights, dtype=numpy.float64)
    if weights.ndim != 1 or weights.shape[0] < 1:
        raise RuntimeError('no filter weights given')
    if not weights.flags.contiguous:
        # the C kernel requires contiguous weights
        weights = weights.copy()
    axis = normalize_axis_index(axis, input.ndim)
    if _invalid_origin(origin, len(weights)):
        raise ValueError('Invalid origin; origin must satisfy '
                         '-(len(weights) // 2) <= origin <= '
                         '(len(weights)-1) // 2')
    mode = _ni_support._extend_mode_to_code(mode)
    # heavy lifting happens in the C extension; result written into `output`
    _nd_image.correlate1d(input, weights, axis, output, mode, cval,
                          origin)
    return output
@_ni_docstrings.docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
               cval=0.0, origin=0):
    """Calculate a 1-D convolution along the given axis.
    The lines of the array along the given axis are convolved with the
    given weights.
    Parameters
    ----------
    %(input)s
    weights : ndarray
        1-D sequence of numbers.
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s
    Returns
    -------
    convolve1d : ndarray
        Convolved array with same shape as input
    Examples
    --------
    >>> from scipy.ndimage import convolve1d
    >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
    array([14, 24, 4, 13, 12, 36, 27, 0])
    """
    # Convolution is correlation with a reversed kernel and a mirrored origin.
    weights = numpy.asarray(weights)[::-1]
    origin = -origin
    if len(weights) % 2 == 0:
        # even-length kernels need an extra shift after the reversal
        origin -= 1
    if weights.dtype.kind == 'c':
        # pre-conjugate here to counteract the conjugation in correlate1d
        weights = weights.conj()
    return correlate1d(input, weights, axis, output, mode, cval, origin)
def _gaussian_kernel1d(sigma, order, radius):
"""
Computes a 1-D Gaussian convolution kernel.
"""
if order < 0:
raise ValueError('order must be non-negative')
exponent_range = numpy.arange(order + 1)
sigma2 = sigma * sigma
x = numpy.arange(-radius, radius+1)
phi_x = numpy.exp(-0.5 / sigma2 * x ** 2)
phi_x = phi_x / phi_x.sum()
if order == 0:
return phi_x
else:
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
# p'(x) = -1 / sigma ** 2
# Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
# coefficients of q(x)
q = numpy.zeros(order + 1)
q[0] = 1
D = numpy.diag(exponent_range[1:], 1) # D @ q(x) = q'(x)
P = numpy.diag(numpy.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
Q_deriv = D + P
for _ in range(order):
q = Q_deriv.dot(q)
q = (x[:, None] ** exponent_range).dot(q)
return q * phi_x
@_ni_docstrings.docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
                      mode="reflect", cval=0.0, truncate=4.0):
    """1-D Gaussian filter.
    Parameters
    ----------
    %(input)s
    sigma : scalar
        standard deviation for Gaussian kernel
    %(axis)s
    order : int, optional
        An order of 0 corresponds to convolution with a Gaussian
        kernel. A positive order corresponds to convolution with
        that derivative of a Gaussian.
    %(output)s
    %(mode_reflect)s
    %(cval)s
    truncate : float, optional
        Truncate the filter at this many standard deviations.
        Default is 4.0.
    Returns
    -------
    gaussian_filter1d : ndarray
    Examples
    --------
    >>> from scipy.ndimage import gaussian_filter1d
    >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
    array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905])
    >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
    array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657])
    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()
    >>> x = rng.standard_normal(101).cumsum()
    >>> y3 = gaussian_filter1d(x, 3)
    >>> y6 = gaussian_filter1d(x, 6)
    >>> plt.plot(x, 'k', label='original data')
    >>> plt.plot(y3, '--', label='filtered, sigma=3')
    >>> plt.plot(y6, ':', label='filtered, sigma=6')
    >>> plt.legend()
    >>> plt.grid()
    >>> plt.show()
    """
    # kernel radius: `truncate` standard deviations, rounded to nearest int
    radius = int(truncate * float(sigma) + 0.5)
    # correlate is used rather than convolve, so flip the kernel up front
    weights = _gaussian_kernel1d(sigma, order, radius)[::-1]
    return correlate1d(input, weights, axis, output, mode, cval, 0)
@_ni_docstrings.docfiller
def gaussian_filter(input, sigma, order=0, output=None,
                    mode="reflect", cval=0.0, truncate=4.0):
    """Multidimensional Gaussian filter.
    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        Standard deviation for Gaussian kernel. The standard
        deviations of the Gaussian filter are given for each axis as a
        sequence, or as a single number, in which case it is equal for
        all axes.
    order : int or sequence of ints, optional
        The order of the filter along each axis is given as a sequence
        of integers, or as a single number. An order of 0 corresponds
        to convolution with a Gaussian kernel. A positive order
        corresponds to convolution with that derivative of a Gaussian.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    truncate : float
        Truncate the filter at this many standard deviations.
        Default is 4.0.
    Returns
    -------
    gaussian_filter : ndarray
        Returned array of same shape as `input`.
    Notes
    -----
    The multidimensional filter is implemented as a sequence of
    1-D convolution filters. The intermediate arrays are
    stored in the same data type as the output. Therefore, for output
    types with a limited precision, the results may be imprecise
    because intermediate results may be stored with insufficient
    precision.
    Examples
    --------
    >>> from scipy.ndimage import gaussian_filter
    >>> a = np.arange(50, step=2).reshape((5,5))
    >>> a
    array([[ 0, 2, 4, 6, 8],
    [10, 12, 14, 16, 18],
    [20, 22, 24, 26, 28],
    [30, 32, 34, 36, 38],
    [40, 42, 44, 46, 48]])
    >>> gaussian_filter(a, sigma=1)
    array([[ 4, 6, 8, 9, 11],
    [10, 12, 14, 15, 17],
    [20, 22, 24, 25, 27],
    [29, 31, 33, 34, 36],
    [35, 37, 39, 40, 42]])
    >>> from scipy import misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = gaussian_filter(ascent, sigma=5)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = numpy.asarray(input)
    output = _ni_support._get_output(output, input)
    # broadcast scalar parameters to one value per axis
    orders = _ni_support._normalize_sequence(order, input.ndim)
    sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    axes = list(range(input.ndim))
    # only filter along axes whose sigma is non-negligible
    axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii])
            for ii in range(len(axes)) if sigmas[ii] > 1e-15]
    if len(axes) > 0:
        for axis, sigma, order, mode in axes:
            gaussian_filter1d(input, sigma, axis, order, output,
                              mode, cval, truncate)
            # after the first pass, keep filtering the partial result
            input = output
    else:
        # nothing to filter: plain copy
        output[...] = input[...]
    return output
@_ni_docstrings.docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Prewitt filter.
    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.prewitt(ascent)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = numpy.asarray(input)
    axis = normalize_axis_index(axis, input.ndim)
    output = _ni_support._get_output(output, input)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    # central difference along the requested axis ...
    correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
    # ... followed by uniform smoothing along every remaining axis
    for smooth_axis in range(input.ndim):
        if smooth_axis == axis:
            continue
        correlate1d(output, [1, 1, 1], smooth_axis, output,
                    modes[smooth_axis], cval, 0)
    return output
@_ni_docstrings.docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
    """Calculate a Sobel filter.
    Parameters
    ----------
    %(input)s
    %(axis)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.sobel(ascent)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = numpy.asarray(input)
    axis = normalize_axis_index(axis, input.ndim)
    output = _ni_support._get_output(output, input)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    # central difference along the requested axis ...
    correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
    # ... followed by triangular [1, 2, 1] smoothing along the other axes
    for smooth_axis in range(input.ndim):
        if smooth_axis == axis:
            continue
        correlate1d(output, [1, 2, 1], smooth_axis, output,
                    modes[smooth_axis], cval, 0)
    return output
@_ni_docstrings.docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
                    cval=0.0,
                    extra_arguments=(),
                    extra_keywords=None):
    """
    N-D Laplace filter using a provided second derivative function.
    Parameters
    ----------
    %(input)s
    derivative2 : callable
        Callable with the following signature::
            derivative2(input, axis, output, mode, cval,
                        *extra_arguments, **extra_keywords)
        See `extra_arguments`, `extra_keywords` below.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s
    """
    if extra_keywords is None:
        extra_keywords = {}
    input = numpy.asarray(input)
    output = _ni_support._get_output(output, input)
    axes = list(range(input.ndim))
    if len(axes) > 0:
        modes = _ni_support._normalize_sequence(mode, len(axes))
        # first axis writes directly into `output` ...
        derivative2(input, axes[0], output, modes[0], cval,
                    *extra_arguments, **extra_keywords)
        for ii in range(1, len(axes)):
            # ... remaining axes get a freshly-allocated result (note that a
            # dtype, not an array, is passed as `output`) and are accumulated
            tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
                              *extra_arguments, **extra_keywords)
            output += tmp
    else:
        # zero-dimensional input: nothing to differentiate, just copy
        output[...] = input[...]
    return output
@_ni_docstrings.docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
    """N-D Laplace filter based on approximate second derivatives.
    Parameters
    ----------
    %(input)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.laplace(ascent)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    def second_difference(input, axis, output, mode, cval):
        # the [1, -2, 1] stencil is the discrete second derivative
        return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
    return generic_laplace(input, second_difference, output, mode, cval)
@_ni_docstrings.docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
                     cval=0.0, **kwargs):
    """Multidimensional Laplace filter using Gaussian second derivatives.
    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter are given for
        each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    Extra keyword arguments will be passed to gaussian_filter().
    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> ascent = misc.ascent()
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> result = ndimage.gaussian_laplace(ascent, sigma=1)
    >>> ax1.imshow(result)
    >>> result = ndimage.gaussian_laplace(ascent, sigma=3)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = numpy.asarray(input)
    def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
        # second derivative of the Gaussian along `axis` (order 2 there,
        # order 0 — plain smoothing — along every other axis)
        order = [0] * input.ndim
        order[axis] = 2
        return gaussian_filter(input, sigma, order, output, mode, cval,
                               **kwargs)
    return generic_laplace(input, derivative2, output, mode, cval,
                           extra_arguments=(sigma,),
                           extra_keywords=kwargs)
@_ni_docstrings.docfiller
def generic_gradient_magnitude(input, derivative, output=None,
                               mode="reflect", cval=0.0,
                               extra_arguments=(), extra_keywords=None):
    """Gradient magnitude using a provided gradient function.
    Parameters
    ----------
    %(input)s
    derivative : callable
        Callable with the following signature::
            derivative(input, axis, output, mode, cval,
                       *extra_arguments, **extra_keywords)
        See `extra_arguments`, `extra_keywords` below.
        `derivative` can assume that `input` and `output` are ndarrays.
        Note that the output from `derivative` is modified inplace;
        be careful to copy important inputs before returning them.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(extra_keywords)s
    %(extra_arguments)s
    """
    if extra_keywords is None:
        extra_keywords = {}
    input = numpy.asarray(input)
    output = _ni_support._get_output(output, input)
    axes = list(range(input.ndim))
    if len(axes) > 0:
        modes = _ni_support._normalize_sequence(mode, len(axes))
        # first axis derivative goes straight into `output` ...
        derivative(input, axes[0], output, modes[0], cval,
                   *extra_arguments, **extra_keywords)
        # ... and is squared in place
        numpy.multiply(output, output, output)
        for ii in range(1, len(axes)):
            # remaining axes get a fresh array (a dtype is passed as
            # `output`), squared in place and accumulated
            tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
                             *extra_arguments, **extra_keywords)
            numpy.multiply(tmp, tmp, tmp)
            output += tmp
        # This allows the sqrt to work with a different default casting
        numpy.sqrt(output, output, casting='unsafe')
    else:
        # zero-dimensional input: nothing to differentiate, just copy
        output[...] = input[...]
    return output
@_ni_docstrings.docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
                                mode="reflect", cval=0.0, **kwargs):
    """Multidimensional gradient magnitude using Gaussian derivatives.
    Parameters
    ----------
    %(input)s
    sigma : scalar or sequence of scalars
        The standard deviations of the Gaussian filter are given for
        each axis as a sequence, or as a single number, in which case
        it is equal for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    Extra keyword arguments will be passed to gaussian_filter().
    Returns
    -------
    gaussian_gradient_magnitude : ndarray
        Filtered array. Has the same shape as `input`.
    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = numpy.asarray(input)
    def derivative(input, axis, output, mode, cval, sigma, **kwargs):
        # first derivative of the Gaussian along `axis` (order 1 there,
        # plain smoothing along every other axis)
        order = [0] * input.ndim
        order[axis] = 1
        return gaussian_filter(input, sigma, order, output, mode,
                               cval, **kwargs)
    return generic_gradient_magnitude(input, derivative, output, mode,
                                      cval, extra_arguments=(sigma,),
                                      extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
                           convolution):
    """Shared implementation of `correlate` and `convolve`."""
    input = numpy.asarray(input)
    weights = numpy.asarray(weights)
    complex_input = input.dtype.kind == 'c'
    complex_weights = weights.dtype.kind == 'c'
    if complex_input or complex_weights:
        # Complex case is decomposed into real correlations of components.
        if complex_weights and not convolution:
            # As for numpy.correlate, conjugate weights rather than input.
            weights = weights.conj()
        kwargs = dict(
            mode=mode, origin=origin, convolution=convolution
        )
        output = _ni_support._get_output(output, input, complex_output=True)
        return _complex_via_real_components(_correlate_or_convolve, input,
                                            weights, output, cval, **kwargs)
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    weights = numpy.asarray(weights, dtype=numpy.float64)
    wshape = [ii for ii in weights.shape if ii > 0]
    if len(wshape) != input.ndim:
        raise RuntimeError('filter weights array has incorrect shape.')
    if convolution:
        # convolution = correlation with a flipped kernel / mirrored origins
        weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
        for ii in range(len(origins)):
            origins[ii] = -origins[ii]
            if not weights.shape[ii] & 1:
                # even-sized kernel axes need an extra shift after flipping
                origins[ii] -= 1
    for origin, lenw in zip(origins, wshape):
        if _invalid_origin(origin, lenw):
            raise ValueError('Invalid origin; origin must satisfy '
                             '-(weights.shape[k] // 2) <= origin[k] <= '
                             '(weights.shape[k]-1) // 2')
    if not weights.flags.contiguous:
        # the C kernel requires contiguous weights
        weights = weights.copy()
    output = _ni_support._get_output(output, input)
    temp_needed = numpy.may_share_memory(input, output)
    if temp_needed:
        # input and output arrays cannot share memory
        temp = output
        output = _ni_support._get_output(output.dtype, input)
    if not isinstance(mode, str) and isinstance(mode, Iterable):
        raise RuntimeError("A sequence of modes is not supported")
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.correlate(input, weights, output, mode, cval, origins)
    if temp_needed:
        # copy the result back into the caller-provided array
        temp[...] = output
        output = temp
    return output
@_ni_docstrings.docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
              origin=0):
    """
    Multidimensional correlation.
    The array is correlated with the given kernel.
    Parameters
    ----------
    %(input)s
    weights : ndarray
        array of weights, same number of dimensions as input
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s
    Returns
    -------
    result : ndarray
        The result of correlation of `input` with `weights`.
    See Also
    --------
    convolve : Convolve an image with a kernel.
    Examples
    --------
    Correlation is the process of moving a filter mask often referred to
    as kernel over the image and computing the sum of products at each location.
    >>> from scipy.ndimage import correlate
    >>> input_img = np.arange(25).reshape(5,5)
    >>> print(input_img)
    [[ 0 1 2 3 4]
    [ 5 6 7 8 9]
    [10 11 12 13 14]
    [15 16 17 18 19]
    [20 21 22 23 24]]
    Define a kernel (weights) for correlation. In this example, it is for sum of
    center and up, down, left and right next elements.
    >>> weights = [[0, 1, 0],
    ...            [1, 1, 1],
    ...            [0, 1, 0]]
    We can calculate a correlation result:
    For example, element ``[2,2]`` is ``7 + 11 + 12 + 13 + 17 = 60``.
    >>> correlate(input_img, weights)
    array([[ 6, 10, 15, 20, 24],
    [ 26, 30, 35, 40, 44],
    [ 51, 55, 60, 65, 69],
    [ 76, 80, 85, 90, 94],
    [ 96, 100, 105, 110, 114]])
    """
    # delegate to the shared implementation; convolution=False
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, False)
@_ni_docstrings.docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
             origin=0):
    """
    Multidimensional convolution.
    The array is convolved with the given kernel.
    Parameters
    ----------
    %(input)s
    weights : array_like
        Array of weights, same number of dimensions as input
    %(output)s
    %(mode_reflect)s
    cval : scalar, optional
        Value to fill past edges of input if `mode` is 'constant'. Default
        is 0.0
    %(origin_multiple)s
    Returns
    -------
    result : ndarray
        The result of convolution of `input` with `weights`.
    See Also
    --------
    correlate : Correlate an image with a kernel.
    Notes
    -----
    Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
    W is the `weights` kernel,
    j is the N-D spatial index over :math:`W`,
    I is the `input` and k is the coordinate of the center of
    W, specified by `origin` in the input parameters.
    Examples
    --------
    Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
    because in this case borders (i.e., where the `weights` kernel, centered
    on any one value, extends beyond an edge of `input`) are treated as zeros.
    >>> a = np.array([[1, 2, 0, 0],
    ...               [5, 3, 0, 4],
    ...               [0, 0, 0, 7],
    ...               [9, 3, 0, 0]])
    >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
    >>> from scipy import ndimage
    >>> ndimage.convolve(a, k, mode='constant', cval=0.0)
    array([[11, 10, 7, 4],
    [10, 3, 11, 11],
    [15, 12, 14, 7],
    [12, 3, 7, 0]])
    Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
    with 1.0's (and then extracting only the original region of the result).
    >>> ndimage.convolve(a, k, mode='constant', cval=1.0)
    array([[13, 11, 8, 7],
    [11, 3, 11, 14],
    [16, 12, 14, 10],
    [15, 6, 10, 5]])
    With ``mode='reflect'`` (the default), outer values are reflected at the
    edge of `input` to fill in missing values.
    >>> b = np.array([[2, 0, 0],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
    >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
    >>> ndimage.convolve(b, k, mode='reflect')
    array([[5, 0, 0],
    [3, 0, 0],
    [1, 0, 0]])
    This includes diagonally at the corners.
    >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
    >>> ndimage.convolve(b, k)
    array([[4, 2, 0],
    [3, 2, 0],
    [1, 1, 0]])
    With ``mode='nearest'``, the single nearest value in to an edge in
    `input` is repeated as many times as needed to match the overlapping
    `weights`.
    >>> c = np.array([[2, 0, 1],
    ...               [1, 0, 0],
    ...               [0, 0, 0]])
    >>> k = np.array([[0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0],
    ...               [0, 1, 0]])
    >>> ndimage.convolve(c, k, mode='nearest')
    array([[7, 0, 3],
    [5, 0, 2],
    [3, 0, 1]])
    """
    # delegate to the shared implementation; convolution=True flips weights
    return _correlate_or_convolve(input, weights, output, mode, cval,
                                  origin, True)
@_ni_docstrings.docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D uniform filter along the given axis.
    The lines of the array along the given axis are filtered with a
    uniform filter of given size.
    Parameters
    ----------
    %(input)s
    size : int
        length of uniform filter
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s
    Examples
    --------
    >>> from scipy.ndimage import uniform_filter1d
    >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([4, 3, 4, 1, 4, 6, 6, 3])
    """
    input = numpy.asarray(input)
    axis = normalize_axis_index(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    complex_output = input.dtype.kind == 'c'
    output = _ni_support._get_output(output, input,
                                     complex_output=complex_output)
    # the (shifted) window center must fall inside the window
    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    if not complex_output:
        _nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
                                   origin)
    else:
        # complex data: filter real and imaginary planes independently
        _nd_image.uniform_filter1d(input.real, size, axis, output.real, mode,
                                   numpy.real(cval), origin)
        _nd_image.uniform_filter1d(input.imag, size, axis, output.imag, mode,
                                   numpy.imag(cval), origin)
    return output
@_ni_docstrings.docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
                   cval=0.0, origin=0):
    """Multidimensional uniform filter.
    Parameters
    ----------
    %(input)s
    size : int or sequence of ints, optional
        The sizes of the uniform filter are given for each axis as a
        sequence, or as a single number, in which case the size is
        equal for all axes.
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s
    Returns
    -------
    uniform_filter : ndarray
        Filtered array. Has the same shape as `input`.
    Notes
    -----
    The multidimensional filter is implemented as a sequence of
    1-D uniform filters. The intermediate arrays are stored
    in the same data type as the output. Therefore, for output types
    with a limited precision, the results may be imprecise because
    intermediate results may be stored with insufficient precision.
    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.uniform_filter(ascent, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    input = numpy.asarray(input)
    output = _ni_support._get_output(output, input,
                                     complex_output=input.dtype.kind == 'c')
    # broadcast scalar parameters to one value per axis
    sizes = _ni_support._normalize_sequence(size, input.ndim)
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    modes = _ni_support._normalize_sequence(mode, input.ndim)
    axes = list(range(input.ndim))
    # skip axes with window length 1 (identity filter)
    axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
            for ii in range(len(axes)) if sizes[ii] > 1]
    if len(axes) > 0:
        for axis, size, origin, mode in axes:
            uniform_filter1d(input, int(size), axis, output, mode,
                             cval, origin)
            # after the first pass, keep filtering the partial result
            input = output
    else:
        # nothing to filter: plain copy
        output[...] = input[...]
    return output
@_ni_docstrings.docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D minimum filter along the given axis.
    The lines of the array along the given axis are filtered with a
    minimum filter of given size.
    Parameters
    ----------
    %(input)s
    size : int
        length along which to calculate 1D minimum
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s
    Notes
    -----
    This function implements the MINLIST algorithm [1]_, as described by
    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
    the `input` length, regardless of filter size.
    References
    ----------
    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
    Examples
    --------
    >>> from scipy.ndimage import minimum_filter1d
    >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([2, 0, 0, 0, 1, 1, 0, 0])
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        # minimum is not defined for complex values
        raise TypeError('Complex type not supported')
    axis = normalize_axis_index(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output = _ni_support._get_output(output, input)
    # the (shifted) window center must fall inside the window
    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    # last flag selects min (1) vs max (0) in the shared C kernel;
    # cf. maximum_filter1d below
    _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
                                  origin, 1)
    return output
@_ni_docstrings.docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
                     mode="reflect", cval=0.0, origin=0):
    """Calculate a 1-D maximum filter along the given axis.
    The lines of the array along the given axis are filtered with a
    maximum filter of given size.
    Parameters
    ----------
    %(input)s
    size : int
        Length along which to calculate the 1-D maximum.
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s
    Returns
    -------
    maximum1d : ndarray, None
        Maximum-filtered array with same shape as input.
        None if `output` is not None
    Notes
    -----
    This function implements the MAXLIST algorithm [1]_, as described by
    Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
    the `input` length, regardless of filter size.
    References
    ----------
    .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
    .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
    Examples
    --------
    >>> from scipy.ndimage import maximum_filter1d
    >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
    array([8, 8, 8, 4, 9, 9, 9, 9])
    """
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        # maximum is not defined for complex values
        raise TypeError('Complex type not supported')
    axis = normalize_axis_index(axis, input.ndim)
    if size < 1:
        raise RuntimeError('incorrect filter size')
    output = _ni_support._get_output(output, input)
    # the (shifted) window center must fall inside the window
    if (size // 2 + origin < 0) or (size // 2 + origin >= size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    # last flag selects min (1) vs max (0) in the shared C kernel;
    # cf. minimum_filter1d above
    _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
                                  origin, 0)
    return output
def _min_or_max_filter(input, size, footprint, structure, output, mode,
                       cval, origin, minimum):
    # Shared implementation behind the public minimum/maximum filters.
    # ``minimum`` selects the operation: truthy -> minimum, falsy -> maximum.
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
    if structure is None:
        if footprint is None:
            if size is None:
                raise RuntimeError("no footprint provided")
            separable = True
        else:
            footprint = numpy.asarray(footprint, dtype=bool)
            if not footprint.any():
                raise ValueError("All-zero footprint is not supported.")
            if footprint.all():
                # A fully-True rectangular footprint is equivalent to a
                # size-only filter, which can run as separable 1-D passes.
                size = footprint.shape
                footprint = None
                separable = True
            else:
                separable = False
    else:
        # A structuring element forces the general (non-separable) path.
        structure = numpy.asarray(structure, dtype=numpy.float64)
        separable = False
        if footprint is None:
            footprint = numpy.ones(structure.shape, bool)
        else:
            footprint = numpy.asarray(footprint, dtype=bool)
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output = _ni_support._get_output(output, input)
    temp_needed = numpy.may_share_memory(input, output)
    if temp_needed:
        # input and output arrays cannot share memory
        temp = output
        output = _ni_support._get_output(output.dtype, input)
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    if separable:
        sizes = _ni_support._normalize_sequence(size, input.ndim)
        modes = _ni_support._normalize_sequence(mode, input.ndim)
        axes = list(range(input.ndim))
        # Keep only axes with an effective filter length > 1.
        axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
                for ii in range(len(axes)) if sizes[ii] > 1]
        if minimum:
            filter_ = minimum_filter1d
        else:
            filter_ = maximum_filter1d
        if len(axes) > 0:
            # Apply the 1-D filter along each axis in turn, feeding the
            # result of one pass into the next.
            for axis, size, origin, mode in axes:
                filter_(input, int(size), axis, output, mode, cval, origin)
                input = output
        else:
            # All sizes were 1: the filter is the identity.
            output[...] = input[...]
    else:
        fshape = [ii for ii in footprint.shape if ii > 0]
        if len(fshape) != input.ndim:
            raise RuntimeError('footprint array has incorrect shape.')
        for origin, lenf in zip(origins, fshape):
            if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
                raise ValueError('invalid origin')
        if not footprint.flags.contiguous:
            footprint = footprint.copy()
        if structure is not None:
            if len(structure.shape) != input.ndim:
                raise RuntimeError('structure array has incorrect shape')
            if not structure.flags.contiguous:
                structure = structure.copy()
        if not isinstance(mode, str) and isinstance(mode, Iterable):
            raise RuntimeError(
                "A sequence of modes is not supported for non-separable "
                "footprints")
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.min_or_max_filter(input, footprint, structure, output,
                                    mode, cval, origins, minimum)
    if temp_needed:
        # Copy the result back into the caller-supplied output array.
        temp[...] = output
        output = temp
    return output
@_ni_docstrings.docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0):
    """Calculate a multidimensional minimum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    minimum_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    Passing a sequence of modes (one per axis) is only possible when the
    footprint is separable; otherwise a single mode string is required.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.minimum_filter(ascent, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # Delegate to the shared min/max implementation; the trailing ``1``
    # selects the minimum operation.
    return _min_or_max_filter(input, size, footprint, None, output,
                              mode, cval, origin, 1)
@_ni_docstrings.docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
                   mode="reflect", cval=0.0, origin=0):
    """Calculate a multidimensional maximum filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_multiple)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    maximum_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    Passing a sequence of modes (one per axis) is only possible when the
    footprint is separable; otherwise a single mode string is required.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.maximum_filter(ascent, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # Delegate to the shared min/max implementation; the trailing ``0``
    # selects the maximum operation.
    return _min_or_max_filter(input, size, footprint, None, output,
                              mode, cval, origin, 0)
@_ni_docstrings.docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
                 mode="reflect", cval=0.0, origin=0, operation='rank'):
    # Shared implementation behind rank_filter, median_filter, and
    # percentile_filter.  ``operation`` controls how ``rank`` is
    # interpreted: used directly for 'rank', recomputed as the middle
    # element for 'median', and converted from a percentage for
    # 'percentile'.
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    if footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = _ni_support._normalize_sequence(size, input.ndim)
        # A size-only specification means a full rectangular footprint.
        footprint = numpy.ones(sizes, dtype=bool)
    else:
        footprint = numpy.asarray(footprint, dtype=bool)
    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.ndim:
        raise RuntimeError('filter footprint array has incorrect shape.')
    for origin, lenf in zip(origins, fshape):
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
            raise ValueError('invalid origin')
    if not footprint.flags.contiguous:
        footprint = footprint.copy()
    # Number of active (True) footprint elements; valid ranks index the
    # sorted values inside the footprint, i.e. 0 .. filter_size - 1.
    filter_size = numpy.where(footprint, 1, 0).sum()
    if operation == 'median':
        rank = filter_size // 2
    elif operation == 'percentile':
        percentile = rank
        if percentile < 0.0:
            # Negative percentiles count back from 100.
            percentile += 100.0
        if percentile < 0 or percentile > 100:
            raise RuntimeError('invalid percentile')
        if percentile == 100.0:
            rank = filter_size - 1
        else:
            rank = int(float(filter_size) * percentile / 100.0)
    if rank < 0:
        # Negative ranks count back from the largest element.
        rank += filter_size
    if rank < 0 or rank >= filter_size:
        raise RuntimeError('rank not within filter footprint size')
    if rank == 0:
        # Rank 0 is the minimum; delegate to the minimum filter.
        return minimum_filter(input, None, footprint, output, mode, cval,
                              origins)
    elif rank == filter_size - 1:
        # The top rank is the maximum; delegate to the maximum filter.
        return maximum_filter(input, None, footprint, output, mode, cval,
                              origins)
    else:
        output = _ni_support._get_output(output, input)
        temp_needed = numpy.may_share_memory(input, output)
        if temp_needed:
            # input and output arrays cannot share memory
            temp = output
            output = _ni_support._get_output(output.dtype, input)
        if not isinstance(mode, str) and isinstance(mode, Iterable):
            raise RuntimeError(
                "A sequence of modes is not supported by non-separable rank "
                "filters")
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.rank_filter(input, rank, footprint, output, mode, cval,
                              origins)
        if temp_needed:
            # Copy the result back into the caller-supplied output array.
            temp[...] = output
            output = temp
        return output
@_ni_docstrings.docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
                mode="reflect", cval=0.0, origin=0):
    """Calculate a multidimensional rank filter.

    Parameters
    ----------
    %(input)s
    rank : int
        The rank parameter may be less than zero, i.e., rank = -1
        indicates the largest element.
    %(size_foot)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    rank_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.rank_filter(ascent, rank=42, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # Reject non-integral ranks up front with a clear TypeError.
    rank = operator.index(rank)
    return _rank_filter(input, rank, size, footprint, output, mode,
                        cval, origin, 'rank')
@_ni_docstrings.docfiller
def median_filter(input, size=None, footprint=None, output=None,
                  mode="reflect", cval=0.0, origin=0):
    """
    Calculate a multidimensional median filter.

    Parameters
    ----------
    %(input)s
    %(size_foot)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    median_filter : ndarray
        Filtered array. Has the same shape as `input`.

    See also
    --------
    scipy.signal.medfilt2d

    Notes
    -----
    For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes
    the specialised function `scipy.signal.medfilt2d` may be faster. It is
    however limited to constant mode with ``cval=0``.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.median_filter(ascent, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # The rank argument (0) is a placeholder: for operation='median' the
    # shared implementation computes the middle rank from the footprint
    # size itself.
    return _rank_filter(input, 0, size, footprint, output, mode,
                        cval, origin, 'median')
@_ni_docstrings.docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
                      output=None, mode="reflect", cval=0.0, origin=0):
    """Calculate a multidimensional percentile filter.

    Parameters
    ----------
    %(input)s
    percentile : scalar
        The percentile parameter may be less than zero, i.e.,
        percentile = -20 equals percentile = 80
    %(size_foot)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s

    Returns
    -------
    percentile_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ax1 = fig.add_subplot(121)  # left side
    >>> ax2 = fig.add_subplot(122)  # right side
    >>> ascent = misc.ascent()
    >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result)
    >>> plt.show()
    """
    # The shared implementation converts the percentile into a rank based
    # on the footprint size when operation='percentile'.
    return _rank_filter(input, percentile, size, footprint, output,
                        mode, cval, origin, 'percentile')
@_ni_docstrings.docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
                     output=None, mode="reflect", cval=0.0, origin=0,
                     extra_arguments=(), extra_keywords=None):
    """Calculate a 1-D filter along the given axis.

    `generic_filter1d` iterates over the lines of the array, calling the
    given function at each line. The arguments of the line are the
    input line, and the output line. The input and output lines are 1-D
    double arrays. The input line is extended appropriately according
    to the filter size and origin. The output line must be modified
    in-place with the result.

    Parameters
    ----------
    %(input)s
    function : {callable, scipy.LowLevelCallable}
        Function to apply along given axis.
    filter_size : scalar
        Length of the filter.
    %(axis)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin)s
    %(extra_arguments)s
    %(extra_keywords)s

    Returns
    -------
    generic_filter1d : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    This function also accepts low-level callback functions with one of
    the following signatures and wrapped in `scipy.LowLevelCallable`:

    .. code:: c

       int function(double *input_line, npy_intp input_length,
                    double *output_line, npy_intp output_length,
                    void *user_data)
       int function(double *input_line, intptr_t input_length,
                    double *output_line, intptr_t output_length,
                    void *user_data)

    The calling function iterates over the lines of the input and output
    arrays, calling the callback function at each line. The current line
    is extended according to the border conditions set by the calling
    function, and the result is copied into the array that is passed
    through ``input_line``. The length of the input line (after extension)
    is passed through ``input_length``. The callback function should apply
    the filter and store the result in the array passed through
    ``output_line``. The length of the output line is passed through
    ``output_length``. ``user_data`` is the data pointer provided
    to `scipy.LowLevelCallable` as-is.

    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you should
    normally set the python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function.

    In addition, some other low-level function pointer specifications
    are accepted, but these are for backward compatibility only and should
    not be used in new code.
    """
    if extra_keywords is None:
        extra_keywords = {}
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output = _ni_support._get_output(output, input)
    if filter_size < 1:
        raise RuntimeError('invalid filter size')
    axis = normalize_axis_index(axis, input.ndim)
    # The filter window must still contain its center point after being
    # shifted by ``origin``.
    if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
                                           filter_size):
        raise ValueError('invalid origin')
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.generic_filter1d(input, function, filter_size, axis, output,
                               mode, cval, origin, extra_arguments,
                               extra_keywords)
    return output
@_ni_docstrings.docfiller
def generic_filter(input, function, size=None, footprint=None,
                   output=None, mode="reflect", cval=0.0, origin=0,
                   extra_arguments=(), extra_keywords=None):
    """Calculate a multidimensional filter using the given function.

    At each element the provided function is called. The input values
    within the filter footprint at that element are passed to the function
    as a 1-D array of double values.

    Parameters
    ----------
    %(input)s
    function : {callable, scipy.LowLevelCallable}
        Function to apply at each element.
    %(size_foot)s
    %(output)s
    %(mode_reflect)s
    %(cval)s
    %(origin_multiple)s
    %(extra_arguments)s
    %(extra_keywords)s

    Returns
    -------
    generic_filter : ndarray
        Filtered array. Has the same shape as `input`.

    Notes
    -----
    This function also accepts low-level callback functions with one of
    the following signatures and wrapped in `scipy.LowLevelCallable`:

    .. code:: c

       int callback(double *buffer, npy_intp filter_size,
                    double *return_value, void *user_data)
       int callback(double *buffer, intptr_t filter_size,
                    double *return_value, void *user_data)

    The calling function iterates over the elements of the input and
    output arrays, calling the callback function at each element. The
    elements within the footprint of the filter at the current element are
    passed through the ``buffer`` parameter, and the number of elements
    within the footprint through ``filter_size``. The calculated value is
    returned in ``return_value``. ``user_data`` is the data pointer provided
    to `scipy.LowLevelCallable` as-is.

    The callback function must return an integer error status that is zero
    if something went wrong and one otherwise. If an error occurs, you should
    normally set the python error status with an informative message
    before returning, otherwise a default error message is set by the
    calling function.

    In addition, some other low-level function pointer specifications
    are accepted, but these are for backward compatibility only and should
    not be used in new code.
    """
    if (size is not None) and (footprint is not None):
        warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
    if extra_keywords is None:
        extra_keywords = {}
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    if footprint is None:
        if size is None:
            raise RuntimeError("no footprint or filter size provided")
        sizes = _ni_support._normalize_sequence(size, input.ndim)
        # A size-only specification means a full rectangular footprint.
        footprint = numpy.ones(sizes, dtype=bool)
    else:
        footprint = numpy.asarray(footprint, dtype=bool)
    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.ndim:
        raise RuntimeError('filter footprint array has incorrect shape.')
    for origin, lenf in zip(origins, fshape):
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
            raise ValueError('invalid origin')
    if not footprint.flags.contiguous:
        # The C implementation requires a contiguous footprint.
        footprint = footprint.copy()
    output = _ni_support._get_output(output, input)
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.generic_filter(input, function, footprint, output, mode,
                             cval, origins, extra_arguments, extra_keywords)
    return output
| bsd-3-clause |
jakevdp/scipy | scipy/spatial/tests/test__plotutils.py | 55 | 1567 | from __future__ import division, print_function, absolute_import
from numpy.testing import dec, assert_, assert_array_equal
# Matplotlib is an optional dependency; the tests below are skipped when
# it is unavailable.  Catch only ImportError here: the previous bare
# ``except:`` also swallowed unrelated failures (including
# KeyboardInterrupt and SystemExit), silently disabling the tests.
try:
    import matplotlib
    matplotlib.rcParams['backend'] = 'Agg'
    import matplotlib.pyplot as plt
    from matplotlib.collections import LineCollection
    has_matplotlib = True
except ImportError:
    has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
class TestPlotting:
    """Smoke tests for scipy.spatial's matplotlib plotting helpers
    (delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d).
    """

    # Corners of the unit square: a minimal non-degenerate input for the
    # Delaunay, Voronoi, and ConvexHull constructions below.
    points = [(0,0), (0,1), (1,0), (1,1)]

    @dec.skipif(not has_matplotlib, "Matplotlib not available")
    def test_delaunay(self):
        # Smoke test
        fig = plt.figure()
        obj = Delaunay(self.points)
        s_before = obj.simplices.copy()
        r = delaunay_plot_2d(obj, ax=fig.gca())
        # Plotting must not mutate the triangulation it visualizes.
        assert_array_equal(obj.simplices, s_before)  # shouldn't modify
        # The helper returns the figure that owns the axes it drew on.
        assert_(r is fig)
        delaunay_plot_2d(obj, ax=fig.gca())

    @dec.skipif(not has_matplotlib, "Matplotlib not available")
    def test_voronoi(self):
        # Smoke test
        fig = plt.figure()
        obj = Voronoi(self.points)
        r = voronoi_plot_2d(obj, ax=fig.gca())
        assert_(r is fig)
        # Also exercise the default-axes and hidden-vertices code paths.
        voronoi_plot_2d(obj)
        voronoi_plot_2d(obj, show_vertices=False)

    @dec.skipif(not has_matplotlib, "Matplotlib not available")
    def test_convex_hull(self):
        # Smoke test
        fig = plt.figure()
        tri = ConvexHull(self.points)
        r = convex_hull_plot_2d(tri, ax=fig.gca())
        assert_(r is fig)
        convex_hull_plot_2d(tri)
| bsd-3-clause |
blbarker/spark-tk | regression-tests/sparktkregtests/testcases/models/svm_2d_slope1_test.py | 10 | 3131 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Validate svm"""
import unittest
from sparktkregtests.lib import sparktk_test
class Svm2DSlope1(sparktk_test.SparkTKTestCase):
    """SVM classification tests on 2-D data split by a slope-1 hyperplane."""

    def setUp(self):
        """Build the frame needed for these tests"""
        super(Svm2DSlope1, self).setUp()
        # Both CSVs share the same schema: a binary label and two features.
        sch2 = [("Class", int),  # Class is either 1 or 0.
                ("Dim_1", float),
                ("Dim_2", float)]
        train_file = self.get_file("SVM-2F-train-50X50_1SlopePlus0.csv")
        test_file = self.get_file("SVM-2F-test-50X50_1SlopePlus0.csv")
        self.trainer = self.context.frame.import_csv(train_file,
                                                     schema=sch2)
        self.frame = self.context.frame.import_csv(test_file,
                                                   schema=sch2)

    def test_svm_model_test(self):
        """Test with train and test data generated with same hyperplane"""
        model = self.context.models.classification.svm.train(self.trainer,
                                                             ["Dim_1", "Dim_2"],
                                                             "Class")
        results = model.test(self.frame)
        # The model is expected to classify the test set perfectly, so
        # every metric should be exactly 1.0.
        self.assertEqual(1.0, results.recall)
        self.assertEqual(1.0, results.accuracy)
        self.assertEqual(1.0, results.precision)
        self.assertEqual(1.0, results.f_measure)
        # Now we verify the confusion matrix contains the expected results:
        # 95 positives, 105 negatives, and zero misclassifications.
        cf = results.confusion_matrix
        self.assertEqual(cf['Predicted_Pos']['Actual_Pos'], 95)
        self.assertEqual(cf['Predicted_Neg']['Actual_Pos'], 0)
        self.assertEqual(cf['Predicted_Pos']['Actual_Neg'], 0)
        self.assertEqual(cf['Predicted_Neg']['Actual_Neg'], 105)

    def test_svm_model_predict(self):
        """Test the predict function"""
        model = self.context.models.classification.svm.train(self.trainer,
                                                             ["Dim_1", "Dim_2"],
                                                             "Class")
        predicted_frame = model.predict(self.frame)
        outcome = predicted_frame.to_pandas()
        # Verify that values in 'predict' and 'Class' columns match.
        for index, row in outcome.iterrows():
            self.assertEqual(row["Class"], row["predicted_label"])
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
wilsonkichoi/zipline | zipline/pipeline/loaders/utils.py | 3 | 12108 | import datetime
import numpy as np
import pandas as pd
from six import iteritems
from six.moves import zip
from zipline.utils.numpy_utils import categorical_dtype, NaTns
def next_event_frame(events_by_sid,
                     dates,
                     missing_value,
                     field_dtype,
                     event_date_field_name,
                     return_field_name):
    """
    Make a DataFrame representing the simulated next known dates or values
    for an event.

    Parameters
    ----------
    events_by_sid : dict[int -> pd.DataFrame]
        Dict mapping sids to a DataFrame indexed by knowledge date (the
        date we learned of the event).  Each row carries the date the
        event will occur (`event_date_field_name`) and the associated
        value (`return_field_name`).
    dates : pd.DatetimeIndex
        The index of the returned DataFrame.
    missing_value : any
        Value to emit on dates for which no upcoming event is known.
    field_dtype : any
        The dtype of the value columns in the returned DataFrame.
    event_date_field_name : str
        The name of the date field that marks when the event occurred.
    return_field_name : str
        The name of the field whose values should be reported.

    Returns
    -------
    next_events : pd.DataFrame
        A DataFrame where each column is a security from `events_by_sid`
        holding the value of the next known event given the knowledge we
        had on the date of the index.  Entries with no known upcoming
        event hold `missing_value`.

    See Also
    --------
    previous_event_frame
    """
    date_cols = {
        equity: np.full_like(dates, NaTns) for equity in events_by_sid
    }
    value_cols = {
        equity: np.full(len(dates), missing_value, dtype=field_dtype)
        for equity in events_by_sid
    }
    raw_dates = dates.values
    for equity, df in iteritems(events_by_sid):
        # Sort the whole frame by knowledge date rather than sorting the
        # event-date column in isolation: the original code called
        # ``event_dates.sort_index()`` alone, which de-synchronized the
        # sorted event dates from ``values`` whenever the index arrived
        # unsorted.
        if not df.index.is_monotonic_increasing:
            df = df.sort_index()
        event_dates = df[event_date_field_name]
        values = df[return_field_name]
        data = date_cols[equity]
        # Iterate over the raw Series values, since we're comparing against
        # numpy arrays anyway.
        iter_date_vals = zip(event_dates.index.values, event_dates.values,
                             values)
        for knowledge_date, event_date, value in iter_date_vals:
            # Dates on which this event is already known and has not yet
            # occurred.
            date_mask = (
                (knowledge_date <= raw_dates) &
                (raw_dates <= event_date)
            )
            # Only overwrite slots that currently hold a later event date
            # (or none at all), so each date keeps its nearest upcoming
            # event.
            value_mask = (event_date <= data) | (data == NaTns)
            data_indices = np.where(date_mask & value_mask)
            data[data_indices] = event_date
            value_cols[equity][data_indices] = value
    return pd.DataFrame(index=dates, data=value_cols)
def previous_event_frame(events_by_sid,
                         date_index,
                         missing_value,
                         field_dtype,
                         event_date_field,
                         previous_return_field):
    """
    Make a DataFrame representing simulated previous dates or values for an
    event.

    Parameters
    ----------
    events_by_sid : dict[int -> pd.DataFrame]
        Dict mapping sids to a DataFrame indexed by knowledge date.  Each
        row carries the date the event occurred (`event_date_field`) and
        the associated value (`previous_return_field`).
    date_index : DatetimeIndex
        The index of the returned DataFrame.
    missing_value : any
        Data which missing values should be filled with.
    field_dtype : any
        The dtype of the field for which the previous values are being
        retrieved.
    event_date_field : str
        The name of the date field that marks when the event occurred.
    previous_return_field : str
        The name of the field for which the previous values are being
        retrieved.

    Returns
    -------
    previous_events : pd.DataFrame
        A DataFrame where each column is a security from `events_by_sid` and
        the values are the values for the previous event that occurred on the
        date of the index. Entries falling before the first date will have
        `missing_value` filled in as the result in the output.

    See Also
    --------
    next_event_frame
    """
    sids = list(events_by_sid)
    # For categorical dtypes, seed the array with None and substitute
    # missing_value only after the forward-fill at the end.
    populate_value = None if field_dtype == categorical_dtype else \
        missing_value
    out = np.full(
        (len(date_index), len(sids)),
        populate_value,
        dtype=field_dtype
    )
    d_n = date_index[-1].asm8
    for col_idx, sid in enumerate(sids):
        # events_by_sid[sid] is a DataFrame mapping knowledge_date to event
        # date and values.
        df = events_by_sid[sid]
        # Restrict to events that occurred on or before the final date.
        df = df[df[event_date_field] <= d_n]
        event_date_vals = df[event_date_field].values
        # ``df`` is already restricted to event dates <= d_n, so its index
        # holds exactly the knowledge dates of interest.  (The original
        # code re-applied the identical filter here, repeating the same
        # comparison and row selection to no effect.)
        kd_vals = df.index.values
        # The date at which a previous event is first known is the max of the
        # kd and the event date.
        index_dates = np.maximum(kd_vals, event_date_vals)
        out[
            date_index.searchsorted(index_dates), col_idx
        ] = df[previous_return_field]
    frame = pd.DataFrame(out, index=date_index, columns=sids)
    # Forward-fill so every date reports the most recent prior event.
    frame.ffill(inplace=True)
    if field_dtype == categorical_dtype:
        frame[frame.isnull()] = missing_value
    return frame
def normalize_data_query_time(dt, time, tz):
    """Apply the correct time and timezone to a date.

    Parameters
    ----------
    dt : pd.Timestamp
        The original datetime that represents the date.
    time : datetime.time
        The time of day to use as the cutoff point for new data. Data points
        that you learn about after this time will become available to your
        algorithm on the next trading day.
    tz : tzinfo
        The timezone to normalize your dates to before comparing against
        `time`.

    Returns
    -------
    query_dt : pd.Timestamp
        The timestamp with the correct time and date in utc.
    """
    # Stamp the cutoff time-of-day onto the date in the query timezone,
    # then express the result in UTC.
    local_dt = datetime.datetime.combine(dt.date(), time)
    return pd.Timestamp(local_dt, tz=tz).tz_convert('utc')
def normalize_data_query_bounds(lower, upper, time, tz):
    """Adjust the first and last dates in the requested datetime index based on
    the provided query time and tz.

    Parameters
    ----------
    lower : pd.Timestamp
        The lower date requested.
    upper : pd.Timestamp
        The upper date requested.
    time : datetime.time
        The time of day to use as the cutoff point for new data. Data points
        that you learn about after this time will become available to your
        algorithm on the next trading day.
    tz : tzinfo
        The timezone to normalize your dates to before comparing against
        `time`.

    Returns
    -------
    lower, upper : pd.Timestamp
        The adjusted bounds, normalized with `time`/`tz` when a query time
        is given.
    """
    # Reach back one calendar day so things that happened on the first
    # requested day are still captured.  This doesn't need to be a trading
    # day; it only loosens the in-memory filtering bound.
    lower = lower - datetime.timedelta(days=1)
    if time is None:
        return lower, upper
    return (
        normalize_data_query_time(lower, time, tz),
        normalize_data_query_time(upper, time, tz),
    )
def normalize_timestamp_to_query_time(df,
                                      time,
                                      tz,
                                      inplace=False,
                                      ts_field='timestamp'):
    """Update the timestamp field of a dataframe to normalize dates around
    some data query time/timezone.

    Parameters
    ----------
    df : pd.DataFrame
        The dataframe to update. This needs a column named ``ts_field``.
    time : datetime.time
        The time of day to use as the cutoff point for new data. Data points
        that you learn about after this time will become available to your
        algorithm on the next trading day.
    tz : tzinfo
        The timezone to normalize your dates to before comparing against
        `time`.
    inplace : bool, optional
        Update the dataframe in place.
    ts_field : str, optional
        The name of the timestamp field in ``df``.

    Returns
    -------
    df : pd.DataFrame
        The dataframe with the timestamp field normalized. If ``inplace`` is
        true, then this will be the same object as ``df`` otherwise this will
        be a copy.
    """
    if not inplace:
        # don't mutate the dataframe in place
        df = df.copy()
    # NOTE(review): localizes the ts_field column as UTC, then views it in
    # the query timezone -- assumes the column holds naive UTC stamps;
    # confirm against callers.
    dtidx = pd.DatetimeIndex(df.loc[:, ts_field], tz='utc')
    dtidx_local_time = dtidx.tz_convert(tz)
    # Rows at or after the cutoff time-of-day roll to the next day.
    to_roll_forward = dtidx_local_time.time >= time
    # for all of the times that are greater than our query time add 1
    # day and truncate to the date
    df.loc[to_roll_forward, ts_field] = (
        dtidx_local_time[to_roll_forward] + datetime.timedelta(days=1)
    ).normalize().tz_localize(None).tz_localize('utc')  # cast back to utc
    # Rows before the cutoff keep their date, truncated to midnight UTC.
    df.loc[~to_roll_forward, ts_field] = dtidx[~to_roll_forward].normalize()
    return df
def check_data_query_args(data_query_time, data_query_tz):
    """Checks the data_query_time and data_query_tz arguments for loaders
    and raises a standard exception if one is None and the other is not.

    Parameters
    ----------
    data_query_time : datetime.time or None
    data_query_tz : tzinfo or None

    Raises
    ------
    ValueError
        Raised when only one of the arguments is None.
    """
    time_missing = data_query_time is None
    tz_missing = data_query_tz is None
    if time_missing != tz_missing:
        raise ValueError(
            "either 'data_query_time' and 'data_query_tz' must both be"
            " None or neither may be None (got %r, %r)" % (
                data_query_time,
                data_query_tz,
            ),
        )
def zip_with_floats(dates, flts):
    """Build a float-dtype Series of ``flts`` indexed by ``dates``."""
    return pd.Series(data=flts, index=dates, dtype='float')
def zip_with_strs(dates, strs):
    """Build an object-dtype Series of ``strs`` indexed by ``dates``."""
    return pd.Series(data=strs, index=dates, dtype='object')
def zip_with_dates(index_dates, dts):
    """Build a datetime Series of parsed ``dts`` indexed by ``index_dates``."""
    parsed = pd.to_datetime(dts)
    return pd.Series(parsed, index=index_dates)
def get_values_for_date_ranges(zip_date_index_with_vals,
                               vals_for_date_intervals,
                               starts,
                               ends,
                               date_index):
    """
    Returns a Series of values indexed by date based on the intervals defined
    by the start and end dates.

    Parameters
    ----------
    zip_date_index_with_vals : callable
        A function that takes in a list of dates and a list of values and
        returns a pd.Series with the values indexed by the dates.
    vals_for_date_intervals : list
        A list of values, one for each [start, end] date interval.
    starts : DatetimeIndex
        A DatetimeIndex of start dates.
    ends : DatetimeIndex
        A DatetimeIndex of end dates.
    date_index : DatetimeIndex
        The DatetimeIndex containing all dates for which values were requested.

    Returns
    -------
    date_index_with_vals : pd.Series
        A Series indexed by the given DatetimeIndex and with values assigned
        to dates based on the given date intervals.
    """
    # Fill in given values for given date ranges.
    end_indexes = date_index.values.searchsorted(ends)
    start_indexes = date_index.values.searchsorted(starts)
    # Inclusive count of index dates covered by each [start, end] interval.
    num_days = (end_indexes - start_indexes) + 1
    # In case any of the end dates falls on days missing from the date_index,
    # searchsorted will have placed their index within `date_index` to the
    # index of the next start date, so we will have added 1 extra day for
    # each of these. Subtract those extra days, but ignore any cases where the
    # start and end dates are equal. Note: if any of the start dates is
    # missing, it won't affect calculations because searchsorted will advance
    # the index to the next date within the same range.
    num_days[np.where(~np.in1d(ends, date_index) & (num_days != 0))] -= 1
    # Repeat each interval's value across the days it covers, then index
    # the flattened result by the full date_index.
    return zip_date_index_with_vals(
        date_index,
        np.repeat(
            vals_for_date_intervals,
            num_days,
        )
    )
| apache-2.0 |
Clyde-fare/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
    """Return the shortest path length from source to all reachable nodes.

    Returns a dictionary of shortest path lengths keyed by target.

    Parameters
    ----------
    graph: sparse matrix or 2D array (preferably LIL matrix)
        Adjacency matrix of the graph
    source : node label
        Starting node for path
    cutoff : integer, optional
        Depth to stop the search - only
        paths of length <= cutoff are returned.

    Examples
    --------
    >>> from sklearn.utils.graph import single_source_shortest_path_length
    >>> import numpy as np
    >>> graph = np.array([[ 0, 1, 0, 0],
    ...                   [ 1, 0, 1, 0],
    ...                   [ 0, 1, 0, 1],
    ...                   [ 0, 0, 1, 0]])
    >>> single_source_shortest_path_length(graph, 0)
    {0: 0, 1: 1, 2: 2, 3: 3}
    >>> single_source_shortest_path_length(np.ones((6, 6)), 2)
    {0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
    """
    # LIL format gives direct access to each row's column indices, which
    # serve as the neighbor lists for the breadth-first search below.
    if sparse.isspmatrix(graph):
        graph = graph.tolil()
    else:
        graph = sparse.lil_matrix(graph)
    path_lengths = {}      # node -> number of hops from `source` (BFS depth)
    depth = 0
    frontier = [source]    # nodes discovered at the previous depth
    while frontier:
        fringe = set()     # nodes to visit at the next depth
        for node in frontier:
            if node not in path_lengths:
                path_lengths[node] = depth
                fringe.update(graph.rows[node])
        # Stop expanding once the requested depth has been reached.
        if cutoff is not None and cutoff <= depth:
            break
        depth += 1
        frontier = fringe
    return path_lengths  # all path lengths as a dictionary
# Compatibility shim: prefer SciPy's connected_components when the installed
# scipy.sparse exposes it, otherwise fall back to the project-local
# Cython implementation in .sparsetools.
if hasattr(sparse, 'connected_components'):
    connected_components = sparse.connected_components
else:
    from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
    """ Return the Laplacian matrix of a directed graph.
    For non-symmetric graphs the out-degree is used in the computation.
    Parameters
    ----------
    csgraph : array_like or sparse matrix, 2 dimensions
        compressed-sparse graph, with shape (N, N).
    normed : bool, optional
        If True, then compute normalized Laplacian.
    return_diag : bool, optional
        If True, then return diagonal as well as laplacian.
    Returns
    -------
    lap : ndarray
        The N x N laplacian matrix of graph.
    diag : ndarray
        The length-N diagonal of the laplacian matrix.
        diag is returned only if return_diag is True.
    Notes
    -----
    The Laplacian matrix of a graph is sometimes referred to as the
    "Kirchoff matrix" or the "admittance matrix", and is useful in many
    parts of spectral graph theory. In particular, the eigen-decomposition
    of the laplacian matrix can give insight into many properties of the graph.
    For non-symmetric directed graphs, the laplacian is computed using the
    out-degree of each node.
    """
    if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
        raise ValueError('csgraph must be a square matrix or array')
    # The normalized Laplacian needs float arithmetic, so promote integer
    # input up front.  ``np.integer`` covers all signed and unsigned integer
    # dtypes and replaces the ``np.int``/``np.uint`` aliases that were
    # removed in NumPy >= 1.24 (the old check also missed e.g. int32).
    if normed and np.issubdtype(csgraph.dtype, np.integer):
        csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
    # Dispatch on sparsity; both helpers implement the same contract.
    if sparse.isspmatrix(csgraph):
        return _laplacian_sparse(csgraph, normed=normed,
                                 return_diag=return_diag)
    else:
        return _laplacian_dense(csgraph, normed=normed,
                                return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
    """Sparse (COO-based) backend for :func:`graph_laplacian`.
    Builds L = D - A (or the symmetrically normalized variant) in place on
    the COO data array.  Returns ``(lap, w)`` if ``return_diag`` else ``lap``,
    where ``w`` holds the (possibly sqrt-ed) out-degrees.
    """
    n_nodes = graph.shape[0]
    # Work on a negated COO copy so lap.data already holds the off-diagonal
    # entries of the Laplacian (-A).
    if not graph.format == 'coo':
        lap = (-graph).tocoo()
    else:
        lap = -graph.copy()
    diag_mask = (lap.row == lap.col)
    if not diag_mask.sum() == n_nodes:
        # The sparsity pattern of the matrix has holes on the diagonal,
        # we need to fix that
        diag_idx = lap.row[diag_mask]
        diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
        # Append explicit (placeholder) entries for the missing diagonal
        # positions; their values are overwritten below.
        new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
        new_row = np.concatenate([lap.row, diagonal_holes])
        new_col = np.concatenate([lap.col, diagonal_holes])
        lap = sparse.coo_matrix((new_data, (new_row, new_col)),
                                shape=lap.shape)
        diag_mask = (lap.row == lap.col)
    # Zero the diagonal so the row sums below give -degree.
    lap.data[diag_mask] = 0
    w = -np.asarray(lap.sum(axis=1)).squeeze()
    if normed:
        # Normalized Laplacian: L_sym = I - D^{-1/2} A D^{-1/2}.
        w = np.sqrt(w)
        w_zeros = (w == 0)
        # Isolated nodes get w=1 to avoid division by zero; their diagonal
        # is forced to 0 below via (1 - w_zeros).
        w[w_zeros] = 1
        lap.data /= w[lap.row]
        lap.data /= w[lap.col]
        lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
            lap.data.dtype)
    else:
        lap.data[diag_mask] = w[lap.row[diag_mask]]
    if return_diag:
        return lap, w
    return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
karasinski/NACAFoil-OpenFOAM | plot.py | 1 | 1969 | #!/usr/bin/env python
"""
This script plots various quantities.
"""
from __future__ import division, print_function
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import argparse
# Axis labels (LaTeX) for each plottable quantity.
ylabels = {
    "cl": r"$C_l$",
    "cd": r"$C_d$",
    "cl/cd": r"$C_l/C_d$",
    "k": "$k$",
    "omega": r"$\omega$",
    "epsilon": r"$\epsilon$",
}


def plot_foil_perf(quantity="cl/cd", foil="0012", Re=2e5):
    """Plot one performance quantity versus angle of attack.

    Reads the processed CSV for the given NACA foil and Reynolds number
    and draws the requested quantity on a fresh figure.
    """
    data = pd.read_csv("processed/NACA{}_{:.1e}.csv".format(foil, Re))
    plt.figure()
    values = data.cl / data.cd if quantity == "cl/cd" else data[quantity]
    plt.plot(data.alpha_deg, values, "-o")
    plt.xlabel(r"$\alpha$ (deg)")
    plt.ylabel(ylabels[quantity])
    plt.grid(True)
    plt.tight_layout()
if __name__ == "__main__":
    # BUG FIX: ``os`` is used in the --save branch below (os.path.isdir /
    # os.mkdir) but was never imported, so --save crashed with a NameError.
    import os

    # Seaborn styling is optional; fall back gracefully with a hint.
    try:
        import seaborn
        seaborn.set(style="white", context="notebook", font_scale=1.5)
    except ImportError:
        print("Could not import seaborn for plot styling. Try")
        print("\n conda install seaborn\n\nor")
        print("\n pip install seaborn\n")
    parser = argparse.ArgumentParser(description="Plotting results")
    parser.add_argument("quantity", nargs="?", default="cl/cd",
                        help="Which quantity to plot",
                        choices=["cl", "cd", "cl/cd", "k", "omega", "epsilon"])
    parser.add_argument("--foil", "-f", help="Foil", default="0012")
    parser.add_argument("--Reynolds", "-R", help="Reynolds number", default=2e5)
    parser.add_argument("--save", "-s", action="store_true", help="Save plots")
    parser.add_argument("--noshow", action="store_true", default=False,
                        help="Do not show")
    args = parser.parse_args()
    plot_foil_perf(args.quantity, args.foil, float(args.Reynolds))
    if args.save:
        if not os.path.isdir("figures"):
            os.mkdir("figures")
        plt.savefig("figures/{}.pdf".format(args.quantity))
    if not args.noshow:
        plt.show()
| gpl-3.0 |
huzq/scikit-learn | examples/classification/plot_classifier_comparison.py | 34 | 5239 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of a several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02  # step size in the mesh
# Display names, paired index-for-index with the estimators below.
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
         "Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
         "Naive Bayes", "QDA"]
classifiers = [
    KNeighborsClassifier(3),
    SVC(kernel="linear", C=0.025),
    SVC(gamma=2, C=1),
    GaussianProcessClassifier(1.0 * RBF(1.0)),
    DecisionTreeClassifier(max_depth=5),
    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    MLPClassifier(alpha=1, max_iter=1000),
    AdaBoostClassifier(),
    GaussianNB(),
    QuadraticDiscriminantAnalysis()]
# Build a linearly separable dataset and add uniform noise so it is not
# trivially separable.
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
# Three synthetic 2D datasets: moons, circles, and the noisy linear one.
datasets = [make_moons(noise=0.3, random_state=0),
            make_circles(noise=0.2, factor=0.5, random_state=1),
            linearly_separable
            ]
figure = plt.figure(figsize=(27, 9))
i = 1  # running subplot index (rows = datasets, cols = classifiers + 1)
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
    # preprocess dataset, split into training and test part
    X, y = ds
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=.4, random_state=42)
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # just plot the dataset first
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
    if ds_cnt == 0:
        ax.set_title("Input data")
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
               edgecolors='k')
    # Plot the testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
               edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    i += 1
    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
        # Plot the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
                   edgecolors='k')
        # Plot the testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   edgecolors='k', alpha=0.6)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        if ds_cnt == 0:
            ax.set_title(name)
        # Annotate the test accuracy in the bottom-right corner.
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=15, horizontalalignment='right')
        i += 1
plt.tight_layout()
plt.show()
| bsd-3-clause |
Myasuka/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principle component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
    """
    This dataset is two lines with a slope ~ 1, where one has
    a y offset of ~100
    """
    # First 1000 samples: the offset line (label 0); next 1000: the
    # un-offset line (label 1).
    offset_line = np.vstack((
        _generate_vector(),
        _generate_vector() + 100,
    )).T
    base_line = np.vstack((
        _generate_vector(),
        _generate_vector(),
    )).T
    X = np.vstack((offset_line, base_line))
    y = np.hstack((np.zeros(1000), np.ones(1000)))
    return X, y
def all_but_first_column(X):
    """Return a view of ``X`` with column 0 dropped."""
    keep = slice(1, None)
    return X[:, keep]
def drop_first_component(X, y):
    """
    Create a pipeline with PCA and the column selector and use it to
    transform the dataset.
    """
    selector = FunctionTransformer(all_but_first_column)
    pipeline = make_pipeline(PCA(), selector)
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    pipeline.fit(X_train, y_train)
    transformed = pipeline.transform(X_test)
    return transformed, y_test
if __name__ == '__main__':
    # Show the raw two-line dataset, colored by label.
    X, y = generate_dataset()
    plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
    plt.show()
    # After PCA + dropping the first component, the remaining coordinate
    # is plotted along a single axis (y fixed at 0).
    X_transformed, y_transformed = drop_first_component(*generate_dataset())
    plt.scatter(
        X_transformed[:, 0],
        np.zeros(len(X_transformed)),
        c=y_transformed,
        s=50,
    )
    plt.show()
| bsd-3-clause |
gpetretto/pymatgen | pymatgen/analysis/transition_state.py | 3 | 17253 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import os
import glob
import numpy as np
from monty.json import jsanitize
from monty.json import MSONable
# ``PiecewisePolynomial`` was removed from SciPy (>= 0.18); on newer SciPy
# fall back to ``CubicSpline`` and record which API is available so
# NEBAnalysis.setup_spline can pick the right code path.
scipy_old_piecewisepolynomial = True
try:
    from scipy.interpolate import PiecewisePolynomial
except ImportError:
    from scipy.interpolate import CubicSpline
    scipy_old_piecewisepolynomial = False
from pymatgen.util.plotting import pretty_plot
from pymatgen.io.vasp import Poscar, Outcar
from pymatgen.analysis.structure_matcher import StructureMatcher
import warnings
"""
Some reimplementation of Henkelman's Transition State Analysis utilities,
which are originally in Perl. Additional features beyond those offered by
Henkelman's utilities will be added.
This allows the usage and customization in Python.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '6/1/15'
class NEBAnalysis(MSONable):
    """
    An NEBAnalysis class.
    """
    def __init__(self, r, energies, forces, structures, spline_options=None):
        """
        Initializes an NEBAnalysis from the cumulative root mean squared distances
        between structures, the energies, the forces, the structures and the
        interpolation_order for the analysis.
        Args:
            r: Root mean square distances between structures
            energies: Energies of each structure along reaction coordinate
            forces: Tangent forces along the reaction coordinate.
            structures ([Structure]): List of Structures along reaction
                coordinate.
            spline_options (dict): Options for cubic spline. For example,
                {"saddle_point": "zero_slope"} forces the slope at the saddle to
                be zero.
        """
        self.r = np.array(r)
        self.energies = np.array(energies)
        self.forces = np.array(forces)
        self.structures = structures
        self.spline_options = spline_options if spline_options is not None \
            else {}
        # We do a piecewise interpolation between the points. Each spline (
        # cubic by default) is constrained by the boundary conditions of the
        # energies and the tangent force, i.e., the derivative of
        # the energy at each pair of points.
        self.setup_spline(spline_options=self.spline_options)
    def setup_spline(self, spline_options=None):
        """
        Setup of the options for the spline interpolation
        Args:
            spline_options (dict): Options for cubic spline. For example,
                {"saddle_point": "zero_slope"} forces the slope at the saddle to
                be zero.
        """
        self.spline_options = spline_options
        # Energies are referenced to the first image.
        relative_energies = self.energies - self.energies[0]
        if scipy_old_piecewisepolynomial:
            if self.spline_options:
                raise RuntimeError('Option for saddle point not available with'
                                   'old scipy implementation')
            self.spline = PiecewisePolynomial(
                self.r, np.array([relative_energies, -self.forces]).T,
                orders=3)
        else:
            # New scipy implementation for scipy > 0.18.0
            if self.spline_options.get('saddle_point', '') == 'zero_slope':
                # Fit two clamped splines that meet at the maximum so the
                # slope at the saddle point is exactly zero.
                imax = np.argmax(relative_energies)
                self.spline = CubicSpline(x=self.r[:imax + 1],
                                          y=relative_energies[:imax + 1],
                                          bc_type=((1, 0.0), (1, 0.0)))
                cspline2 = CubicSpline(x=self.r[imax:], y=relative_energies[imax:],
                                       bc_type=((1, 0.0), (1, 0.0)))
                self.spline.extend(c=cspline2.c, x=cspline2.x[1:])
            else:
                self.spline = CubicSpline(x=self.r, y=relative_energies,
                                          bc_type=((1, 0.0), (1, 0.0)))
    @classmethod
    def from_outcars(cls, outcars, structures, **kwargs):
        """
        Initializes an NEBAnalysis from Outcar and Structure objects. Use
        the static constructors, e.g., :class:`from_dir` instead if you
        prefer to have these automatically generated from a directory of NEB
        calculations.
        Args:
            outcars ([Outcar]): List of Outcar objects. Note that these have
                to be ordered from start to end along reaction coordinates.
            structures ([Structure]): List of Structures along reaction
                coordinate. Must be same length as outcar.
            interpolation_order (int): Order of polynomial to use to
                interpolate between images. Same format as order parameter in
                scipy.interplotate.PiecewisePolynomial.
        """
        if len(outcars) != len(structures):
            raise ValueError("# of Outcars must be same as # of Structures")
        # Calculate cumulative root mean square distance between structures,
        # which serves as the reaction coordinate. Note that these are
        # calculated from the final relaxed structures as the coordinates may
        # have changed from the initial interpolation.
        r = [0]
        prev = structures[0]
        for st in structures[1:]:
            dists = np.array([s2.distance(s1) for s1, s2 in zip(prev, st)])
            r.append(np.sqrt(np.sum(dists ** 2)))
            prev = st
        r = np.cumsum(r)
        energies = []
        forces = []
        for i, o in enumerate(outcars):
            o.read_neb()
            energies.append(o.data["energy"])
            # Terminal images carry no tangent force; they are fixed
            # end points of the band.
            if i in [0, len(outcars) - 1]:
                forces.append(0)
            else:
                forces.append(o.data["tangent_force"])
        forces = np.array(forces)
        r = np.array(r)
        return cls(r=r, energies=energies, forces=forces,
                   structures=structures, **kwargs)
    def get_extrema(self, normalize_rxn_coordinate=True):
        """
        Returns the positions of the extrema along the MEP. Both local
        minimums and maximums are returned.
        Args:
            normalize_rxn_coordinate (bool): Whether to normalize the
                reaction coordinate to between 0 and 1. Defaults to True.
        Returns:
            (min_extrema, max_extrema), where the extrema are given as
            [(x1, y1), (x2, y2), ...].
        """
        # Sample the spline on a fine grid; energies are reported in meV.
        x = np.arange(0, np.max(self.r), 0.01)
        y = self.spline(x) * 1000
        scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
        min_extrema = []
        max_extrema = []
        # Simple three-point test on the sampled curve to locate extrema.
        for i in range(1, len(x) - 1):
            if y[i] < y[i-1] and y[i] < y[i+1]:
                min_extrema.append((x[i] * scale, y[i]))
            elif y[i] > y[i-1] and y[i] > y[i+1]:
                max_extrema.append((x[i] * scale, y[i]))
        return min_extrema, max_extrema
    def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):
        """
        Returns the NEB plot. Uses Henkelman's approach of spline fitting
        each section of the reaction path based on tangent force and energies.
        Args:
            normalize_rxn_coordinate (bool): Whether to normalize the
                reaction coordinate to between 0 and 1. Defaults to True.
            label_barrier (bool): Whether to label the maximum barrier.
        Returns:
            matplotlib.pyplot object.
        """
        plt = pretty_plot(12, 8)
        scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
        x = np.arange(0, np.max(self.r), 0.01)
        y = self.spline(x) * 1000
        relative_energies = self.energies - self.energies[0]
        # Raw image energies as red dots, spline fit as a black line.
        plt.plot(self.r * scale, relative_energies * 1000, 'ro',
                 x * scale, y, 'k-', linewidth=2, markersize=10)
        plt.xlabel("Reaction coordinate")
        plt.ylabel("Energy (meV)")
        plt.ylim((np.min(y) - 10, np.max(y) * 1.02 + 20))
        if label_barrier:
            # Annotate the highest point of the fitted curve as the barrier.
            data = zip(x * scale, y)
            barrier = max(data, key=lambda d: d[1])
            plt.plot([0, barrier[0]], [barrier[1], barrier[1]], 'k--')
            plt.annotate('%.0f meV' % (np.max(y) - np.min(y)),
                         xy=(barrier[0] / 2, barrier[1] * 1.02),
                         xytext=(barrier[0] / 2, barrier[1] * 1.02),
                         horizontalalignment='center')
        plt.tight_layout()
        return plt
    @classmethod
    def from_dir(cls, root_dir, relaxation_dirs=None, **kwargs):
        """
        Initializes a NEBAnalysis object from a directory of a NEB run.
        Note that OUTCARs must be present in all image directories. For the
        terminal OUTCARs from relaxation calculations, you can specify the
        locations using relaxation_dir. If these are not specified, the code
        will attempt to look for the OUTCARs in 00 and 0n directories,
        followed by subdirs "start", "end" or "initial", "final" in the
        root_dir. These are just some typical conventions used
        preferentially in Shyue Ping's MAVRL research group. For the
        non-terminal points, the CONTCAR is read to obtain structures. For
        terminal points, the POSCAR is used. The image directories are
        assumed to be the only directories that can be resolved to integers.
        E.g., "00", "01", "02", "03", "04", "05", "06". The minimum
        sub-directory structure that can be parsed is of the following form (
        a 5-image example is shown):
        00:
        - POSCAR
        - OUTCAR
        01, 02, 03, 04, 05:
        - CONTCAR
        - OUTCAR
        06:
        - POSCAR
        - OUTCAR
        Args:
            root_dir (str): Path to the root directory of the NEB calculation.
            relaxation_dirs (tuple): This specifies the starting and ending
                relaxation directories from which the OUTCARs are read for the
                terminal points for the energies.
        Returns:
            NEBAnalysis object.
        """
        # Collect image directories: any subdir whose name is all digits.
        neb_dirs = []
        for d in os.listdir(root_dir):
            pth = os.path.join(root_dir, d)
            if os.path.isdir(pth) and d.isdigit():
                i = int(d)
                neb_dirs.append((i, pth))
        neb_dirs = sorted(neb_dirs, key=lambda d: d[0])
        outcars = []
        structures = []
        # Setup the search sequence for the OUTCARs for the terminal
        # directories.
        terminal_dirs = []
        if relaxation_dirs is not None:
            terminal_dirs.append(relaxation_dirs)
        terminal_dirs.append((neb_dirs[0][1], neb_dirs[-1][1]))
        terminal_dirs.append([os.path.join(root_dir, d)
                              for d in ["start", "end"]])
        terminal_dirs.append([os.path.join(root_dir, d)
                              for d in ["initial", "final"]])
        for i, d in neb_dirs:
            outcar = glob.glob(os.path.join(d, "OUTCAR*"))
            contcar = glob.glob(os.path.join(d, "CONTCAR*"))
            poscar = glob.glob(os.path.join(d, "POSCAR*"))
            terminal = i == 0 or i == neb_dirs[-1][0]
            if terminal:
                # Try each candidate location in priority order; the
                # for/else raises only when none of them has an OUTCAR.
                for ds in terminal_dirs:
                    od = ds[0] if i == 0 else ds[1]
                    outcar = glob.glob(os.path.join(od, "OUTCAR*"))
                    if outcar:
                        outcar = sorted(outcar)
                        outcars.append(Outcar(outcar[-1]))
                        break
                else:
                    raise ValueError("OUTCAR cannot be found for terminal "
                                     "point %s" % d)
                structures.append(Poscar.from_file(poscar[0]).structure)
            else:
                outcars.append(Outcar(outcar[0]))
                structures.append(Poscar.from_file(contcar[0]).structure)
        return NEBAnalysis.from_outcars(outcars, structures, **kwargs)
    def as_dict(self):
        """
        Dict representation of NEBAnalysis.
        Returns:
            JSON serializable dict representation.
        """
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                'r': jsanitize(self.r),
                'energies': jsanitize(self.energies),
                'forces': jsanitize(self.forces),
                'structures': [s.as_dict() for s in self.structures]}
def combine_neb_plots(neb_analyses, arranged_neb_analyses=False,
                      reverse_plot=False):
    """
    neb_analyses: a list of NEBAnalysis objects
    arranged_neb_analyses: The code connects two end points with the
    smallest-energy difference. If all end points have very close energies, it's
    likely to result in an inaccurate connection. Manually arrange neb_analyses
    if the combined plot is not as expected compared with all individual plots.
    E.g., if there are two NEBAnalysis objects to combine, arrange in such a
    way that the end-point energy of the first NEBAnalysis object is the
    start-point energy of the second NEBAnalysis object.
    Note that the barrier labeled in y-axis in the combined plot might be
    different from that in the individual plot due to the reference energy used.
    reverse_plot: reverse the plot or percolation direction.
    return: a NEBAnalysis object
    """
    # (Fix: a stray "| mit |" dataset marker fused onto the final return
    # statement made this function syntactically invalid; removed.)
    x = StructureMatcher()
    for neb_index in range(len(neb_analyses)):
        if neb_index == 0:
            # Seed the running (combined) band with the first analysis.
            neb1 = neb_analyses[neb_index]
            neb1_energies = list(neb1.energies)
            neb1_structures = neb1.structures
            neb1_forces = neb1.forces
            neb1_r = neb1.r
            continue
        neb2 = neb_analyses[neb_index]
        neb2_energies = list(neb2.energies)
        # The new segment must share a terminal structure with the
        # combined band, otherwise the two paths cannot be connected.
        matching = 0
        for neb1_s in [neb1_structures[0], neb1_structures[-1]]:
            if x.fit(neb1_s, neb2.structures[0]) or \
                    x.fit(neb1_s, neb2.structures[-1]):
                matching += 1
                break
        if matching == 0:
            raise ValueError("no matched structures for connection!")
        neb1_start_e, neb1_end_e = neb1_energies[0], neb1_energies[-1]
        neb2_start_e, neb2_end_e = neb2_energies[0], neb2_energies[-1]
        # Join at the pair of end points whose energies are closest.
        min_e_diff = min(([abs(neb1_start_e - neb2_start_e),
                           abs(neb1_start_e - neb2_end_e),
                           abs(neb1_end_e - neb2_start_e),
                           abs(neb1_end_e - neb2_end_e)]))
        if arranged_neb_analyses:
            # Caller guarantees end-of-neb1 == start-of-neb2; average the
            # shared junction energy.
            neb1_energies = neb1_energies[0:len(neb1_energies) - 1] \
                + [(neb1_energies[-1] + neb2_energies[0]) / 2] \
                + neb2_energies[1:]
            neb1_structures = neb1_structures + neb2.structures[1:]
            neb1_forces = list(neb1_forces) + list(neb2.forces)[1:]
            neb1_r = list(neb1_r) + [i + neb1_r[-1] for i in
                                     list(neb2.r)[1:]]
        elif abs(neb1_start_e - neb2_start_e) == min_e_diff:
            # start-start match: flip neb1 and prepend it to neb2.
            neb1_energies = list(reversed(neb1_energies[1:])) + neb2_energies
            neb1_structures = list(
                reversed((neb1_structures[1:]))) + neb2.structures
            neb1_forces = list(reversed(list(neb1_forces)[1:])) + list(
                neb2.forces)
            neb1_r = list(reversed(
                [i * -1 - neb1_r[-1] * -1 for i in list(neb1_r)[1:]])) + [
                i + neb1_r[-1] for i in list(neb2.r)]
        elif abs(neb1_start_e - neb2_end_e) == min_e_diff:
            # start-end match: neb2 goes in front of neb1 as-is.
            neb1_energies = neb2_energies + neb1_energies[1:]
            neb1_structures = neb2.structures + neb1_structures[1:]
            neb1_forces = list(neb2.forces) + list(neb1_forces)[1:]
            neb1_r = [i for i in list(neb2.r)] + \
                     [i + list(neb2.r)[-1] for i in list(neb1_r)[1:]]
        elif abs(neb1_end_e - neb2_start_e) == min_e_diff:
            # end-start match: append neb2 after neb1 as-is.
            neb1_energies = neb1_energies + neb2_energies[1:]
            neb1_structures = neb1_structures + neb2.structures[1:]
            neb1_forces = list(neb1_forces) + list(neb2.forces)[1:]
            neb1_r = [i for i in list(neb1_r)] + \
                     [i + neb1_r[-1] for i in list(neb2.r)[1:]]
        else:
            # end-end match: append a flipped neb2 after neb1.
            neb1_energies = neb1_energies + list(reversed(neb2_energies))[1:]
            neb1_structures = neb1_structures + list(
                reversed((neb2.structures)))[1:]
            neb1_forces = list(neb1_forces) + \
                list(reversed(list(neb2.forces)))[1:]
            neb1_r = list(neb1_r) + list(
                reversed([i * -1 - list(neb2.r)[-1] * -1 + list(neb1_r)[-1]
                          for i in list(neb2.r)[:-1]]))
    if reverse_plot:
        na = NEBAnalysis(
            list(reversed([i * -1 - neb1_r[-1] * -1 for i in list(neb1_r)])),
            list(reversed(neb1_energies)),
            list(reversed(neb1_forces)), list(reversed(neb1_structures)))
    else:
        na = NEBAnalysis(neb1_r, neb1_energies, neb1_forces, neb1_structures)
    return na
shangwuhencc/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
    """Smoke-test category filtering in the 20 newsgroups loader."""
    try:
        full = datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")
    # Reduced dataset restricted to the last two categories of the full one.
    reduced = datasets.fetch_20newsgroups(
        subset='all', categories=full.target_names[-1:-3:-1], shuffle=False)
    # Category ordering must match the tail of the full dataset.
    assert_equal(reduced.target_names, full.target_names[-2:])
    # With two categories, only labels 0 and 1 may appear.
    assert_equal(np.unique(reduced.target).tolist(), [0, 1])
    # filenames, target and data must all be aligned.
    assert_equal(len(reduced.filenames), len(reduced.target))
    assert_equal(len(reduced.filenames), len(reduced.data))
    # First reduced entry corresponds to the first full-dataset entry of
    # the same category.
    first_entry = reduced.data[0]
    category = reduced.target_names[reduced.target[0]]
    label = full.target_names.index(category)
    matching_entry = full.data[np.where(full.target == label)[0][0]]
    assert_equal(first_entry, matching_entry)
def test_20news_length_consistency():
    """Checks the length consistencies within the bunch
    This is a non-regression test for a bug present in 0.16.1.
    """
    try:
        datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")
    # Re-fetch the full dataset and compare item access vs attribute access.
    bunch = datasets.fetch_20newsgroups(subset='all')
    for field in ('data', 'target', 'filenames'):
        assert_equal(len(bunch[field]), len(getattr(bunch, field)))
def test_20news_vectorized():
    # This test is slow.
    raise SkipTest("Test too slow.")

    # Unreachable while the skip above is active; kept so the checks can be
    # re-enabled by deleting the raise.
    n_features = 107428
    for subset, n_samples in (("train", 11314), ("test", 7532),
                              ("all", 11314 + 7532)):
        bunch = datasets.fetch_20newsgroups_vectorized(subset=subset)
        assert_true(sp.isspmatrix_csr(bunch.data))
        assert_equal(bunch.data.shape, (n_samples, n_features))
        assert_equal(bunch.target.shape[0], n_samples)
        assert_equal(bunch.data.dtype, np.float64)
xavierwu/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
    """Generate a grid of points based on the ``percentiles of ``X``.
    The grid is generated by placing ``grid_resolution`` equally
    spaced points between the ``percentiles`` of each column
    of ``X``.
    Parameters
    ----------
    X : ndarray
        The data
    percentiles : tuple of floats
        The percentiles which are used to construct the extreme
        values of the grid axes.
    grid_resolution : int
        The number of equally spaced points that are placed
        on the grid.
    Returns
    -------
    grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0] == grid_resolution * X.shape[1]``.
    axes : seq of ndarray
        The axes with which the grid has been created.
    """
    if len(percentiles) != 2:
        raise ValueError('percentile must be tuple of len 2')
    if not all(0. <= x <= 1. for x in percentiles):
        raise ValueError('percentile values must be in [0, 1]')
    # PERF: mquantiles over the whole matrix was recomputed for every
    # high-resolution column; compute it lazily, at most once.
    emp_percentiles = None
    axes = []
    for col in range(X.shape[1]):
        uniques = np.unique(X[:, col])
        if uniques.shape[0] < grid_resolution:
            # feature has low resolution use unique vals
            axis = uniques
        else:
            if emp_percentiles is None:
                emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
            # create axis based on percentiles and grid resolution
            axis = np.linspace(emp_percentiles[0, col],
                               emp_percentiles[1, col],
                               num=grid_resolution, endpoint=True)
        axes.append(axis)
    return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
                       percentiles=(0.05, 0.95), grid_resolution=100):
    """Partial dependence of ``target_variables``.
    Partial dependence plots show the dependence between the joint values
    of the ``target_variables`` and the function represented
    by the ``gbrt``.
    Read more in the :ref:`User Guide <partial_dependence>`.
    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.
    target_variables : array-like, dtype=int
        The target features for which the partial dependecy should be
        computed (size should be smaller than 3 for visual renderings).
    grid : array-like, shape=(n_points, len(target_variables))
        The grid of ``target_variables`` values for which the
        partial dependecy should be evaluated (either ``grid`` or ``X``
        must be specified).
    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained. It is used to generate
        a ``grid`` for the ``target_variables``. The ``grid`` comprises
        ``grid_resolution`` equally spaced points between the two
        ``percentiles``.
    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used create the extreme values
        for the ``grid``. Only if ``X`` is not None.
    grid_resolution : int, default=100
        The number of equally spaced points on the ``grid``.
    Returns
    -------
    pdp : array, shape=(n_classes, n_points)
        The partial dependence function evaluated on the ``grid``.
        For regression and binary classification ``n_classes==1``.
    axes : seq of ndarray or None
        The axes with which the grid has been created or None if
        the grid has been given.
    Examples
    --------
    >>> samples = [[0, 0, 2], [1, 0, 0]]
    >>> labels = [0, 1]
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
    >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
    >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
    (array([[-4.52...,  4.52...]]), [array([ 0.,  1.])])
    """
    # Validate the model and the grid/X exclusivity contract.
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)
    if (grid is None and X is None) or (grid is not None and X is not None):
        raise ValueError('Either grid or X must be specified')
    target_variables = np.asarray(target_variables, dtype=np.int32,
                                  order='C').ravel()
    if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
        raise ValueError('target_variables must be in [0, %d]'
                         % (gbrt.n_features - 1))
    if X is not None:
        # Build the evaluation grid from the training data percentiles.
        X = check_array(X, dtype=DTYPE, order='C')
        grid, axes = _grid_from_X(X[:, target_variables], percentiles,
                                  grid_resolution)
    else:
        assert grid is not None
        # dont return axes if grid is given
        axes = None
    # grid must be 2d
    if grid.ndim == 1:
        grid = grid[:, np.newaxis]
    if grid.ndim != 2:
        raise ValueError('grid must be 2d but is %dd' % grid.ndim)
    # C-ordered DTYPE array as required by the Cython tree traversal.
    grid = np.asarray(grid, dtype=DTYPE, order='C')
    assert grid.shape[1] == target_variables.shape[0]
    n_trees_per_stage = gbrt.estimators_.shape[1]
    n_estimators = gbrt.estimators_.shape[0]
    pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
                   order='C')
    # Accumulate each tree's (learning-rate-scaled) contribution in pdp.
    for stage in range(n_estimators):
        for k in range(n_trees_per_stage):
            tree = gbrt.estimators_[stage, k].tree_
            _partial_dependence_tree(tree, grid, target_variables,
                                     gbrt.learning_rate, pdp[k])
    return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
                            label=None, n_cols=3, grid_resolution=100,
                            percentiles=(0.05, 0.95), n_jobs=1,
                            verbose=0, ax=None, line_kw=None,
                            contour_kw=None, **fig_kw):
    """Partial dependence plots for ``features``.

    The ``len(features)`` plots are arranged in a grid with ``n_cols``
    columns. Two-way partial dependence plots are plotted as contour
    plots.

    Read more in the :ref:`User Guide <partial_dependence>`.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.
    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained.
    features : seq of tuples or ints
        If seq[i] is an int or a tuple with one int value, a one-way
        PDP is created; if seq[i] is a tuple of two ints, a two-way
        PDP is created.
    feature_names : seq of str
        Name of each feature; feature_names[i] holds
        the name of the feature with index i.
    label : object
        The class label for which the PDPs should be computed.
        Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
    n_cols : int
        The number of columns in the grid plot (default: 3).
    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the PDP axes.
    grid_resolution : int, default=100
        The number of equally spaced points on the axes.
    n_jobs : int
        The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
        Defaults to 1.
    verbose : int
        Verbose output during PD computations. Defaults to 0.
    ax : Matplotlib axis object, default None
        An axis object onto which the plots will be drawn.
    line_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For one-way partial dependence plots.
    contour_kw : dict
        Dict with keywords passed to the ``pylab.plot`` call.
        For two-way partial dependence plots.
    fig_kw : dict
        Dict with keywords passed to the figure() call.
        Note that all keywords not recognized above will be automatically
        included here.

    Returns
    -------
    fig : figure
        The Matplotlib Figure object.
    axs : seq of Axis objects
        A seq of Axis objects, one for each subplot.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X, y = make_friedman1()
    >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
    >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
    ...
    """
    # matplotlib is imported lazily so the module can be used without it
    # when only numeric partial dependence is required.
    import matplotlib.pyplot as plt
    from matplotlib import transforms
    from matplotlib.ticker import MaxNLocator
    from matplotlib.ticker import ScalarFormatter

    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)

    # set label_idx for multi-class GBRT
    if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
        if label is None:
            raise ValueError('label is not given for multi-class PDP')
        label_idx = np.searchsorted(gbrt.classes_, label)
        if gbrt.classes_[label_idx] != label:
            raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
    else:
        # regression and binary classification
        label_idx = 0

    X = check_array(X, dtype=DTYPE, order='C')
    if gbrt.n_features != X.shape[1]:
        raise ValueError('X.shape[1] does not match gbrt.n_features')

    if line_kw is None:
        line_kw = {'color': 'green'}
    if contour_kw is None:
        contour_kw = {}

    # convert feature_names to list
    if feature_names is None:
        # if not feature_names use fx indices as name
        feature_names = [str(i) for i in range(gbrt.n_features)]
    elif isinstance(feature_names, np.ndarray):
        feature_names = feature_names.tolist()

    def convert_feature(fx):
        # map a feature given by name to its column index
        if isinstance(fx, six.string_types):
            try:
                fx = feature_names.index(fx)
            except ValueError:
                raise ValueError('Feature %s not in feature_names' % fx)
        return fx

    # convert features into a seq of int tuples
    tmp_features = []
    for fxs in features:
        if isinstance(fxs, (numbers.Integral,) + six.string_types):
            fxs = (fxs,)
        try:
            fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
        except TypeError:
            raise ValueError('features must be either int, str, or tuple '
                             'of int/str')
        if not (1 <= np.size(fxs) <= 2):
            raise ValueError('target features must be either one or two')

        tmp_features.append(fxs)

    features = tmp_features

    names = []
    try:
        for fxs in features:
            l = []
            # explicit loop so "i" is bound for exception below
            for i in fxs:
                l.append(feature_names[i])
            names.append(l)
    except IndexError:
        raise ValueError('features[i] must be in [0, n_features) '
                         'but was %d' % i)

    # compute PD functions (one call per requested feature tuple)
    pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(partial_dependence)(gbrt, fxs, X=X,
                                    grid_resolution=grid_resolution,
                                    percentiles=percentiles)
        for fxs in features)

    # get global min and max values of PD grouped by plot type
    # (keyed by number of target features: 1 = line plot, 2 = contour)
    pdp_lim = {}
    for pdp, axes in pd_result:
        min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
        n_fx = len(axes)
        old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
        min_pd = min(min_pd, old_min_pd)
        max_pd = max(max_pd, old_max_pd)
        pdp_lim[n_fx] = (min_pd, max_pd)

    # create contour levels for two-way plots
    if 2 in pdp_lim:
        Z_level = np.linspace(*pdp_lim[2], num=8)

    if ax is None:
        fig = plt.figure(**fig_kw)
    else:
        fig = ax.get_figure()
        fig.clear()

    n_cols = min(n_cols, len(features))
    n_rows = int(np.ceil(len(features) / float(n_cols)))
    axs = []
    for i, fx, name, (pdp, axes) in zip(count(), features, names,
                                        pd_result):
        ax = fig.add_subplot(n_rows, n_cols, i + 1)

        if len(axes) == 1:
            ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
        else:
            # make contour plot
            assert len(axes) == 2
            XX, YY = np.meshgrid(axes[0], axes[1])
            Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
            CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
                            colors='k')
            ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
                        vmin=Z_level[0], alpha=0.75, **contour_kw)
            ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)

        # plot data deciles + axes labels
        deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
        trans = transforms.blended_transform_factory(ax.transData,
                                                     ax.transAxes)
        ylim = ax.get_ylim()
        ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
        ax.set_xlabel(name[0])
        # vlines may have changed the y limits; restore them
        ax.set_ylim(ylim)

        # prevent x-axis ticks from overlapping
        ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
        tick_formatter = ScalarFormatter()
        tick_formatter.set_powerlimits((-3, 4))
        ax.xaxis.set_major_formatter(tick_formatter)

        if len(axes) > 1:
            # two-way PDP - y-axis deciles + labels
            deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
            trans = transforms.blended_transform_factory(ax.transAxes,
                                                         ax.transData)
            xlim = ax.get_xlim()
            ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
            ax.set_ylabel(name[1])
            # hline erases xlim
            ax.set_xlim(xlim)
        else:
            ax.set_ylabel('Partial dependence')

        if len(axes) == 1:
            ax.set_ylim(pdp_lim[1])
        axs.append(ax)

    fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
                        hspace=0.3)
    return fig, axs
| bsd-3-clause |
renjinghai/models | autoencoder/MaskingNoiseAutoencoderRunner.py | 10 | 1689 | import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder.autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def standard_scale(X_train, X_test):
    """Standardize both splits using statistics fitted on the training set only."""
    scaler = prep.StandardScaler().fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_test)
def get_random_block_from_data(data, batch_size):
    """Return a random contiguous slice of ``batch_size`` rows from ``data``."""
    start = np.random.randint(0, len(data) - batch_size)
    return data[start:start + batch_size]
# Standardize train/test images with statistics fit on the training set only.
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

n_samples = int(mnist.train.num_examples)
training_epochs = 100
batch_size = 128
display_step = 1  # report the running cost every `display_step` epochs

# Denoising autoencoder that masks (zeroes) a fraction of the inputs;
# 784 = 28x28 MNIST pixels, 200 hidden units.
autoencoder = MaskingNoiseAutoencoder(n_input = 784,
                                      n_hidden = 200,
                                      transfer_function = tf.nn.softplus,
                                      optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
                                      dropout_probability = 0.95)

for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    for i in range(total_batch):
        # NOTE(review): batches are drawn as random contiguous slices, so an
        # "epoch" does not necessarily visit every sample exactly once.
        batch_xs = get_random_block_from_data(X_train, batch_size)
        cost = autoencoder.partial_fit(batch_xs)
        # per-sample cost averaged over the epoch
        avg_cost += cost / n_samples * batch_size

    if epoch % display_step == 0:
        print "Epoch:", '%04d' % (epoch + 1), \
            "cost=", "{:.9f}".format(avg_cost)

print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
| apache-2.0 |
anjsimmo/simple-ml-pipeline | pipeline/task_traveltime.py | 1 | 1756 | from ruffus import *
import pandas as pd
import numpy as np
import datetime
import datatables.traveltime
import pipeline.data_merged
# Merge all dates into a single file
# X.merged -> traveltime.task
@merge(pipeline.data_merged.merge_data,
       'data/traveltime.task')
def task_traveltime(input_files, output_file):
    """Concatenate the per-date travel-time files into a single task file."""
    # Read inputs in deterministic (sorted) order, then stack the frames.
    frames = [datatables.traveltime.read(path) for path in sorted(input_files)]
    all_data = pd.concat(frames, ignore_index=True)
    # Renaming the field to be predicted (travel time) as 'y', and assigning an
    # id, makes it easier to reuse the same evaluation code for multiple tasks.
    all_data = all_data.rename(columns = {'travel time':'y'})
    all_data.insert(0, 'id', np.arange(all_data.shape[0]))  # label ids 0,1,...
    datatables.traveltime.write(all_data, output_file)
# Prepare train and test data sets for travel time task
# traveltime.task -> traveltime.task.train, traveltime.task.test, traveltime.task.test.xs
@split(task_traveltime,
       ['data/traveltime.task.train', 'data/traveltime.task.test', 'data/traveltime.task.test.xs'])
def split_task_traveltime(input_file, output_files):
    """Split the travel-time task temporally into train/test (+ feature-only xs)."""
    train_out, test_out, test_xs = output_files
    all_data = datatables.traveltime.read(input_file)
    # Temporal split: rows strictly before the threshold date train the model,
    # rows on/after it form the held-out test set.
    thresh = datetime.datetime(2015, 8, 26)
    train_set = all_data[all_data['t'] < thresh]
    test_set = all_data[all_data['t'] >= thresh]
    datatables.traveltime.write(train_set, train_out)
    datatables.traveltime.write(test_set, test_out)
    # hide travel time column to be predicted
    datatables.traveltime.write_xs(test_set.drop('y', 1), test_xs)
| mit |
ningchi/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
    """Time exact (scipy) vs. randomized (scikit-learn) SVD on low-rank data.

    Returns a dict mapping a method label to a list of runtimes (seconds),
    one entry per (n_samples, n_features) combination, in iteration order.
    """
    it = 0

    results = defaultdict(lambda: [])

    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            # mostly low rank matrix with a fat noisy tail
            X = make_low_rank_matrix(n_samples, n_features,
                                     effective_rank=rank,
                                     tail_strength=0.2)

            # collect garbage before each timing so GC pauses do not
            # pollute the measured runtimes
            gc.collect()
            print("benchmarking scipy svd: ")
            tstart = time()
            svd(X, full_matrices=False)
            results['scipy svd'].append(time() - tstart)

            gc.collect()
            print("benchmarking scikit-learn randomized_svd: n_iter=0")
            tstart = time()
            randomized_svd(X, rank, n_iter=0)
            results['scikit-learn randomized_svd (n_iter=0)'].append(
                time() - tstart)

            gc.collect()
            print("benchmarking scikit-learn randomized_svd: n_iter=%d "
                  % n_iter)
            tstart = time()
            randomized_svd(X, rank, n_iter=n_iter)
            results['scikit-learn randomized_svd (n_iter=%d)'
                    % n_iter].append(time() - tstart)

    return results
if __name__ == '__main__':
    # registers the 3d projection as a side effect of the import
    from mpl_toolkits.mplot3d import axes3d  # noqa: F401
    import matplotlib.pyplot as plt

    # np.int was a deprecated alias for the builtin int; use int directly.
    samples_range = np.linspace(2, 1000, 4).astype(int)
    features_range = np.linspace(2, 1000, 4).astype(int)
    results = compute_bench(samples_range, features_range)

    label = 'scikit-learn singular value decomposition benchmark results'
    fig = plt.figure(label)
    # fig.gca(projection='3d') was removed in matplotlib 3.6;
    # add_subplot is the supported way to create a 3d axes.
    ax = fig.add_subplot(projection='3d')
    # dict.iteritems() is Python 2 only; items() works on both 2 and 3.
    for c, (label, timings) in zip('rbg', sorted(results.items())):
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
                        color=c)
        # dummy point plot to stick the legend to since surface plot do not
        # support legends (yet?)
        ax.plot([1], [1], [1], color=c, label=label)

    ax.set_xlabel('n_samples')
    ax.set_ylabel('n_features')
    ax.set_zlabel('Time (s)')
    ax.legend()
    plt.show()
| bsd-3-clause |
ktaneishi/deepchem | deepchem/utils/evaluate.py | 1 | 7244 | """
Utility functions to evaluate models on datasets.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import csv
import numpy as np
import warnings
import pandas as pd
import sklearn
from deepchem.utils.save import log
from deepchem.trans import undo_transforms
from deepchem.metrics import from_one_hot
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
def relative_difference(x, y):
    """Compute the relative difference between x and y.

    The absolute difference is scaled by the magnitude of ``max(x, y)``.
    """
    numerator = np.abs(x - y)
    denominator = np.abs(max(x, y))
    return numerator / denominator
def threshold_predictions(y, threshold):
    """Binarize continuous predictions at a cutoff.

    Parameters
    ----------
    y: array_like
      Continuous prediction scores (1-D).
    threshold: float
      Scores strictly greater than ``threshold`` map to 1, all others to 0.

    Returns
    -------
    np.ndarray
      Array with the same shape and dtype as ``y`` containing only 0s and 1s.
    """
    y = np.asarray(y)
    # Vectorized replacement for the original elementwise Python loop;
    # casting back to y.dtype matches the old np.zeros_like(y) semantics.
    return (y > threshold).astype(y.dtype)
# TODO(rbharath): This is now simple enough that we should probably get rid of
# Evaluator object to avoid clutter.
class Evaluator(object):
  """Class that evaluates a model on a given dataset."""

  def __init__(self, model, dataset, transformers, verbose=False):
    """
    Parameters
    ----------
    model: Model
      Fitted model whose predictions are scored.
    dataset: Dataset
      Dataset providing y, w, ids and task names.
    transformers: list
      Transformers applied to the dataset; only those with ``transform_y``
      set are undone on labels/predictions before scoring.
    verbose: bool, optional
      If True, log progress messages.
    """
    self.model = model
    self.dataset = dataset
    # Keep only the transformers that modified y; these must be undone on
    # both the labels and the model outputs before computing metrics.
    self.output_transformers = [
        transformer for transformer in transformers if transformer.transform_y
    ]
    self.task_names = dataset.get_task_names()
    self.verbose = verbose

  def output_statistics(self, scores, stats_out):
    """
    Write computed stats to file.
    """
    with open(stats_out, "w") as statsfile:
      statsfile.write(str(scores) + "\n")

  def output_predictions(self, y_preds, csv_out):
    """
    Writes predictions to file as CSV (one row per compound).

    Args:
      y_preds: np.ndarray
      csv_out: str, path of the CSV file to write.
    """
    mol_ids = self.dataset.ids
    n_tasks = len(self.task_names)
    # force a (n_samples, n_tasks) layout so each row lines up with an id
    y_preds = np.reshape(y_preds, (len(y_preds), n_tasks))
    assert len(y_preds) == len(mol_ids)
    with open(csv_out, "w") as csvfile:
      csvwriter = csv.writer(csvfile)
      csvwriter.writerow(["Compound"] + self.dataset.get_task_names())
      for mol_id, y_pred in zip(mol_ids, y_preds):
        csvwriter.writerow([mol_id] + list(y_pred))

  def compute_model_performance(self,
                                metrics,
                                csv_out=None,
                                stats_out=None,
                                per_task_metrics=False):
    """
    Computes statistics of model on test data and saves results to csv.

    Parameters
    ----------
    metrics: list
      List of dc.metrics.Metric objects
    csv_out: str, optional
      Filename to write CSV of model predictions.
    stats_out: str, optional
      Filename to write computed statistics.
    per_task_metrics: bool, optional
      If true, return computed metric for each task on multitask dataset.
    """
    y = self.dataset.y
    y = undo_transforms(y, self.output_transformers)
    w = self.dataset.w

    if not len(metrics):
      return {}
    else:
      # the first metric's mode decides how predictions are post-processed
      mode = metrics[0].mode
    y_pred = self.model.predict(self.dataset, self.output_transformers)
    if mode == "classification":
      # write out hard class labels rather than per-class probabilities
      y_pred_print = np.argmax(y_pred, -1)
    else:
      y_pred_print = y_pred
    multitask_scores = {}
    all_task_scores = {}

    if csv_out is not None:
      log("Saving predictions to %s" % csv_out, self.verbose)
      self.output_predictions(y_pred_print, csv_out)

    # Compute multitask metrics
    for metric in metrics:
      if per_task_metrics:
        multitask_scores[metric.name], computed_metrics = metric.compute_metric(
            y, y_pred, w, per_task_metrics=True)
        all_task_scores[metric.name] = computed_metrics
      else:
        multitask_scores[metric.name] = metric.compute_metric(
            y, y_pred, w, per_task_metrics=False)

    if stats_out is not None:
      log("Saving stats to %s" % stats_out, self.verbose)
      self.output_statistics(multitask_scores, stats_out)

    if not per_task_metrics:
      return multitask_scores
    else:
      return multitask_scores, all_task_scores
class GeneratorEvaluator(object):
  """
  Partner class to Evaluator.
  Instead of operating over datasets this class operates over a Generator.
  Evaluate a Metric over a model and Generator.
  """

  def __init__(self,
               model,
               generator,
               transformers,
               labels,
               outputs=None,
               n_tasks=1,
               n_classes=2,
               weights=list()):
    """
    Parameters
    ----------
    model: Model
      Model to evaluate
    generator: Generator
      Generator which yields {layer: numpyArray} to feed into model
    transformers:
      Transformers to "undo" when applied to the models outputs
    labels: list of Layer
      layers which are keys in the generator to compare to outputs
    outputs: list of Layer
      if None will use the outputs of the model
    n_tasks: int, optional
      Number of tasks (passed through to metric computation).
    n_classes: int, optional
      Number of classes for classification metrics.
    weights: np.array
      Must be of the shape (n_samples, n_tasks)
      if weights[sample][task] is 0 that sample will not be used
      for computing the task metric

    NOTE(review): ``weights=list()`` is a mutable default argument; it is
    only read here, never mutated, so it is harmless in practice.
    """
    self.model = model
    self.generator = generator
    self.n_tasks = n_tasks
    self.n_classes = n_classes
    # Only transformers that altered y need to be undone on predictions.
    self.output_transformers = [
        transformer for transformer in transformers if transformer.transform_y
    ]
    if outputs is None:
      self.output_keys = model.outputs
    else:
      self.output_keys = outputs
    self.label_keys = labels
    self.weights = weights
    if len(self.label_keys) != len(self.output_keys):
      raise ValueError("Must have same number of labels and outputs")
    if len(self.label_keys) != 1:
      raise ValueError("GeneratorEvaluator currently only supports one label")

  def compute_model_performance(self, metrics, per_task_metrics=False):
    """
    Computes statistics of model on test data and saves results to csv.

    Parameters
    ----------
    metrics: list
      List of dc.metrics.Metric objects
    per_task_metrics: bool, optional
      If true, return computed metric for each task on multitask dataset.
    """
    self.model.build()
    y = []
    w = []

    def generator_closure():
      # Tee off the labels (and optional weights) while feeding batches to
      # the model so predictions and ground truth stay aligned.
      for feed_dict in self.generator:
        y.append(feed_dict[self.label_keys[0]])
        if len(self.weights) > 0:
          w.append(feed_dict[self.weights[0]])
        yield feed_dict

    if not len(metrics):
      return {}
    else:
      mode = metrics[0].mode
    y_pred = self.model.predict_on_generator(generator_closure())
    y = np.concatenate(y, axis=0)
    multitask_scores = {}
    all_task_scores = {}

    y = undo_transforms(y, self.output_transformers)
    y_pred = undo_transforms(y_pred, self.output_transformers)
    if len(w) != 0:
      w = np.array(w)
      w = np.reshape(w, newshape=y.shape)

    # Compute multitask metrics
    for metric in metrics:
      if per_task_metrics:
        multitask_scores[metric.name], computed_metrics = metric.compute_metric(
            y, y_pred, w, per_task_metrics=True, n_classes=self.n_classes)
        all_task_scores[metric.name] = computed_metrics
      else:
        multitask_scores[metric.name] = metric.compute_metric(
            y, y_pred, w, per_task_metrics=False, n_classes=self.n_classes)

    if not per_task_metrics:
      return multitask_scores
    else:
      return multitask_scores, all_task_scores
| mit |
dmaticzka/EDeN | eden/util.py | 2 | 8894 | #!/usr/bin/env python
"""Provides utilities for file handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import io
from sklearn.externals import joblib
import requests
import os
import sys
from collections import deque
from itertools import tee
import random
import logging.handlers
import multiprocessing as mp
import time
from toolz.curried import concat
import logging
logger = logging.getLogger(__name__)
def timeit(method):
    """Decorator that logs the wrapped callable's wall-clock runtime.

    The elapsed time is logged at DEBUG level under the callable's name.
    """
    from functools import wraps

    @wraps(method)  # fix: preserve the wrapped function's name/docstring
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        logger.debug('%s %2.2f sec' % (method.__name__, te - ts))
        return result
    return timed
def pmap(func, iterable, chunk_size=1):
    """Map ``func`` over ``iterable`` using a pool of worker processes."""
    workers = mp.Pool()
    mapped = workers.map(func, iterable, chunksize=chunk_size)
    workers.close()
    workers.join()
    return list(mapped)
def ppipe(iterable, func, chunk_size=1):
    """Multi-core pipe: map ``func`` over the items, then flatten the results."""
    chunks = pmap(func, iterable, chunk_size)
    return list(concat(chunks))
def configure_logging(logger, verbosity=0, filename=None):
    """Utility to configure the logging aspects.

    If filename is None then no info is stored in files.
    If filename is not None then everything that is logged is dumped to file
    (including program traces).
    Verbosity is an int that can take values: 0 -> warning,
    1 -> info, >=2 -> debug.
    All levels are displayed on stdout, not on stderr.
    Please use exceptions and asserts to output on stderr.
    """
    logger.propagate = False
    logger.handlers = []
    # Map verbosity to a log level.  Fix: the trailing ``else`` previously
    # also caught verbosity=0 and set the ultra-verbose level 4, which
    # contradicted the documented "0 -> warning" contract.
    if verbosity == 1:
        log_level = logging.INFO
    elif verbosity == 2:
        log_level = logging.DEBUG
    elif verbosity > 2:
        log_level = 4  # finer than DEBUG: show absolutely everything
    else:
        log_level = logging.WARNING
    logger.setLevel(log_level)

    # create console handler on stdout (stderr is reserved for exceptions)
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(log_level)
    # create formatter
    cformatter = logging.Formatter('%(message)s')
    # add formatter to ch
    ch.setFormatter(cformatter)
    # add handlers to logger
    logger.addHandler(ch)

    if filename is not None:
        # create a rotating file handler that captures everything
        fh = logging.handlers.RotatingFileHandler(filename=filename,
                                                  maxBytes=10000000,
                                                  backupCount=10)
        fh.setLevel(logging.DEBUG)
        # create formatter
        fformatter = logging.Formatter(
            '%(asctime)s | %(levelname)-6s | %(name)10s | %(filename)10s |'
            ' %(lineno)4s | %(message)s')
        # add formatter to fh
        fh.setFormatter(fformatter)
        # add handlers to logger
        logger.addHandler(fh)
def _serialize_list(items, separator='_'):
    """Serialize ``items`` to a single ``separator``-joined string.

    Strings are returned unchanged; dicts render as ``key:value`` pairs;
    any other iterable renders its elements in order; scalars are str()-ed.
    """
    if isinstance(items, str):
        return items
    if is_iterable(items):
        if isinstance(items, dict):
            return str(separator.join([str(key) + ':' + str(items[key])
                                       for key in items]))
        # Fix: only lists were handled before, so tuples, sets and
        # generators silently fell through and returned None.
        return str(separator.join([str(item) for item in items]))
    else:
        return str(items)
def serialize_dict(the_dict, full=True, offset='small'):
    """Render a dict as one right-aligned ``key: value`` line per key."""
    if not the_dict:
        return ""
    rendered = []
    for key in sorted(the_dict):
        if offset == 'small':
            line = '%10s: %s' % (key, the_dict[key])
        elif offset == 'large':
            line = '%25s: %s' % (key, the_dict[key])
        elif offset == 'very_large':
            line = '%50s: %s' % (key, the_dict[key])
        else:
            raise Exception('unrecognized option: %s' % offset)
        # keep every entry on a single physical line
        line = line.replace('\n', ' ')
        if full is False and len(line) > 100:
            # elide the middle of overly long entries
            line = line[:100] + ' ... ' + line[-20:]
        rendered.append(line)
    return '\n'.join(rendered)
def read(uri):
    """Abstract read function.

    EDeN can accept a URL, a file path and a python list.
    In all cases an iterable object should be returned.
    """
    # Lists (and other already-iterable containers) are handed back as-is.
    if isinstance(uri, list):
        return uri
    try:
        # First treat uri as a URL and try to fetch it.
        return requests.get(uri).text.split('\n')
    except ValueError:
        # Otherwise assume it names a local file.
        return open(uri)
def is_iterable(test):
    """Return True when ``test`` exposes the iterator protocol."""
    return hasattr(test, '__iter__')
def describe(data_matrix):
    """Get the shape of a sparse matrix and its average nnz."""
    n_instances, n_features = data_matrix.shape
    avg_nnz = data_matrix.getnnz() / n_instances
    return 'Instances: %3d ; Features: %d with an avg of %d per instance' % (
        n_instances, n_features, avg_nnz)
def iterator_size(iterable):
    """Length of an iterator.

    Note: if the iterable is a generator it consumes it.
    """
    if hasattr(iterable, '__len__'):
        return len(iterable)
    # Exhaust the iterator keeping only the last (index, item) pair;
    # enumerate starts at 1, so the final index equals the item count.
    tail = deque(enumerate(iterable, 1), maxlen=1)
    return tail[0][0] if tail else 0
def random_bipartition(int_range, relative_size=.7, random_state=None):
    """Randomly split the ids [0, int_range) into two disjoint shuffled parts.

    Parameters
    ----------
    int_range : int
        Number of consecutive zero-based ids to partition.
    relative_size : float
        Fraction of ids assigned to the first part.
    random_state : hashable, optional
        Seed for reproducibility; a fresh random seed is drawn when None.

    Returns
    -------
    (list, list)
        The two disjoint id lists.
    """
    # Fix: ``if not random_state`` treated random_state=0 as "no seed
    # given", silently making a 0 seed non-deterministic.
    if random_state is None:
        random_state = random.random()
    random.seed(random_state)
    ids = list(range(int_range))
    random.shuffle(ids)
    split_point = int(int_range * relative_size)
    return ids[:split_point], ids[split_point:]
def selection_iterator(iterable, ids):
    """selection_iterator.

    Given an iterable and a list of ids (zero based) yield only the
    items whose id matches.
    """
    ids = sorted(ids)
    if not ids:
        # Fix: an empty id list used to raise IndexError on the first item.
        return
    counter = 0
    for id, item in enumerate(iterable):
        if id == ids[counter]:
            yield item
            counter += 1
            if counter == len(ids):
                # all requested ids found; stop consuming the iterable
                break
def random_bipartition_iter(iterable, relative_size=.5, random_state=1):
    """Split an iterable into two random, disjoint sub-iterators."""
    counting_iter, first_iter, second_iter = tee(iterable, 3)
    n_items = iterator_size(counting_iter)
    first_ids, second_ids = random_bipartition(
        n_items, relative_size=relative_size, random_state=random_state)
    return (selection_iterator(first_iter, first_ids),
            selection_iterator(second_iter, second_ids))
def store_matrix(matrix='',
                 output_dir_path='',
                 out_file_name='',
                 output_format=''):
    """Persist ``matrix`` under ``output_dir_path/out_file_name``.

    output_format selects the serializer: "MatrixMarket" (2d only),
    "numpy" (.npy), "joblib", or "text" (1d only, one value per line).
    """
    if not os.path.exists(output_dir_path):
        os.mkdir(output_dir_path)
    full_out_file_name = os.path.join(output_dir_path, out_file_name)
    if output_format == "MatrixMarket":
        if len(matrix.shape) == 1:
            raise Exception(
                "'MatrixMarket' format supports only 2D dimensional array\
 and not vectors")
        else:
            io.mmwrite(full_out_file_name, matrix, precision=None)
    elif output_format == "numpy":
        np.save(full_out_file_name, matrix)
    elif output_format == "joblib":
        joblib.dump(matrix, full_out_file_name)
    elif output_format == "text":
        with open(full_out_file_name, "w") as f:
            if len(matrix.shape) == 1:
                for x in matrix:
                    f.write("%s\n" % (x))
            else:
                raise Exception(
                    "'text' format supports only mono dimensional array\
 and not matrices")
    # NOTE(review): an unrecognized output_format silently writes nothing
    # but still logs "Written file" — confirm whether that is intended.
    logger.info("Written file: %s" % full_out_file_name)
def dump(obj, output_dir_path='', out_file_name=''):
    """Pickle ``obj`` to ``<output_dir_path>/<out_file_name>.pkl`` via joblib."""
    if not os.path.exists(output_dir_path):
        os.mkdir(output_dir_path)
    # joblib appends nothing itself, so add the .pkl suffix explicitly
    full_out_file_name = os.path.join(output_dir_path, out_file_name) + ".pkl"
    joblib.dump(obj, full_out_file_name)
def load(output_dir_path='', out_file_name=''):
    """Unpickle and return the object stored by :func:`dump`."""
    # mirror the path construction used by dump()
    full_out_file_name = os.path.join(output_dir_path, out_file_name) + ".pkl"
    obj = joblib.load(full_out_file_name)
    return obj
def report_base_statistics(vec, separator='\n'):
    """Return per-class counts and relative frequencies for the labels in ``vec``."""
    from collections import Counter
    counts = Counter(vec)
    msg = ''
    for label in counts:
        msg += "class: %s count:%d (%0.2f)%s" % (
            label, counts[label], counts[label] / float(len(vec)), separator)
    return msg
def save_output(text=None, output_dir_path=None, out_file_name=None):
    """Write one stripped line per item of ``text`` into the requested file."""
    if not os.path.exists(output_dir_path):
        os.mkdir(output_dir_path)
    out_path = os.path.join(output_dir_path, out_file_name)
    with open(out_path, 'w') as handle:
        handle.writelines("%s\n" % str(item).strip() for item in text)
    logger.info("Written file: %s (%d lines)" %
                (out_path, len(text)))
| mit |
tomlof/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 94 | 10801 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    """sparse_coef_ must expose coef_ as a sparse matrix."""
    # Check that the sparse_coef property works
    clf = ElasticNet()
    clf.coef_ = [1, 2, 3]

    assert_true(sp.isspmatrix(clf.sparse_coef_))
    # round-trip back to dense must reproduce the assigned coefficients
    assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
    """normalize=True must give the same fit for dense and sparse input."""
    # Check that the normalize option in enet works
    X = sp.csc_matrix([[-1], [0], [1]])
    y = [-1, 0, 1]
    clf_dense = ElasticNet(fit_intercept=True, normalize=True)
    clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
    clf_dense.fit(X, y)
    X = sp.csc_matrix(X)
    clf_sparse.fit(X, y)
    assert_almost_equal(clf_dense.dual_gap_, 0)
    assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
    # Check that the sparse lasso can handle zero data without crashing
    X = sp.csc_matrix((3, 1))
    y = [0, 0, 0]
    T = np.array([[1], [2], [3]])
    clf = Lasso().fit(X, y)
    pred = clf.predict(T)
    # with all-zero training data the fitted model must be identically zero
    assert_array_almost_equal(clf.coef_, [0])
    assert_array_almost_equal(pred, [0, 0, 0])
    assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
    # Test ElasticNet for various values of alpha and l1_ratio with list X
    X = np.array([[-1], [0], [1]])
    X = sp.csc_matrix(X)
    Y = [-1, 0, 1]  # just a straight line
    T = np.array([[2], [3], [4]])  # test sample

    # this should be the same as unregularized least squares
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    # catch warning about alpha=0.
    # this is discouraged but should work.
    ignore_warnings(clf.fit)(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)

    # moderate regularization shrinks the slope below 1
    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
    # Test ElasticNet for various values of alpha and l1_ratio with sparse X
    f = ignore_warnings
    # training samples
    X = sp.lil_matrix((3, 1))
    X[0, 0] = -1
    # X[1, 0] = 0
    X[2, 0] = 1
    Y = [-1, 0, 1]  # just a straight line (the identity function)

    # test samples
    T = sp.lil_matrix((3, 1))
    T[0, 0] = 2
    T[1, 0] = 3
    T[2, 0] = 4

    # this should be the same as lasso
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    f(clf.fit)(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)

    # same expected values as the dense/list-input toy test above
    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
                     positive=False, n_targets=1):
    """Build an ill-posed sparse linear regression problem.

    The design matrix has many noisy features, comparatively few samples,
    and roughly 50% zero entries; only the first ``n_informative`` features
    influence the targets.
    """
    rng = np.random.RandomState(seed)
    # generate a ground truth model (draw order matters for reproducibility)
    w = rng.randn(n_features, n_targets)
    w[n_informative:] = 0.0  # only the top features are impacting the model
    if positive:
        w = np.abs(w)
    X = rng.randn(n_samples, n_features)
    noise_mask = rng.uniform(size=(n_samples, n_features))
    X[noise_mask > 0.5] = 0.0  # 50% of zeros in input signal

    # generate training ground truth labels
    y = np.dot(X, w)
    X = sp.csc_matrix(X)
    if n_targets == 1:
        y = np.ravel(y)
    return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
    """Shared body: sparse and dense ElasticNet must converge identically."""
    n_samples, n_features, max_iter = 100, 100, 1000
    n_informative = 10
    X, y = make_sparse_data(n_samples, n_features, n_informative,
                            positive=positive)

    # first half for testing, second half for training
    X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
    y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]

    s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
                       max_iter=max_iter, tol=1e-7, positive=positive,
                       warm_start=True)
    s_clf.fit(X_train, y_train)

    assert_almost_equal(s_clf.dual_gap_, 0, 4)
    assert_greater(s_clf.score(X_test, y_test), 0.85)

    # check the convergence is the same as the dense version
    d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
                       max_iter=max_iter, tol=1e-7, positive=positive,
                       warm_start=True)
    d_clf.fit(X_train.toarray(), y_train)

    assert_almost_equal(d_clf.dual_gap_, 0, 4)
    assert_greater(d_clf.score(X_test, y_test), 0.85)

    assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
    assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)

    # check that the coefs are sparse
    assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
    # exercise all four (alpha, fit_intercept, positive) configurations
    _test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
                                         positive=False)
    _test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
                                         positive=False)
    _test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
                                         positive=True)
    _test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
                                         positive=True)
def test_sparse_lasso_not_as_toy_dataset():
    """Sparse and dense Lasso must converge to the same sparse solution."""
    n_samples, max_iter, n_informative = 100, 1000, 10
    X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)

    half = n_samples // 2
    X_train, X_test = X[half:], X[:half]
    y_train, y_test = y[half:], y[:half]

    sparse_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter,
                       tol=1e-7).fit(X_train, y_train)
    assert_almost_equal(sparse_clf.dual_gap_, 0, 4)
    assert_greater(sparse_clf.score(X_test, y_test), 0.85)

    # the dense solver must behave identically
    dense_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter,
                      tol=1e-7).fit(X_train.toarray(), y_train)
    assert_almost_equal(dense_clf.dual_gap_, 0, 4)
    assert_greater(dense_clf.score(X_test, y_test), 0.85)

    # exactly the informative features should carry non-zero coefficients
    assert_equal(np.sum(sparse_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
    """Multi-target ElasticNet fit must match the per-target fits."""
    n_targets = 3
    X, y = make_sparse_data(n_targets=n_targets)

    # XXX: There is a bug when precompute is not None!
    estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
    estimator.fit(X, y)

    # snapshot the joint fit before refitting per target
    coef = estimator.coef_
    intercept = estimator.intercept_
    dual_gap = estimator.dual_gap_

    for target in range(n_targets):
        estimator.fit(X, y[:, target])
        assert_array_almost_equal(coef[target, :], estimator.coef_)
        assert_array_almost_equal(intercept[target], estimator.intercept_)
        assert_array_almost_equal(dual_gap[target], estimator.dual_gap_)
def test_path_parameters():
    """ElasticNetCV path parameters and MSE path: sparse vs. dense input."""
    X, y = make_sparse_data()
    n_alphas, max_iter = 10, 50

    clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
                       l1_ratio=0.5, fit_intercept=False)
    ignore_warnings(clf.fit)(X, y)  # new params

    assert_almost_equal(0.5, clf.l1_ratio)
    assert_equal(n_alphas, clf.n_alphas)
    assert_equal(n_alphas, len(clf.alphas_))
    sparse_mse_path = clf.mse_path_

    # refitting on the densified data must reproduce the same MSE path
    ignore_warnings(clf.fit)(X.toarray(), y)
    assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
    """ElasticNetCV and LassoCV must agree between sparse and dense input."""
    X, y = make_sparse_data(n_samples=40, n_features=10)
    for normalize in (True, False):
        # same order as before: ElasticNetCV (cv=5), then LassoCV (cv=4)
        for factory, cv in ((ElasticNetCV, 5), (LassoCV, 4)):
            clf_sparse = factory(max_iter=100, cv=cv, normalize=normalize)
            ignore_warnings(clf_sparse.fit)(X, y)
            clf_dense = factory(max_iter=100, cv=cv, normalize=normalize)
            ignore_warnings(clf_dense.fit)(X.toarray(), y)
            assert_almost_equal(clf_sparse.alpha_, clf_dense.alpha_, 7)
            assert_almost_equal(clf_sparse.intercept_,
                                clf_dense.intercept_, 7)
            assert_array_almost_equal(clf_sparse.mse_path_,
                                      clf_dense.mse_path_)
            assert_array_almost_equal(clf_sparse.alphas_, clf_dense.alphas_)
def test_same_multiple_output_sparse_dense():
    """Multi-output ElasticNet predictions must match for dense and sparse."""
    X = [[0, 1, 2, 3, 4],
         [0, 2, 5, 8, 11],
         [9, 10, 11, 12, 13],
         [10, 11, 12, 13, 14]]
    y = [[1, 2, 3, 4, 5],
         [1, 3, 6, 9, 12],
         [10, 11, 12, 13, 14],
         [11, 12, 13, 14, 15]]
    sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1)

    for normalize in (True, False):
        dense_est = ElasticNet(normalize=normalize)
        ignore_warnings(dense_est.fit)(X, y)
        predict_dense = dense_est.predict(sample)

        sparse_est = ElasticNet(normalize=normalize)
        ignore_warnings(sparse_est.fit)(sp.coo_matrix(X), y)
        predict_sparse = sparse_est.predict(sp.coo_matrix(sample))

        assert_array_almost_equal(predict_sparse, predict_dense)
| bsd-3-clause |
h2educ/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
           verbose=False, cov_computation_method=empirical_covariance,
           random_state=None):
    """Run the C-step iterations of the FastMCD algorithm on ``X``.

    Starting either from a random subset of ``n_support`` observations or
    from the given ``initial_estimates``, the C-step procedure of
    [Rouseeuw1999]_ iteratively refines a (location, covariance) couple
    until the determinant of the covariance estimate stops decreasing.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data set in which we look for the ``n_support`` observations whose
        scatter matrix has minimum determinant.

    n_support : int, > n_samples / 2
        Number of observations used to compute the robust location and
        covariance estimates.

    remaining_iterations : int, optional
        Maximum number of C-step iterations.  Two iterations already get
        close to the optimum; 30 is more than enough in practice.

    initial_estimates : 2-tuple, optional
        Optional warm start from which to iterate instead of a random
        support:
        - initial_estimates[0]: an initial location estimate
        - initial_estimates[1]: an initial covariance estimate

    verbose : boolean, optional
        Verbose mode.

    cov_computation_method : callable, default empirical_covariance
        Function used to compute the covariance of a support; must return
        an array of shape (n_features, n_features).

    random_state : integer or numpy.RandomState, optional
        Seed or generator used to draw the random initial support.

    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location estimate.

    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimate.

    det : float
        Log-determinant of ``covariance``.

    support : array-like, shape (n_samples,)
        Boolean mask of the ``n_support`` observations defining the
        minimum-determinant scatter matrix.

    dist : array-like, shape (n_samples,)
        Squared Mahalanobis distances of all observations.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    # Normalize the inputs once, then delegate to the private worker.
    return _c_step(np.asarray(X), n_support,
                   remaining_iterations=remaining_iterations,
                   initial_estimates=initial_estimates, verbose=verbose,
                   cov_computation_method=cov_computation_method,
                   random_state=check_random_state(random_state))
def _c_step(X, n_support, random_state, remaining_iterations=30,
            initial_estimates=None, verbose=False,
            cov_computation_method=empirical_covariance):
    """Private C-step worker behind :func:`c_step`.

    Returns a 5-tuple ``(location, covariance, det, support, dist)`` where
    ``det`` is the log-determinant of the covariance computed on the final
    support and ``dist`` holds squared Mahalanobis distances of all samples.
    """
    n_samples, n_features = X.shape
    # Initialisation
    support = np.zeros(n_samples, dtype=bool)
    if initial_estimates is None:
        # compute initial robust estimates from a random subset
        support[random_state.permutation(n_samples)[:n_support]] = True
    else:
        # get initial robust estimates from the function parameters
        location = initial_estimates[0]
        covariance = initial_estimates[1]
        # run a special iteration for that case (to get an initial support)
        precision = pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(1)
        # the support is the n_support closest points in Mahalanobis distance
        support[np.argsort(dist)[:n_support]] = True
    X_support = X[support]
    location = X_support.mean(0)
    covariance = cov_computation_method(X_support)
    # Iterative procedure for Minimum Covariance Determinant computation.
    # Each C-step does not increase the determinant, so iterate until it
    # stops decreasing or the iteration budget is exhausted.
    det = fast_logdet(covariance)
    previous_det = np.inf
    # NOTE(review): the code after the loop uses `dist` and `precision`,
    # which in the random-initialisation branch are only assigned inside
    # this loop -- it relies on the loop running at least once (true as
    # long as `det` is finite, since previous_det starts at +inf).
    while (det < previous_det) and (remaining_iterations > 0):
        # save old estimates values
        previous_location = location
        previous_covariance = covariance
        previous_det = det
        previous_support = support
        # compute a new support from the full data set mahalanobis distances
        precision = pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
        # compute new estimates
        support = np.zeros(n_samples, dtype=bool)
        support[np.argsort(dist)[:n_support]] = True
        X_support = X[support]
        location = X_support.mean(axis=0)
        covariance = cov_computation_method(X_support)
        det = fast_logdet(covariance)
        # update remaining iterations for early stopping
        remaining_iterations -= 1
    previous_dist = dist
    # NOTE(review): distances are recomputed with the *updated* location but
    # with the precision of the covariance from the last completed iteration
    # (precision is not refreshed after the final covariance update) --
    # confirm this mismatch is intentional.
    dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
    # Catch computation errors
    if np.isinf(det):
        raise ValueError(
            "Singular covariance matrix. "
            "Please check that the covariance matrix corresponding "
            "to the dataset is full rank and that MinCovDet is used with "
            "Gaussian-distributed data (or at least data drawn from a "
            "unimodal, symmetric distribution.")
    # Check convergence
    if np.allclose(det, previous_det):
        # c_step procedure converged
        if verbose:
            print("Optimal couple (location, covariance) found before"
                  " ending iterations (%d left)" % (remaining_iterations))
        results = location, covariance, det, support, dist
    elif det > previous_det:
        # determinant has increased (should not happen: we did something
        # wrong) -- fall back to the estimates of the previous iteration
        warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
                      % (det, previous_det), RuntimeWarning)
        results = previous_location, previous_covariance, \
            previous_det, previous_support, previous_dist
    # Check early stopping
    if remaining_iterations == 0:
        if verbose:
            print('Maximum number of iterations reached')
        results = location, covariance, det, support, dist
    return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
                      verbose=False,
                      cov_computation_method=empirical_covariance,
                      random_state=None):
    """Find the ``select`` best pure subsets of ``n_support`` observations.

    Runs the C-step procedure of Rousseeuw and Van Driessen
    ([Rouseeuw1999]_) from several starting points and keeps the results
    whose covariance estimates have the smallest determinants.  The
    ``n_support`` retained observations of each result form a "pure"
    (outlier-free) subset, referred to as the `support`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data (sub)set in which we look for the n_support purest observations.

    n_support : int, [(n + p + 1)/2] < n_support < n
        The number of samples the pure data set must contain.

    n_trials : int, nb_trials > 0 or 2-tuple
        Either the number of random starts, or a 2-tuple of initial
        estimates to iterate from:
        - n_trials[0]: array-like, shape (n_trials, n_features)
          list of initial location estimates
        - n_trials[1]: array-like, shape (n_trials, n_features, n_features)
          list of initial covariance estimates

    select : int, int > 0
        Number of best candidates results to return.

    n_iter : int, nb_iter > 0
        Maximum number of C-step iterations per trial (2 is enough to get
        close to the final solution; "never" exceeds 20).

    verbose : boolean, default False
        Control the output verbosity.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)

    random_state : integer or numpy.RandomState, default None
        The random generator used. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    See Also
    ---------
    c_step

    Returns
    -------
    best_locations : array-like, shape (select, n_features)
        The `select` location estimates of the best supports found.

    best_covariances : array-like, shape (select, n_features, n_features)
        The `select` covariance estimates of the best supports found.

    best_supports : array-like, shape (select, n_samples)
        The `select` best supports found in the data set (`X`).

    best_ds : array-like, shape (select, n_samples)
        The Mahalanobis distances associated with the best supports.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    random_state = check_random_state(random_state)
    # unpack to validate that X is 2-D (the values themselves are unused)
    n_samples, n_features = X.shape

    # An integer means "that many random starts"; a tuple provides explicit
    # (locations, covariances) warm starts, one trial per row.
    if isinstance(n_trials, numbers.Integral):
        estimates_list = None
    elif isinstance(n_trials, tuple):
        estimates_list = n_trials
        n_trials = estimates_list[0].shape[0]
    else:
        raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
                        " integer, got %s (%s)" % (n_trials, type(n_trials)))

    # run `n_trials` C-step procedures (random or warm-started)
    all_estimates = []
    for trial in range(n_trials):
        if estimates_list is None:
            init = None
        else:
            init = (estimates_list[0][trial], estimates_list[1][trial])
        all_estimates.append(_c_step(
            X, n_support, remaining_iterations=n_iter,
            initial_estimates=init, verbose=verbose,
            cov_computation_method=cov_computation_method,
            random_state=random_state))
    all_locs, all_covs, all_dets, all_supports, all_ds = zip(*all_estimates)

    # keep the `select` candidates with the smallest (log-)determinants
    index_best = np.argsort(all_dets)[:select]
    best_locations = np.asarray(all_locs)[index_best]
    best_covariances = np.asarray(all_covs)[index_best]
    best_supports = np.asarray(all_supports)[index_best]
    best_ds = np.asarray(all_ds)[index_best]
    return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
             cov_computation_method=empirical_covariance,
             random_state=None):
    """Estimates the Minimum Covariance Determinant matrix.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data matrix, with p features and n samples.

    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        `[n_sample + n_features + 1] / 2`.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)

    random_state : integer or numpy.RandomState, optional
        The generator used to randomly subsample. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Notes
    -----
    The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
    in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
    1999, American Statistical Association and the American Society
    for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates and random subsets before
    pooling them into a larger subsets, and finally into the full data set.
    Depending on the size of the initial sample, we have one, two or three
    such computation levels.

    Note that only raw estimates are returned. If one is interested in
    the correction and reweighting steps described in [Rouseeuw1999]_,
    see the MinCovDet object.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
        Determinant Estimator, 1999, American Statistical Association
        and the American Society for Quality, TECHNOMETRICS

    .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400

    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location of the data.

    covariance : array-like, shape (n_features, n_features)
        Robust covariance of the features.

    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.

    dist : array-like, shape (n_samples,)
        Squared Mahalanobis distances of all observations with respect to
        the returned (location, covariance) estimates.
    """
    random_state = check_random_state(random_state)
    X = np.asarray(X)
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))
        warnings.warn("Only one sample available. "
                      "You may want to reshape your data array")
    n_samples, n_features = X.shape

    # minimum breakdown value
    if support_fraction is None:
        n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
    else:
        n_support = int(support_fraction * n_samples)

    # 1-dimensional case quick computation
    # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
    #  Regression and Outlier Detection, John Wiley & Sons, chapter 4)
    if n_features == 1:
        if n_support < n_samples:
            # find the sample shortest halves
            X_sorted = np.sort(np.ravel(X))
            diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
            halves_start = np.where(diff == np.min(diff))[0]
            # take the middle points' mean to get the robust location estimate
            location = 0.5 * (X_sorted[n_support + halves_start]
                              + X_sorted[halves_start]).mean()
            support = np.zeros(n_samples, dtype=bool)
            X_centered = X - location
            support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
            covariance = np.asarray([[np.var(X[support])]])
            location = np.array([location])
            # get precision matrix in an optimized way
            precision = pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
        else:
            # the support is the whole sample: plain mean and variance
            support = np.ones(n_samples, dtype=bool)
            covariance = np.asarray([[np.var(X)]])
            location = np.asarray([np.mean(X)])
            X_centered = X - location
            # get precision matrix in an optimized way
            precision = pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)

    # Starting FastMCD algorithm for p-dimensional case
    if (n_samples > 500) and (n_features > 1):
        # 1. Find candidate supports on subsets
        # a. split the set in subsets of size ~ 300
        n_subsets = n_samples // 300
        n_samples_subsets = n_samples // n_subsets
        samples_shuffle = random_state.permutation(n_samples)
        h_subset = int(np.ceil(n_samples_subsets *
                               (n_support / float(n_samples))))
        # b. perform a total of 500 trials
        n_trials_tot = 500
        # c. select 10 best (location, covariance) for each subset
        n_best_sub = 10
        n_trials = max(10, n_trials_tot // n_subsets)
        n_best_tot = n_subsets * n_best_sub
        try:
            all_best_locations = np.zeros((n_best_tot, n_features))
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
        except MemoryError:
            # The buffers above are too big: keep fewer candidates per
            # subset (less optimal, but far smaller).
            # BUGFIX: the handler used to re-allocate an array of the very
            # same size (which would simply re-raise MemoryError) and only
            # reduced the counters afterwards, leaving the buffer shapes
            # inconsistent with the subset_slice indexing below.  Reduce
            # the candidate counts first, then allocate both buffers with
            # the consistent, smaller shapes.
            n_best_sub = 2
            n_best_tot = n_subsets * n_best_sub
            all_best_locations = np.zeros((n_best_tot, n_features))
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
        for i in range(n_subsets):
            low_bound = i * n_samples_subsets
            high_bound = low_bound + n_samples_subsets
            current_subset = X[samples_shuffle[low_bound:high_bound]]
            best_locations_sub, best_covariances_sub, _, _ = select_candidates(
                current_subset, h_subset, n_trials,
                select=n_best_sub, n_iter=2,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
            subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
            all_best_locations[subset_slice] = best_locations_sub
            all_best_covariances[subset_slice] = best_covariances_sub
        # 2. Pool the candidate supports into a merged set
        #    (possibly the full dataset)
        n_samples_merged = min(1500, n_samples)
        h_merged = int(np.ceil(n_samples_merged *
                               (n_support / float(n_samples))))
        if n_samples > 1500:
            n_best_merged = 10
        else:
            n_best_merged = 1
        # find the best couples (location, covariance) on the merged set
        selection = random_state.permutation(n_samples)[:n_samples_merged]
        locations_merged, covariances_merged, supports_merged, d = \
            select_candidates(
                X[selection], h_merged,
                n_trials=(all_best_locations, all_best_covariances),
                select=n_best_merged,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
        # 3. Finally get the overall best (locations, covariance) couple
        if n_samples < 1500:
            # directly get the best couple (location, covariance)
            location = locations_merged[0]
            covariance = covariances_merged[0]
            # scatter the merged-set support/distances back to full length
            support = np.zeros(n_samples, dtype=bool)
            dist = np.zeros(n_samples)
            support[selection] = supports_merged[0]
            dist[selection] = d[0]
        else:
            # select the best couple on the full dataset
            locations_full, covariances_full, supports_full, d = \
                select_candidates(
                    X, n_support,
                    n_trials=(locations_merged, covariances_merged),
                    select=1,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state)
            location = locations_full[0]
            covariance = covariances_full[0]
            support = supports_full[0]
            dist = d[0]
    elif n_features > 1:
        # 1. Find the 10 best couples (location, covariance)
        #    considering two iterations
        n_trials = 30
        n_best = 10
        locations_best, covariances_best, _, _ = select_candidates(
            X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
            cov_computation_method=cov_computation_method,
            random_state=random_state)
        # 2. Select the best couple on the full dataset amongst the 10
        locations_full, covariances_full, supports_full, d = select_candidates(
            X, n_support, n_trials=(locations_best, covariances_best),
            select=1, cov_computation_method=cov_computation_method,
            random_state=random_state)
        location = locations_full[0]
        covariance = covariances_full[0]
        support = supports_full[0]
        dist = d[0]

    return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
    """Minimum Covariance Determinant (MCD): robust estimator of covariance.

    The Minimum Covariance Determinant covariance estimator is to be applied
    on Gaussian-distributed data, but could still be relevant on data
    drawn from a unimodal, symmetric distribution. It is not meant to be used
    with multi-modal data (the algorithm used to fit a MinCovDet object is
    likely to fail in such a case).
    One should consider projection pursuit methods to deal with multi-modal
    datasets.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    store_precision : bool
        Specify if the estimated precision is stored.

    assume_centered : Boolean
        If True, the support of the robust location and the covariance
        estimates is computed, and a covariance estimate is recomputed from
        it, without centering the data.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, the robust location and covariance are directly computed
        with the FastMCD algorithm without additional treatment.

    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        [n_sample + n_features + 1] / 2

    random_state : integer or numpy.RandomState, optional
        The random generator used. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    Attributes
    ----------
    raw_location_ : array-like, shape (n_features,)
        The raw robust estimated location before correction and re-weighting.

    raw_covariance_ : array-like, shape (n_features, n_features)
        The raw robust estimated covariance before correction and re-weighting.

    raw_support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute
        the raw robust estimates of location and shape, before correction
        and re-weighting.

    location_ : array-like, shape (n_features,)
        Estimated robust location

    covariance_ : array-like, shape (n_features, n_features)
        Estimated robust covariance matrix

    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust estimates of location and shape.

    dist_ : array-like, shape (n_samples,)
        Mahalanobis distances of the training set (on which `fit` is called)
        observations.

    References
    ----------

    .. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
        J. Am Stat Ass, 79:871, 1984.`
    .. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS`
    .. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`

    """
    # covariance estimator applied to the selected supports; a staticmethod
    # attribute so subclasses can substitute another estimator
    _nonrobust_covariance = staticmethod(empirical_covariance)

    def __init__(self, store_precision=True, assume_centered=False,
                 support_fraction=None, random_state=None):
        # plain attribute assignment only (scikit-learn estimator convention:
        # no validation or computation in __init__)
        self.store_precision = store_precision
        self.assume_centered = assume_centered
        self.support_fraction = support_fraction
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : not used, present for API consistence purpose.

        Returns
        -------
        self : object
            Returns self.

        """
        X = check_array(X)
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        # (count singular values of X'X above a small threshold)
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated to your dataset "
                          "is not full rank")
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X, support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state)
        if self.assume_centered:
            # override the estimated center with the origin and recompute
            # the support covariance and distances around zero
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                        assume_centered=True)
            # get precision matrix in an optimized way
            precision = pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        self.correct_covariance(X)
        # re-weight estimator
        self.reweight_covariance(X)
        return self

    def correct_covariance(self, data):
        """Apply a correction to raw Minimum Covariance Determinant estimates.

        Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1984]_.

        Parameters
        ----------
        data : array-like, shape (n_samples, n_features)
            The data matrix, with p features and n samples.
            The data set must be the one which was used to compute
            the raw estimates.

        Returns
        -------
        covariance_corrected : array-like, shape (n_features, n_features)
            Corrected robust covariance estimate.

        """
        # scale factor making the raw MCD covariance consistent at the
        # normal model: median squared distance over the chi2 median
        correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
        covariance_corrected = self.raw_covariance_ * correction
        # side effect: dist_ is rescaled in place to match the corrected
        # covariance
        self.dist_ /= correction
        return covariance_corrected

    def reweight_covariance(self, data):
        """Re-weight raw Minimum Covariance Determinant estimates.

        Re-weight observations using Rousseeuw's method (equivalent to
        deleting outlying observations from the data set before
        computing location and covariance estimates). [Rouseeuw1984]_

        Parameters
        ----------
        data : array-like, shape (n_samples, n_features)
            The data matrix, with p features and n samples.
            The data set must be the one which was used to compute
            the raw estimates.

        Returns
        -------
        location_reweighted : array-like, shape (n_features, )
            Re-weighted robust location estimate.

        covariance_reweighted : array-like, shape (n_features, n_features)
            Re-weighted robust covariance estimate.

        support_reweighted : array-like, type boolean, shape (n_samples,)
            A mask of the observations that have been used to compute
            the re-weighted robust location and covariance estimates.

        """
        n_samples, n_features = data.shape
        # keep observations whose squared Mahalanobis distance falls below
        # the 97.5% quantile of the chi2 distribution
        mask = self.dist_ < chi2(n_features).isf(0.025)
        if self.assume_centered:
            location_reweighted = np.zeros(n_features)
        else:
            location_reweighted = data[mask].mean(0)
        covariance_reweighted = self._nonrobust_covariance(
            data[mask], assume_centered=self.assume_centered)
        support_reweighted = np.zeros(n_samples, dtype=bool)
        support_reweighted[mask] = True
        # update the fitted attributes (covariance_, precision_, location_,
        # support_, dist_) with the re-weighted estimates
        self._set_covariance(covariance_reweighted)
        self.location_ = location_reweighted
        self.support_ = support_reweighted
        X_centered = data - self.location_
        self.dist_ = np.sum(
            np.dot(X_centered, self.get_precision()) * X_centered, 1)
        return location_reweighted, covariance_reweighted, support_reweighted
| bsd-3-clause |
hlin117/statsmodels | statsmodels/tools/tests/test_grouputils.py | 31 | 11494 | import numpy as np
import pandas as pd
from statsmodels.tools.grouputils import Grouping
from statsmodels.tools.tools import categorical
from statsmodels.datasets import grunfeld, anes96
from pandas.util import testing as ptesting
class CheckGrouping(object):
    """Shared behaviour checks for :class:`Grouping`.

    Subclasses are expected to provide the fixtures in their ``setupClass``:
    ``cls.grouping`` (a Grouping instance), ``cls.data`` (the indexed
    DataFrame it was built from) and ``cls.expected_counts`` (the known
    group sizes for level 0).
    """

    def test_reindex(self):
        # smoke test
        self.grouping.reindex(self.grouping.index)

    def test_count_categories(self):
        # counts per level-0 category must match the known group sizes
        self.grouping.count_categories(level=0)
        np.testing.assert_equal(self.grouping.counts, self.expected_counts)

    def test_sort(self):
        """Sorting must work for DataFrame, 2d array, Series and 1d array."""
        # data frame
        sorted_data, index = self.grouping.sort(self.data)
        expected_sorted_data = self.data.sort_index()
        ptesting.assert_frame_equal(sorted_data, expected_sorted_data)
        np.testing.assert_(isinstance(sorted_data, pd.DataFrame))
        np.testing.assert_(not index.equals(self.grouping.index))
        # make sure it copied
        if hasattr(sorted_data, 'equals'):  # newer pandas
            np.testing.assert_(not sorted_data.equals(self.data))
        # 2d arrays
        sorted_data, index = self.grouping.sort(self.data.values)
        np.testing.assert_array_equal(sorted_data,
                                      expected_sorted_data.values)
        np.testing.assert_(isinstance(sorted_data, np.ndarray))
        # 1d series
        series = self.data[self.data.columns[0]]
        sorted_data, index = self.grouping.sort(series)
        expected_sorted_data = series.sort_index()
        ptesting.assert_series_equal(sorted_data, expected_sorted_data)
        np.testing.assert_(isinstance(sorted_data, pd.Series))
        if hasattr(sorted_data, 'equals'):
            np.testing.assert_(not sorted_data.equals(series))
        # 1d array
        array = series.values
        sorted_data, index = self.grouping.sort(array)
        expected_sorted_data = series.sort_index().values
        np.testing.assert_array_equal(sorted_data, expected_sorted_data)
        np.testing.assert_(isinstance(sorted_data, np.ndarray))

    def test_transform_dataframe(self):
        # group means through the Grouping must match a direct pandas
        # groupby on the same index level
        names = self.data.index.names
        transformed_dataframe = self.grouping.transform_dataframe(
            self.data,
            lambda x: x.mean(),
            level=0)
        expected = self.data.reset_index().groupby(
            names[0]).apply(lambda x: x.mean())[self.data.columns]
        np.testing.assert_array_equal(transformed_dataframe,
                                      expected.values)
        if len(names) > 1:
            # repeat the check on the second index level when present
            transformed_dataframe = self.grouping.transform_dataframe(
                self.data, lambda x: x.mean(),
                level=1)
            expected = self.data.reset_index().groupby(
                names[1]).apply(lambda x: x.mean())[self.data.columns]
            np.testing.assert_array_equal(transformed_dataframe,
                                          expected.values)

    def test_transform_array(self):
        # same check as test_transform_dataframe but feeding a raw ndarray
        names = self.data.index.names
        transformed_array = self.grouping.transform_array(
            self.data.values,
            lambda x: x.mean(),
            level=0)
        expected = self.data.reset_index().groupby(
            names[0]).apply(lambda x: x.mean())[self.data.columns]
        np.testing.assert_array_equal(transformed_array,
                                      expected.values)
        if len(names) > 1:
            transformed_array = self.grouping.transform_array(
                self.data.values,
                lambda x: x.mean(), level=1)
            expected = self.data.reset_index().groupby(
                names[1]).apply(lambda x: x.mean())[self.data.columns]
            np.testing.assert_array_equal(transformed_array,
                                          expected.values)

    def test_transform_slices(self):
        # slice-wise means must match pandas groupby means (float tolerance)
        names = self.data.index.names
        transformed_slices = self.grouping.transform_slices(
            self.data.values,
            lambda x, idx: x.mean(0),
            level=0)
        expected = self.data.reset_index().groupby(names[0]).mean()[
            self.data.columns]
        np.testing.assert_allclose(transformed_slices, expected.values,
                                   rtol=1e-12, atol=1e-25)
        if len(names) > 1:
            transformed_slices = self.grouping.transform_slices(
                self.data.values,
                lambda x, idx: x.mean(0),
                level=1)
            expected = self.data.reset_index().groupby(
                names[1]).mean()[self.data.columns]
            np.testing.assert_allclose(transformed_slices, expected.values,
                                       rtol=1e-12, atol=1e-25)

    def test_dummies_groups(self):
        # smoke test, calls dummy_sparse under the hood
        self.grouping.dummies_groups()
        if len(self.grouping.group_names) > 1:
            self.grouping.dummies_groups(level=1)

    def test_dummy_sparse(self):
        # the sparse dummy matrix must match the dense categorical encoding
        data = self.data
        self.grouping.dummy_sparse()
        expected = categorical(data.index.get_level_values(0).values,
                               drop=True)
        np.testing.assert_equal(self.grouping._dummies.toarray(), expected)
        if len(self.grouping.group_names) > 1:
            self.grouping.dummy_sparse(level=1)
            expected = categorical(data.index.get_level_values(1).values,
                                   drop=True)
            np.testing.assert_equal(self.grouping._dummies.toarray(),
                                    expected)
class TestMultiIndexGrouping(CheckGrouping):
@classmethod
def setupClass(cls):
grun_data = grunfeld.load_pandas().data
multi_index_data = grun_data.set_index(['firm', 'year'])
multi_index_panel = multi_index_data.index
cls.grouping = Grouping(multi_index_panel)
cls.data = multi_index_data
cls.expected_counts = [20] * 11
class TestIndexGrouping(CheckGrouping):
@classmethod
def setupClass(cls):
grun_data = grunfeld.load_pandas().data
index_data = grun_data.set_index(['firm'])
index_group = index_data.index
cls.grouping = Grouping(index_group)
cls.data = index_data
cls.expected_counts = [20] * 11
def test_init_api():
# make a multi-index panel
grun_data = grunfeld.load_pandas().data
multi_index_panel = grun_data.set_index(['firm', 'year']).index
grouping = Grouping(multi_index_panel)
# check group_names
np.testing.assert_array_equal(grouping.group_names, ['firm', 'year'])
# check shape
np.testing.assert_array_equal(grouping.index_shape, (11, 20))
# check index_int
np.testing.assert_array_equal(grouping.labels,
[[ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]])
grouping = Grouping(multi_index_panel, names=['firms', 'year'])
np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
# make a multi-index grouping
anes_data = anes96.load_pandas().data
multi_index_groups = anes_data.set_index(['educ', 'income',
'TVnews']).index
grouping = Grouping(multi_index_groups)
np.testing.assert_array_equal(grouping.group_names,
['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
# make a list multi-index panel
list_panel = multi_index_panel.tolist()
grouping = Grouping(list_panel, names=['firms', 'year'])
np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
np.testing.assert_array_equal(grouping.index_shape, (11, 20))
# make a list multi-index grouping
list_groups = multi_index_groups.tolist()
grouping = Grouping(list_groups, names=['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.group_names,
['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
# single-variable index grouping
index_group = multi_index_panel.get_level_values(0)
grouping = Grouping(index_group)
# the original multi_index_panel had it's name changed inplace above
np.testing.assert_array_equal(grouping.group_names, ['firms'])
np.testing.assert_array_equal(grouping.index_shape, (220,))
# single variable list grouping
list_group = multi_index_panel.get_level_values(0).tolist()
grouping = Grouping(list_group)
np.testing.assert_array_equal(grouping.group_names, ["group0"])
np.testing.assert_array_equal(grouping.index_shape, 11*20)
# test generic group names
grouping = Grouping(list_groups)
np.testing.assert_array_equal(grouping.group_names,
['group0', 'group1', 'group2'])
| bsd-3-clause |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/cluster/tests/test_spectral.py | 72 | 7950 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.randint(0, n_class + 1, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| unlicense |
gobiiproject/GOBii-System | gobiiscripts/extractors/HapmapExtractor.py | 1 | 5351 | #!/usr/bin/env python
# encoding: utf-8
'''
edu.cornell.gobii.HapmapExtractor -- shortdesc
edu.cornell.gobii.HapmapExtractor is a description
It defines classes_and_methods
@author: yn259
@copyright: 2016 Cornell University. All rights reserved.
@license: license
@contact: yn259@cornell.edu
@deffield updated: Updated
'''
import sys
import os
import pandas as pd
import numpy as np
from optparse import OptionParser
__all__ = []
__version__ = 0.1
__date__ = '2016-07-05'
__updated__ = '2016-07-05'
DEBUG = 1
TESTRUN = 0
PROFILE = 0
def main(argv=None):
'''Command line options.'''
program_name = os.path.basename(sys.argv[0])
program_version = "v0.1"
program_build_date = "%s" % __updated__
program_version_string = '%%prog %s (%s)' % (program_version, program_build_date)
#program_usage = '''usage: spam two eggs''' # optional - will be autogenerated by optparse
program_longdesc = '''''' # optional - give further explanation about what the program does
program_license = "Copyright 2016 yn259 (Cornell University) \
Licensed under the Apache License 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0"
if argv is None:
argv = sys.argv
try:
# setup option parser
parser = OptionParser(version=program_version_string, epilog=program_longdesc, description=program_license)
parser.add_option("-p", "--project", dest="projfile", action="store", help="set project input path", metavar="FILE")
parser.add_option("-s", "--sample", dest="samplefile", action="store", help="set sample input path", metavar="FILE")
parser.add_option("-k", "--marker", dest="markerfile", action="store", help="set marker input path", metavar="FILE")
parser.add_option("-m", "--matrix", dest="matrixfile", action="store", help="set matrix input path", metavar="FILE")
parser.add_option("-o", "--out", dest="outfile", action="store", help="set output path", metavar="FILE")
# parser.add_option("-h", "--help", dest="help", action="help")
parser.add_option("-v", "--verbose", dest="verbose", action="count", help="set verbosity level [default: %default]")
# set defaults
parser.set_defaults(outfile="./out.txt", infile="./in.txt")
# process options
(opts, args) = parser.parse_args(argv)
if opts.verbose > 0:
print("verbosity level = %d" % opts.verbose)
if opts.infile:
print("infile = %s" % opts.infile)
if opts.outfile:
print("outfile = %s" % opts.outfile)
# MAIN BODY #
print('loading sample file...')
sampledata = pd.read_table(opts.samplefile, header=None, dtype='string')
print('transposing sample data...')
t_sample = sampledata.transpose();
s_rows = [None] * len(t_sample)
print('loading marker file...')
markerdata = pd.read_table(opts.markerfile, header=None, dtype='string')
columns=['assembly#', 'center', 'protLSID', 'assayLSID', 'panelLSID', 'QCcode']
df_columns = pd.DataFrame(columns)
marker_extra = pd.DataFrame(np.NaN, index=['NaN'] * (len(markerdata)-1), columns=columns)
marker_extra = pd.DataFrame(marker_extra.values)
marker_extra = pd.concat([df_columns.transpose(), marker_extra], ignore_index=True)
markerdata = pd.concat([markerdata, marker_extra], axis=1, ignore_index=True)
print('creating empty dataframes...')
df_empty = pd.DataFrame(np.NaN, index=s_rows, columns=range(len(markerdata.columns)-1))
df_empty = pd.DataFrame(df_empty.values)
print('concatenating df_empty with samples...')
sampledata = pd.concat([df_empty, t_sample], axis=1, ignore_index=True)
print('loading matrix file')
matrixdata = pd.read_table(opts.matrixfile, header=None, dtype='string')
print('concatenating markers with matrix...')
df_empty_matrix = pd.DataFrame(np.NaN, index=[None] * 1, columns=range(len(matrixdata.columns)))
matrixdata = pd.concat([df_empty_matrix, matrixdata], axis=0, ignore_index=True)
matrixdata = pd.concat([markerdata, matrixdata], axis=1, ignore_index=True)
print('concatenating samples with matrix...')
df_data = pd.concat([sampledata, matrixdata])
print('writing hapmap to file')
df_data.to_csv(opts.outfile, header=None, index=None, sep="\t", na_rep='', mode='w', line_terminator='\n')
return 0;
except Exception, e:
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help")
return 2
if __name__ == "__main__":
# if DEBUG:
# sys.argv.append("-h")
if TESTRUN:
import doctest
doctest.testmod()
if PROFILE:
import cProfile
import pstats
profile_filename = 'edu.cornell.gobii.HapmapExtractor_profile.txt'
cProfile.run('main()', profile_filename)
statsfile = open("profile_stats.txt", "wb")
p = pstats.Stats(profile_filename, stream=statsfile)
stats = p.strip_dirs().sort_stats('cumulative')
stats.print_stats()
statsfile.close()
sys.exit(0)
sys.exit(main()) | mit |
yandex/rep | rep/estimators/_tmvaFactory.py | 1 | 3221 | """
Supplementary script to train a TMVA estimator.
"""
from __future__ import division, print_function, absolute_import
import sys
import os
import numpy
import pandas
from root_numpy.tmva import add_classification_events, add_regression_events
import ROOT
from . import tmva
import six
from six.moves import cPickle as pickle
__author__ = 'Tatiana Likhomanenko'
def tmva_process(estimator, info, data, target, sample_weight):
"""
Create a TMVA classification/regression factory; training, testing and evaluating.
:param estimator: classifier/regressor which should be trained
:type estimator: rep.estimators.tmva.TMVAClassifier or rep.estimators.tmva.TMVARegressor
:param rep.estimators.tmva._AdditionalInformation info: additional information
:param pandas.DataFrame data: training data
:param target: array-like targets
:param sample_weight: array-like samples weights
"""
ROOT.TMVA.Tools.Instance()
file_out = ROOT.TFile(os.path.join(info.directory, info.tmva_root), "RECREATE")
factory = ROOT.TMVA.Factory(info.tmva_job, file_out, estimator.factory_options)
for var in data.columns:
factory.AddVariable(var)
# Set data
if info.model_type == 'classification':
if estimator.method == 'kCuts':
# signal must be the first added to the tree, because method *rectangular cut optimization* doesn't work in another way
inds = numpy.argsort(target)[::-1]
data = data.ix[inds, :]
target = target[inds]
sample_weight = sample_weight[inds]
add_classification_events(factory, numpy.array(data), target, weights=sample_weight)
add_classification_events(factory, numpy.array(data), target, weights=sample_weight, test=True)
elif info.model_type == 'regression':
factory.AddTarget('target')
add_regression_events(factory, numpy.array(data), target, weights=sample_weight)
add_regression_events(factory, numpy.array(data), target, weights=sample_weight, test=True)
else:
raise NotImplementedError("Doesn't support type {}".format(info.model_type))
factory.PrepareTrainingAndTestTree(ROOT.TCut('1'), "")
# Set method
parameters = ":".join(
["{key}={value}".format(key=key, value=value) for key, value in estimator.method_parameters.items()])
factory.BookMethod(ROOT.TMVA.Types.__getattribute__(ROOT.TMVA.Types, estimator.method), estimator._method_name,
parameters)
factory.TrainAllMethods()
file_out.Close()
def main():
# Python 2 dumps in text mode. Python 3 in binary.
if six.PY2:
stdin = sys.stdin
else:
stdin = sys.stdin.buffer
# Reading the configuration from the stdin
classifier = pickle.load(stdin)
info = pickle.load(stdin)
data = pickle.load(stdin)
labels = numpy.array(pickle.load(stdin))
sample_weight = numpy.array(pickle.load(stdin))
assert isinstance(classifier, tmva.TMVAClassifier) or isinstance(classifier, tmva.TMVARegressor)
assert isinstance(info, tmva._AdditionalInformation)
assert isinstance(data, pandas.DataFrame)
tmva_process(classifier, info, data, labels, sample_weight)
| apache-2.0 |
JohnVinyard/zounds | zounds/ui/training_monitor.py | 1 | 7083 | from io import BytesIO
from .api import ZoundsApp
import tornado.websocket
import tornado.web
import json
from collections import defaultdict
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
class TrainingMonitorApp(ZoundsApp):
def __init__(
self,
trainer,
keys_to_graph,
batch_frequency=10,
n_training_points=100,
epoch_key='epoch',
batch_key='batch',
base_path=r'/zounds/',
model=None,
visualization_feature=None,
audio_feature=None,
globals={},
locals={},
secret=None):
super(TrainingMonitorApp, self).__init__(
base_path=base_path,
model=model,
visualization_feature=visualization_feature,
audio_feature=audio_feature,
globals=globals,
locals=locals,
html='training_monitor.html',
secret=secret)
self.n_training_points = n_training_points
self.batch_frequency = batch_frequency
self.batch_key = batch_key
self.epoch_key = epoch_key
self.keys_to_graph = keys_to_graph
self.trainer = trainer
self.training_history = defaultdict(list)
self.trainer.register_batch_complete_callback(
self._collect_training_history)
def custom_routes(self):
routes = super(TrainingMonitorApp, self).custom_routes()
routes.extend([
(r'/zounds/training/?', self.training_handler()),
(r'/zounds/graph/?', self.graph_handler())
])
return routes
def _collect_training_history(self, *args, **kwargs):
batch = kwargs['batch']
if batch % self.batch_frequency:
return
for k in self.keys_to_graph:
# truncate
self.training_history[k] = \
self.training_history[k][-self.n_training_points:]
# append the new data
try:
self.training_history[k].append(kwargs[k])
except KeyError:
# no data has been added for this key
pass
def training_handler(self):
app = self
class TrainingHandler(tornado.websocket.WebSocketHandler):
def _send_message(self):
def x(*args, **kwargs):
batch = kwargs['batch']
if batch % app.batch_frequency:
return
data = dict(epoch=kwargs['epoch'], batch=batch)
for key in app.keys_to_graph:
try:
data[key] = kwargs[key]
except KeyError:
# there's no data to report for this key
pass
self.write_message(json.dumps(data))
return x
def open(self):
self.func = self._send_message()
app.trainer \
.register_batch_complete_callback(self.func)
def on_close(self):
app.trainer \
.unregister_batch_complete_callback(self.func)
return TrainingHandler
def graph_handler(self):
app = self
class GraphHandler(tornado.web.RequestHandler):
def get(self):
plt.style.use('dark_background')
fig = plt.figure()
handles = []
for k in app.keys_to_graph:
handle, = plt.plot(app.training_history[k], label=k)
handles.append(handle)
plt.legend(handles=handles)
bio = BytesIO()
plt.savefig(
bio, bbox_inches='tight', pad_inches=0, format='png')
bio.seek(0)
fig.clf()
plt.close('all')
self.set_header('Content-Type', 'image/png')
self.write(bio.read())
self.finish()
return GraphHandler
class SupervisedTrainingMonitorApp(TrainingMonitorApp):
def __init__(
self,
trainer,
batch_frequency=10,
n_training_points=100,
epoch_key='epoch',
batch_key='batch',
base_path=r'/zounds',
model=None,
visualization_feature=None,
audio_feature=None,
globals={},
locals={},
secret=None):
super(SupervisedTrainingMonitorApp, self).__init__(
trainer=trainer,
keys_to_graph=('train_error', 'test_error'),
model=model,
batch_frequency=batch_frequency,
n_training_points=n_training_points,
epoch_key=epoch_key,
batch_key=batch_key,
base_path=base_path,
visualization_feature=visualization_feature,
audio_feature=audio_feature,
globals=globals,
locals=locals,
secret=secret)
class GanTrainingMonitorApp(TrainingMonitorApp):
def __init__(
self,
trainer,
batch_frequency=10,
n_training_points=100,
epoch_key='epoch',
batch_key='batch',
base_path=r'/zounds',
model=None,
visualization_feature=None,
audio_feature=None,
globals={},
locals={},
secret=None):
super(GanTrainingMonitorApp, self).__init__(
trainer=trainer,
keys_to_graph=('generator_score', 'real_score', 'critic_loss'),
model=model,
batch_frequency=batch_frequency,
n_training_points=n_training_points,
epoch_key=epoch_key,
batch_key=batch_key,
base_path=base_path,
visualization_feature=visualization_feature,
audio_feature=audio_feature,
globals=globals,
locals=locals,
secret=secret)
class TripletEmbeddingMonitorApp(TrainingMonitorApp):
def __init__(
self,
trainer,
batch_frequency=10,
n_training_points=100,
epoch_key='epoch',
batch_key='batch',
base_path=r'/zounds',
model=None,
visualization_feature=None,
audio_feature=None,
globals={},
locals={},
secret=None):
super(TripletEmbeddingMonitorApp, self).__init__(
trainer=trainer,
keys_to_graph=('error',),
model=model,
batch_frequency=batch_frequency,
n_training_points=n_training_points,
epoch_key=epoch_key,
batch_key=batch_key,
base_path=base_path,
visualization_feature=visualization_feature,
audio_feature=audio_feature,
globals=globals,
locals=locals,
secret=secret)
| mit |
datasnakes/Datasnakes-Scripts | examples/standalone-scripts/mygene2csv.py | 1 | 1316 | #!/usr/bin/env python
"""This script is designed to generate some basic gene information from a csv
file of refseqrna accession numbers for human genes."""
import argparse
import textwrap
from OrthoEvol.Tools.mygene import MyGene
def main(infile, outfile):
"""Use MyGene to generate basic gene information.
:param infile: Path to csv input file.
:param outfile: Path to csv output file.
"""
mg = MyGene(infile, outfile)
mg.query_mygene()
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
This is a command line interface the uses mygene
and pandas to convert a csv column of refseqrna
accession numbers of human genes to gene information.
'''))
parser.add_argument('-i', '--infile', help='Name and path of your input file.',
required=True)
parser.add_argument('-o', '--outfile',
help='Name and path of your output file.',
required=True)
args = parser.parse_args()
main(args.infile, args.outfile)
| mit |
joshloyal/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 70 | 4523 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is the different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
print(__doc__)
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA(svd_solver='full')
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(svd_solver='full', n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa,
linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
label='LedoitWolf MLE' % n_components_pca_mle, linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
jigargandhi/UdemyMachineLearning | Machine Learning A-Z Template Folder/Part 9 - Dimensionality Reduction/Section 44 - Linear Discriminant Analysis (LDA)/lda.py | 5 | 2836 | # LDA
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Wine.csv')
X = dataset.iloc[:, 0:13].values
y = dataset.iloc[:, 13].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Applying LDA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
lda = LDA(n_components = 2)
X_train = lda.fit_transform(X_train, y_train)
X_test = lda.transform(X_test)
# Fitting Logistic Regression to the Training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Visualising the results
# The original file repeated the same ~15 plotting lines for the training
# and test sets; the shared logic now lives in one helper.
from matplotlib.colors import ListedColormap

def _plot_decision_regions(X_set, y_set, title):
    """Plot the classifier's decision regions over the two LDA components,
    then scatter the true class labels of (X_set, y_set) on top.

    Does not call plt.show(); the caller decides when to display.
    """
    # Dense grid (step 0.01) covering the data range plus a 1-unit margin.
    X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                         np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
    # Colour every grid point by its predicted class (3 classes -> 3 colours).
    plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
                 alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
    plt.xlim(X1.min(), X1.max())
    plt.ylim(X2.min(), X2.max())
    for i, j in enumerate(np.unique(y_set)):
        plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                    c = ListedColormap(('red', 'green', 'blue'))(i), label = j)
    plt.title(title)
    plt.xlabel('LD1')
    plt.ylabel('LD2')
    plt.legend()

_plot_decision_regions(X_train, y_train, 'Logistic Regression (Training set)')
plt.show()
# Visualising the Test set results (plt.show() follows on the next line of
# the original file).
_plot_decision_regions(X_test, y_test, 'Logistic Regression (Test set)')
plt.show() | mit |
CVML/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <florian.wilhelm@gmail.com>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)

# (name, estimator) pairs compared in both experiments below.
estimators = [('OLS', LinearRegression()),
              ('Theil-Sen', TheilSenRegressor(random_state=42)),
              ('RANSAC', RANSACRegressor(random_state=42)), ]


def _fit_and_plot(X, y, line_x):
    """Fit every estimator on (X, y), time the fit, and draw its predicted
    line over line_x on the current axes.

    Deduplicates the loop the original file repeated for each experiment.
    """
    for name, estimator in estimators:
        t0 = time.time()
        estimator.fit(X, y)
        elapsed_time = time.time() - t0
        y_pred = estimator.predict(line_x.reshape(2, 1))
        plt.plot(line_x, y_pred,
                 label='%s (fit time: %.2fs)' % (name, elapsed_time))
    plt.axis('tight')
    plt.legend(loc='upper left')

##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
_fit_and_plot(X, y, line_x)

##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
_fit_and_plot(X, y, line_x)
plt.show()
| bsd-3-clause |
bundgus/python-playground | matplotlib-playground/examples/widgets/menu.py | 1 | 4958 | from __future__ import division, print_function
import numpy as np
import matplotlib
import matplotlib.colors as colors
import matplotlib.patches as patches
import matplotlib.mathtext as mathtext
import matplotlib.pyplot as plt
import matplotlib.artist as artist
import matplotlib.image as image
class ItemProperties(object):
    """Display attributes (font size, colours, alpha) for one menu-item state.

    RGB triples are pre-computed once at construction so that hover
    switching only copies channel values instead of re-parsing colour names.
    """
    def __init__(self, fontsize=14, labelcolor='black', bgcolor='yellow',
                 alpha=1.0):
        self.fontsize = fontsize
        self.alpha = alpha
        # Keep both the original colour spec and its RGB triple.
        self.labelcolor = labelcolor
        self.labelcolor_rgb = colors.colorConverter.to_rgb(labelcolor)
        self.bgcolor = bgcolor
        self.bgcolor_rgb = colors.colorConverter.to_rgb(bgcolor)
class MenuItem(artist.Artist):
    """One clickable, hoverable menu entry rendered directly on the figure.

    The label text is rasterised once via mathtext into an RGBA array whose
    RGB channels are rewritten when the hover state flips.
    """
    # Shared mathtext rasteriser and pixel padding around every label.
    parser = mathtext.MathTextParser("Bitmap")
    padx = 5
    pady = 5
    def __init__(self, fig, labelstr, props=None, hoverprops=None,
                 on_select=None):
        """Build the label image and background rect for *labelstr*.

        on_select, if given, is called with this MenuItem on mouse release
        over the item.
        """
        artist.Artist.__init__(self)
        self.set_figure(fig)
        self.labelstr = labelstr
        if props is None:
            props = ItemProperties()
        if hoverprops is None:
            hoverprops = ItemProperties()
        self.props = props
        self.hoverprops = hoverprops
        self.on_select = on_select
        # x: greyscale mask of the rendered text; depth: baseline offset.
        x, self.depth = self.parser.to_mask(
            labelstr, fontsize=props.fontsize, dpi=fig.dpi)
        # Both states must share one rasterised label, hence one font size.
        if props.fontsize != hoverprops.fontsize:
            raise NotImplementedError(
                'support for different font sizes not implemented')
        self.labelwidth = x.shape[1]
        self.labelheight = x.shape[0]
        # RGBA image: alpha comes from the text mask, RGB filled per state.
        self.labelArray = np.zeros((x.shape[0], x.shape[1], 4))
        self.labelArray[:, :, -1] = x/255.
        self.label = image.FigureImage(fig, origin='upper')
        self.label.set_array(self.labelArray)
        # we'll update these later
        self.rect = patches.Rectangle((0, 0), 1, 1)
        self.set_hover_props(False)
        fig.canvas.mpl_connect('button_release_event', self.check_select)
    def check_select(self, event):
        """Invoke the on_select callback when the release happened over us."""
        over, junk = self.rect.contains(event)
        if not over:
            return
        if self.on_select is not None:
            self.on_select(self)
    def set_extent(self, x, y, w, h):
        """Position the background rect and label image in figure pixels."""
        print(x, y, w, h)
        self.rect.set_x(x)
        self.rect.set_y(y)
        self.rect.set_width(w)
        self.rect.set_height(h)
        # Offset the label by the padding; depth re-aligns the text baseline.
        self.label.ox = x + self.padx
        self.label.oy = y - self.depth + self.pady/2.
        self.rect._update_patch_transform()
        self.hover = False
    def draw(self, renderer):
        """Draw background first, then the label image on top."""
        self.rect.draw(renderer)
        self.label.draw(renderer)
    def set_hover_props(self, b):
        """Recolour label and background for hover state *b* (bool)."""
        if b:
            props = self.hoverprops
        else:
            props = self.props
        # NOTE(review): this unpack rebinds `b` to the blue channel; the
        # original flag is not used again below, so it is harmless.
        r, g, b = props.labelcolor_rgb
        self.labelArray[:, :, 0] = r
        self.labelArray[:, :, 1] = g
        self.labelArray[:, :, 2] = b
        self.label.set_array(self.labelArray)
        self.rect.set(facecolor=props.bgcolor, alpha=props.alpha)
    def set_hover(self, event):
        'check the hover status of event and return true if status is changed'
        b, junk = self.rect.contains(event)
        changed = (b != self.hover)
        if changed:
            self.set_hover_props(b)
            self.hover = b
        return changed
class Menu(object):
    """Vertical stack of MenuItems laid out top-down on a figure.

    Sizes every item to the largest label and wires figure motion events
    to per-item hover handling.
    """
    def __init__(self, fig, menuitems):
        self.figure = fig
        # Draw item images individually instead of compositing them.
        fig.suppressComposite = True
        self.menuitems = menuitems
        self.numitems = len(menuitems)
        # All items share the extent of the widest/tallest label.
        maxw = max([item.labelwidth for item in menuitems])
        maxh = max([item.labelheight for item in menuitems])
        # NOTE(review): totalh is computed but never used.
        totalh = self.numitems*maxh + (self.numitems + 1)*2*MenuItem.pady
        # Fixed pixel anchor of the menu's top-left corner.
        x0 = 100
        y0 = 400
        width = maxw + 2*MenuItem.padx
        height = maxh + MenuItem.pady
        for item in menuitems:
            left = x0
            bottom = y0 - maxh - MenuItem.pady
            item.set_extent(left, bottom, width, height)
            fig.artists.append(item)
            # Step the anchor down one row for the next item.
            y0 -= maxh + MenuItem.pady
        fig.canvas.mpl_connect('motion_notify_event', self.on_move)
    def on_move(self, event):
        """Redraw the canvas as soon as any item's hover state flips.

        NOTE(review): the loop breaks at the first item whose state changed,
        so items after it keep a stale hover state until the next motion
        event; this mirrors the stock example's behaviour.
        """
        draw = False
        for item in self.menuitems:
            draw = item.set_hover(event)
            if draw:
                self.figure.canvas.draw()
                break
fig = plt.figure()
# Leave room on the left of the axes for the menu artists.
fig.subplots_adjust(left=0.3)
# Normal vs. hover appearance shared by every item.
props = ItemProperties(labelcolor='black', bgcolor='yellow',
                       fontsize=15, alpha=0.2)
hoverprops = ItemProperties(labelcolor='white', bgcolor='blue',
                            fontsize=15, alpha=0.2)
menuitems = []
for label in ('open', 'close', 'save', 'save as', 'quit'):
    # The callback only reads the MenuItem passed to it, so the loop's
    # late-binding closure over `label` is harmless here.
    def on_select(item):
        print('you selected %s' % item.labelstr)
    item = MenuItem(fig, label, props=props, hoverprops=hoverprops,
                    on_select=on_select)
    menuitems.append(item)
menu = Menu(fig, menuitems)
plt.show()
| mit |
datachand/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_weights_var_impGBM.py | 4 | 6135 | import sys
sys.path.insert(1, "../../../")
import h2o, tests
import random
def weights_var_imp():
def check_same(data1, data2, min_rows_scale):
gbm1_regression = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy"],
min_rows=5,
ntrees=5,
max_depth=2)
gbm2_regression = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["economy"],
training_frame=data2,
min_rows=5*min_rows_scale,
weights_column="weights",
ntrees=5,
max_depth=2)
gbm1_binomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy_20mpg"],
min_rows=5,
distribution="bernoulli",
ntrees=5,
max_depth=2)
gbm2_binomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["economy_20mpg"],
training_frame=data2,
weights_column="weights",
min_rows=5*min_rows_scale,
distribution="bernoulli",
ntrees=5,
max_depth=2)
gbm1_multinomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["cylinders"],
min_rows=5,
distribution="multinomial",
ntrees=5,
max_depth=2)
gbm2_multinomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["cylinders"],
training_frame=data2,
weights_column="weights",
min_rows=5*min_rows_scale,
distribution="multinomial",
ntrees=5,
max_depth=2)
reg1_vi = gbm1_regression.varimp(return_list=True)
reg2_vi = gbm2_regression.varimp(return_list=True)
bin1_vi = gbm1_binomial.varimp(return_list=True)
bin2_vi = gbm2_binomial.varimp(return_list=True)
mul1_vi = gbm1_multinomial.varimp(return_list=True)
mul2_vi = gbm2_multinomial.varimp(return_list=True)
print "Varimp (regresson) no weights vs. weights: {0}, {1}".format(reg1_vi, reg2_vi)
print "Varimp (binomial) no weights vs. weights: {0}, {1}".format(bin1_vi, bin2_vi)
print "Varimp (multinomial) no weights vs. weights: {0}, {1}".format(mul1_vi, mul2_vi)
for rvi1, rvi2 in zip(reg1_vi, reg2_vi): assert rvi1 == rvi1, "Expected vi's (regression) to be the same, but got {0}, and {1}".format(rvi1, rvi2)
for bvi1, bvi2 in zip(bin1_vi, bin2_vi): assert bvi1 == bvi1, "Expected vi's (binomial) to be the same, but got {0}, and {1}".format(bvi1, bvi2)
for mvi1, mvi2 in zip(mul1_vi, mul2_vi): assert mvi1 == mvi1, "Expected vi's (multinomial) to be the same, but got {0}, and {1}".format(mvi1, mvi2)
h2o_cars_data = h2o.import_file(h2o.locate("smalldata/junit/cars_20mpg.csv"))
h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()
# uniform weights same as no weights
weight = random.randint(1,10)
uniform_weights = [[weight] for r in range(406)]
h2o_uniform_weights = h2o.H2OFrame(python_obj=uniform_weights)
h2o_uniform_weights.set_names(["weights"])
h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)
print "\n\nChecking that using uniform weights is equivalent to no weights:"
check_same(h2o_cars_data, h2o_data_uniform_weights, weight)
# zero weights same as removed observations
zero_weights = [[0] if random.randint(0,1) else [1] for r in range(406)]
h2o_zero_weights = h2o.H2OFrame(python_obj=zero_weights)
h2o_zero_weights.set_names(["weights"])
h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
print "\n\nChecking that using some zero weights is equivalent to removing those observations:"
check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)
# doubled weights same as doubled observations
doubled_weights = [[1] if random.randint(0,1) else [2] for r in range(406)]
h2o_doubled_weights = h2o.H2OFrame(python_obj=doubled_weights)
h2o_doubled_weights.set_names(["weights"])
h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)
doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
colnames = doubled_data.pop(0)
for idx, w in enumerate(doubled_weights):
if w[0] == 2: doubled_data.append(doubled_data[idx])
h2o_data_doubled = h2o.H2OFrame(python_obj=doubled_data)
h2o_data_doubled.set_names(colnames)
h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()
print "\n\nChecking that doubling some weights is equivalent to doubling those observations:"
check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)
if __name__ == "__main__":
tests.run_test(sys.argv, weights_var_imp)
| apache-2.0 |
mlperf/training_results_v0.7 | DellEMC/benchmarks/ssd/implementation/mxnet/tests/generate_report.py | 1 | 5957 | #!/usr/bin/env python
import os
import re
import argparse
from glob import glob
import collections
import git
import numpy as np
import pandas as pd
from jinja2 import Template
import plotly
import plotly.express as px
import plotly.graph_objects as go
# Tag default paths with the current git revision; fall back to a pinned
# sha when the script runs outside a git checkout.
try:
    git_sha = git.Repo(search_parent_directories=True).head.object.hexsha
except Exception:
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
    # still propagate; any repo-lookup failure uses the pinned fallback.
    git_sha = '354127fbd3c12410249524617fa18d5e66c58d09'
git_sha_short = git_sha[0:5]
# Jinja2 template shipped next to this script.
template_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'report_template.html.jinja2')
# CLI: where to read per-config log directories and where to write the HTML
# report; both defaults embed the short git sha computed above.
parser = argparse.ArgumentParser(description='Generate report for mxnet SSD tests')
parser.add_argument('--input-dir', type=str, default='tests/results/{git_hash}'.format(git_hash=git_sha_short),
                    help='Input directory')
parser.add_argument('--output', type=str, default='report_{git_hash}.html'.format(git_hash=git_sha_short),
                    help='Output HTML file name')
args = parser.parse_args()
def parse_log(fname, mlperf_re=True, target_map=23.0):
    """Parse one SSD training log file.

    Returns (train_df, val_df, result): per-epoch training stats, per-epoch
    validation mAP, and an overall outcome string -- 'success' when the last
    mAP reaches target_map, 'failure' when it does not, 'nan' when the loss
    went NaN or no validation entries were found.
    """
    # NOTE(review): the file handle is never closed explicitly; acceptable
    # for a short-lived reporting script.
    log = open(fname).read()
    train_re = r"\[Training\]\[Epoch (.*)\] training time: (.*) \[sec\], avg speed: (.*) \[imgs/sec\], loss=(.*)"
    # NOTE(review): the '.' in (\d*.\d*) is unescaped, so it matches any
    # character -- presumably intended as a literal decimal point.
    val_re = r".*eval_accuracy\", \"value\": (\d*.\d*).*epoch_num\": (\d*)"
    train_matches = re.finditer(train_re, log, re.MULTILINE)
    val_matches = re.finditer(val_re, log, re.MULTILINE)
    train_epoch = []
    train_time = []
    train_throughput = []
    train_loss = []
    for match in train_matches:
        train_epoch.append(int(match.group(1)))
        train_time.append(float(match.group(2)))
        train_throughput.append(float(match.group(3)))
        train_loss.append(float(match.group(4)))
    train_df = pd.DataFrame({'epoch': train_epoch,
                             'time': train_time,
                             'throughput': train_throughput,
                             'loss': train_loss,
                             'fname': os.path.basename(fname)})
    val_epoch = []
    val_map = []
    for match in val_matches:
        if mlperf_re:
            # MLPerf logs report accuracy as a 0-1 fraction; scale to mAP %.
            val_epoch.append(int(match.group(2)))
            val_map.append(100*float(match.group(1)))
        else:
            # NOTE(review): this branch swaps the group order relative to
            # val_re above -- presumably written for an older log format;
            # confirm before calling with mlperf_re=False.
            val_epoch.append(int(match.group(1)))
            val_map.append(float(match.group(2)))
    val_df = pd.DataFrame({'epoch': val_epoch, 'map': val_map, 'fname': os.path.basename(fname)})
    result = False
    # 'nan' also covers logs that produced no validation entries at all.
    if not val_map or any(np.isnan(x) for x in train_loss):
        result = 'nan'
    elif val_map[-1]>=target_map:
        result = 'success'
    elif val_map[-1]<target_map:
        result = 'failure'
    return train_df, val_df, result
def parse_logs(logs, mlperf_re=True):
    """Parse several training logs and concatenate their per-run frames.

    Returns (train_df, val_df, result): the concatenated training and
    validation DataFrames (empty frames when *logs* is empty) and a numpy
    string array with one outcome per log, matching parse_log's return.
    """
    train_frames = []
    val_frames = []
    result = np.array([], dtype=str)
    for log in logs:
        train_df_, val_df_, result_ = parse_log(log, mlperf_re)
        train_frames.append(train_df_)
        val_frames.append(val_df_)
        result = np.append(result, result_)
    # DataFrame.append was deprecated and removed in pandas 2.0, and calling
    # it per-iteration copied the accumulated frame each time (O(n^2));
    # collect first and concatenate once instead.
    train_df = pd.concat(train_frames, ignore_index=True, sort=False) if train_frames else pd.DataFrame()
    val_df = pd.concat(val_frames, ignore_index=True, sort=False) if val_frames else pd.DataFrame()
    return train_df, val_df, result
def summarize_test(name, train_df, val_df, result):
    """Aggregate one configuration's runs into a report entry.

    name      -- configuration name (directory basename).
    train_df  -- concatenated per-epoch training frame from parse_logs().
    val_df    -- concatenated per-epoch validation frame from parse_logs().
    result    -- array of per-run outcome strings ('success'/'failure'/'nan').

    Returns a dict with outcome counts, plotly figures, and pre-rendered
    HTML divs for the jinja2 template.
    """
    result_collection = collections.Counter(result)
    summary = {}
    summary['name'] = name
    summary['count'] = len(result)
    summary['success'] = result_collection['success']
    summary['nans'] = result_collection['nan']
    summary['failed'] = result_collection['failure']
    summary['non_convergent'] = result_collection['failure']+result_collection['nan']
    # Job results bar graph
    results_bar_fig = go.Figure(data=[go.Bar(x=list(result_collection.keys()),
                                             y=list(result_collection.values()),
                                             text=list(result_collection.values()),
                                             textposition='auto')])
    # Successful jobs' convergence-epoch bar graph
    last_map = val_df.groupby(['fname']).last()
    # NOTE(review): the 23 threshold duplicates parse_log's target_map=23.0
    # default -- keep the two in sync.
    success_df = last_map[last_map['map']>=23]  # Filter successful jobs only
    agg_successl_df = success_df.groupby('epoch', as_index=False).count()
    x = agg_successl_df['epoch'].values
    y = agg_successl_df['map'].values
    epochs_fig = go.Figure(data=[go.Bar(x=x, y=y, text=y, width=0.4, textposition='auto')])
    epochs_fig.update_layout(barmode='group')
    # Loss and mAP line graphs (None for empty frames; handled below)
    train_loss_fig = None if train_df.empty else px.line(train_df, x="epoch", y="loss", color='fname')
    train_throughput_fig = None if train_df.empty else px.line(train_df, x="epoch", y="throughput", color='fname')
    val_map_fig = None if val_df.empty else px.line(val_df, x="epoch", y="map", color='fname')
    # Typo fixes in report labels: 'Convergance' -> 'Convergence',
    # 'Validatino' -> 'Validation'.
    summary['plot_names'] = ['Results', 'Convergence Distribution', 'Training Loss Graph', 'Validation mAP Graph', 'Training throughput']
    summary['plots'] = [results_bar_fig, epochs_fig, train_loss_fig, val_map_fig, train_throughput_fig]
    summary['divs'] = []
    for plot in summary['plots']:
        if plot is not None:
            summary['divs'].append(plotly.offline.plot(plot, output_type='div', include_plotlyjs=False))
        else:
            summary['divs'].append("<div>No validation graph</div>")
    return summary
# One report entry per configuration directory under --input-dir.
results = []
for config in sorted(glob(os.path.join(args.input_dir, '*'))):
    print("Parsing {}".format(config))
    name = os.path.basename(config)
    logs = glob(os.path.join(config, '*.out'))
    train_df, val_df, success = parse_logs(logs=logs, mlperf_re=True)
    result = summarize_test(name, train_df, val_df, success)
    results.append(result)

title = 'SSD MxNet Test Report'
subtitle = '{input_folder}'.format(input_folder=os.path.basename(args.input_dir))
try:
    Template(open(template_file).read()) \
        .stream(title=title, subtitle=subtitle, tests=results) \
        .dump(args.output)
    print("Wrote report to: {}".format(args.output))
except Exception as exc:
    # Narrowed from a bare ``except:`` that hid the cause entirely; report
    # it so a broken template or unwritable path is diagnosable.
    print("Something went wrong, no report was generated: {}".format(exc))
| apache-2.0 |
rtb1c13/scripts | Fields/plots.py | 1 | 2621 | #!/usr/bin/env python
# Matplotlib histogram of fields
import numpy as np
import scipy.stats as stats
import matplotlib as mpl
# Select the non-interactive Agg backend; this must happen before pyplot
# is imported for the choice to take effect.
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import argparse
# -f is required; -f2 optionally names field files for a second atom site,
# enabling the average-field / field-drop outputs in main().
parser = argparse.ArgumentParser()
parser.add_argument("-f","--files",help="Files for fields at first atom",nargs='+',type=str,required=True)
parser.add_argument("-f2","--filestwo",help="Files for fields at second atom",nargs='+',type=str)
args = parser.parse_args()
def combine_files(files):
    """Read whitespace-delimited numeric files and return their values
    concatenated into a single numpy array, in the order given by *files*.

    An empty *files* list yields an empty float array, matching the old
    behaviour of seeding the concatenation with ``np.array([])``.
    """
    # Collect first, concatenate once: the original called np.concatenate
    # inside the loop, copying the accumulated array every iteration
    # (accidentally O(n^2) in total data size).
    arrays = [np.loadtxt(fn) for fn in files]
    return np.concatenate([np.array([])] + arrays)
def histogram(values):
    """Plot a normalised 50-bin histogram of *values* with a Gaussian KDE
    overlay and save the figure to Fields.png."""
    counts, bin_edges, hist_patches = plt.hist(values, 50, normed=1, color='blue')
    # Smooth density estimate drawn over a grid extending 20 units past
    # the data range, sampled every 2 units.
    density = stats.kde.gaussian_kde(values)
    grid = np.arange(np.min(values) - 20., np.max(values) + 20., 2.)
    plt.plot(grid, density(grid), color='black', lw=3)
    plt.xlabel(r"Field strength / MV cm$^{-1}$")
    plt.ylabel("Normalised density")
    plt.savefig("Fields.png")
def desc_stats(values, desc):
    """Append mean, standard deviation and standard error of *values* to
    stats.txt, labelled with *desc* (left-justified to 14 characters)."""
    mean_val = np.mean(values)
    stddev_val = np.std(values)
    stderr_val = stats.sem(values)
    row = "%s %7.3f %7.3f %7.3f\n" % (desc.ljust(14), mean_val, stddev_val, stderr_val)
    # Append so successive calls build up one table (header written by main).
    with open("stats.txt", 'a') as handle:
        handle.write(row)
#def combined_stats(values1,prefix):
# """Writes out stats for combination of values read in
# e.g. average field between two,
def combine_atoms(val1, val2):
    """Given the field arrays at two atom sites, return a pair
    (average field, field drop), i.e. their mean and their difference
    val2 - val1, element-wise for array inputs."""
    mean_field = (val1 + val2) / 2
    field_drop = val2 - val1
    return mean_field, field_drop
def main():
    """Entry point: load fields, write descriptive stats, plot a histogram.

    With only -f given, stats and histogram describe atom 1's fields; with
    -f2 as well, the average field and field drop between the two atoms are
    also computed and the histogram shows the average field instead.
    """
    atm1 = combine_files(args.files)
    # Truncate stats.txt and write the header; desc_stats appends rows.
    with open("stats.txt",'w') as f:
        f.write("Description Mean Std. Dev. Std. Err.\n")
    desc_stats(atm1,"atm1")
    if args.filestwo is not None:
        atm2 = combine_files(args.filestwo)
        desc_stats(atm2,"atm2")
        Fvib,dFvib = combine_atoms(atm1,atm2)
        desc_stats(Fvib,"Ave field")
        desc_stats(dFvib,"Field drop")
        histogram(Fvib)
    else:
        histogram(atm1)

main()
#y = mlab.normpdf( bins, mu, sigma)
#l = plt.plot(bins, y, 'r--', linewidth=1)
#From tuts
#plt.xlabel('Smarts')
#plt.ylabel('Probability')
#plt.title(r'$\mathrm{Histogram\ of\ IQ:}\ \mu=100,\ \sigma=15$')
#plt.axis([40, 160, 0, 0.03])
#plt.grid(True)
#plt.show()
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.