repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
jhamman/xray | xarray/tests/test_formatting.py | 1 | 6100 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from xarray.core import formatting
from xarray.core.pycompat import PY3
from . import TestCase
class TestFormatting(TestCase):
    """Unit tests for the helpers in ``xarray.core.formatting``."""

    def test_get_indexer_at_least_n_items(self):
        # Each case maps an array shape to the indexer expected to select
        # at least 10 items from the start of an array of that shape.
        cases = [
            ((20,), (slice(10),)),
            ((3, 20,), (0, slice(10))),
            ((2, 10,), (0, slice(10))),
            ((2, 5,), (slice(2), slice(None))),
            ((1, 2, 5,), (0, slice(2), slice(None))),
            ((2, 3, 5,), (0, slice(2), slice(None))),
            ((1, 10, 1,), (0, slice(10), slice(None))),
            ((2, 5, 1,), (slice(2), slice(None), slice(None))),
            ((2, 5, 3,), (0, slice(4), slice(None))),
            ((2, 3, 3,), (slice(2), slice(None), slice(None))),
        ]
        for shape, expected in cases:
            actual = formatting._get_indexer_at_least_n_items(shape, 10)
            self.assertEqual(expected, actual)

    def test_first_n_items(self):
        # first_n_items should return the first n items in C (flat) order,
        # regardless of the array's dimensionality.
        array = np.arange(100).reshape(10, 5, 2)
        for n in [3, 10, 13, 100, 200]:
            actual = formatting.first_n_items(array, n)
            expected = array.flat[:n]
            self.assertItemsEqual(expected, actual)
        # n must be positive.
        with self.assertRaisesRegexp(ValueError, 'at least one item'):
            formatting.first_n_items(array, 0)

    def test_last_item(self):
        # The last flat item is the same no matter how the array is reshaped.
        array = np.arange(100)
        reshape = ((10, 10), (1, 100), (2, 2, 5, 5))
        expected = np.array([99])
        for r in reshape:
            result = formatting.last_item(array.reshape(r))
            self.assertEqual(result, expected)

    def test_format_item(self):
        # Scalar formatting: timestamps/timedeltas get compact string forms,
        # strings are repr'd (with a Python-2/3 prefix difference), numbers
        # use their plain str().
        cases = [
            (pd.Timestamp('2000-01-01T12'), '2000-01-01T12:00:00'),
            (pd.Timestamp('2000-01-01'), '2000-01-01'),
            (pd.Timestamp('NaT'), 'NaT'),
            (pd.Timedelta('10 days 1 hour'), '10 days 01:00:00'),
            (pd.Timedelta('-3 days'), '-3 days +00:00:00'),
            (pd.Timedelta('3 hours'), '0 days 03:00:00'),
            (pd.Timedelta('NaT'), 'NaT'),
            ('foo', "'foo'"),
            (u'foo', "'foo'" if PY3 else "u'foo'"),
            (b'foo', "b'foo'" if PY3 else "'foo'"),
            (1, '1'),
            (1.0, '1.0'),
        ]
        for item, expected in cases:
            actual = formatting.format_item(item)
            self.assertEqual(expected, actual)

    def test_format_items(self):
        # Sequences of timedeltas share a common display unit chosen per array.
        cases = [
            (np.arange(4) * np.timedelta64(1, 'D'),
             '0 days 1 days 2 days 3 days'),
            (np.arange(4) * np.timedelta64(3, 'h'),
             '00:00:00 03:00:00 06:00:00 09:00:00'),
            (np.arange(4) * np.timedelta64(500, 'ms'),
             '00:00:00 00:00:00.500000 00:00:01 00:00:01.500000'),
            (pd.to_timedelta(['NaT', '0s', '1s', 'NaT']),
             'NaT 00:00:00 00:00:01 NaT'),
            (pd.to_timedelta(['1 day 1 hour', '1 day', '0 hours']),
             '1 days 01:00:00 1 days 00:00:00 0 days 00:00:00'),
            ([1, 2, 3], '1 2 3'),
        ]
        for item, expected in cases:
            actual = ' '.join(formatting.format_items(item))
            self.assertEqual(expected, actual)

    def test_format_array_flat(self):
        # The flat repr must stay within the given character budget,
        # truncating with '...' when needed.
        actual = formatting.format_array_flat(np.arange(100), 13)
        expected = '0 1 2 3 4 ...'
        self.assertEqual(expected, actual)

        actual = formatting.format_array_flat(np.arange(100.0), 11)
        expected = '0.0 1.0 ...'
        self.assertEqual(expected, actual)

        # Even a budget of 1 still shows at least one item plus ellipsis.
        actual = formatting.format_array_flat(np.arange(100.0), 1)
        expected = '0.0 ...'
        self.assertEqual(expected, actual)

        # Small arrays that fit are shown completely, without ellipsis.
        actual = formatting.format_array_flat(np.arange(3), 5)
        expected = '0 1 2'
        self.assertEqual(expected, actual)

        actual = formatting.format_array_flat(np.arange(4.0), 11)
        expected = '0.0 1.0 ...'
        self.assertEqual(expected, actual)

        actual = formatting.format_array_flat(np.arange(4), 0)
        expected = '0 ...'
        self.assertEqual(expected, actual)

    def test_pretty_print(self):
        # pretty_print pads/truncates to the exact requested width.
        self.assertEqual(formatting.pretty_print('abcdefghij', 8), 'abcde...')
        self.assertEqual(formatting.pretty_print(u'ß', 1), u'ß')

    def test_maybe_truncate(self):
        # Non-ASCII input shorter than the limit is returned unchanged.
        self.assertEqual(formatting.maybe_truncate(u'ß', 10), u'ß')

    def test_format_timestamp_out_of_bounds(self):
        # Dates outside pandas' Timestamp range (datetime64[ns]) must still
        # format via the plain datetime path.
        from datetime import datetime
        date = datetime(1300, 12, 1)
        expected = '1300-12-01'
        result = formatting.format_timestamp(date)
        self.assertEqual(result, expected)

        date = datetime(2300, 12, 1)
        expected = '2300-12-01'
        result = formatting.format_timestamp(date)
        self.assertEqual(result, expected)

    def test_attribute_repr(self):
        # Attribute summaries are single-line and capped at 80 characters.
        short = formatting.summarize_attr(u'key', u'Short string')
        long = formatting.summarize_attr(u'key', 100 * u'Very long string ')
        newlines = formatting.summarize_attr(u'key', u'\n\n\n')
        tabs = formatting.summarize_attr(u'key', u'\t\t\t')
        self.assertEqual(short, '    key: Short string')
        self.assertLessEqual(len(long), 80)
        self.assertTrue(long.endswith(u'...'))
        self.assertNotIn(u'\n', newlines)
        self.assertNotIn(u'\t', tabs)
def test_set_numpy_options():
    """set_numpy_options must apply the options inside the context and
    restore the previous ones on exit."""
    saved_options = np.get_printoptions()
    with formatting.set_numpy_options(threshold=10):
        # With a low threshold, the 500-element repr gets summarized.
        assert len(repr(np.arange(500))) < 200
    # Leaving the context restores the original print options.
    assert np.get_printoptions() == saved_options
def test_short_array_repr():
    """short_array_repr should stay far below the default numpy repr size."""
    arrays = [
        np.random.randn(500),
        np.random.randn(20, 20),
        np.random.randn(5, 10, 15),
        np.random.randn(5, 10, 15, 3),
    ]
    # Line counts for comparison:
    #   default numpy repr:  167, 140, 254, 248
    #   short_array_repr:      1,   7,  24,  19
    for arr in arrays:
        assert formatting.short_array_repr(arr).count('\n') + 1 < 30
| apache-2.0 |
JeyZeta/Dangerous | Dangerous/Golismero/thirdparty_libs/nltk/classify/scikitlearn.py | 12 | 6078 | # Natural Language Toolkit: Interface to scikit-learn classifiers
#
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
scikit-learn (http://scikit-learn.org) is a machine learning library for
Python, supporting most of the basic classification algorithms, including SVMs,
Naive Bayes, logistic regression and decision trees.
This package implement a wrapper around scikit-learn classifiers. To use this
wrapper, construct a scikit-learn classifier, then use that to construct a
SklearnClassifier. E.g., to wrap a linear SVM classifier with default settings,
do
>>> from sklearn.svm.sparse import LinearSVC
>>> from nltk.classify.scikitlearn import SklearnClassifier
>>> classif = SklearnClassifier(LinearSVC())
The scikit-learn classifier may be arbitrarily complex. E.g., the following
constructs and wraps a Naive Bayes estimator with tf-idf weighting and
chi-square feature selection:
>>> from sklearn.feature_extraction.text import TfidfTransformer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> from sklearn.naive_bayes import MultinomialNB
>>> from sklearn.pipeline import Pipeline
>>> pipeline = Pipeline([('tfidf', TfidfTransformer()),
... ('chi2', SelectKBest(chi2, k=1000)),
... ('nb', MultinomialNB())])
>>> classif = SklearnClassifier(pipeline)
(Such a classifier could be trained on word counts for text classification.)
"""
from nltk.classify.api import ClassifierI
from nltk.probability import DictionaryProbDist
from scipy.sparse import coo_matrix
try:
import numpy as np
except ImportError:
pass
class SklearnClassifier(ClassifierI):
    """Wrapper for scikit-learn classifiers.

    Maps NLTK featuresets (dicts of feature name -> value) to the numeric
    feature matrices scikit-learn estimators expect, and maps predicted
    class indices back to the original labels.
    """

    def __init__(self, estimator, dtype=float, sparse=True):
        """
        :param estimator: scikit-learn classifier object.

        :param dtype: data type used when building feature array.
            scikit-learn estimators work exclusively on numeric data; use bool
            when all features are binary.

        :param sparse: Whether to use sparse matrices. The estimator must
            support these; not all scikit-learn classifiers do. The default
            value is True, since most NLP problems involve sparse feature sets.
        :type sparse: boolean.
        """
        self._clf = estimator
        self._dtype = dtype
        self._sparse = sparse

    def __repr__(self):
        return "<SklearnClassifier(%r)>" % self._clf

    def batch_classify(self, featuresets):
        """Predict the most likely label for each featureset."""
        X = self._convert(featuresets)
        y = self._clf.predict(X)
        return [self._index_label[int(yi)] for yi in y]

    def batch_prob_classify(self, featuresets):
        """Return a probability distribution over labels for each featureset."""
        X = self._convert(featuresets)
        y_proba = self._clf.predict_proba(X)
        # range() instead of the Python-2-only xrange(), so this also runs
        # on Python 3 (the module already uses print() as a function).
        return [self._make_probdist(y_proba[i]) for i in range(len(y_proba))]

    def labels(self):
        """Return the labels seen during training."""
        return self._label_index.keys()

    def train(self, labeled_featuresets):
        """
        Train (fit) the scikit-learn estimator.

        :param labeled_featuresets: A list of classified featuresets,
            i.e., a list of tuples ``(featureset, label)``.
        """
        self._feature_index = {}  # feature name -> column index
        self._index_label = []    # class index -> label
        self._label_index = {}    # label -> class index
        for fs, label in labeled_featuresets:
            # Iterate the dict directly instead of the Python-2-only
            # iterkeys(); behavior is identical on both versions.
            for f in fs:
                if f not in self._feature_index:
                    self._feature_index[f] = len(self._feature_index)
            if label not in self._label_index:
                self._index_label.append(label)
                self._label_index[label] = len(self._label_index)
        featuresets, labels = zip(*labeled_featuresets)
        X = self._convert(featuresets)
        y = np.array([self._label_index[l] for l in labels])
        self._clf.fit(X, y)
        return self

    def _convert(self, featuresets):
        """Dispatch to the sparse or dense feature-matrix builder."""
        if self._sparse:
            return self._featuresets_to_coo(featuresets)
        else:
            return self._featuresets_to_array(featuresets)

    def _featuresets_to_coo(self, featuresets):
        """Convert featuresets to sparse matrix (COO format)."""
        i_ind = []
        j_ind = []
        values = []
        # Track the row count explicitly: the original read the loop variable
        # after the loop, which raised NameError on an empty input; this way
        # empty input yields an empty (0, n_features) matrix instead.
        n_rows = 0
        for i, fs in enumerate(featuresets):
            n_rows = i + 1
            # items() instead of Python-2-only iteritems().
            for f, v in fs.items():
                try:
                    j = self._feature_index[f]
                    i_ind.append(i)
                    j_ind.append(j)
                    values.append(self._dtype(v))
                except KeyError:
                    # Feature not seen in training; skip it.
                    pass
        shape = (n_rows, len(self._feature_index))
        return coo_matrix((values, (i_ind, j_ind)), shape=shape, dtype=self._dtype)

    def _featuresets_to_array(self, featuresets):
        """Convert featuresets to a dense Numpy array."""
        X = np.zeros((len(featuresets), len(self._feature_index)),
                     dtype=self._dtype)
        for i, fs in enumerate(featuresets):
            for f, v in fs.items():
                try:
                    X[i, self._feature_index[f]] = self._dtype(v)
                except KeyError:  # feature not seen in training
                    pass
        return X

    def _make_probdist(self, y_proba):
        """Wrap one row of predict_proba output in a DictionaryProbDist."""
        return DictionaryProbDist(dict((self._index_label[i], p)
                                       for i, p in enumerate(y_proba)))
if __name__ == "__main__":
    # Demo: train the wrapper on NLTK's names corpus with two different
    # scikit-learn estimators.
    from nltk.classify.util import names_demo, binary_names_demo_features
    # Before scikit-learn 0.12, the sparse logistic regression lived in a
    # separate module; fall back to the merged implementation when absent.
    try:
        from sklearn.linear_model.sparse import LogisticRegression
    except ImportError:     # separate sparse LR to be removed in 0.12
        from sklearn.linear_model import LogisticRegression
    from sklearn.naive_bayes import BernoulliNB

    # Features are binary, so dtype=bool builds a smaller matrix.
    print("scikit-learn Naive Bayes:")
    names_demo(SklearnClassifier(BernoulliNB(binarize=False), dtype=bool).train,
               features=binary_names_demo_features)
    print("scikit-learn logistic regression:")
    names_demo(SklearnClassifier(LogisticRegression(), dtype=np.float64).train,
               features=binary_names_demo_features)
| mit |
tdeboissiere/DeepLearningImplementations | GAN/src/utils/batch_utils.py | 8 | 2862 | import time
import numpy as np
import multiprocessing
import os
import h5py
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
from matplotlib.pyplot import cm
class DataGenerator(object):
"""
Generate minibatches with real-time data parallel augmentation on CPU
args :
hdf5_file (str) path to data in HDF5 format
batch_size (int) Minibatch size
dset (str) train/test/valid, the name of the dset to iterate over
maxproc (int) max number of processes to spawn in parallel
num_cached (int) max number of batches to keep in queue
yields :
X, y (minibatch data and labels as np arrays)
"""
def __init__(self,
hdf5_file,
batch_size=32,
nb_classes=12,
dset="training",
maxproc=8,
num_cached=10):
# Check file exists
assert os.path.isfile(hdf5_file), hdf5_file + " doesn't exist"
# Initialize class internal variables
self.dset = dset
self.maxproc = maxproc
self.hdf5_file = hdf5_file
self.batch_size = batch_size
self.num_cached = num_cached
self.nb_classes = nb_classes
# Dict that will store all transformations and their parameters
self.d_transform = {}
# Read the data file to get dataset shape information
with h5py.File(self.hdf5_file, "r") as hf:
self.X_shape = hf["data"].shape
assert len(self.X_shape) == 4,\
("\n\nImg data should be formatted as: \n"
"(n_samples, n_channels, Height, Width)")
self.n_samples = hf["data"].shape[0]
# Verify n_channels is at index 1
assert self.X_shape[-3] < min(self.X_shape[-2:]),\
("\n\nImg data should be formatted as: \n"
"(n_samples, n_channels, Height, Width)")
# Save the class internal variables to a config dict
self.d_config = {}
self.d_config["hdf5_file"] = hdf5_file
self.d_config["batch_size"] = batch_size
self.d_config["dset"] = dset
self.d_config["num_cached"] = num_cached
self.d_config["maxproc"] = maxproc
self.d_config["data_shape"] = self.X_shape
def get_config(self):
return self.d_config
def gen_batch_inmemory_GAN(self, X_real, batch_size=None):
"""Generate batch, assuming X is loaded in memory in the main program"""
while True:
bs = self.batch_size
if batch_size is not None:
bs = batch_size
# Select idx at random for the batch
idx = np.random.choice(X_real.shape[0], bs, replace=False)
X_batch_real = X_real[idx]
yield X_batch_real
| mit |
zrhans/pythonanywhere | pyscripts/ply_O3.py | 1 | 4002 | """
DATA,Chuva,Chuva_min,Chuva_max,VVE,VVE_min,VVE_max,DVE,DVE_min,DVE_max,
Temp.,Temp._min,Temp._max,Umidade,Umidade_min,Umidade_max,Rad.,Rad._min,Rad._max,
Pres.Atm.,Pres.Atm._min,Pres.Atm._max,
Temp.Int.,Temp.Int._min,Temp.Int._max,
CH4,CH4_min,CH4_max,HCnM,HCnM_min,HCnM_max,HCT,HCT_min,HCT_max,
SO2,SO2_min,SO2_max,
O3,O3_min,O3_max,
NO,NO_min,NO_max,NO2,NO2_min,NO2_max,NOx,NOx_min,NOx_max,
CO,CO_min,CO_max,
MP10,MP10_min,MP10_max,MPT,MPT_min,MPT_max,
Fin,Fin_min,Fin_max,Vin,Vin_min,Vin_max,Vout,Vout_min,Vout_max
"""
import plotly.plotly as py  # Every function in this module will communicate with an external plotly server
import plotly.graph_objs as go
import pandas as pd

# Hourly-mean station data; the column layout is documented in the module
# docstring above. 'DATA' is the timestamp column used as index.
DATAFILE = r'/home/zrhans/w3/bns/bns_2016-1.csv'

df = pd.read_csv(DATAFILE, parse_dates=True, sep=',', header=0, index_col='DATA')

# Polar coordinates for the scatter: r = ozone concentration (O3),
# t = wind direction in degrees (DVE).
r0 = df.O3
t0 = df.DVE

# Define the data series (a polar scatter trace).
trace1 = go.Scatter(
    r=r0,
    t=t0,
    mode='markers',
    name='O3',
    marker=dict(
        color='rgb(30,171,200)',
        size=110,
        line=dict(
            color='white'
        ),
        opacity=0.7
    )
)

# Polar chart layout; title and tick suffix are user-facing (Portuguese).
layout = go.Layout(
    title='BSE01 - Ozônio - Médias horárias',
    font=dict(
        size=15
    ),
    plot_bgcolor='rgb(223, 223, 223)',
    angularaxis=dict(
        tickcolor='rgb(253,253,253)'
    ),
    orientation=270,
    radialaxis=dict(
        ticksuffix='ppm'
    ),
)

# Assemble the figure from the trace list and layout.
data = [trace1]
fig = go.Figure(data=data, layout=layout)

# Upload the figure to the Plotly server (requires configured credentials).
py.plot(
    fig,
    filename='hans/BSE01/2016/ld_O3',  # name of the file as saved in your plotly account
    sharing='public'
)
| apache-2.0 |
zhengwsh/InplusTrader_Linux | rqalpha/api/api_base.py | 1 | 30203 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
更多描述请见
https://www.ricequant.com/api/python/chn
'''
from __future__ import division
import datetime
import inspect
import sys
from collections import Iterable
from functools import wraps
from types import FunctionType
# noinspection PyUnresolvedReferences
from typing import List
import pandas as pd
import six
from dateutil.parser import parse
from . import names
from ..environment import Environment
from ..execution_context import ExecutionContext
from ..utils import to_industry_code, to_sector_name, unwrapper
from ..utils.exception import patch_user_exc, patch_system_exc, EXC_EXT_NAME, RQInvalidArgument
from ..utils.i18n import gettext as _
# noinspection PyUnresolvedReferences
from ..utils.logger import user_log as logger
from ..model.instrument import SectorCodeItem, IndustryCodeItem
from ..utils.arg_checker import apply_rules, verify_that
# noinspection PyUnresolvedReferences
from ..model.instrument import Instrument, SectorCode as sector_code, IndustryCode as industry_code
# noinspection PyUnresolvedReferences
from ..const import EXECUTION_PHASE, EXC_TYPE, ORDER_STATUS, SIDE, POSITION_EFFECT, ORDER_TYPE, MATCHING_TYPE, RUN_TYPE
# noinspection PyUnresolvedReferences
from ..model.order import Order, MarketOrder, LimitOrder
# Names re-exported into strategy namespaces via star-import.  The list is
# extended at runtime by register_api()/export_as_api() below.
__all__ = [
    'logger',
    'sector_code',
    'industry_code',
    'LimitOrder',
    'MarketOrder',
    'ORDER_STATUS',
    'SIDE',
    'POSITION_EFFECT',
    'ORDER_TYPE',
    'RUN_TYPE',
    'MATCHING_TYPE',
]
def decorate_api_exc(func):
    """Wrap ``func`` with the API exception patcher, unless some layer of
    its wrapper chain is already marked ``_rq_exception_checked``."""
    wrapped = func
    already_checked = False
    # Walk down the functools.wraps chain looking for the marker.
    while wrapped is not None:
        if getattr(wrapped, '_rq_exception_checked', False):
            already_checked = True
            break
        wrapped = getattr(wrapped, '__wrapped__', None)
    if already_checked:
        return func
    return api_exc_patch(func)
def api_exc_patch(func):
    """Wrap a plain function so unexpected exceptions are classified.

    RQInvalidArgument passes through untouched.  For a TypeError, the call
    arguments are re-checked against the signature: a mismatch is the
    user's fault and is re-raised as a user exception; any other untagged
    exception is tagged as a system exception.  Non-function objects are
    returned unchanged.
    """
    if isinstance(func, FunctionType):
        @wraps(func)
        def deco(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except RQInvalidArgument:
                raise
            except Exception as e:
                if isinstance(e, TypeError):
                    exc_info = sys.exc_info()
                    try:
                        # Called only for its signature check: it raises
                        # TypeError when the arguments do not match the
                        # signature (the return value is not needed).
                        inspect.getcallargs(unwrapper(func), *args, **kwargs)
                    except TypeError:
                        # Signature mismatch -> blame the user code.
                        t, v, tb = exc_info
                        raise patch_user_exc(v.with_traceback(tb))
                if getattr(e, EXC_EXT_NAME, EXC_TYPE.NOTSET) == EXC_TYPE.NOTSET:
                    patch_system_exc(e)
                raise
        return deco
    return func
def register_api(name, func):
    """Expose ``func`` at module level under ``name`` and add it to
    ``__all__`` so star-imports pick it up."""
    globals()[name] = func
    __all__.append(name)
def export_as_api(func):
    """Decorator publishing ``func`` as part of the strategy API.

    Adds the name to ``__all__``, wraps the function with the exception
    patcher (decorate_api_exc) and installs the wrapped version in the
    module globals.
    """
    __all__.append(func.__name__)
    func = decorate_api_exc(func)
    globals()[func.__name__] = func
    return func
def assure_order_book_id(id_or_ins):
    """Normalize an Instrument object or an instrument-code string to a
    plain order_book_id string; any other type is rejected."""
    if isinstance(id_or_ins, Instrument):
        return id_or_ins.order_book_id
    if isinstance(id_or_ins, six.string_types):
        return instruments(id_or_ins).order_book_id
    raise RQInvalidArgument(_(u"unsupported order_book_id type"))
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
def get_order(order):
    """Return the given order object unchanged.

    Exposed as API; the argument is simply passed through.
    """
    return order
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
def get_open_orders():
    """
    Get the current day's open (not yet filled) orders.

    :return: List[:class:`~Order` object]
    """
    return Environment.get_instance().broker.get_open_orders()
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
def cancel_order(order):
    """
    Cancel an open order.

    :param order: the order to cancel
    :type order: :class:`~Order` object

    :return: the same order object
    """
    if order is None:
        # patch_user_exc only tags the exception — it must be raised
        # explicitly.  Previously the tagged KeyError was constructed and
        # discarded, so a None order fell through to can_cancel_order()
        # and crashed there with a misleading error.
        raise patch_user_exc(KeyError(_(u"Cancel order fail: invalid order id")))
    env = Environment.get_instance()
    if env.can_cancel_order(order):
        env.broker.cancel_order(order)
    return order
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('id_or_symbols').are_valid_instruments())
def update_universe(id_or_symbols):
    """
    Replace the set of tracked instruments (e.g. the stock pool).

    The new universe OVERWRITES the current one rather than being merged
    into it, and takes effect from the next bar event.

    :param id_or_symbols: instrument(s) to track
    :type id_or_symbols: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`]
    """
    # A single instrument/code is treated as a one-element collection.
    if isinstance(id_or_symbols, (six.string_types, Instrument)):
        id_or_symbols = [id_or_symbols]
    env = Environment.get_instance()
    new_universe = {assure_order_book_id(item) for item in id_or_symbols}
    # Only push an update when the universe actually changed.
    if new_universe != env.get_universe():
        env.update_universe(new_universe)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('id_or_symbols').are_valid_instruments())
def subscribe(id_or_symbols):
    """
    Subscribe to market data for the given instrument(s), adding them to
    the universe processed by handle_bar.  A strategy must subscribe to
    at least one instrument, otherwise handle_bar is never triggered.

    :param id_or_symbols: instrument(s) to add
    :type id_or_symbols: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`]
    """
    universe = Environment.get_instance().get_universe()
    if isinstance(id_or_symbols, six.string_types):
        universe.add(instruments(id_or_symbols).order_book_id)
    elif isinstance(id_or_symbols, Instrument):
        universe.add(id_or_symbols.order_book_id)
    elif isinstance(id_or_symbols, Iterable):
        for each in id_or_symbols:
            universe.add(assure_order_book_id(each))
    else:
        raise RQInvalidArgument(_(u"unsupported order_book_id type"))
    # Extra validation pass over the raw argument before committing.
    verify_that('id_or_symbols')._are_valid_instruments("subscribe", id_or_symbols)
    Environment.get_instance().update_universe(universe)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('id_or_symbols').are_valid_instruments())
def unsubscribe(id_or_symbols):
    """
    Cancel the market-data subscription for the given instrument(s),
    removing them from the universe.  If the universe becomes empty the
    strategy exits.

    :param id_or_symbols: instrument(s) to remove
    :type id_or_symbols: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`]
    """
    universe = Environment.get_instance().get_universe()
    if isinstance(id_or_symbols, six.string_types):
        universe.discard(instruments(id_or_symbols).order_book_id)
    elif isinstance(id_or_symbols, Instrument):
        universe.discard(id_or_symbols.order_book_id)
    elif isinstance(id_or_symbols, Iterable):
        for each in id_or_symbols:
            universe.discard(assure_order_book_id(each))
    else:
        raise RQInvalidArgument(_(u"unsupported order_book_id type"))
    Environment.get_instance().update_universe(universe)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('date').is_valid_date(ignore_none=True),
             verify_that('tenor').is_in(names.VALID_TENORS, ignore_none=True))
def get_yield_curve(date=None, tenor=None):
    """
    Get the government-bond yield curve for a given date.

    Data is the ChinaBond treasury yield curve from 2002 onwards, sourced
    from China Central Depository & Clearing Co., Ltd.

    :param date: query date; defaults to the trading day before the
        strategy's current date
    :type date: `str` | `date` | `datetime` | `pandas.Timestamp`

    :param str tenor: standard tenor: '0S' - overnight, '1M' - one month,
        '1Y' - one year, ...; defaults to all tenors

    :return: `pandas.DataFrame` - the risk-free yield curve for the period

    :example:

    ..  code-block:: python3
        :linenos:

        [In]
        get_yield_curve('20130104')

        [Out]
                        0S      1M      2M      3M      6M      9M      1Y     2Y
        2013-01-04  0.0196  0.0253  0.0288  0.0279  0.0280  0.0283  0.0292 0.0310
                        3Y      4Y  ...     6Y      7Y      8Y      9Y     10Y
        2013-01-04  0.0314  0.0318 ...  0.0342  0.0350  0.0353  0.0357  0.0361
        ...
    """
    env = Environment.get_instance()
    trading_date = env.trading_dt.date()
    yesterday = env.data_proxy.get_previous_trading_date(trading_date)
    if date is None:
        date = yesterday
    else:
        date = pd.Timestamp(date)
        # Future dates have no curve yet; only dates up to the previous
        # trading day are valid.
        if date > yesterday:
            raise RQInvalidArgument('get_yield_curve: {} >= now({})'.format(date, yesterday))
    return env.data_proxy.get_yield_curve(start_date=date, end_date=date, tenor=tenor)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('order_book_id').is_valid_instrument(),
             verify_that('bar_count').is_instance_of(int).is_greater_than(0),
             verify_that('frequency').is_in(('1m', '1d')),
             verify_that('fields').are_valid_fields(names.VALID_HISTORY_FIELDS, ignore_none=True),
             verify_that('skip_suspended').is_instance_of(bool))
def history_bars(order_book_id, bar_count, frequency, fields=None, skip_suspended=True, include_now=False):
    """
    Get historical bars for one instrument, at daily or minute frequency.
    Must not be called in init().  Suspended days are skipped
    automatically (controlled by ``skip_suspended``).

    What the most recent bar is depends on the backtest frequency and
    the phase the call is made in:

    * daily backtest, frequency='1m': not supported (raises).
    * daily backtest, frequency='1d':
      before_trading sees T-1's day bar, handle_bar sees T's day bar.
    * minute backtest, frequency='1d':
      both before_trading and handle_bar see T-1's day bar.
    * minute backtest, frequency='1m':
      before_trading sees the last minute bar of T-1,
      handle_bar sees the current minute bar of T.

    :param order_book_id: instrument code
    :type order_book_id: `str`

    :param int bar_count: number of bars to fetch; required

    :param str frequency: bar frequency, '1d' (daily) or '1m' (minute);
        required

    :param str fields: field(s) to return; required.  Valid fields:

        ========================= ==============================================
        field                     meaning
        ========================= ==============================================
        datetime                  timestamp (as int)
        open                      open price
        high                      high price
        low                       low price
        close                     close price
        volume                    traded volume
        total_turnover            traded value
        open_interest             open interest (futures only)
        basis_spread              basis spread (index futures only)
        settlement                settlement price (futures daily bars)
        prev_settlement           previous settlement (futures daily bars)
        ========================= ==============================================

    :param bool skip_suspended: whether to skip suspended trading days

    :param bool include_now: whether to include the still-forming bar

    :return: `ndarray` — plugs directly into talib-style libraries and is
        faster than returning a DataFrame.

    :example:

    Last 5 daily closes (current strategy date 20160706):

    ..  code-block:: python3
        :linenos:

        [In]
        logger.info(history_bars('000002.XSHE', 5, '1d', 'close'))
        [Out]
        [ 8.69  8.7   8.71  8.81  8.81]
    """
    order_book_id = assure_order_book_id(order_book_id)
    env = Environment.get_instance()
    dt = env.calendar_dt

    if frequency == '1m' and env.config.base.frequency == '1d':
        raise RQInvalidArgument('can not get minute history in day back test')

    if ((env.config.base.frequency == '1m' and frequency == '1d') or
            (frequency == '1d' and ExecutionContext.phase == EXECUTION_PHASE.BEFORE_TRADING)):
        # When daily bars are requested from a minute backtest, or during
        # before_trading, the current day's bar is not complete yet, so
        # shift back one trading day (using the trading date).
        dt = env.data_proxy.get_previous_trading_date(env.trading_dt)

    return env.data_proxy.history_bars(order_book_id, bar_count, frequency, fields, dt, skip_suspended, include_now)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('type').is_in(names.VALID_INSTRUMENT_TYPES, ignore_none=True))
def all_instruments(type=None):
    """
    List basic information for all instruments in the market; useful for
    a quick overview.  Currently only the China market is supported.

    :param str type: instrument type to query, e.g. type='CS' for common
        stocks; defaults to all types.  (The parameter name shadows the
        ``type`` builtin but is kept for API compatibility.)

    :return: `pandas DataFrame` with basic information for every instrument.

    Valid values for ``type``:

    ========================= ===================================================
    type                      meaning
    ========================= ===================================================
    CS                        Common Stock
    ETF                       Exchange Traded Fund
    LOF                       Listed Open-Ended Fund
    FenjiMu                   Fenji Mu Fund (structured-fund mother unit)
    FenjiA                    Fenji A Fund (structured-fund A share)
    FenjiB                    Fenji B Fund (structured-fund B share)
    INDX                      Index
    Future                    Futures (index, treasury bond and commodity)
    ========================= ===================================================

    :example:

    Basic information for all structured A funds:

    ..  code-block:: python3
        :linenos:

        [In]all_instruments('FenjiA')
        [Out]
            abbrev_symbol    order_book_id    product    sector_code  symbol
        0    CYGA    150303.XSHE    null    null    华安创业板50A
        1    JY500A    150088.XSHE    null    null    金鹰500A
        2    TD500A    150053.XSHE    null    null    泰达稳健
        3    HS500A    150110.XSHE    null    null    华商500A
        4    QSAJ    150235.XSHE    null    null    鹏华证券A
        ...
    """
    return Environment.get_instance().data_proxy.all_instruments(type)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('id_or_symbols').is_instance_of((str, Iterable)))
def instruments(id_or_symbols):
    """
    Get detailed information for one or several instruments.  Currently
    only the China market is supported; all ids passed in a single call
    must belong to the same market (no mixing, e.g., CN and US codes).

    :param id_or_symbols: an order_book_id or a list of them
    :type id_or_symbols: `str` | List[`str`]

    :return: :class:`~StockInstrument` | :class:`~FutureInstrument`

    :example:

    *   Details of a single stock:

    ..  code-block:: python3
        :linenos:

        [In]instruments('000001.XSHE')
        [Out]
        Instrument(order_book_id=000001.XSHE, symbol=平安银行, abbrev_symbol=PAYH, listed_date=19910403, de_listed_date=null, board_type=MainBoard, sector_code_name=金融, sector_code=Financials, round_lot=100, exchange=XSHE, special_type=Normal, status=Active)

    *   Details of several stocks at once:

    ..  code-block:: python3
        :linenos:

        [In]instruments(['000001.XSHE', '000024.XSHE'])
        [Out]
        [Instrument(order_book_id=000001.XSHE, ...), Instrument(order_book_id=000024.XSHE, ...)]

    *   Days since listing:

    ..  code-block:: python
        :linenos:

        instruments('000001.XSHE').days_from_listed()

    *   Days until expiry (futures):

    ..  code-block:: python
        :linenos:

        instruments('IF1701').days_to_expire()
    """
    return Environment.get_instance().data_proxy.instruments(id_or_symbols)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('code').is_instance_of((str, SectorCodeItem)))
def sector(code):
    """Resolve a sector given either a code string or a SectorCodeItem,
    and return the matching instruments from the data proxy."""
    # Strings go through the name lookup; SectorCodeItem carries its name.
    resolved = to_sector_name(code) if isinstance(code, six.string_types) else code.name
    return Environment.get_instance().data_proxy.sector(resolved)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('code').is_instance_of((str, IndustryCodeItem)))
def industry(code):
    """
    Return the instruments that belong to the given industry.

    ``code`` may be either an industry name string or an
    ``IndustryCodeItem``; both forms are normalized to the canonical
    industry code before the data proxy is queried.
    """
    if isinstance(code, six.string_types):
        industry_code = to_industry_code(code)
    else:
        industry_code = code.code
    return Environment.get_instance().data_proxy.industry(industry_code)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
def concept(*concept_names):
    """
    Return the instruments associated with one or more concept names.

    :param concept_names: one or more concept name strings
    """
    return Environment.get_instance().data_proxy.concept(*concept_names)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('start_date').is_valid_date(ignore_none=False))
@apply_rules(verify_that('end_date').is_valid_date(ignore_none=False))
def get_trading_dates(start_date, end_date):
    """
    Return the trading days of a country market between two dates, with both
    endpoints included.  Only the Chinese market is currently supported.

    :param start_date: first day of the range
    :type start_date: `str` | `date` | `datetime` | `pandas.Timestamp`
    :param end_date: last day of the range
    :type end_date: `str` | `date` | `datetime` | `pandas.Timestamp`

    :return: list[`datetime.date`]

    :example:

    .. code-block:: python3
        :linenos:

        [In]get_trading_dates(start_date='2016-05-05', end_date='20160505')
        [Out]
        [datetime.date(2016, 5, 5)]
    """
    return Environment.get_instance().data_proxy.get_trading_dates(start_date, end_date)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('date').is_valid_date(ignore_none=False))
def get_previous_trading_date(date):
    """
    Return the trading day immediately before the given date.

    :param date: reference date
    :type date: `str` | `date` | `datetime` | `pandas.Timestamp`

    :return: `datetime.date`

    :example:

    .. code-block:: python3
        :linenos:

        [In]get_previous_trading_date(date='2016-05-02')
        [Out]
        [datetime.date(2016, 4, 29)]
    """
    return Environment.get_instance().data_proxy.get_previous_trading_date(date)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('date').is_valid_date(ignore_none=False))
def get_next_trading_date(date):
    """
    Return the trading day immediately after the given date.

    :param date: reference date
    :type date: `str` | `date` | `datetime` | `pandas.Timestamp`

    :return: `datetime.date`

    :example:

    .. code-block:: python3
        :linenos:

        [In]get_next_trading_date(date='2016-05-01')
        [Out]
        [datetime.date(2016, 5, 3)]
    """
    return Environment.get_instance().data_proxy.get_next_trading_date(date)
def to_date(date):
    """Coerce a ``str`` / ``datetime.date`` / ``datetime.datetime`` value to
    a plain ``datetime.date``; raise RQInvalidArgument for anything else."""
    if isinstance(date, six.string_types):
        return parse(date).date()
    if isinstance(date, datetime.date):
        # datetime.datetime instances expose a .date() accessor; plain
        # datetime.date objects do not, in which case the value is already
        # what we want.
        try:
            as_date = date.date
        except AttributeError:
            return date
        return as_date()
    raise RQInvalidArgument('unknown date value: {}'.format(date))
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
                                EXECUTION_PHASE.BEFORE_TRADING,
                                EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.AFTER_TRADING,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('order_book_id').is_valid_instrument(),
             verify_that('start_date').is_valid_date(ignore_none=False),
             verify_that('adjusted').is_instance_of(bool))
def get_dividend(order_book_id, start_date, adjusted=True):
    """
    Return dividend records for ``order_book_id`` from ``start_date`` up to
    (and including) the day before the current trading day.

    :param order_book_id: instrument id (or symbol; it is normalized below)
    :param start_date: first day of the query window (string or date-like)
    :param bool adjusted: whether to request price-adjusted dividend data
    :return: the data proxy's dividend records sliced to ``[start_date, dt]``
    :raises RQInvalidArgument: if ``start_date`` lies after the previous day
    """
    env = Environment.get_instance()
    # Dividends are only known up to the day before the current trading day.
    dt = env.trading_dt.date() - datetime.timedelta(days=1)
    start_date = to_date(start_date)
    if start_date > dt:
        raise RQInvalidArgument(
            _(u"in get_dividend, start_date {} is later than the previous test day {}").format(
                start_date, dt
            ))
    order_book_id = assure_order_book_id(order_book_id)
    df = env.data_proxy.get_dividend(order_book_id, adjusted)
    return df[start_date:dt]
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('series_name').is_instance_of(str),
             verify_that('value').is_number())
def plot(series_name, value):
    """
    Add a point to a custom plotted series.

    :param str series_name: the name of the custom series
    :param float value: the value of the series at the current bar/tick time
    :return: None
    """
    Environment.get_instance().add_plot(series_name, value)
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_BAR,
                                EXECUTION_PHASE.ON_TICK,
                                EXECUTION_PHASE.SCHEDULED)
@apply_rules(verify_that('id_or_symbol').is_valid_instrument())
def current_snapshot(id_or_symbol):
    """
    Return the current market snapshot for an instrument.

    Only meaningful during intraday phases: the snapshot reflects the market
    state at the moment of the call, accumulated from the day's open
    (conceptually a dynamic day bar).  In minute-bar backtests it is built
    from all minute bars seen so far, so the snapshot taken on the last bar
    normally matches the daily bar.  In live/paper trading, calls made at
    different times inside one handle_bar may return different data.  If the
    instrument has not traded yet today, close/high/low/last are all 0.

    :param str id_or_symbol: order_book_id or symbol
    :return: :class:`~Snapshot`
    """
    env = Environment.get_instance()
    frequency = env.config.base.frequency
    order_book_id = assure_order_book_id(id_or_symbol)
    return env.data_proxy.current_snapshot(order_book_id, frequency, env.calendar_dt)
| mit |
haudren/scipy | scipy/integrate/quadrature.py | 33 | 28087 | from __future__ import division, print_function, absolute_import
import numpy as np
import math
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a numpy function.
from numpy import trapz
from scipy.special.orthogonal import p_roots
from scipy.special import gammaln
from scipy._lib.six import xrange
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
class AccuracyWarning(Warning):
    """Warning issued when an iterative integrator stops before reaching the
    requested tolerance (``maxiter``/``divmax`` exceeded)."""
    pass
def _cached_p_roots(n):
"""
Cache p_roots results to speed up calls of the fixed_quad function.
"""
if n in _cached_p_roots.cache:
return _cached_p_roots.cache[n]
_cached_p_roots.cache[n] = p_roots(n)
return _cached_p_roots.cache[n]
_cached_p_roots.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
"""
x, w = _cached_p_roots(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=0), None
def vectorize1(func, args=(), vec_func=False):
    """Vectorize the call to a function.

    Internal utility used by `romberg` and `quadrature` to obtain a version
    of `func` that accepts a vector of evaluation points.

    Parameters
    ----------
    func : callable
        User defined function.
    args : tuple, optional
        Extra arguments for the function.
    vec_func : bool, optional
        True if `func` already takes vector arguments.

    Returns
    -------
    vfunc : callable
        A function that takes a vector argument and returns the result.
    """
    if vec_func:
        def vfunc(x):
            return func(x, *args)
        return vfunc

    def vfunc(x):
        if np.isscalar(x):
            return func(x, *args)
        x = np.asarray(x)
        # Evaluate the first point separately so the output dtype can be
        # inferred before the result array is allocated.
        first = func(x[0], *args)
        out = np.empty(len(x), dtype=getattr(first, 'dtype', type(first)))
        out[0] = first
        for idx, point in enumerate(x[1:], 1):
            out[idx] = func(point, *args)
        return out
    return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
               vec_func=True, miniter=1):
    """
    Compute a definite integral using fixed-tolerance Gaussian quadrature.

    Integrate `func` from `a` to `b` by increasing the Gaussian quadrature
    order until the estimate changes by less than `tol` (absolute) or `rtol`
    (relative).

    Parameters
    ----------
    func : function
        A Python function or method to integrate.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to function.
    tol, rtol : float, optional
        Iteration stops when error between last two iterates is less than
        `tol` OR the relative change is less than `rtol`.
    maxiter : int, optional
        Maximum order of Gaussian quadrature.
    vec_func : bool, optional
        True or False if func handles arrays as arguments (is a "vector"
        function). Default is True.
    miniter : int, optional
        Minimum order of Gaussian quadrature.

    Returns
    -------
    val : float
        Gaussian quadrature approximation (within tolerance) to integral.
    err : float
        Difference between last two estimates of the integral.

    See also
    --------
    romberg, fixed_quad, quad, dblquad, tplquad, romb, simps, cumtrapz,
    ode, odeint
    """
    if not isinstance(args, tuple):
        args = (args,)
    vfunc = vectorize1(func, args, vec_func=vec_func)
    val = np.inf
    err = np.inf
    # Guarantee at least one refinement step so `err` is a real difference.
    maxiter = max(miniter+1, maxiter)
    for n in xrange(miniter, maxiter+1):
        newval = fixed_quad(vfunc, a, b, (), n)[0]
        err = abs(newval-val)
        val = newval

        if err < tol or err < rtol*abs(val):
            break
    else:
        # Loop exhausted every order without converging.
        warnings.warn(
            "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
            AccuracyWarning)
    return val, err
def tupleset(t, i, value):
    """Return a copy of tuple `t` with element `i` replaced by `value`."""
    items = list(t)
    items[i] = value
    return tuple(items)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
    """
    Cumulatively integrate y(x) using the composite trapezoidal rule.

    Parameters
    ----------
    y : array_like
        Values to integrate.
    x : array_like, optional
        The coordinate to integrate along. If None (default), use spacing `dx`
        between consecutive elements in `y`.
    dx : int, optional
        Spacing between elements of `y`. Only used if `x` is None.
    axis : int, optional
        Specifies the axis to cumulate. Default is -1 (last axis).
    initial : scalar, optional
        If given, uses this value as the first value in the returned result.
        Typically this value should be 0. Default is None, which means no
        value at ``x[0]`` is returned and `res` has one element less than `y`
        along the axis of integration.

    Returns
    -------
    res : ndarray
        The result of cumulative integration of `y` along `axis`.
        If `initial` is None, the shape is such that the axis of integration
        has one less value than `y`. If `initial` is given, the shape is equal
        to that of `y`.

    Examples
    --------
    >>> from scipy import integrate
    >>> x = np.linspace(-2, 2, num=20)
    >>> y = x
    >>> y_int = integrate.cumtrapz(y, x, initial=0)
    """
    y = np.asarray(y)
    if x is None:
        d = dx
    else:
        x = np.asarray(x)
        if x.ndim == 1:
            d = np.diff(x)
            # reshape to correct shape: 1-d `x` broadcasts along `axis`
            shape = [1] * y.ndim
            shape[axis] = -1
            d = d.reshape(shape)
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-d or the "
                             "same as y.")
        else:
            d = np.diff(x, axis=axis)

        if d.shape[axis] != y.shape[axis] - 1:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")

    nd = len(y.shape)
    # slice1 selects y[1:], slice2 selects y[:-1] along `axis`; each interval
    # contributes d * (left + right) / 2.
    slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
    slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
    res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)

    if initial is not None:
        if not np.isscalar(initial):
            raise ValueError("`initial` parameter should be a scalar.")

        # Prepend `initial` so the result has the same length as `y`.
        shape = list(res.shape)
        shape[axis] = 1
        res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res],
                             axis=axis)

    return res
def _basic_simps(y, start, stop, x, dx, axis):
    """Composite Simpson's rule over interval pairs [start, stop) of `y`.

    Sums the Simpson contribution of every pair of adjacent intervals
    starting at `start` along `axis`.  If `x` is None the samples are assumed
    equally spaced `dx` apart; otherwise the non-uniform-spacing weights are
    used.
    """
    nd = len(y.shape)
    if start is None:
        start = 0
    step = 2
    slice_all = (slice(None),)*nd
    # slice0/slice1/slice2 pick the left, middle, right sample of each pair.
    slice0 = tupleset(slice_all, axis, slice(start, stop, step))
    slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
    slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))

    if x is None:  # Even spaced Simpson's rule.
        result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
                        axis=axis)
    else:
        # Account for possibly different spacings.
        #    Simpson's rule changes a bit.
        h = np.diff(x, axis=axis)
        sl0 = tupleset(slice_all, axis, slice(start, stop, step))
        sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
        h0 = h[sl0]
        h1 = h[sl1]
        hsum = h0 + h1
        hprod = h0 * h1
        h0divh1 = h0 / h1
        # Non-uniform Simpson weights; reduces to (h/3)*(y0+4*y1+y2) when
        # h0 == h1.
        tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
                          y[slice1]*hsum*hsum/hprod +
                          y[slice2]*(2-h0divh1))
        result = np.sum(tmp, axis=axis)
    return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
    """
    Integrate y(x) using samples along the given axis and the composite
    Simpson's rule.  If x is None, spacing of dx is assumed.

    If there are an even number of samples, N, then there are an odd
    number of intervals (N-1), but Simpson's rule requires an even number
    of intervals.  The parameter 'even' controls how this is handled.

    Parameters
    ----------
    y : array_like
        Array to be integrated.
    x : array_like, optional
        If given, the points at which `y` is sampled.
    dx : int, optional
        Spacing of integration points along axis of `y`. Only used when
        `x` is None. Default is 1.
    axis : int, optional
        Axis along which to integrate. Default is the last axis.
    even : {'avg', 'first', 'last'}, optional
        'avg' : Average two results:1) use the first N-2 intervals with
          a trapezoidal rule on the last interval and 2) use the last
          N-2 intervals with a trapezoidal rule on the first interval.

        'first' : Use Simpson's rule for the first N-2 intervals with
          a trapezoidal rule on the last interval.

        'last' : Use Simpson's rule for the last N-2 intervals with a
          trapezoidal rule on the first interval.

    Returns
    -------
    result : float or ndarray
        The estimated integral(s) along `axis`.

    See Also
    --------
    quad, romberg, quadrature, fixed_quad, dblquad, tplquad, romb,
    cumtrapz, ode, odeint

    Notes
    -----
    For an odd number of samples that are equally spaced the result is
    exact if the function is a polynomial of order 3 or less.  If
    the samples are not equally spaced, then the result is exact only
    if the function is a polynomial of order 2 or less.
    """
    y = np.asarray(y)
    nd = len(y.shape)
    N = y.shape[axis]
    last_dx = dx
    first_dx = dx
    returnshape = 0
    if x is not None:
        x = np.asarray(x)
        if len(x.shape) == 1:
            # Broadcast 1-d x along the integration axis.
            shapex = [1] * nd
            shapex[axis] = x.shape[0]
            saveshape = x.shape
            returnshape = 1
            x = x.reshape(tuple(shapex))
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-d or the "
                             "same as y.")
        if x.shape[axis] != N:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")
    if N % 2 == 0:
        # Even sample count: handle the extra interval by trapezoid, per
        # the `even` policy.
        val = 0.0
        result = 0.0
        slice1 = (slice(None),)*nd
        slice2 = (slice(None),)*nd
        if even not in ['avg', 'last', 'first']:
            raise ValueError("Parameter 'even' must be "
                             "'avg', 'last', or 'first'.")
        # Compute using Simpson's rule on first intervals
        if even in ['avg', 'first']:
            slice1 = tupleset(slice1, axis, -1)
            slice2 = tupleset(slice2, axis, -2)
            if x is not None:
                last_dx = x[slice1] - x[slice2]
            val += 0.5*last_dx*(y[slice1]+y[slice2])
            result = _basic_simps(y, 0, N-3, x, dx, axis)
        # Compute using Simpson's rule on last set of intervals
        if even in ['avg', 'last']:
            slice1 = tupleset(slice1, axis, 0)
            slice2 = tupleset(slice2, axis, 1)
            if x is not None:
                first_dx = x[tuple(slice2)] - x[tuple(slice1)]
            val += 0.5*first_dx*(y[slice2]+y[slice1])
            result += _basic_simps(y, 1, N-2, x, dx, axis)
        if even == 'avg':
            val /= 2.0
            result /= 2.0
        result = result + val
    else:
        result = _basic_simps(y, 0, N-2, x, dx, axis)
    if returnshape:
        x = x.reshape(saveshape)
    return result
def romb(y, dx=1.0, axis=-1, show=False):
    """
    Romberg integration using samples of a function.

    Parameters
    ----------
    y : array_like
        A vector of ``2**k + 1`` equally-spaced samples of a function.
    dx : float, optional
        The sample spacing. Default is 1.
    axis : int, optional
        The axis along which to integrate. Default is -1 (last axis).
    show : bool, optional
        When `y` is a single 1-D array, then if this argument is True
        print the table showing Richardson extrapolation from the
        samples. Default is False.  (A sequence ``(precision, width)``
        is also accepted and controls the printed number format.)

    Returns
    -------
    romb : ndarray
        The integrated result for `axis`.

    See also
    --------
    quad, romberg, quadrature, fixed_quad, dblquad, tplquad, simps,
    cumtrapz, ode, odeint
    """
    y = np.asarray(y)
    nd = len(y.shape)
    Nsamps = y.shape[axis]
    Ninterv = Nsamps-1
    n = 1
    k = 0
    # Find k with 2**k == Ninterv; anything else is an invalid sample count.
    while n < Ninterv:
        n <<= 1
        k += 1
    if n != Ninterv:
        raise ValueError("Number of samples must be one plus a "
                         "non-negative power of 2.")

    R = {}
    slice_all = (slice(None),) * nd
    slice0 = tupleset(slice_all, axis, 0)
    slicem1 = tupleset(slice_all, axis, -1)
    h = Ninterv * np.asarray(dx, dtype=float)
    # R[(0, 0)] is the single-interval trapezoid estimate.
    R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
    slice_R = slice_all
    start = stop = step = Ninterv
    for i in xrange(1, k+1):
        # Each level halves the step and adds the new interior ordinates,
        # then Richardson-extrapolates across the previous row.
        start >>= 1
        slice_R = tupleset(slice_R, axis, slice(start, stop, step))
        step >>= 1
        R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
        for j in xrange(1, i+1):
            prev = R[(i, j-1)]
            R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
        h /= 2.0

    if show:
        if not np.isscalar(R[(0, 0)]):
            print("*** Printing table only supported for integrals" +
                  " of a single data set.")
        else:
            # `show` may be a (precision, width) sequence; otherwise the
            # TypeError/IndexError fallbacks keep the defaults.
            try:
                precis = show[0]
            except (TypeError, IndexError):
                precis = 5
            try:
                width = show[1]
            except (TypeError, IndexError):
                width = 8
            formstr = "%%%d.%df" % (width, precis)

            title = "Richardson Extrapolation Table for Romberg Integration"
            print("", title.center(68), "=" * 68, sep="\n", end="")
            for i in xrange(k+1):
                for j in xrange(i+1):
                    print(formstr % R[(i, j)], end=" ")
                print()
            print("=" * 68)
            print()

    return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
    """Pretty-print the triangular Romberg result matrix `resmat`."""
    # Print the Romberg result matrix.
    i = j = 0
    print('Romberg integration of', repr(function), end=' ')
    print('from', interval)
    print('')
    print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
    for i in xrange(len(resmat)):
        # Row i corresponds to 2**i trapezoids.
        print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
        for j in xrange(i+1):
            print('%9f' % (resmat[i][j]), end=' ')
        print('')
    print('')
    # i, j are left at the last (most refined) entry by the loops above.
    print('The final result is', resmat[i][j], end=' ')
    print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
            divmax=10, vec_func=False):
    """
    Romberg integration of a callable function or method.

    Returns the integral of `function` (a function of one variable)
    over the interval (`a`, `b`).

    If `show` is 1, the triangular array of the intermediate results
    will be printed.  If `vec_func` is True (default is False), then
    `function` is assumed to support vector arguments.

    Parameters
    ----------
    function : callable
        Function to be integrated.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.

    Returns
    -------
    results : float
        Result of the integration.

    Other Parameters
    ----------------
    args : tuple, optional
        Extra arguments to pass to function.  Each element of `args` will
        be passed as a single argument to `func`.  Default is to pass no
        extra arguments.
    tol, rtol : float, optional
        The desired absolute and relative tolerances.  Defaults are 1.48e-8.
    show : bool, optional
        Whether to print the results.  Default is False.
    divmax : int, optional
        Maximum order of extrapolation.  Default is 10.
    vec_func : bool, optional
        Whether `func` handles arrays as arguments (i.e whether it is a
        "vector" function).  Default is False.

    See Also
    --------
    fixed_quad, quad, dblquad, tplquad, romb, simps, cumtrapz, ode, odeint

    References
    ----------
    .. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method
    """
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Romberg integration only available "
                         "for finite limits.")
    vfunc = vectorize1(function, args, vec_func=vec_func)
    n = 1
    interval = [a, b]
    intrange = b - a
    # ordsum accumulates the trapezoid ordinates; each level only adds the
    # NEW midpoints (see _difftrap).
    ordsum = _difftrap(vfunc, interval, n)
    result = intrange * ordsum
    resmat = [[result]]
    err = np.inf
    last_row = resmat[0]
    for i in xrange(1, divmax+1):
        n *= 2
        ordsum += _difftrap(vfunc, interval, n)
        # First column is the refined trapezoid estimate; the rest of the
        # row is built by Richardson extrapolation against the previous row.
        row = [intrange * ordsum / n]
        for k in xrange(i):
            row.append(_romberg_diff(last_row[k], row[k], k+1))
        result = row[i]
        lastresult = last_row[i-1]
        if show:
            resmat.append(row)
        err = abs(result - lastresult)
        if err < tol or err < rtol * abs(result):
            break
        last_row = row
    else:
        # Loop exhausted divmax levels without converging.
        warnings.warn(
            "divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
            AccuracyWarning)

    if show:
        _printresmat(vfunc, interval, resmat)
    return result
# Coefficients for Netwon-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
# Pre-computed Newton-Cotes coefficients for equally spaced samples:
# each entry is N: (num_a, den_a, int_a, num_B, den_B) as described above.
_builtincoeffs = {
    1: (1,2,[1,1],-1,12),
    2: (1,3,[1,4,1],-1,90),
    3: (3,8,[1,3,3,1],-3,80),
    4: (2,45,[7,32,12,32,7],-8,945),
    5: (5,288,[19,75,50,50,75,19],-275,12096),
    6: (1,140,[41,216,27,272,27,216,41],-9,1400),
    7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
    8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
        -2368,467775),
    9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
                 15741,2857], -4671, 394240),
    10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
                   -260550,272400,-48525,106300,16067],
         -673175, 163459296),
    11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
                      15493566,15493566,-9595542,25226685,-3237113,
                      13486539,2171465], -2224234463, 237758976000),
    12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
                      87516288,-87797136,87516288,-51491295,35725120,
                      -7587864,9903168,1364651], -3012, 875875),
    13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
                           156074417954,-151659573325,206683437987,
                           -43111992612,-43111992612,206683437987,
                           -151659573325,156074417954,-31268252574,
                           56280729661,8181904909], -2639651053,
         344881152000),
    14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
                         -6625093363,12630121616,-16802270373,19534438464,
                         -16802270373,12630121616,-6625093363,3501442784,
                         -770720657,710986864,90241897], -3740727473,
         1275983280000)
    }
def newton_cotes(rn, equal=0):
    """
    Return weights and error coefficient for Newton-Cotes integration.

    Suppose we have (N+1) samples of f at the positions
    x_0, x_1, ..., x_N.  Then an N-point Newton-Cotes formula for the
    integral between x_0 and x_N is:

    :math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)
    + B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`

    where :math:`\\xi \\in [x_0,x_N]`
    and :math:`\\Delta x = \\frac{x_N-x_0}{N}` is the average samples spacing.

    If the samples are equally-spaced and N is even, then the error
    term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.

    Parameters
    ----------
    rn : int or array_like
        The integer order for equally-spaced data or the relative positions
        of the samples with the first sample at 0 and the last at N, where
        N+1 is the length of `rn`.  N is the order of the Newton-Cotes
        integration.  Any sequence type (list, tuple, ndarray) is accepted.
    equal : int, optional
        Set to 1 to enforce equally spaced data.

    Returns
    -------
    an : ndarray
        1-D array of weights to apply to the function at the provided sample
        positions.
    B : float
        Error coefficient.

    Notes
    -----
    Normally, the Newton-Cotes rules are used on smaller integration
    regions and a composite rule is used to return the total integral.
    """
    try:
        N = len(rn)-1
        if equal:
            rn = np.arange(N+1)
        elif np.all(np.diff(rn) == 1):
            equal = 1
    except TypeError:
        # len() failed: `rn` is a scalar giving the order for equally
        # spaced samples.
        N = rn
        rn = np.arange(N+1)
        equal = 1

    if equal and N in _builtincoeffs:
        na, da, vi, nb, db = _builtincoeffs[N]
        an = na * np.array(vi, dtype=float) / da
        return an, float(nb)/db

    # Coerce plain sequences so the arithmetic below works on them too.
    rn = np.asarray(rn)
    if (rn[0] != 0) or (rn[-1] != N):
        raise ValueError("The sample positions must start at 0"
                         " and end at N")
    yi = rn / float(N)
    ti = 2 * yi - 1
    nvec = np.arange(N+1)
    # Vandermonde system in the Chebyshev-like variable ti on [-1, 1].
    C = ti ** nvec[:, np.newaxis]
    Cinv = np.linalg.inv(C)
    # improve precision of result via two Newton iterations on the inverse
    for i in range(2):
        Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
    # Only even powers integrate to a nonzero value over [-1, 1].
    vec = 2.0 / (nvec[::2]+1)
    ai = Cinv[:, ::2].dot(vec) * (N / 2.)

    if (N % 2 == 0) and equal:
        BN = N/(N+3.)
        power = N+2
    else:
        BN = N/(N+2.)
        power = N+1

    BN = BN - np.dot(yi**power, ai)
    p1 = power+1
    # Compute N**power / power! in log space to avoid overflow.
    fac = power*math.log(N) - gammaln(p1)
    fac = math.exp(fac)
    return ai, BN*fac
| bsd-3-clause |
jgowans/correlation_plotter | plot_f_engine.py | 1 | 2960 | #!/usr/bin/env python
# Note: all frequencies in MHz, all times in us
import corr
import numpy as np
import itertools
import matplotlib.pyplot as plt
import time
from operator import add
def snaps():
    """Names of the channel-0 snapshot blocks (real/imag pair per stream)."""
    return ['ch0_{0}_{1}'.format(stream, part)
            for stream in ('00', '01')
            for part in ('re', 'im')]
def arm_snaps():
    """Arm every channel-0 snapshot block so the next trigger latches data."""
    for snap in snaps():
        fpga.snapshot_arm(snap)
def get_snap(snap):
    """Read a previously-armed snapshot buffer as a numpy int32 array.

    NOTE(review): this decodes with native-endian ``np.int32`` while
    plot_power/plot_cross decode ``'>i4'`` (big endian) -- confirm which
    byte order the FPGA actually emits.
    """
    raw = fpga.snapshot_get(snap, arm=False)['data']
    unpacked = np.frombuffer(raw, dtype=np.int32)
    return unpacked
def re_sync():
    """Pulse the sync generator (low -> high -> low) to issue one sync."""
    fpga.write_int('sync_gen_sync', 0)
    fpga.write_int('sync_gen_sync', 1)
    fpga.write_int('sync_gen_sync', 0)
def get_sync_time():
    """Measure and return the wall-clock period between sync pulses (seconds).

    Busy-waits on the sync latch register: waits for one assertion to start
    the clock, clears the latch, then waits for the next assertion.
    """
    re_sync()
    fpga.write_int('sync_gen_latch_reset', 0)
    while fpga.read_uint('sync_gen_latch') == 0:
        pass
    t0 = time.time()
    # Clear the latch, then time until it asserts again.
    fpga.write_int('sync_gen_latch_reset', 1)
    fpga.write_int('sync_gen_latch_reset', 0)
    while fpga.read_uint('sync_gen_latch') == 0:
        pass
    delta_t = time.time() - t0
    print("sync time: {t}".format(t=delta_t))
    return delta_t
def get_acc_time():
    """Measure and return the accumulation period (seconds).

    Same busy-wait latch scheme as :func:`get_sync_time`, but on the
    new-accumulation latch register.
    """
    fpga.write_int('new_acc_latch_reset', 0)
    while fpga.read_uint('new_acc_latch') == 0:
        pass
    t0 = time.time()
    # Clear the latch, then time until the next accumulation completes.
    fpga.write_int('new_acc_latch_reset', 1)
    fpga.write_int('new_acc_latch_reset', 0)
    while fpga.read_uint('new_acc_latch') == 0:
        pass
    delta_t = time.time() - t0
    print("accumulation time: {t}".format(t=delta_t))
    return delta_t
def plot_power():
    """Fetch the ch0 power snapshot and plot it over a 0-400 MHz axis.

    Blocks until the plot window is closed.
    """
    raw = fpga.snapshot_get('ch0_snap1')['data']
    unpacked = np.frombuffer(raw, np.dtype('>i4'))  # big endian 4 byte ints
    fft_ax = np.linspace(0, 400, len(unpacked))
    plt.plot(fft_ax, unpacked)
    plt.show()
def plot_cross():
    """Fetch the ch0 cross-correlation snapshot and plot magnitude + phase.

    The buffer interleaves big-endian 4-byte integers as (imag, real)
    pairs; they are combined into a complex spectrum plotted over a
    0-400 MHz axis.  Blocks until the plot window is closed.
    """
    raw = fpga.snapshot_get('ch0_snap')['data']
    unpacked = np.frombuffer(raw, np.dtype('>i4'))  # big endian 4 byte ints
    re = unpacked[1::2]
    im = unpacked[0::2]
    assert(len(re) == len(im))
    # Build the complex spectrum in one vectorized step instead of an
    # element-by-element Python loop.
    fft = re + (1j * im)
    fft_ax = np.linspace(0, 400, len(fft))
    fig = plt.figure()
    ax_fft_mag = fig.add_subplot(211)
    ax_fft_phase = fig.add_subplot(212, sharex=ax_fft_mag)
    ax_fft_mag.plot(fft_ax, np.abs(fft))
    ax_fft_phase.plot(fft_ax, np.angle(fft, deg=True))
    plt.show()
class FFTData:
    """Container for a sampled spectrum with an attached frequency axis.

    Parameters
    ----------
    signal : sequence
        Spectrum values (may be complex).
    fs : float
        Sample rate; the axis spans ``[0, fs)`` over ``len(signal)`` bins.
    f_min, f_max : float
        Accepted for interface compatibility; the search range is supplied
        per call to :meth:`find_peak`.
    """
    def __init__(self, signal, fs, f_min, f_max):
        self.signal = signal
        self.fs = fs
        self.axis = np.linspace(0, fs, len(signal), endpoint=False)
    def find_peak(self, f_min, f_max):
        '''
        Finds the peak frequency and amplitude in a given range.
        Returns (f, amp)
        '''
        # Restrict to bins whose frequency lies inside [f_min, f_max].
        in_band = (self.axis >= f_min) & (self.axis <= f_max)
        if not in_band.any():
            raise ValueError(
                "no frequency bins inside [{0}, {1}]".format(f_min, f_max))
        magnitudes = np.abs(np.asarray(self.signal)[in_band])
        peak_idx = np.argmax(magnitudes)
        return (self.axis[in_band][peak_idx], magnitudes[peak_idx])
# --- script entry: connect to the FPGA, configure, then plot forever ---
fpga = corr.katcp_wrapper.FpgaClient('localhost')
time.sleep(0.1)
fpga.write_int('acc_len', 4096)  # accumulation length (alternatives below)
#fpga.write_int('acc_len', 65536)
#fpga.write_int('acc_len', 128)
fpga.write_int('fft_shift', 2**12 - 1)  # shift at every FFT stage
time.sleep(1)
fpga.write_int('acc_rst', 1)  # pulse the accumulator reset
fpga.write_int('acc_rst', 0)
#re_sync()
#get_acc_time()
re_sync()
time.sleep(1)
while True:
    # plot_power()
    plot_cross()  # blocks until each plot window is closed
| mit |
wesm/statsmodels | scikits/statsmodels/tsa/ar_model.py | 1 | 32245 | """
This is the VAR class refactored from pymaclab.
"""
from __future__ import division
import numpy as np
from numpy import (dot, identity, atleast_2d, atleast_1d, zeros)
from numpy.linalg import inv
from scipy import optimize
from scipy.stats import t, norm, ss as sumofsq
from scikits.statsmodels.regression.linear_model import OLS
from scikits.statsmodels.tsa.tsatools import (lagmat, add_trend,
_ar_transparams, _ar_invtransparams)
import scikits.statsmodels.tsa.base.tsa_model as tsbase
import scikits.statsmodels.base.model as base
from scikits.statsmodels.tools.decorators import (resettable_cache,
cache_readonly, cache_writable)
from scikits.statsmodels.tools.compatibility import np_slogdet
from scikits.statsmodels.sandbox.regression.numdiff import approx_fprime
from scikits.statsmodels.sandbox.regression.numdiff import (approx_hess,
approx_hess_cs)
from scikits.statsmodels.tsa.kalmanf.kalmanfilter import KalmanFilter
import scikits.statsmodels.base.wrapper as wrap
from scikits.statsmodels.tsa.vector_ar import util
__all__ = ['AR']
class AR(tsbase.TimeSeriesModel):
"""
Autoregressive AR(p) Model
Parameters
----------
endog : array-like
Endogenous response variable.
date : array-like
Dates of the endogenous variable.
"""
def __init__(self, endog, dates=None, freq=None):
    # Coerce endog to a (nobs, 1) column; only univariate AR is supported.
    super(AR, self).__init__(endog, None, dates, freq)
    endog = self.endog  # original might not have been an ndarray
    if endog.ndim == 1:
        endog = endog[:,None]
        self.endog = endog  # to get shapes right
    elif endog.ndim > 1 and endog.shape[1] != 1:
        raise ValueError("Only the univariate case is implemented")
def initialize(self):
    # No extra initialization needed beyond the base class.
    pass
def _transparams(self, params):
    """
    Transforms params to induce stationarity/invertability.

    Reference
    ---------
    Jones(1980)
    """
    p = self.k_ar
    k = self.k_trend
    # only the AR coefficients (after the trend terms) are transformed
    newparams = params.copy()
    newparams[k:k+p] = _ar_transparams(params[k:k+p].copy())
    return newparams
def _invtransparams(self, start_params):
    """
    Inverse of the Jones reparameterization
    """
    p = self.k_ar
    k = self.k_trend
    # only the AR coefficients (after the trend terms) are transformed
    newparams = start_params.copy()
    newparams[k:k+p] = _ar_invtransparams(start_params[k:k+p].copy())
    return newparams
def _presample_fit(self, params, start, p, end, y, predictedvalues):
    """
    Return the pre-sample predicted values using the Kalman Filter

    Notes
    -----
    See predict method for how to use start and p.  Writes results
    into `predictedvalues` in place rather than returning them.
    """
    k = self.k_trend
    # build system matrices
    T_mat = KalmanFilter.T(params, p, k, p)
    R_mat = KalmanFilter.R(params, p, k, 0, p)
    # Initial State mean and variance
    alpha = np.zeros((p,1))
    # unconditional state covariance: vec(Q0) = (I - T(x)T)^-1 vec(R R')
    Q_0 = dot(inv(identity(p**2)-np.kron(T_mat,T_mat)),dot(R_mat,
            R_mat.T).ravel('F'))
    Q_0 = Q_0.reshape(p,p, order='F') #TODO: order might need to be p+k
    P = Q_0
    Z_mat = KalmanFilter.Z(p)
    for i in xrange(end): #iterate p-1 times to fit presample
        v_mat = y[i] - dot(Z_mat,alpha)  # one-step prediction error
        F_mat = dot(dot(Z_mat, P), Z_mat.T)
        Finv = 1./F_mat # inv. always scalar
        K = dot(dot(dot(T_mat,P),Z_mat.T),Finv)  # Kalman gain
        # update state
        alpha = dot(T_mat, alpha) + dot(K,v_mat)
        L = T_mat - dot(K,Z_mat)
        P = dot(dot(T_mat, P), L.T) + dot(R_mat, R_mat.T)
        # P[0,0] += 1 # for MA part, R_mat.R_mat.T above
        if i >= start-1: #only record if we ask for it
            predictedvalues[i+1-start] = dot(Z_mat,alpha)
def _get_predict_start(self, start):
    # Default start: 0 for mle (pre-sample values come from the Kalman
    # filter), k_ar for cmle (no pre-sample fit is possible).
    if start is None:
        if self.method == 'mle':
            start = 0
        else: # can't do presample fit for cmle
            start = self.k_ar
    if self.method == 'cmle':
        if start < self.k_ar:
            raise ValueError("Start must be >= k_ar")
    return super(AR, self)._get_predict_start(start)
def predict(self, params, start=None, end=None, method='static'):
    """
    Returns in-sample prediction or forecasts.

    Parameters
    ----------
    params : array
        The fitted model parameters.
    start : int, str, or datetime
        Zero-indexed observation number at which to start forecasting, ie.,
        the first forecast is start. Can also be a date string to
        parse or a datetime type.
    end : int, str, or datetime
        Zero-indexed observation number at which to end forecasting, ie.,
        the first forecast is start. Can also be a date string to
        parse or a datetime type.
    method : string {'dynamic', 'static'}
        If method is 'dynamic', then fitted values are used in place of
        observed 'endog' to make forecasts.  If 'static', observed 'endog'
        are used.  Only 'static' is currently implemented.

    Returns
    -------
    predicted values : array

    Notes
    -----
    The linear Gaussian Kalman filter is used to return pre-sample fitted
    values. The exact initial Kalman Filter is used. See Durbin and Koopman
    in the references for more information.
    """
    start = self._get_predict_start(start) # will be an index of a date
    end, out_of_sample = self._get_predict_end(end)
    if end < start:
        raise ValueError("end is before start")
    if end == start + out_of_sample:
        return np.array([])

    k_ar = self.k_ar
    y = self.endog[:k_ar]  # the pre-sample observations
    nobs = int(self.endog.shape[0])
    k_trend = self.k_trend
    method = self.method
    predictedvalues = np.zeros(end+1-start + out_of_sample)

    # fit pre-sample
    if method == 'mle': # use Kalman Filter to get initial values
        if k_trend:
            # long-run mean of the process implied by the constant
            mu = params[0]/(1-np.sum(params[k_trend:]))
        # modifies predictedvalues in place
        if start < k_ar:
            # filter the demeaned pre-sample, then add the mean back
            self._presample_fit(params, start, k_ar, min(k_ar-1, end),
                    y-mu, predictedvalues)
            predictedvalues[:k_ar-start] += mu
    if end < k_ar:
        return predictedvalues

    # fit in-sample
    # just do the whole thing and then truncate
    fittedvalues = dot(self.X, params)
    pv_start = max(k_ar - start, 0)
    fv_start = max(start - k_ar, 0)
    pv_end = min(len(predictedvalues), len(fittedvalues) - fv_start)
    #fv_end = min(len(fittedvalues), len(fittedvalues) - end)
    fv_end = min(len(fittedvalues), end-k_ar+1)
    predictedvalues[pv_start:pv_end+pv_start] = fittedvalues[fv_start:fv_end]
    if not out_of_sample:
        return predictedvalues

    # fit out of sample: iterate one-step forecasts, feeding each
    # forecast back in as the newest lag
    endog = np.r_[self.endog[-k_ar:], [[0]]*out_of_sample]
    params = params.copy()
    # NOTE(review): relies on truthiness of a size-1 slice when a
    # constant is present -- confirm k_trend is always 0 or 1 here
    mu = params[:k_trend] or 0
    params = params[k_trend:][::-1]  # reverse so newest lag aligns last
    for i in range(out_of_sample):
        fcast = mu + np.dot(params, endog[i:i+k_ar])
        predictedvalues[-out_of_sample+i] = fcast
        endog[i+k_ar] = fcast
    return predictedvalues
def _presample_varcov(self, params):
    """
    Returns the inverse of the presample variance-covariance.

    Notes
    -----
    See Hamilton p. 125
    """
    k = self.k_trend
    p = self.k_ar
    p1 = p+1

    # get inv(Vp) Hamilton 5.3.7
    # params0 = (-1, phi_1, ..., phi_p); build the upper triangle from
    # cross-correlations of the coefficient vector, then symmetrize
    params0 = np.r_[-1, params[k:]]

    Vpinv = np.zeros((p,p), dtype=params.dtype)
    for i in range(1,p1):
        Vpinv[i-1,i-1:] = np.correlate(params0, params0[:i])[:-1]
        Vpinv[i-1,i-1:] -= np.correlate(params0[-i:], params0)[:-1]

    Vpinv = Vpinv + Vpinv.T - np.diag(Vpinv.diagonal())
    return Vpinv
def _loglike_css(self, params):
    """
    Loglikelihood of AR(p) process using conditional sum of squares
    """
    nobs = self.nobs
    # residuals of the conditional (OLS-style) regression
    resid = self.Y.squeeze() - np.dot(self.X, params)
    ssr = sumofsq(resid)
    sigma2 = ssr / nobs
    # concentrated Gaussian loglikelihood of the conditional model
    llf = -nobs / 2 * (np.log(2 * np.pi) + np.log(sigma2))
    return llf - ssr / (2 * sigma2)
def _loglike_mle(self, params):
    """
    Loglikelihood of AR(p) process using exact maximum likelihood
    """
    nobs = self.nobs
    Y = self.Y
    X = self.X
    endog = self.endog
    k_ar = self.k_ar
    k_trend = self.k_trend

    # reparameterize according to Jones (1980) like in ARMA/Kalman Filter
    if self.transparams:
        params = self._transparams(params)

    # get mean and variance for pre-sample lags
    yp = endog[:k_ar].copy()
    if k_trend:
        c = [params[0]] * k_ar
    else:
        c = [0]
    # unconditional mean of each pre-sample observation
    mup = np.asarray(c/(1-np.sum(params[k_trend:])))
    diffp = yp-mup[:,None]

    # get inv(Vp) Hamilton 5.3.7
    Vpinv = self._presample_varcov(params)

    # quadratic form of the demeaned pre-sample observations
    diffpVpinv = np.dot(np.dot(diffp.T,Vpinv),diffp).item()
    ssr = sumofsq(endog[k_ar:].squeeze() -np.dot(X,params))

    # concentrating the likelihood means that sigma2 is given by
    sigma2 = 1./nobs * (diffpVpinv + ssr)
    self.sigma2 = sigma2
    logdet = np_slogdet(Vpinv)[1] #TODO: add check for singularity
    loglike = -1/2.*(nobs*(np.log(2*np.pi) + np.log(sigma2)) - \
            logdet + diffpVpinv/sigma2 + ssr/sigma2)
    return loglike
def loglike(self, params):
    """
    The loglikelihood of an AR(p) process

    Parameters
    ----------
    params : array
        The fitted parameters of the AR model

    Returns
    -------
    llf : float
        The loglikelihood evaluated at `params`

    Notes
    -----
    Contains constant term.  If the model is fit by OLS then this returns
    the conditonal maximum likelihood.

    .. math:: \\frac{\\left(n-p\\right)}{2}\\left(\\log\\left(2\\pi\\right)+\\log\\left(\\sigma^{2}\\right)\\right)-\\frac{1}{\\sigma^{2}}\\sum_{i}\\epsilon_{i}^{2}

    If it is fit by MLE then the (exact) unconditional maximum likelihood
    is returned.

    .. math:: -\\frac{n}{2}log\\left(2\\pi\\right)-\\frac{n}{2}\\log\\left(\\sigma^{2}\\right)+\\frac{1}{2}\\left|V_{p}^{-1}\\right|-\\frac{1}{2\\sigma^{2}}\\left(y_{p}-\\mu_{p}\\right)^{\\prime}V_{p}^{-1}\\left(y_{p}-\\mu_{p}\\right)-\\frac{1}{2\\sigma^{2}}\\sum_{t=p+1}^{n}\\epsilon_{i}^{2}

    where

    :math:`\\mu_{p}` is a (`p` x 1) vector with each element equal to the
    mean of the AR process and :math:`\\sigma^{2}V_{p}` is the (`p` x `p`)
    variance-covariance matrix of the first `p` observations.
    """
    #TODO: Math is on Hamilton ~pp 124-5
    # dispatch on the fit method chosen in `fit`
    if self.method == "cmle":
        return self._loglike_css(params)
    else:
        return self._loglike_mle(params)
def score(self, params):
    """
    Return the gradient of the loglikelihood at params.

    Parameters
    ----------
    params : array-like
        The parameter values at which to evaluate the score function.

    Notes
    -----
    Returns numerical gradient.
    """
    # forward-difference numerical gradient of the loglikelihood
    return approx_fprime(params, self.loglike, epsilon=1e-8)
def information(self, params):
    """
    Not Implemented Yet
    """
    # placeholder: the Fisher information matrix is not implemented
    return
def hessian(self, params):
    """
    Returns numerical hessian for now.
    """
    # approx_hess returns a tuple; element 0 is the hessian matrix
    return approx_hess(params, self.loglike)[0]
def _stackX(self, k_ar, trend):
    """
    Private method to build the RHS matrix for estimation.

    Columns are trend terms then lags.  Also sets self.k_trend as a
    side effect.
    """
    endog = self.endog
    X = lagmat(endog, maxlag=k_ar, trim='both')
    k_trend = util.get_trendorder(trend)
    if k_trend:
        X = add_trend(X, prepend=True, trend=trend)
    self.k_trend = k_trend
    return X
def select_order(self, maxlag, ic):
    """
    Select the lag order according to the information criterion.

    Parameters
    ----------
    maxlag : int
        The highest lag length tried. See `AR.fit`.
    ic : str {'aic','bic','hqic','t-stat'}
        Criterion used for selecting the optimal lag length.
        See `AR.fit`.

    Returns
    -------
    bestlag : int
        Best lag according to IC.
    """
    endog = self.endog
    trend = self.trend
    # make Y and X with same nobs to compare ICs
    Y = endog[maxlag:]
    self.Y = Y  # attach to get correct fit stats
    X = self._stackX(maxlag, trend)  # sets k_trend
    self.X = X
    method = self.method
    startlag = self.k_trend  # k_trend set in _stackX
    startlag = max(1, startlag)  # handle if startlag is 0
    results = {}
    if ic != 't-stat':
        for lag in range(startlag, maxlag+1):
            # have to reinstantiate the model to keep comparable models
            endog_tmp = endog[maxlag-lag:]
            fit = AR(endog_tmp).fit(maxlag=lag, method=method,
                                    full_output=0, trend=trend,
                                    maxiter=100, disp=0)
            # use getattr instead of eval to read the requested criterion
            results[lag] = getattr(fit, ic)
        bestic, bestlag = min((res, lag) for lag, res in results.iteritems())
    else:  # choose by last t-stat.
        bestlag = 0  # robust fallback if no lag is significant
        stop = 1.6448536269514722  # for t-stat, norm.ppf(.95)
        for lag in range(maxlag, startlag-1, -1):
            # have to reinstantiate the model to keep comparable models
            endog_tmp = endog[maxlag-lag:]
            # BUG FIX: the original passed full_output=full_output,
            # maxiter=maxiter, disp=disp -- names that are undefined in
            # this scope (NameError); use the same fixed settings as the
            # information-criterion branch above.
            fit = AR(endog_tmp).fit(maxlag=lag, method=method,
                                    full_output=0, trend=trend,
                                    maxiter=100, disp=0)
            if np.abs(fit.tvalues[-1]) >= stop:
                bestlag = lag
                break
    return bestlag
def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
        transparams=True, start_params=None, solver=None, maxiter=35,
        full_output=1, disp=1, callback=None, **kwargs):
    """
    Fit the unconditional maximum likelihood of an AR(p) process.

    Parameters
    ----------
    maxlag : int
        If `ic` is None, then maxlag is the lag length used in fit.  If
        `ic` is specified then maxlag is the highest lag order used to
        select the correct lag order.  If maxlag is None, the default is
        round(12*(nobs/100.)**(1/4.))
    method : str {'cmle', 'mle'}, optional
        cmle - Conditional maximum likelihood using OLS
        mle - Unconditional (exact) maximum likelihood.  See `solver`
        and the Notes.
    ic : str {'aic','bic','hqic','t-stat'}
        Criterion used for selecting the optimal lag length.
        aic - Akaike Information Criterion
        bic - Bayes Information Criterion
        t-stat - Based on last lag
        hqic - Hannan-Quinn Information Criterion
        If any of the information criteria are selected, the lag length
        which results in the lowest value is selected.  If t-stat, the
        model starts with maxlag and drops a lag until the highest lag
        has a t-stat that is significant at the 95 % level.
    trend : str {'c','nc'}
        Whether to include a constant or not. 'c' - include constant.
        'nc' - no constant.

    The below can be specified if method is 'mle'

    transparams : bool, optional
        Whether or not to transform the parameters to ensure stationarity.
        Uses the transformation suggested in Jones (1980).
    start_params : array-like, optional
        A first guess on the parameters.  Default is cmle estimates.
    solver : str or None, optional
        Solver to be used.  The default is 'l_bfgs' (limited memory Broyden-
        Fletcher-Goldfarb-Shanno).  Other choices are 'bfgs', 'newton'
        (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' - (conjugate gradient),
        'ncg' (non-conjugate gradient), and 'powell'.
        The limited memory BFGS uses m=30 to approximate the Hessian,
        projected gradient tolerance of 1e-7 and factr = 1e3.  These
        cannot currently be changed for l_bfgs.  See notes for more
        information.
    maxiter : int, optional
        The maximum number of function evaluations. Default is 35.
    tol : float
        The convergence tolerance.  Default is 1e-08.
    full_output : bool, optional
        If True, all output from solver will be available in
        the Results object's mle_retvals attribute.  Output is dependent
        on the solver.  See Notes for more information.
    disp : bool, optional
        If True, convergence information is output.
    callback : function, optional
        Called after each iteration as callback(xk) where xk is the current
        parameter vector.
    kwargs
        See Notes for keyword arguments that can be passed to fit.

    References
    ----------
    Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to time
        series with missing observations."  `Technometrics`.  22.3.
        389-95.

    See also
    --------
    scikits.statsmodels.model.LikelihoodModel.fit for more information
    on using the solvers.

    Notes
    ------
    The below is the docstring from
    scikits.statsmodels.LikelihoodModel.fit
    """
    method = method.lower()
    if method not in ['cmle','yw','mle']:
        raise ValueError("Method %s not recognized" % method)
    self.method = method
    self.trend = trend
    self.transparams = transparams
    nobs = len(self.endog) # overwritten if method is 'cmle'
    endog = self.endog

    if maxlag is None:
        maxlag = int(round(12*(nobs/100.)**(1/4.)))
    k_ar = maxlag # stays this if ic is None

    # select lag length
    if ic is not None:
        ic = ic.lower()
        if ic not in ['aic','bic','hqic','t-stat']:
            raise ValueError("ic option %s not understood" % ic)
        k_ar = self.select_order(k_ar, ic)

    self.k_ar = k_ar # change to what was chosen by ic

    # redo estimation for best lag
    # make LHS
    Y = endog[k_ar:,:]
    # make lagged RHS
    X = self._stackX(k_ar, trend) # sets self.k_trend
    k_trend = self.k_trend
    k = k_trend
    self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
    self.Y = Y
    self.X = X

    if solver:
        solver = solver.lower()
    if method == "cmle": # do OLS
        arfit = OLS(Y,X).fit()
        params = arfit.params
        self.nobs = nobs - k_ar
        self.sigma2 = arfit.ssr/arfit.nobs #needed for predict fcasterr
    if method == "mle":
        self.nobs = nobs
        # BUG FIX: "if not start_params" raises ValueError for
        # multi-element arrays and misfires for all-zero starting
        # values; test for None explicitly instead.
        if start_params is None:
            start_params = OLS(Y,X).fit().params
        start_params = self._invtransparams(start_params)
        loglike = lambda params : -self.loglike(params)
        if solver is None: # use limited memory bfgs
            bounds = [(None,)*2]*(k_ar+k)
            mlefit = optimize.fmin_l_bfgs_b(loglike, start_params,
                    approx_grad=True, m=12, pgtol=1e-8, factr=1e2,
                    bounds=bounds, iprint=disp)
            self.mlefit = mlefit
            params = mlefit[0]
        else:
            mlefit = super(AR, self).fit(start_params=start_params,
                    method=solver, maxiter=maxiter,
                    full_output=full_output, disp=disp,
                    callback = callback, **kwargs)
            self.mlefit = mlefit
            params = mlefit.params
        if self.transparams:
            params = self._transparams(params)
            self.transparams = False # turn off now for other results

    # don't use yw, because we can't estimate the constant
    #elif method == "yw":
    #    params, omega = yule_walker(endog, order=maxlag,
    #            method="mle", demean=False)
    #    # how to handle inference after Yule-Walker?
    #    self.params = params #TODO: don't attach here
    #    self.omega = omega

    pinv_exog = np.linalg.pinv(X)
    normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
    arfit = ARResults(self, params, normalized_cov_params)
    return ARResultsWrapper(arfit)
fit.__doc__ += base.LikelihoodModel.fit.__doc__
class ARResults(tsbase.TimeSeriesModelResults):
"""
Class to hold results from fitting an AR model.
Parameters
----------
model : AR Model instance
Reference to the model that is fit.
params : array
The fitted parameters from the AR Model.
normalized_cov_params : array
inv(dot(X.T,X)) where X is the lagged values.
scale : float, optional
An estimate of the scale of the model.
Returns
-------
**Attributes**
aic : float
Akaike Information Criterion using Lutkephol's definition.
:math:`log(sigma) + 2*(1+k_ar)/nobs`
bic : float
Bayes Information Criterion
:math:`\\log(\\sigma) + (1+k_ar)*\\log(nobs)/nobs`
bse : array
The standard errors of the estimated parameters. If `method` is 'cmle',
then the standard errors that are returned are the OLS standard errors
of the coefficients. If the `method` is 'mle' then they are computed
using the numerical Hessian.
fittedvalues : array
The in-sample predicted values of the fitted AR model. The `k_ar`
initial values are computed via the Kalman Filter if the model is
fit by `mle`.
fpe : float
Final prediction error using Lutkepohl's definition
((n_totobs+k_trend)/(n_totobs-k_ar-k_trend))*sigma
hqic : float
Hannan-Quinn Information Criterion.
k_ar : float
Lag length. Sometimes used as `p` in the docs.
k_trend : float
The number of trend terms included. 'nc'=0, 'c'=1.
llf : float
The loglikelihood of the model evaluated at `params`. See `AR.loglike`
model : AR model instance
A reference to the fitted AR model.
nobs : float
The number of available observations `nobs` - `k_ar`
n_totobs : float
The number of total observations in `endog`. Sometimes `n` in the docs.
params : array
The fitted parameters of the model.
pvalues : array
The p values associated with the standard errors.
resid : array
The residuals of the model. If the model is fit by 'mle' then the pre-sample
residuals are calculated using fittedvalues from the Kalman Filter.
roots : array
The roots of the AR process are the solution to
(1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0
Stability requires that the roots in modulus lie outside the unit
circle.
scale : float
Same as sigma2
sigma2 : float
The variance of the innovations (residuals).
trendorder : int
The polynomial order of the trend. 'nc' = None, 'c' or 't' = 0, 'ct' = 1,
etc.
tvalues : array
The t-values associated with `params`.
"""
_cache = {} # for scale setter
def __init__(self, model, params, normalized_cov_params=None, scale=1.):
    super(ARResults, self).__init__(model, params, normalized_cov_params,
            scale)
    self._cache = resettable_cache()
    self.nobs = model.nobs
    n_totobs = len(model.endog)
    self.n_totobs = n_totobs
    self.X = model.X # copy?
    self.Y = model.Y
    k_ar = model.k_ar
    self.k_ar = k_ar
    k_trend = model.k_trend
    self.k_trend = k_trend
    trendorder = None
    if k_trend > 0:
        trendorder = k_trend - 1
    # BUG FIX: the computed trendorder was previously discarded and
    # self.trendorder was hard-coded to 1; store the actual value
    # (None for 'nc', 0 for 'c', matching the class docstring).
    self.trendorder = trendorder
    #TODO: cmle vs mle?
    self.df_resid = self.model.df_resid = n_totobs - k_ar - k_trend
@cache_writable()
def sigma2(self):
    # cmle: residual variance without DOF correction; mle: the value
    # concentrated out of the likelihood by the model.
    model = self.model
    if model.method == "cmle": # do DOF correction
        return 1./self.nobs * sumofsq(self.resid)
    else:
        return self.model.sigma2
@cache_writable() # for compatability with RegressionResults
def scale(self):
    # alias for sigma2, see class docstring
    return self.sigma2
@cache_readonly
def bse(self): # allow user to specify?
    # cmle: OLS standard errors; mle: square roots of the diagonal of
    # the inverse negative numerical Hessian at the estimates.
    if self.model.method == "cmle": # uses different scale/sigma definition
        resid = self.resid
        ssr = np.dot(resid,resid)
        ols_scale = ssr/(self.nobs - self.k_ar - self.k_trend)
        return np.sqrt(np.diag(self.cov_params(scale=ols_scale)))
    else:
        hess = approx_hess(self.params, self.model.loglike)
        return np.sqrt(np.diag(-np.linalg.inv(hess[0])))
@cache_readonly
def pvalues(self):
    # two-sided p-values from the standard normal distribution
    return norm.sf(np.abs(self.tvalues))*2
@cache_readonly
def aic(self):
    #JP: this is based on loglike with dropped constant terms ?
    # Lutkepohl
    #return np.log(self.sigma2) + 1./self.model.nobs * self.k_ar
    # Include constant as estimated free parameter and double the loss
    return np.log(self.sigma2) + 2 * (1 + self.k_ar)/self.nobs
    # Stata defintion
    #nobs = self.nobs
    #return -2 * self.llf/nobs + 2 * (self.k_ar+self.k_trend)/nobs
@cache_readonly
def hqic(self):
    nobs = self.nobs
    # Lutkepohl
    # return np.log(self.sigma2)+ 2 * np.log(np.log(nobs))/nobs * self.k_ar
    # R uses all estimated parameters rather than just lags
    return np.log(self.sigma2) + 2 * np.log(np.log(nobs))/nobs * \
            (1 + self.k_ar)
    # Stata
    #nobs = self.nobs
    #return -2 * self.llf/nobs + 2 * np.log(np.log(nobs))/nobs * \
    #        (self.k_ar + self.k_trend)
@cache_readonly
def fpe(self):
    nobs = self.nobs
    k_ar = self.k_ar
    k_trend = self.k_trend
    #Lutkepohl
    # NOTE(review): the class docstring states (n_totobs+k_trend) in the
    # numerator but the code uses (nobs+k_ar+k_trend) -- confirm which
    # definition is intended.
    return ((nobs+k_ar+k_trend)/(nobs-k_ar-k_trend))*self.sigma2
@cache_readonly
def bic(self):
    nobs = self.nobs
    # Lutkepohl
    #return np.log(self.sigma2) + np.log(nobs)/nobs * self.k_ar
    # Include constant as est. free parameter
    return np.log(self.sigma2) + (1 + self.k_ar) * np.log(nobs)/nobs
    # Stata
    # return -2 * self.llf/nobs + np.log(nobs)/nobs * (self.k_ar + \
    #        self.k_trend)
@cache_readonly
def resid(self):
    #NOTE: uses fittedvalues because it calculate presample values for mle
    model = self.model
    endog = model.endog.squeeze()
    if model.method == "cmle": # elimate pre-sample
        return endog[self.k_ar:] - self.fittedvalues
    else:
        return model.endog.squeeze() - self.fittedvalues

#def ssr(self):
#    resid = self.resid
#    return np.dot(resid, resid)
@cache_readonly
def roots(self):
    # inverse roots of the AR lag polynomial; see class docstring
    k = self.k_trend
    return np.roots(np.r_[1, -self.params[k:]]) ** -1
@cache_readonly
def fittedvalues(self):
    # full in-sample prediction at the fitted parameters
    return self.model.predict(self.params)
def predict(self, start=None, end=None, method='static'):
    # Delegate to the model's predict; the docstring is spliced in from
    # AR.predict below.
    params = self.params
    predictedvalues = self.model.predict(params, start, end, method)
    return predictedvalues

    #start = self.model._get_predict_start(start)
    #end, out_of_sample = self.model._get_predict_end(end)

    ##TODO: return forecast errors and confidence intervals
    #from scikits.statsmodels.tsa.arima_process import arma2ma
    #ma_rep = arma2ma(np.r_[1,-params[::-1]], [1], out_of_sample)
    #fcasterr = np.sqrt(self.sigma2 * np.cumsum(ma_rep**2))

# splice AR.predict's docstring onto the results method, inserting the
# (future) confint parameter documentation
preddoc = AR.predict.__doc__.split('\n')
extra_doc = """ confint : bool, float
        Whether to return confidence intervals.  If `confint` == True,
        95 % confidence intervals are returned.  Else if `confint` is a
        float, then it is assumed to be the alpha value of the confidence
        interval.  That is confint == .05 returns a 95% confidence
        interval, and .10 would return a 90% confidence interval.""".split('\n')
#ret_doc = """
#        fcasterr : array-like
#        confint : array-like
#"""
predict.__doc__ = '\n'.join(preddoc[:5] + preddoc[7:20] + extra_doc +
        preddoc[20:])
class ARResultsWrapper(wrap.ResultsWrapper):
    # Thin wrapper that reattaches index/date metadata to ARResults
    # outputs; no AR-specific attributes or methods are added.
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
                                   _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
                                     _methods)
wrap.populate_wrapper(ARResultsWrapper, ARResults)
if __name__ == "__main__":
    # demo/scratch code: fit the sunspots data by cmle and mle
    import scikits.statsmodels.api as sm
    sunspots = sm.datasets.sunspots.load()
    # Why does R demean the data by defaut?
    ar_ols = AR(sunspots.endog)
    res_ols = ar_ols.fit(maxlag=9)
    ar_mle = AR(sunspots.endog)
    res_mle_bfgs = ar_mle.fit(maxlag=9, method="mle", solver="bfgs",
                    maxiter=500, gtol=1e-10)
#    res_mle2 = ar_mle.fit(maxlag=1, method="mle", maxiter=500, penalty=True,
#            tol=1e-13)

#    ar_yw = AR(sunspots.endog)
#    res_yw = ar_yw.fit(maxlag=4, method="yw")

#    # Timings versus talkbox
#    from timeit import default_timer as timer
#    print "Time AR fit vs. talkbox"
#    # generate a long series of AR(2) data
#
#    nobs = 1000000
#    y = np.empty(nobs)
#    y[0:2] = 0
#    for i in range(2,nobs):
#        y[i] = .25 * y[i-1] - .75 * y[i-2] + np.random.rand()
#
#    mod_sm = AR(y)
#    t = timer()
#    res_sm = mod_sm.fit(method="yw", trend="nc", demean=False, maxlag=2)
#    t_end = timer()
#    print str(t_end - t) + " seconds for sm.AR with yule-walker, 2 lags"
#    try:
#        import scikits.talkbox as tb
#    except:
#        raise ImportError("You need scikits.talkbox installed for timings")
#    t = timer()
#    mod_tb = tb.lpc(y, 2)
#    t_end = timer()
#    print str(t_end - t) + " seconds for talkbox.lpc"
#    print """For higher lag lengths ours quickly fills up memory and starts
#thrashing the swap.  Should we include talkbox C code or Cythonize the
#Levinson recursion algorithm?"""

    ## Try with a pandas series
    import pandas
    import scikits.timeseries as ts
    d1 = ts.Date(year=1700, freq='A')
    #NOTE: have to have yearBegin offset for annual data until parser rewrite
    #should this be up to the user, or should it be done in TSM init?
    #NOTE: not anymore, it's end of year now
    ts_dr = ts.date_array(start_date=d1, length=len(sunspots.endog))
    pandas_dr = pandas.DateRange(start=d1.datetime,
                    periods=len(sunspots.endog), timeRule='A@DEC')
    #pandas_dr = pandas_dr.shift(-1, pandas.datetools.yearBegin)

    dates = np.arange(1700,1700+len(sunspots.endog))
    dates = ts.date_array(dates, freq='A')
    #sunspots = pandas.TimeSeries(sunspots.endog, index=dates)

    #NOTE: pandas only does business days for dates it looks like
    import datetime
    dt_dates = np.asarray(map(datetime.datetime.fromordinal,
                ts_dr.toordinal().astype(int)))
    sunspots = pandas.TimeSeries(sunspots.endog, index=dt_dates)

    #NOTE: pandas can't handle pre-1900 dates
    mod = AR(sunspots, freq='A')
    #NOTE: If you use timeseries, predict is buggy
    #mod = AR(sunspots.values, dates=ts_dr, freq='A')
    res = mod.fit(method='mle', maxlag=9)

    # some data for an example in Box Jenkins
    IBM = np.asarray([460,457,452,459,462,459,463,479,493,490.])
    w = np.diff(IBM)
    theta = .5
| bsd-3-clause |
arter97/android_kernel_nvidia_shieldtablet | scripts/tracing/dma-api/trace.py | 96 | 12420 | """Main program and stuff"""
#from pprint import pprint
from sys import stdin
import os.path
import re
from argparse import ArgumentParser
import cPickle as pickle
from collections import namedtuple
from plotting import plotseries, disp_pic
import smmu
class TracelineParser(object):
    """Parse the needed information out of an ftrace line"""
    # <...>-6 [000] d..2 5.287079: dmadebug_iommu_map_page: device=sdhci-tegra.3, addr=0x01048000, size=4096 page=c13e7214 archdata=ed504640

    def __init__(self):
        # compile once; the pattern is matched against every trace line
        self.pattern = re.compile("device=(?P<dev>.*), addr=(?P<addr>.*), size=(?P<size>.*) page=(?P<page>.*) archdata=(?P<archdata>.*)")

    def parse(self, args):
        """Return (dev, addr, size, page, archdata) from the args text."""
        fields = self.pattern.match(args)
        dev = fields.group("dev")
        addr = int(fields.group("addr"), 16)
        size = int(fields.group("size"))
        page = int(fields.group("page"), 16)
        archdata = int(fields.group("archdata"), 16)
        return (dev, addr, size, page, archdata)
def biggest_indices(items, n):
    """Return list of indices of the n biggest elements in items.

    Ties are broken by index, and the selected indices are returned in
    ascending order of their values.
    """
    if n <= 0:
        # BUG FIX: a plain [-n:] slice with n == 0 slices from index 0
        # and returned ALL indices instead of none.
        return []
    decorated = sorted((value, idx) for idx, value in enumerate(items))
    return [idx for _, idx in decorated[-n:]]
def by_indices(xs, ids):
    """Get elements from the list xs by their indices"""
    # pick out xs[index] for each requested index, preserving order
    return [xs[index] for index in ids]
"""Event represents one input line"""
Event = namedtuple("Event", ["time", "dev", "data", "delta"])
class Trace(object):
def __init__(self, args):
    # propagate the requested verbosity into the smmu module
    smmu.VERBOSITY = args.verbosity
    self._args = args
    self.devlist = []
    self.events = []
    # metric name -> function scoring how "big" a device's event list is
    self.metrics = {
        "max_peak": self._usage_peak,
        "activity_rate": self._usage_activity,
        "average_mem": self._usage_avg
    }
    self.traceliner = TracelineParser()
@staticmethod
def get_metrics():
    """What filter metrics to get max users"""
    # keep in sync with the self.metrics dict built in __init__
    return ["max_peak", "activity_rate", "average_mem"]
def show(self):
    """Shuffle events around, build plots, and show them"""
    # optionally collapse all but the biggest users into one series
    if self._args.max_plots:
        evs = self.merge_events()
    else:
        evs = self.events
    series, devlist = self.unload(evs)
    if not self._args.no_plots:
        self.plot(series, devlist)
def _get_usage(self, evs):
    """Return a metric of how active the events in evs are"""
    # dispatch to the metric selected on the command line
    return self.metrics[self._args.max_metric](evs)
def _usage_peak(self, evs):
    """Return the biggest peak"""
    # maximum of the per-event data values
    return max(e.data for e in evs)
def _usage_activity(self, evs):
    """Return the activity count: simply the length of the event list"""
    return len(evs)
def _usage_avg(self, evs):
"""Return the average over all points"""
# FIXME: the data points are not uniform in time, so this might be
# somewhat off.
return float(sum(e.data for e in evs)) / len(e)
def merge_events(self):
    """Find out biggest users, keep them and flatten others to a single user"""
    sizes = []
    dev_evs = []
    # score each device's event list with the selected metric
    for i, dev in enumerate(self.devlist):
        dev_evs.append([e for e in self.events if e.dev == dev])
        sizes.append(self._get_usage(dev_evs[i]))

    # indices of the devices
    biggestix = biggest_indices(sizes, self._args.max_plots)
    print biggestix
    is_big = {}
    for i, dev in enumerate(self.devlist):
        is_big[dev] = i in biggestix

    # reassign every small device's events to a shared "others" device
    evs = []
    for e in self.events:
        if not is_big[e.dev]:
            e = Event(e.time, "others", e.data, e.delta)
        evs.append(e)

    self.devlist.append("others")
    return evs
def unload(self, events):
    """Prepare the event list for plotting

    series ends up as [([time0], [data0]), ([time1], [data1]), ...]
    """
    # ([x], [y]) for matplotlib
    series = [([], []) for x in self.devlist]
    devidx = dict([(d, i) for i, d in enumerate(self.devlist)])

    for event in events:
        devid = devidx[event.dev]
        series[devid][0].append(event.time)
        series[devid][1].append(event.data) # self.dev_data(event.dev))

    # drop devices that produced no data points
    series_out = []
    devlist_out = []
    for ser, dev in zip(series, self.devlist):
        if len(ser[0]) > 0:
            series_out.append(ser)
            devlist_out.append(dev)

    return series_out, devlist_out
def plot(self, series, devlist):
    """Display the plots"""
    #series, devlist = flatten_axes(self.series, self.devlist,
    #        self._args.max_plots)
    # per-device usage series plus the global alloc/free/current counters
    devinfo = (series, map(str, devlist))
    allocfreeinfo = (self.allocsfrees, ["allocd", "freed", "current"])
    plotseries(devinfo, allocfreeinfo)
    #plotseries(devinfo)
def dev_data(self, dev):
    """what data to plot against time"""
    # NOTE(review): reaches into the smmu.Device private _cur_alloc
    # counter -- presumably the device's current allocation; confirm
    # against smmu.py
    return dev._cur_alloc
def _cache_hash(self, filename):
    """The trace files are probably not of the same size"""
    # the file size doubles as a cheap cache key for the parsed trace
    return str(os.path.getsize(filename))
def load_cache(self):
    """Get the trace data from a database file, if one exists.

    Returns True when a cache file was found and loaded, else False.
    """
    has = self._cache_hash(self._args.filename)
    try:
        cache = open("trace." + has)
    except IOError:
        return False
    # BUG FIX: close the cache file deterministically instead of
    # leaking the handle
    try:
        self._load_cache(pickle.load(cache))
    finally:
        cache.close()
    return True
def save_cache(self):
    """Store the raw trace data to a database"""
    data = self._save_cache()
    # BUG FIX: use a context manager so the cache file is flushed and
    # closed even if pickling fails (the handle was previously leaked)
    with open("trace." + self._cache_hash(self._args.filename), "w") as fh:
        pickle.dump(data, fh)
def _save_cache(self):
    """Return the internal data that is needed to be pickled"""
    return self.events, self.devlist, self.allocsfrees
def _load_cache(self, data):
    """Get the data from an unpickled object"""
    # inverse of _save_cache: (events, devlist, allocsfrees)
    self.events, self.devlist, self.allocsfrees = data
def load_events(self):
    """Get the internal data from a trace file or cache"""
    if self._args.filename:
        if self._args.cache and self.load_cache():
            return
        # BUG FIX: close the trace file when parsing finishes instead
        # of leaking the handle
        fh = open(self._args.filename)
        try:
            self.parse(fh)
        finally:
            fh.close()
    else:
        self.parse(stdin)
    if self._args.cache and self._args.filename:
        self.save_cache()
def parse(self, fh):
    """Parse the trace file in fh, store data to self"""
    mems = {}           # asid -> smmu.Memory
    dev_by_name = {}    # device name -> smmu.Device
    devlist = []
    buf_owners = {}     # mapped address -> owning device
    events = []
    allocsfrees = [([], []), ([], []), ([], [])] # allocs, frees, current
    allocs = 0
    frees = 0
    curbufs = 0

    # page-occupancy bitmap over a 1 GiB window, 4 KiB pages
    mem_bytes = 1024 * 1024 * 1024
    npages = mem_bytes / 4096
    ncols = 512
    le_pic = [0] * npages
    lastupd = 0
    for lineidx, line in enumerate(fh):
        # no comments
        if line.startswith("#"):
            continue
        taskpid, cpu, flags, timestamp, func, args = line.strip().split(None, 5)
        func = func[:-len(":")]
        # unneeded events may be there too
        if not func.startswith("dmadebug"):
            continue
        if self._args.verbosity >= 3:
            print line.rstrip()
        timestamp = float(timestamp[:-1])
        # clip to the requested [start, end) time window
        if timestamp < self._args.start:
            continue
        if timestamp >= self._args.end:
            break

        devname, addr, size, page, archdata = self.traceliner.parse(args)
        if self._args.processes:
            # group by task name rather than by device
            devname = taskpid.split("-")[0]
        mapping = archdata
        try:
            memmap = mems[mapping]
        except KeyError:
            memmap = mem(mapping)
            mems[mapping] = memmap
        try:
            dev = dev_by_name[devname]
        except KeyError:
            dev = smmu.Device(devname, memmap)
            dev_by_name[devname] = dev
            devlist.append(dev)

        allocfuncs = ["dmadebug_map_page", "dmadebug_map_sg", "dmadebug_alloc_coherent"]
        freefuncs = ["dmadebug_unmap_page", "dmadebug_unmap_sg", "dmadebug_free_coherent"]
        ignfuncs = []

        # throttled progress/debug output every 0.1 s of trace time
        if timestamp-lastupd > 0.1:
            # just some debug prints for now
            lastupd = timestamp
            print lineidx,timestamp
            le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
            #disp_pic(le_pic2)
            # animating the bitmap would be cool
            #for row in le_pic:
            #    for i, a in enumerate(row):
            #        pass
                    #row[i] = 0.09 * a

        if func in allocfuncs:
            pages = dev_by_name[devname].alloc(addr, size)
            for p in pages:
                le_pic[p] = 1
            buf_owners[addr] = dev_by_name[devname]
            allocs += 1
            curbufs += 1
            allocsfrees[0][0].append(timestamp)
            allocsfrees[0][1].append(allocs)
        elif func in freefuncs:
            if addr not in buf_owners:
                if self._args.verbosity >= 1:
                    print "warning: %s unmapping unmapped %s" % (dev, addr)
                buf_owners[addr] = dev
            # fixme: move this to bitmap handling
            # get to know the owners of bits
            # allocs/frees calls should be traced separately from maps?
            # map_pages is traced per page :(
            if buf_owners[addr] != dev and self._args.verbosity >= 2:
                print "note: %s unmapping [%d,%d) mapped by %s" % (
                    dev, addr, addr+size, buf_owners[addr])
            pages = buf_owners[addr].free(addr, size)
            for p in pages:
                le_pic[p] = 0
            # NOTE(review): frees is decremented, so the "freed" series
            # plots downward from zero -- confirm this is intended
            frees -= 1
            curbufs -= 1
            allocsfrees[1][0].append(timestamp)
            allocsfrees[1][1].append(frees)
        elif func not in ignfuncs:
            raise ValueError("unhandled %s" % func)

        allocsfrees[2][0].append(timestamp)
        allocsfrees[2][1].append(curbufs)
        events.append(Event(timestamp, dev, self.dev_data(dev), size))

    self.events = events
    self.devlist = devlist
    self.allocsfrees = allocsfrees

    le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
    # FIXME: not quite ready yet
    disp_pic(le_pic2)
    return
def mem(asid):
    """Create a new memory object for the given asid space."""
    two_gig = 2 * 1024 * 1024 * 1024
    one_meg = 1 * 1024 * 1024
    # arch/arm/mach-tegra/include/mach/iomap.h TEGRA_SMMU_(BASE|SIZE)
    base = 0x80000000
    return smmu.Memory(base, two_gig - one_meg, asid)
def get_args():
    """Eat command line arguments, return argparse namespace for settings"""
    parser = ArgumentParser()
    # Positional input file; optional so the tool can also sit in a pipe.
    parser.add_argument("filename", nargs="?",
                        help="trace file dump, stdin if not given")
    # Time-window filtering, in trace-timestamp seconds.
    parser.add_argument("-s", "--start", type=float, default=0,
                        help="start timestamp")
    parser.add_argument("-e", "--end", type=float, default=1e9,
                        help="end timestamp")
    # -v may be repeated; each occurrence raises the verbosity level.
    parser.add_argument("-v", "--verbosity", action="count", default=0,
                        help="amount of extra information: once for warns (dup addrs), "
                        "twice for notices (different client in map/unmap), "
                        "three for echoing all back")
    parser.add_argument("-p", "--processes", action="store_true",
                        help="use processes as memory clients instead of devices")
    parser.add_argument("-n", "--no-plots", action="store_true",
                        help="Don't draw the plots, only read the trace")
    parser.add_argument("-c", "--cache", action="store_true",
                        help="Pickle the data and make a cache file for fast reloading")
    parser.add_argument("-m", "--max-plots", type=int,
                        help="Maximum number of clients to show; show biggest and sum others")
    parser.add_argument("-M", "--max-metric", choices=Trace.get_metrics(),
                        default=Trace.get_metrics()[0],
                        help="Metric to use when choosing clients in --max-plots")
    return parser.parse_args()
def main():
    """Entry point: parse options, ingest the trace, then display it."""
    options = get_args()
    tracer = Trace(options)
    tracer.load_events()
    tracer.show()
# Script entry point.
if __name__ == "__main__":
    main()
| gpl-2.0 |
eg-zhang/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
# BUG FIX: this list was previously bound to the misspelled name "__ALL__",
# which Python ignores entirely, so `from ... import *` exported every
# public top-level name.  The canonical "__all__" makes the intended public
# API explicit.
__all__ = [
    "mean_absolute_error",
    "mean_squared_error",
    "median_absolute_error",
    "r2_score",
    "explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
    """Validate that y_true and y_pred describe the same regression task.

    Returns a 4-tuple: the task type ('continuous' or
    'continuous-multioutput'), both targets coerced to 2-D arrays of shape
    (n_samples, n_outputs), and the validated ``multioutput`` argument --
    either one of the accepted keywords (or None, kept for r2_score
    backward compatibility) or a checked array of per-output weights.
    """
    check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False)
    y_pred = check_array(y_pred, ensure_2d=False)

    # Promote 1-D targets to single-output column matrices.
    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))
    if y_pred.ndim == 1:
        y_pred = y_pred.reshape((-1, 1))

    n_outputs = y_true.shape[1]
    if n_outputs != y_pred.shape[1]:
        raise ValueError("y_true and y_pred have different number of output "
                         "({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))

    accepted_keywords = (None, 'raw_values', 'uniform_average',
                         'variance_weighted')
    if multioutput not in accepted_keywords:
        # Anything else must be an array of custom per-output weights.
        multioutput = check_array(multioutput, ensure_2d=False)
        if n_outputs == 1:
            raise ValueError("Custom weights are useful only in "
                             "multi-output cases.")
        elif n_outputs != len(multioutput):
            raise ValueError(("There must be equally many custom weights "
                              "(%d) as outputs (%d).") %
                             (len(multioutput), n_outputs))

    y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
    return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
                        sample_weight=None,
                        multioutput='uniform_average'):
    """Mean absolute error regression loss.

    Read more in the :ref:`User Guide <mean_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average'] or
        array-like of shape (n_outputs).
        'raw_values' returns one error per output; 'uniform_average' (or an
        explicit weight array) aggregates the per-output errors.

    Returns
    -------
    loss : float or ndarray of floats
        Non-negative; 0.0 is a perfect fit.

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_error
    >>> mean_absolute_error([3, -0.5, 2, 7], [2.5, 0.0, 2, 8])
    0.5
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_error(y_true, y_pred)
    0.75
    >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([ 0.5,  1. ])
    >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    ... # doctest: +ELLIPSIS
    0.849...
    """
    _, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    # Per-output mean of |error|, optionally weighted per sample.
    per_output = np.average(np.abs(y_pred - y_true),
                            weights=sample_weight, axis=0)
    if multioutput == 'raw_values':
        return per_output
    # 'uniform_average' -> plain mean; otherwise apply the custom weights.
    avg_weights = None if multioutput == 'uniform_average' else multioutput
    return np.average(per_output, weights=avg_weights)
def mean_squared_error(y_true, y_pred,
                       sample_weight=None,
                       multioutput='uniform_average'):
    """Mean squared error regression loss.

    Read more in the :ref:`User Guide <mean_squared_error>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average'] or
        array-like of shape (n_outputs).
        'raw_values' returns one error per output; 'uniform_average' (or an
        explicit weight array) aggregates the per-output errors.

    Returns
    -------
    loss : float or ndarray of floats
        Non-negative; 0.0 is a perfect fit.

    Examples
    --------
    >>> from sklearn.metrics import mean_squared_error
    >>> mean_squared_error([3, -0.5, 2, 7], [2.5, 0.0, 2, 8])
    0.375
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_squared_error(y_true, y_pred)  # doctest: +ELLIPSIS
    0.708...
    >>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
    ... # doctest: +ELLIPSIS
    array([ 0.416...,  1.       ])
    >>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
    ... # doctest: +ELLIPSIS
    0.824...
    """
    _, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    # Per-output mean of squared error, optionally weighted per sample.
    per_output = np.average((y_true - y_pred) ** 2,
                            weights=sample_weight, axis=0)
    if multioutput == 'raw_values':
        return per_output
    # 'uniform_average' -> plain mean; otherwise apply the custom weights.
    avg_weights = None if multioutput == 'uniform_average' else multioutput
    return np.average(per_output, weights=avg_weights)
def median_absolute_error(y_true, y_pred):
    """Median absolute error regression loss.

    Only single-output targets are supported.

    Read more in the :ref:`User Guide <median_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples)
        Estimated target values.

    Returns
    -------
    loss : float
        Non-negative; 0.0 is a perfect fit.

    Examples
    --------
    >>> from sklearn.metrics import median_absolute_error
    >>> median_absolute_error([3, -0.5, 2, 7], [2.5, 0.0, 2, 8])
    0.5
    """
    y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
                                                   'uniform_average')
    if y_type == 'continuous-multioutput':
        raise ValueError("Multioutput not supported in median_absolute_error")
    deviations = np.abs(y_pred - y_true)
    return np.median(deviations)
def explained_variance_score(y_true, y_pred,
                             sample_weight=None,
                             multioutput='uniform_average'):
    """Explained variance regression score function.

    Best possible score is 1.0; lower values are worse.  This is not a
    symmetric function.

    Read more in the :ref:`User Guide <explained_variance_score>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average', \
            'variance_weighted'] or array-like of shape (n_outputs)
        'raw_values' returns one score per output; 'uniform_average'
        averages them equally; 'variance_weighted' weights each output's
        score by its variance; an array gives explicit weights.

    Returns
    -------
    score : float or ndarray of floats
        The explained variance, or ndarray for 'raw_values'.

    Examples
    --------
    >>> from sklearn.metrics import explained_variance_score
    >>> explained_variance_score([3, -0.5, 2, 7], [2.5, 0.0, 2, 8])
    ... # doctest: +ELLIPSIS
    0.957...
    """
    _, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)

    # Weighted variance of the residuals (numerator) versus weighted
    # variance of the targets (denominator), one entry per output.
    residual = y_true - y_pred
    residual_mean = np.average(residual, weights=sample_weight, axis=0)
    numerator = np.average((residual - residual_mean) ** 2,
                           weights=sample_weight, axis=0)
    target_mean = np.average(y_true, weights=sample_weight, axis=0)
    denominator = np.average((y_true - target_mean) ** 2,
                             weights=sample_weight, axis=0)

    nonzero_numerator = numerator != 0
    nonzero_denominator = denominator != 0
    valid_score = nonzero_numerator & nonzero_denominator
    output_scores = np.ones(y_true.shape[1])
    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    # Constant y_true with imperfect predictions gets a hard 0.
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.

    if multioutput == 'raw_values':
        return output_scores
    if multioutput == 'uniform_average':
        # Passing None as weights to np.average yields the uniform mean.
        avg_weights = None
    elif multioutput == 'variance_weighted':
        avg_weights = denominator
    else:
        avg_weights = multioutput
    return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
             sample_weight=None,
             multioutput=None):
    """R^2 (coefficient of determination) regression score function.

    Best possible score is 1.0 and it can be negative (a model can be
    arbitrarily worse than predicting the mean).  A constant model that
    always predicts the expected value of y, disregarding the input
    features, scores 0.0.  This is not a symmetric function.

    Read more in the :ref:`User Guide <r2_score>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average',
        'variance_weighted'], None, or array-like of shape (n_outputs).
        None currently behaves like 'variance_weighted' (deprecated; the
        default will become 'uniform_average' in 0.18).

    Returns
    -------
    z : float or ndarray of floats
        The R^2 score, or one score per output for 'raw_values'.

    References
    ----------
    .. [1] `Wikipedia entry on the Coefficient of determination
            <http://en.wikipedia.org/wiki/Coefficient_of_determination>`_

    Examples
    --------
    >>> from sklearn.metrics import r2_score
    >>> r2_score([3, -0.5, 2, 7], [2.5, 0.0, 2, 8])  # doctest: +ELLIPSIS
    0.948...
    """
    _, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)

    if sample_weight is None:
        weight = 1.
    else:
        sample_weight = column_or_1d(sample_weight)
        weight = sample_weight[:, np.newaxis]

    # Residual and total (weighted) sums of squares, one entry per output.
    numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
                                                      dtype=np.float64)
    y_mean = np.average(y_true, axis=0, weights=sample_weight)
    denominator = (weight * (y_true - y_mean) ** 2).sum(axis=0,
                                                        dtype=np.float64)

    nonzero_denominator = denominator != 0
    nonzero_numerator = numerator != 0
    valid_score = nonzero_denominator & nonzero_numerator
    output_scores = np.ones([y_true.shape[1]])
    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    # arbitrary set to zero to avoid -inf scores, having a constant
    # y_true is not interesting for scoring a regression anyway
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.

    if multioutput is None and y_true.shape[1] != 1:
        # @FIXME change in 0.18
        warnings.warn("Default 'multioutput' behavior now corresponds to "
                      "'variance_weighted' value, it will be changed "
                      "to 'uniform_average' in 0.18.",
                      DeprecationWarning)
        multioutput = 'variance_weighted'

    if multioutput == 'raw_values':
        return output_scores
    elif multioutput == 'uniform_average':
        # Passing None as weights to np.average yields the uniform mean.
        avg_weights = None
    elif multioutput == 'variance_weighted':
        avg_weights = denominator
        # avoid fail on constant y or one-element arrays
        if not np.any(nonzero_denominator):
            return 1.0 if not np.any(nonzero_numerator) else 0.0
    else:
        avg_weights = multioutput
    return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
sanuj/shogun | examples/undocumented/python_modular/graphical/metric_lmnn_objective.py | 26 | 2350 | #!/usr/bin/env python
def load_compressed_features(fname_features):
try:
import gzip
import numpy
except ImportError:
print 'Error importing gzip and/or numpy modules. Please, verify their installation.'
import sys
sys.exit(0)
# load features from a gz compressed file
file_features = gzip.GzipFile(fname_features)
str_features = file_features.read()
file_features.close()
strlist_features = str_features.split('\n')[:-1] # all but last because the last line also has \n
# the number of lines in the file is the number of vectors
num_vectors = len(strlist_features)
# the number of elements in a line is the number of features
num_features = len(strlist_features[0].split())
# memory pre-allocation for the feature matrix
fm = numpy.zeros((num_vectors, num_features))
# fill in feature matrix
for i in xrange(num_vectors):
try:
fm[i,:] = map(numpy.float64, strlist_features[i].split())
except ValuError:
print 'All the vectors must have the same number of features.'
import sys
sys.exit(0)
return fm
def metric_lmnn_statistics(k=3, fname_features='../../data/fm_train_multiclass_digits.dat.gz', fname_labels='../../data/label_train_multiclass_digits.dat'):
    """Train LMNN with k target neighbours on the digits data set and plot
    the objective value recorded at each training iteration."""
    try:
        from modshogun import LMNN, CSVFile, RealFeatures, MulticlassLabels, MSG_DEBUG
        import matplotlib.pyplot as pyplot
    except ImportError:
        print 'Error importing modshogun or other required modules. Please, verify their installation.'
        return

    # Features are stored row-per-vector; shogun wants column-per-vector.
    features = RealFeatures(load_compressed_features(fname_features).T)
    labels = MulticlassLabels(CSVFile(fname_labels))

    # print 'number of examples = %d' % features.get_num_vectors()
    # print 'number of features = %d' % features.get_num_features()

    assert(features.get_num_vectors() == labels.get_num_labels())

    # train LMNN
    lmnn = LMNN(features, labels, k)
    lmnn.set_correction(100)
    # lmnn.io.set_loglevel(MSG_DEBUG)
    print 'Training LMNN, this will take about two minutes...'
    lmnn.train()
    print 'Training done!'

    # plot objective obtained during training
    statistics = lmnn.get_statistics()

    pyplot.plot(statistics.obj.get())
    pyplot.grid(True)
    pyplot.xlabel('Iterations')
    pyplot.ylabel('LMNN objective')
    pyplot.title('LMNN objective during training for the multiclass digits data set')

    pyplot.show()
# Script entry point: announce and run the LMNN objective demo.
if __name__=='__main__':
    print('LMNN objective')
    metric_lmnn_statistics()
| gpl-3.0 |
stefanosbou/trading-with-python | historicDataDownloader/historicDataDownloader.py | 77 | 4526 | '''
Created on 4 aug. 2012
Copyright: Jev Kuznetsov
License: BSD
a module for downloading historic data from IB
'''
import ib
import pandas
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from time import sleep
import tradingWithPython.lib.logger as logger
from pandas import DataFrame, Index
import datetime as dt
from timeKeeper import TimeKeeper
import time
timeFormat = "%Y%m%d %H:%M:%S"
class DataHandler(object):
    """Accumulates incoming IB HistoricalData messages into tabular form."""

    def __init__(self, tws):
        self._log = logger.getLogger('DH')
        tws.register(self.msgHandler, message.HistoricalData)
        self.reset()

    def reset(self):
        """Drop any accumulated bars and mark the download as pending."""
        self._log.debug('Resetting data')
        self.dataReady = False
        self._timestamp = []
        self._data = {'open': [], 'high': [], 'low': [], 'close': [],
                      'volume': [], 'count': [], 'WAP': []}

    def msgHandler(self, msg):
        """Callback: store one bar, or flag completion on 'finished'."""
        if msg.date[:8] == 'finished':
            self._log.debug('Data recieved')
            self.dataReady = True
            return
        stamp = dt.datetime.strptime(msg.date, timeFormat)
        self._timestamp.append(stamp)
        for field in self._data.keys():
            self._data[field].append(getattr(msg, field))

    @property
    def data(self):
        """Downloaded bars as a DataFrame indexed by bar timestamp."""
        return DataFrame(data=self._data, index=Index(self._timestamp))
class Downloader(object):
    """Fetches historic bar data from Interactive Brokers over a tws
    connection, pacing requests and collecting replies via DataHandler."""

    def __init__(self, debug=False):
        self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data dwonloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__, ib.version))
        self.tws = ibConnection()
        self._dataHandler = DataHandler(self.tws)
        if debug:
            # Echo every message except the (noisy) historical data itself.
            self.tws.registerAll(self._debugHandler)
            self.tws.unregister(self._debugHandler, message.HistoricalData)
        self._log.debug('Connecting to tws')
        self.tws.connect()
        self._timeKeeper = TimeKeeper()  # keep track of past requests
        self._reqId = 1  # current request id

    def _debugHandler(self, msg):
        # Raw dump of any tws message (debug mode only).
        print '[debug]', msg

    def requestData(self, contract, endDateTime, durationStr='1800 S', barSizeSetting='1 secs', whatToShow='TRADES', useRTH=1, formatDate=1):
        """Request one batch of historical data and block until it arrives
        or the wait times out; returns the collected DataFrame."""
        self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol, endDateTime))
        # Throttle: stay under 60 historic requests per 600 seconds.
        while self._timeKeeper.nrRequests(timeSpan=600) > 59:
            print 'Too many requests done. Waiting... '
            time.sleep(1)
        self._timeKeeper.addRequest()
        self._dataHandler.reset()
        self.tws.reqHistoricalData(self._reqId, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH, formatDate)
        self._reqId += 1
        # wait for data
        startTime = time.time()
        timeout = 3
        while not self._dataHandler.dataReady and (time.time() - startTime < timeout):
            sleep(2)
        if not self._dataHandler.dataReady:
            self._log.error('Data timeout')
        print self._dataHandler.data
        return self._dataHandler.data

    def getIntradayData(self, contract, dateTuple):
        ''' get full day data on 1-s interval
            date: a tuple of (yyyy,mm,dd)
        '''
        # Market session assumed 16:00-22:00 (UTC offset of the trace data);
        # fetched in 30-minute batches and concatenated.
        openTime = dt.datetime(*dateTuple) + dt.timedelta(hours=16)
        closeTime = dt.datetime(*dateTuple) + dt.timedelta(hours=22)
        timeRange = pandas.date_range(openTime, closeTime, freq='30min')
        datasets = []
        for t in timeRange:
            datasets.append(self.requestData(contract, t.strftime(timeFormat)))
        return pandas.concat(datasets)

    def disconnect(self):
        # Close the tws session.
        self.tws.disconnect()
# Manual smoke test: download one day of 1-second SPY bars and save to CSV.
if __name__=='__main__':
    dl = Downloader(debug=True)
    c = Contract()
    c.m_symbol = 'SPY'
    c.m_secType = 'STK'
    c.m_exchange = 'SMART'
    c.m_currency = 'USD'
    df = dl.getIntradayData(c, (2012, 8, 6))
    df.to_csv('test.csv')
    # df = dl.requestData(c, '20120803 22:00:00')
    # df.to_csv('test1.csv')
    # df = dl.requestData(c, '20120803 21:30:00')
    # df.to_csv('test2.csv')
    dl.disconnect()
    print 'Done.'
probml/pyprobml | scripts/linreg_eb_modelsel_vs_n.py | 1 | 5689 | # Bayesian model selection demo for polynomial regression
# This illustartes that if we have more data, Bayes picks a more complex model.
# Based on a demo by Zoubin Ghahramani
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = "../figures"
def save_fig(fname):
    """Write the current matplotlib figure into the shared figure directory."""
    plt.savefig(os.path.join(figdir, fname))
import random
random.seed(0)
Ns = [5, 30] #Number of points
degs = [1, 2, 3] #Degrees of linear regression models.
def polyBasis(x, deg):
    """Polynomial basis matrix with columns x**1 ... x**deg (no ones column)."""
    return np.column_stack([x ** power for power in range(1, deg + 1)])
def linregFitBayes(X, ytrain, **kwargs):
    """Bayesian inference for a linear regression model.

    The model is p(y | x) = N(y | w*[1 x], (1/beta)), so beta is the
    precision of the measurement noise.  Returns (model, logev): the
    posterior parameters (suitable for linregPredictBayes) and the log
    marginal likelihood.  Only the empirical-Bayes prior ('EB') is
    implemented; structured so further prior types can be added.
    """
    prior = kwargs['prior']
    if prior != 'EB':
        raise ValueError('Unrecognized Prior type given')
    model, logev, _ = linregFitEB(X, ytrain, kwargs['preproc'],
                                  maxIter=kwargs['maxIter'])
    model['modelType'] = 'linregBayes'
    model['prior'] = prior
    return model, logev
def preprocessorApplyToTrain(preproc, X):
    """Apply preprocessing to X; currently only an optional ones column.

    If ``preproc['addOnes']`` is set and truthy, a column of ones is
    prepended to X (intercept term).  Returns (preproc, X).
    """
    if preproc.get('addOnes'):
        X = np.column_stack((np.ones(X.shape[0]), X))
    return preproc, X
def linregFitEB(X, y, preproc, **kwargs):
    """Empirical-Bayes (evidence maximization) fit of Bayesian linear regression.

    This closely follows linregFitEbChen from pmtk3: it alternates posterior
    updates with re-estimation of the prior precision ``alpha`` and noise
    precision ``beta`` until the log marginal likelihood L stops improving.

    Parameters: X (N x M design matrix, before preprocessing), y (targets),
    preproc (dict understood by preprocessorApplyToTrain), and keyword
    ``maxIter`` bounding the number of update iterations.

    Returns (model, L, Lhist): posterior parameters, final log evidence, and
    the per-iteration evidence history (trailing entries stay uninitialized
    if convergence happens early).
    """
    preproc, X = preprocessorApplyToTrain(preproc, X)
    N, M = X.shape
    XX = np.dot(np.transpose(X), X)
    XX2 = np.dot(X, np.transpose(X))
    Xy = np.dot(np.transpose(X), y)
    # This method can get stuck in local minima, so we should do multiple restarts.
    alpha = 0.01  # initially don't trust the prior
    beta = 1.0    # initially trust the data
    L_old = -float('inf')
    Lhist = np.empty((kwargs['maxIter'], 1))
    for i in range(kwargs['maxIter']):
        if N > M:
            # Primal form: M x M system in weight space.
            T = alpha * np.identity(M) + XX * beta
            cholT = np.transpose(np.linalg.cholesky(T))
            Ui = np.linalg.inv(cholT)
            Sn = np.dot(Ui, np.transpose(Ui))
            logdetS = -2 * sum(np.log(np.diag(cholT)))
        else:
            # Dual form for N <= M: work with the N x N Gram matrix.
            # BUG FIX: a leftover debug print(T) here used to dump the full
            # matrix to stdout on every iteration; it has been removed.
            T = np.identity(N) / beta + XX2 / alpha
            cholT = np.transpose(np.linalg.cholesky(T))
            Ui = np.linalg.inv(cholT)
            XU = np.dot(np.transpose(X), Ui)
            Sn = np.identity(M) / alpha - np.dot(XU, np.transpose(XU)) / alpha / alpha
            logdetS = sum(np.log(np.diag(cholT))) * 2 + M * np.log(alpha) + N * np.log(beta)
            logdetS = -logdetS
        # Posterior mean, residual and weight norms, effective dof gamma.
        mn = beta * np.dot(Sn, Xy)
        t1 = sum((y - np.dot(X, mn)) * (y - np.dot(X, mn)))
        t2 = np.dot(np.transpose(mn), mn)
        gamma = M - alpha * np.trace(Sn)
        beta = (N - gamma) / t1
        # Log marginal likelihood of the current hyperparameters.
        L = M * np.log(alpha) - N * np.log(2 * np.pi) + N * np.log(beta) - beta * t1 - alpha * t2 + logdetS
        L = L / 2
        Lhist[i] = L
        if abs(L - L_old) < 1e-2:
            break
        else:
            L_old = L
            alpha = gamma / t2
    model = {'wN': mn, 'VN': Sn, 'beta': beta, 'alpha': alpha,
             'gamma': gamma, 'preproc': preproc}
    return model, L, Lhist
def linregPredictBayes(model, X):
    """Posterior predictive mean and variance for inputs X.

    Accepts a model produced by linregFitBayes; X is preprocessed the same
    way as the training data.  Returns (yhat, sigma2Hat), where sigma2Hat
    includes both measurement noise (1/beta) and parameter uncertainty.
    """
    _, Phi = preprocessorApplyToTrain(model['preproc'], X)
    yhat = np.dot(Phi, model['wN'])
    sigma2Hat = 1.0 / model['beta'] + np.diag(
        np.dot(np.dot(Phi, model['VN']), np.transpose(Phi)))
    return yhat, sigma2Hat
# We loop over each setting for the number of data points
for n in Ns:
    # Synthetic data: quadratic ground truth plus Gaussian noise.
    x1d = np.random.uniform(0, 10, n)  # input points
    e = np.random.normal(0, 1, n)  # noise
    ytrain = (x1d - 4.0) ** 2 + 5.0 * e  # observed y
    plotvals1d = np.arange(-2.0, 12.1, .1)  # grid for plotting/testing
    trueOutput = (plotvals1d - 4.0) ** 2  # true function
    logevs = []
    # We loop over the number of degree in our regression.
    for deg in degs:
        X = polyBasis(x1d, deg)  # Polynomial basis
        pp = {'addOnes': True}  # Setting for feature preprocessing
        [mod, logev] = linregFitBayes(X, ytrain, prior='EB', preproc=pp, maxIter=20)  # Fit the model
        logevs.append(logev)
        Xtest = polyBasis(plotvals1d, deg)  # Grid to test our prediction on
        mu, sig2 = linregPredictBayes(mod, Xtest)
        # After the sqrt, sig2 holds the predictive standard deviation.
        sig2 = np.sqrt(sig2)
        # Form line graph
        fig, ax = plt.subplots()
        plt.scatter(x1d, ytrain, s=140, facecolors='none', edgecolors='k')
        lower = mu - sig2
        upper = mu + sig2
        plt.plot(plotvals1d, trueOutput, 'g', plotvals1d, mu, 'r--', linewidth=2)
        plt.plot(plotvals1d, lower, 'b-', plotvals1d, upper, 'b-', linewidth=0.5)
        plt.title('d={}, logev={:0.2f}, EB'.format(deg, logev))
        save_fig('linregEbModelSelVsN{}D{}EB.pdf'.format(n, deg))
        plt.draw()
    # Form bar graph showing the posterior probabilities for each model
    PP = np.exp(logevs)
    PP = PP / sum(PP)
    fig, ax = plt.subplots()
    ax.bar(list(range(len(PP))), PP, align='center')
    plt.xticks(list(range(len(PP))))
    plt.ylim([0, 1])
    ax.set_ylabel('P(M|D)')
    plt.title('N={}'.format(n))
    save_fig('linregEbModelSelVsN{}PostEB.pdf'.format(n))
    plt.draw()
plt.show()
| mit |
ldirer/scikit-learn | examples/neural_networks/plot_mlp_alpha.py | 47 | 4159 | """
================================================
Varying regularization in Multi-layer Perceptron
================================================
A comparison of different values for regularization parameter 'alpha' on
synthetic datasets. The plot shows that different alphas yield different
decision functions.
Alpha is a parameter for regularization term, aka penalty term, that combats
overfitting by constraining the size of the weights. Increasing alpha may fix
high variance (a sign of overfitting) by encouraging smaller weights, resulting
in a decision boundary plot that appears with lesser curvatures.
Similarly, decreasing alpha may fix high bias (a sign of underfitting) by
encouraging larger weights, potentially resulting in a more complicated
decision boundary.
"""
print(__doc__)
# Author: Issam H. Laradji
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
h = .02  # step size in the mesh

# One classifier per regularization strength, spanning 1e-5 .. 1e3.
alphas = np.logspace(-5, 3, 5)
names = []
for i in alphas:
    names.append('alpha ' + str(i))

classifiers = []
for i in alphas:
    classifiers.append(MLPClassifier(alpha=i, random_state=1))

# Three 2-D toy problems: jittered linearly-separable blobs, moons, circles.
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=0, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)

datasets = [make_moons(noise=0.3, random_state=0),
            make_circles(noise=0.2, factor=0.5, random_state=1),
            linearly_separable]

figure = plt.figure(figsize=(17, 9))
i = 1
# iterate over datasets
for X, y in datasets:
    # preprocess dataset, split into training and test part
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)

    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # just plot the dataset first
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
    # and testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    i += 1

    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)

        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]

        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)

        # Plot also the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
                   edgecolors='black', s=25)
        # and testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   alpha=0.6, edgecolors='black', s=25)

        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_title(name)
        # Annotate each panel with its held-out accuracy.
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=15, horizontalalignment='right')
        i += 1

figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
JPFrancoia/scikit-learn | examples/mixture/plot_gmm.py | 122 | 3265 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians
obtained with Expectation Maximisation (``GaussianMixture`` class) and
Variational Inference (``BayesianGaussianMixture`` class models with
a Dirichlet process prior).
Both models have access to five components with which to fit the data. Note
that the Expectation Maximisation model will necessarily use all five
components while the Variational Inference model will effectively only use as
many as are needed for a good fit. Here we can see that the Expectation
Maximisation model splits some components arbitrarily, because it is trying to
fit too many components, while the Dirichlet Process model adapts it number of
state automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are less examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Colours cycled through when plotting successive mixture components.
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
                              'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
    """Scatter-plot clustered 2-D samples and overlay one covariance
    ellipse per mixture component.

    :param X: (n_samples, 2) data array.
    :param Y_: per-sample component labels (as returned by ``predict``).
    :param means: per-component mean vectors.
    :param covariances: per-component full covariance matrices.
    :param index: 0-based row of the 2x1 subplot grid to draw into.
    :param title: subplot title.
    """
    splot = plt.subplot(2, 1, 1 + index)
    for i, (mean, covar, color) in enumerate(zip(
            means, covariances, color_iter)):
        v, w = linalg.eigh(covar)
        # Ellipse axes: 2 standard deviations along each principal direction.
        v = 2. * np.sqrt(2.) * np.sqrt(v)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180. * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)
    plt.xlim(-9., 5.)
    plt.ylim(-3., 6.)
    plt.xticks(())
    plt.yticks(())
    plt.title(title)
# Number of samples per component
n_samples = 500

# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
# First component: standard normals sheared by C; second: a tighter
# isotropic blob centred at (-6, 3).
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]

# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
             'Gaussian Mixture')

# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
                                        covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
             'Bayesian Gaussian Mixture with a Dirichlet process prior')
plt.show()
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/groupby/test_counting.py | 10 | 6573 | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
from pandas import (DataFrame, Series, MultiIndex)
from pandas.util.testing import assert_series_equal
from pandas.compat import (range, product as cart_product)
class TestCounting(object):
    """Tests for the GroupBy enumeration helpers ``cumcount`` (position of
    each row within its group) and ``ngroup`` (ordinal of each row's group),
    covering Series and DataFrame groupbys, empty inputs, duplicate and
    MultiIndex indexes, grouping by non-column keys, and the consistency
    between the two methods."""

    def test_cumcount(self):
        df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'])
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 1, 2, 0, 3])
        assert_series_equal(expected, g.cumcount())
        assert_series_equal(expected, sg.cumcount())

    def test_cumcount_empty(self):
        ge = DataFrame().groupby(level=0)
        se = Series().groupby(level=0)
        # edge case, as this is usually considered float
        e = Series(dtype='int64')
        assert_series_equal(e, ge.cumcount())
        assert_series_equal(e, se.cumcount())

    def test_cumcount_dupe_index(self):
        # A duplicated index must be carried through unchanged.
        df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
                       index=[0] * 5)
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
        assert_series_equal(expected, g.cumcount())
        assert_series_equal(expected, sg.cumcount())

    def test_cumcount_mi(self):
        # Same check with a MultiIndex on the frame.
        mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
        df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
                       index=mi)
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 1, 2, 0, 3], index=mi)
        assert_series_equal(expected, g.cumcount())
        assert_series_equal(expected, sg.cumcount())

    def test_cumcount_groupby_not_col(self):
        # Grouping by an external key list rather than a column.
        df = DataFrame([['a'], ['a'], ['a'], ['b'], ['a']], columns=['A'],
                       index=[0] * 5)
        g = df.groupby([0, 0, 0, 1, 0])
        sg = g.A
        expected = Series([0, 1, 2, 0, 3], index=[0] * 5)
        assert_series_equal(expected, g.cumcount())
        assert_series_equal(expected, sg.cumcount())

    def test_ngroup(self):
        df = DataFrame({'A': list('aaaba')})
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 0, 0, 1, 0])
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())

    def test_ngroup_distinct(self):
        df = DataFrame({'A': list('abcde')})
        g = df.groupby('A')
        sg = g.A
        expected = Series(range(5), dtype='int64')
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())

    def test_ngroup_one_group(self):
        df = DataFrame({'A': [0] * 5})
        g = df.groupby('A')
        sg = g.A
        expected = Series([0] * 5)
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())

    def test_ngroup_empty(self):
        ge = DataFrame().groupby(level=0)
        se = Series().groupby(level=0)
        # edge case, as this is usually considered float
        e = Series(dtype='int64')
        assert_series_equal(e, ge.ngroup())
        assert_series_equal(e, se.ngroup())

    def test_ngroup_series_matches_frame(self):
        df = DataFrame({'A': list('aaaba')})
        s = Series(list('aaaba'))
        assert_series_equal(df.groupby(s).ngroup(),
                            s.groupby(s).ngroup())

    def test_ngroup_dupe_index(self):
        df = DataFrame({'A': list('aaaba')}, index=[0] * 5)
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())

    def test_ngroup_mi(self):
        mi = MultiIndex.from_tuples([[0, 1], [1, 2], [2, 2], [2, 2], [1, 0]])
        df = DataFrame({'A': list('aaaba')}, index=mi)
        g = df.groupby('A')
        sg = g.A
        expected = Series([0, 0, 0, 1, 0], index=mi)
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())

    def test_ngroup_groupby_not_col(self):
        df = DataFrame({'A': list('aaaba')}, index=[0] * 5)
        g = df.groupby([0, 0, 0, 1, 0])
        sg = g.A
        expected = Series([0, 0, 0, 1, 0], index=[0] * 5)
        assert_series_equal(expected, g.ngroup())
        assert_series_equal(expected, sg.ngroup())

    def test_ngroup_descending(self):
        df = DataFrame(['a', 'a', 'b', 'a', 'b'], columns=['A'])
        g = df.groupby(['A'])
        ascending = Series([0, 0, 1, 0, 1])
        descending = Series([1, 1, 0, 1, 0])
        # ascending and descending numberings must mirror each other.
        assert_series_equal(descending, (g.ngroups - 1) - ascending)
        assert_series_equal(ascending, g.ngroup(ascending=True))
        assert_series_equal(descending, g.ngroup(ascending=False))

    def test_ngroup_matches_cumcount(self):
        # verify one manually-worked out case works
        df = DataFrame([['a', 'x'], ['a', 'y'], ['b', 'x'],
                        ['a', 'x'], ['b', 'y']], columns=['A', 'X'])
        g = df.groupby(['A', 'X'])
        g_ngroup = g.ngroup()
        g_cumcount = g.cumcount()
        expected_ngroup = Series([0, 1, 2, 0, 3])
        expected_cumcount = Series([0, 0, 0, 1, 0])
        assert_series_equal(g_ngroup, expected_ngroup)
        assert_series_equal(g_cumcount, expected_cumcount)

    def test_ngroup_cumcount_pair(self):
        # brute force comparison for all small series
        for p in cart_product(range(3), repeat=4):
            df = DataFrame({'a': p})
            g = df.groupby(['a'])
            order = sorted(set(p))
            # Reference implementations computed directly on the tuple.
            ngroupd = [order.index(val) for val in p]
            cumcounted = [p[:i].count(val) for i, val in enumerate(p)]
            assert_series_equal(g.ngroup(), Series(ngroupd))
            assert_series_equal(g.cumcount(), Series(cumcounted))

    def test_ngroup_respects_groupby_order(self):
        np.random.seed(0)
        df = DataFrame({'a': np.random.choice(list('abcdef'), 100)})
        for sort_flag in (False, True):
            g = df.groupby(['a'], sort=sort_flag)
            df['group_id'] = -1
            df['group_index'] = -1
            # Reconstruct ngroup/cumcount by iterating groups in order.
            for i, (_, group) in enumerate(g):
                df.loc[group.index, 'group_id'] = i
                for j, ind in enumerate(group.index):
                    df.loc[ind, 'group_index'] = j
            assert_series_equal(Series(df['group_id'].values),
                                g.ngroup())
            assert_series_equal(Series(df['group_index'].values),
                                g.cumcount())
| apache-2.0 |
nekrut/tools-iuc | tools/cwpair2/cwpair2_util.py | 19 | 14130 | import bisect
import csv
import os
import sys
import traceback
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot # noqa: I202,E402
# Data outputs
DETAILS = 'D'
MATCHED_PAIRS = 'MP'
ORPHANS = 'O'
# Data output formats
GFF_EXT = 'gff'
TABULAR_EXT = 'tabular'
# Statistics histograms output directory.
HISTOGRAM = 'H'
# Statistics outputs
FINAL_PLOTS = 'F'
PREVIEW_PLOTS = 'P'
STATS_GRAPH = 'C'
# Graph settings.
COLORS = 'krg'
Y_LABEL = 'Peak-pair counts'
X_LABEL = 'Peak-pair distance (bp)'
TICK_WIDTH = 3
# Subplot margins passed to subplots_adjust: left, right, top, bottom.
ADJUST = [0.140, 0.9, 0.9, 0.1]
PLOT_FORMAT = 'pdf'
# Global matplotlib styling applied to every figure this module writes.
pyplot.rc('xtick.major', size=10.00)
pyplot.rc('ytick.major', size=10.00)
pyplot.rc('lines', linewidth=4.00)
pyplot.rc('axes', linewidth=3.00)
pyplot.rc('font', family='Bitstream Vera Sans', size=32.0)
class FrequencyDistribution(object):
    """Histogram of values over [start, end) using fixed-width bins,
    keyed by bin centre."""

    def __init__(self, start, end, binsize=10, d=None):
        """Create a distribution over [start, end); ``d`` may pre-seed
        the bin-centre -> count mapping."""
        self.start = start
        self.end = end
        self.binsize = binsize
        self.dist = d if d else {}

    def get_bin(self, x):
        """
        Returns the centre of the bin in which a data point falls
        """
        offset = (x - self.start) // self.binsize * self.binsize
        return self.start + offset + self.binsize / 2.0

    def add(self, x):
        """Record one observation of ``x`` in its bin."""
        centre = self.get_bin(x)
        self.dist[centre] = self.dist.get(centre, 0) + 1

    def graph_series(self):
        """Return parallel lists of bin centres and their counts."""
        centres = [self.get_bin(i)
                   for i in range(self.start, self.end, self.binsize)]
        counts = [self.dist.get(c, 0) for c in centres]
        return centres, counts

    def mode(self):
        # There could be more than one mode for a frequency distribution,
        # return the median of the modes to be consistent
        top_count = max(self.dist.values())
        modes = sorted(c for c, n in self.dist.items() if n == top_count)
        return modes[len(modes) // 2]

    def size(self):
        """Total number of observations recorded."""
        return sum(self.dist.values())
def stop_err(msg):
    """Report a fatal error on stderr and terminate with exit status 1."""
    sys.stderr.write(msg)
    raise SystemExit(1)
def distance(peak1, peak2):
    """Signed distance between peak midpoints; positive when ``peak2``
    lies to the right of ``peak1``."""
    mid1 = (peak1[1] + peak1[2]) / 2.0
    mid2 = (peak2[1] + peak2[2]) / 2.0
    return mid2 - mid1
def gff_row(cname, start, end, score, source, type='.', strand='.', phase='.', attrs=None):
    """Assemble one GFF record as a 9-tuple of columns.

    ``attrs`` previously defaulted to a shared mutable dict ``{}``; the
    default is now ``None``, which ``gff_attrs`` renders identically
    (as '.'), avoiding the mutable-default-argument pitfall.
    """
    return (cname, source, type, start, end, score, strand, phase, gff_attrs(attrs))
def gff_attrs(d):
    """Render a dict as a GFF attribute string ('k=v;k=v'); '.' when
    empty or None."""
    if d:
        return ';'.join('%s=%s' % (k, v) for k, v in d.items())
    return '.'
def parse_chromosomes(reader):
    """Parse gff-format lines into {chromosome: [(strand, start, end, value), ...]}.

    Blank lines and '#' comment lines are skipped.  This version of
    cwpair2 accepts only gff format as input.
    """
    chromosomes = {}
    for raw_line in reader:
        record = raw_line.rstrip("\r\n")
        if not record or record.startswith('#'):
            continue
        cname, _, _, start, end, value, strand, _, _ = record.split("\t")
        peak = (strand, int(start), int(end), float(value))
        chromosomes.setdefault(cname, []).append(peak)
    return chromosomes
def perc95(chromosomes):
    """
    Returns the 95th percentile value of the given chromosomes.
    """
    values = sorted(peak[3]
                    for peaks in chromosomes.values()
                    for peak in peaks)
    # Index at 95% of the sorted value list.
    return values[int(len(values) * 0.95)]
def peak_filter(chromosomes, threshold):
    """
    Filters the peaks to those above a threshold, in place.
    Threshold < 1.0 is interpreted as a proportion of the maximum
    (95th-percentile value), >= 1.0 as an absolute value.
    """
    if threshold < 1:
        # Make the threshold a proportion of the 95th-percentile value.
        threshold = perc95(chromosomes) * threshold
    for cname in chromosomes:
        chromosomes[cname] = [p for p in chromosomes[cname]
                              if p[3] > threshold]
def split_strands(chromosome):
    """Partition peaks into (watson, crick) lists by strand: '+' goes to
    watson, '-' to crick; any other strand value is dropped."""
    watson, crick = [], []
    for peak in chromosome:
        if peak[0] == '+':
            watson.append(peak)
        elif peak[0] == '-':
            crick.append(peak)
    return watson, crick
def all_pair_distribution(chromosomes, up_distance, down_distance, binsize):
    """Histogram every watson->crick midpoint distance, over all
    chromosomes, within the [-up_distance, down_distance] window."""
    dist = FrequencyDistribution(-up_distance, down_distance, binsize=binsize)
    for peaks in chromosomes.values():
        watson, crick = split_strands(peaks)
        # crick must be sorted by position for the binary-search window.
        crick.sort(key=lambda data: float(data[1]))
        keys = make_keys(crick)
        for wpeak in watson:
            window = get_window(crick, wpeak, up_distance, down_distance, keys)
            for cpeak in window:
                dist.add(distance(wpeak, cpeak))
    return dist
def make_keys(crick):
    """Integer midpoint of each crick peak, used as binary-search keys."""
    return [(peak[1] + peak[2]) // 2 for peak in crick]
def get_window(crick, peak, up_distance, down_distance, keys=None):
"""
Returns a window of all crick peaks within a distance of a watson peak.
crick strand MUST be sorted by distance
"""
strand, start, end, value = peak
midpoint = (start + end) // 2
lower = midpoint - up_distance
upper = midpoint + down_distance
keys = keys or make_keys(crick)
start_index = bisect.bisect_left(keys, lower)
end_index = bisect.bisect_right(keys, upper)
return [cpeak for cpeak in crick[start_index:end_index]]
def match_largest(window, peak):
    """Return the crick peak with the greatest value, or None if the
    window is empty.  ``peak`` is unused but kept for a uniform matcher
    signature."""
    if window:
        return max(window, key=lambda cpeak: cpeak[3])
    return None
def match_closest(window, peak):
    """Return the crick peak closest to ``peak``, preferring positive
    (downstream) distances; among negative distances the least negative
    wins.  Returns None for an empty window."""
    if not window:
        return None

    def sort_key(cpeak):
        d = distance(peak, cpeak)
        # Search negative distances last, and then prefer the less
        # negative ones.
        return 10000 - d if d < 0 else d

    return min(window, key=sort_key)
def match_mode(window, peak, mode):
    """Return the crick peak whose distance from ``peak`` deviates least
    from ``mode``; None for an empty window.  Ties keep the earliest
    candidate, matching ``min``'s first-wins behaviour."""
    if not window:
        return None
    best = None
    best_deviation = None
    for cpeak in window:
        deviation = abs(distance(peak, cpeak) - mode)
        if best_deviation is None or deviation < best_deviation:
            best, best_deviation = cpeak, deviation
    return best
METHODS = {'mode': match_mode, 'closest': match_closest, 'largest': match_largest}
def frequency_plot(freqs, fname, labels=[], title=''):
    """Plot one or more FrequencyDistributions as line series and save to
    ``fname``.

    NOTE(review): ``labels`` uses a mutable default but is only read here,
    so there is no aliasing bug.  ``title`` is accepted but never used.
    """
    pyplot.clf()
    pyplot.figure(figsize=(10, 10))
    for i, freq in enumerate(freqs):
        x, y = freq.graph_series()
        pyplot.plot(x, y, '%s-' % COLORS[i])
    if len(freqs) > 1:
        pyplot.legend(labels)
    # ``freq`` deliberately leaks from the loop: the x-axis limits come
    # from the last distribution plotted.
    pyplot.xlim(freq.start, freq.end)
    pyplot.ylim(ymin=0)
    pyplot.ylabel(Y_LABEL)
    pyplot.xlabel(X_LABEL)
    pyplot.subplots_adjust(left=ADJUST[0], right=ADJUST[1], top=ADJUST[2], bottom=ADJUST[3])
    # Get the current axes
    ax = pyplot.gca()
    for l in ax.get_xticklines() + ax.get_yticklines():
        l.set_markeredgewidth(TICK_WIDTH)
    pyplot.savefig(fname)
def create_directories():
    """Create the output directories: one for pdf histograms and one
    ``data_*`` directory per data-output kind."""
    # Output histograms in pdf.
    os.mkdir(HISTOGRAM)
    for output_type in (DETAILS, ORPHANS, MATCHED_PAIRS):
        os.mkdir('data_%s' % output_type)
def process_file(dataset_path, galaxy_hid, method, threshold, up_distance,
                 down_distance, binsize, output_files):
    """Run perform_process once per requested matching method.

    When ``method`` is 'all', every matcher in METHODS is run; if all
    output files were also requested, a combined frequency plot of the
    per-method distance distributions is written to the first method's
    graph path.  Returns the list of per-method statistics dicts.
    """
    if method == 'all':
        match_methods = METHODS.keys()
    else:
        match_methods = [method]
    statistics = []
    for match_method in match_methods:
        stats = perform_process(dataset_path,
                                galaxy_hid,
                                match_method,
                                threshold,
                                up_distance,
                                down_distance,
                                binsize,
                                output_files)
        statistics.append(stats)
    if output_files == 'all' and method == 'all':
        frequency_plot([s['dist'] for s in statistics],
                       statistics[0]['graph_path'],
                       labels=list(METHODS.keys()))
    return statistics
def perform_process(dataset_path, galaxy_hid, method, threshold, up_distance,
                    down_distance, binsize, output_files):
    """Pair watson (+) and crick (-) peaks in a gff dataset with one
    matching method and write the requested output files.

    Greedily matches each watson peak (highest value first) to a crick
    peak inside its search window; matched crick peaks are consumed.
    Returns a statistics dict (counts, mode, paths, and the distance
    FrequencyDistribution under 'dist').
    """
    # Which optional outputs were requested.
    output_details = output_files in ["all", "matched_pair_orphan_detail"]
    output_plots = output_files in ["all"]
    output_orphans = output_files in ["all", "matched_pair_orphan", "matched_pair_orphan_detail"]
    # Keep track of statistics for the output file
    statistics = {}
    fpath, fname = os.path.split(dataset_path)
    statistics['fname'] = '%s: data %s' % (method, str(galaxy_hid))
    statistics['dir'] = fpath
    # Encode the threshold into the output file names ('fa' = absolute).
    if threshold >= 1:
        filter_string = 'fa%d' % threshold
    else:
        filter_string = 'f%d' % (threshold * 100)
    fname = '%s_%su%dd%d_on_data_%s' % (method, filter_string, up_distance, down_distance, galaxy_hid)

    def make_histogram_path(output_type, fname):
        # Path of a histogram pdf inside the HISTOGRAM directory.
        return os.path.join(HISTOGRAM, 'histogram_%s_%s.%s' % (output_type, fname, PLOT_FORMAT))

    def make_path(output_type, extension, fname):
        # Returns the full path for an output.
        return os.path.join(output_type, '%s_%s.%s' % (output_type, fname, extension))

    def td_writer(output_type, extension, fname):
        # Returns a tab-delimited writer for a specified output.
        output_file_path = make_path(output_type, extension, fname)
        return csv.writer(open(output_file_path, 'wt'), delimiter='\t', lineterminator="\n")

    with open(dataset_path, 'rt') as input:
        try:
            chromosomes = parse_chromosomes(input)
        except Exception:
            stop_err('Unable to parse file "%s".\n%s' % (dataset_path, traceback.format_exc()))
    if output_details:
        # Details
        detailed_output = td_writer('data_%s' % DETAILS, TABULAR_EXT, fname)
        detailed_output.writerow(('chrom', 'start', 'end', 'value', 'strand') * 2 + ('midpoint', 'c-w reads sum', 'c-w distance (bp)'))
    if output_plots:
        # Final Plot
        final_plot_path = make_histogram_path(FINAL_PLOTS, fname)
    if output_orphans:
        # Orphans
        orphan_output = td_writer('data_%s' % ORPHANS, TABULAR_EXT, fname)
        orphan_output.writerow(('chrom', 'strand', 'start', 'end', 'value'))
    if output_plots:
        # Preview Plot
        preview_plot_path = make_histogram_path(PREVIEW_PLOTS, fname)
    # Matched Pairs.
    matched_pairs_output = td_writer('data_%s' % MATCHED_PAIRS, GFF_EXT, fname)
    statistics['stats_path'] = 'statistics.%s' % TABULAR_EXT
    if output_plots:
        statistics['graph_path'] = make_histogram_path(STATS_GRAPH, fname)
    statistics['perc95'] = perc95(chromosomes)
    if threshold > 0:
        # Apply peak_filter
        peak_filter(chromosomes, threshold)
    if method == 'mode':
        # The 'mode' method needs a preliminary pass to find the modal
        # pair distance before matching.
        freq = all_pair_distribution(chromosomes, up_distance, down_distance, binsize)
        mode = freq.mode()
        statistics['preview_mode'] = mode
        if output_plots:
            frequency_plot([freq], preview_plot_path, title='Preview frequency plot')
    else:
        statistics['preview_mode'] = 'NA'
    dist = FrequencyDistribution(-up_distance, down_distance, binsize=binsize)
    orphans = 0
    # x will be used to archive the summary dataset
    x = []
    for cname, chromosome in chromosomes.items():
        # Each peak is (strand, start, end, value)
        watson, crick = split_strands(chromosome)
        # Sort by value of each peak
        watson.sort(key=lambda data: -float(data[3]))
        # Sort by position to facilitate binary search
        crick.sort(key=lambda data: float(data[1]))
        keys = make_keys(crick)
        for peak in watson:
            window = get_window(crick, peak, up_distance, down_distance, keys)
            if method == 'mode':
                match = match_mode(window, peak, mode)
            else:
                match = METHODS[method](window, peak)
            if match:
                midpoint = (match[1] + match[2] + peak[1] + peak[2]) // 4
                d = distance(peak, match)
                dist.add(d)
                # Simple output in gff format.
                x.append(gff_row(cname,
                                 source='cwpair',
                                 start=midpoint,
                                 end=midpoint + 1,
                                 score=peak[3] + match[3],
                                 attrs={'cw_distance': d}))
                if output_details:
                    detailed_output.writerow((cname,
                                              peak[1],
                                              peak[2],
                                              peak[3],
                                              '+',
                                              cname,
                                              match[1],
                                              match[2],
                                              match[3], '-',
                                              midpoint,
                                              peak[3] + match[3],
                                              d))
                # Consume the matched crick peak so it cannot pair again.
                i = bisect.bisect_left(keys, (match[1] + match[2]) / 2)
                del crick[i]
                del keys[i]
            else:
                if output_orphans:
                    orphan_output.writerow((cname, peak[0], peak[1], peak[2], peak[3]))
                # Keep track of orphans for statistics.
                orphans += 1
        # Remaining crick peaks are orphans
        if output_orphans:
            for cpeak in crick:
                orphan_output.writerow((cname, cpeak[0], cpeak[1], cpeak[2], cpeak[3]))
        # Keep track of orphans for statistics.
        orphans += len(crick)
    # Sort output descending by score.
    x.sort(key=lambda data: float(data[5]), reverse=True)
    # Writing a summary to gff format file
    for row in x:
        row_tmp = list(row)
        # Dataset in tuple cannot be modified in Python, so row will
        # be converted to list format to add 'chr'.
        if row_tmp[0] == "999":
            row_tmp[0] = 'chrM'
        elif row_tmp[0] == "998":
            row_tmp[0] = 'chrY'
        elif row_tmp[0] == "997":
            row_tmp[0] = 'chrX'
        else:
            row_tmp[0] = row_tmp[0]
        # Print row_tmp.
        matched_pairs_output.writerow(row_tmp)
    statistics['paired'] = dist.size() * 2
    statistics['orphans'] = orphans
    statistics['final_mode'] = dist.mode()
    if output_plots:
        frequency_plot([dist], final_plot_path, title='Frequency distribution')
    statistics['dist'] = dist
    return statistics
| mit |
henridwyer/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
    # Test FactorAnalysis ability to recover the data covariance structure
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    # Invalid svd_method must be rejected both at construction and fit time.
    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)
    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)
        X_t = fa.transform(X)
        assert_equal(X_t.shape, (n_samples, n_components))
        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
        # Log-likelihood must increase monotonically over EM iterations.
        diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood dif not increase')
        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)
        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
        # Mismatched noise_variance_init length must be rejected.
        fa = FactorAnalysis(n_components=n_components,
                            noise_variance_init=np.ones(n_features))
        assert_raises(ValueError, fa.fit, X[:, :2])
    # Both SVD backends should agree up to sign.
    f = lambda x, y: np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))
    fa1.max_iter = 1
    fa1.verbose = True
    assert_warns(ConvergenceWarning, fa1.fit, X)
    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
| bsd-3-clause |
ningyuwhut/UnbalancedDataset | unbalanced_dataset/under_sampling.py | 1 | 22638 | from __future__ import print_function
from __future__ import division
import numpy as np
from numpy import logical_not, ones
from numpy.random import seed, randint
from numpy import concatenate
from random import sample
from collections import Counter
from .unbalanced_dataset import UnbalancedDataset
class UnderSampler(UnbalancedDataset):
    """
    Object to under sample the majority class(es) by randomly picking samples
    with or without replacement.
    """

    def __init__(self,
                 ratio=1.,
                 random_state=None,
                 replacement=True,
                 verbose=True):
        """
        :param ratio:
            The ratio of majority elements to sample with respect to the number
            of minority cases.
        :param random_state:
            Seed.
        :param replacement:
            Whether majority samples are drawn with replacement (True) or
            without (False).
        :param verbose:
            Print progress information when True.
        """
        # Passes the relevant parameters back to the parent class.
        UnbalancedDataset.__init__(self,
                                   ratio=ratio,
                                   random_state=random_state,
                                   verbose=verbose)
        self.replacement = replacement

    def resample(self):
        """
        Perform the random under-sampling and return (underx, undery): the
        features and target values of the under-sampled data set.
        """
        # Start with the minority class
        underx = self.x[self.y == self.minc]
        undery = self.y[self.y == self.minc]
        # Loop over the other classes under picking at random
        for key in self.ucd.keys():
            # If the minority class is up, skip it
            if key == self.minc:
                continue
            # If the requested number of samples exceeds this class's size,
            # cap it at the class size; otherwise sample proportionally to
            # the minority class.
            # Set the ratio to be no more than the number of samples available
            if self.ratio * self.ucd[self.minc] > self.ucd[key]:
                num_samples = self.ucd[key]
            else:
                num_samples = int(self.ratio * self.ucd[self.minc])
            # Pick some elements at random
            seed(self.rs)
            if self.replacement:  # with replacement
                indx = randint(low=0, high=self.ucd[key], size=num_samples)
            else:  # without replacement
                indx = sample(range((self.y == key).sum()), num_samples)
            # Concatenate to the minority class
            underx = concatenate((underx, self.x[self.y == key][indx]), axis=0)
            undery = concatenate((undery, self.y[self.y == key][indx]), axis=0)
        if self.verbose:
            print("Under-sampling performed: " + str(Counter(undery)))
        return underx, undery
class TomekLinks(UnbalancedDataset):
    """
    Object to identify and remove majority samples that form a Tomek link with
    minority samples.
    """

    def __init__(self, verbose=True):
        """
        :param verbose:
            Print progress information when True.
        """
        UnbalancedDataset.__init__(self, verbose=verbose)

    def resample(self):
        """
        :return:
            Return the data with majority samples that form a Tomek link
            removed.
        """
        from sklearn.neighbors import NearestNeighbors
        # Find the nearest neighbour of every point
        nn = NearestNeighbors(n_neighbors=2)
        nn.fit(self.x)
        # Column 1 is each point's nearest neighbour other than itself.
        nns = nn.kneighbors(self.x, return_distance=False)[:, 1]
        # Send the information to is_tomek function to get boolean vector back
        if self.verbose:
            print("Looking for majority Tomek links...")
        links = self.is_tomek(self.y, nns, self.minc, self.verbose)
        if self.verbose:
            print("Under-sampling "
                  "performed: " + str(Counter(self.y[logical_not(links)])))
        # Return data set without majority Tomek links.
        return self.x[logical_not(links)], self.y[logical_not(links)]
class ClusterCentroids(UnbalancedDataset):
    """
    Experimental method that under samples the majority class by replacing a
    cluster of majority samples by the cluster centroid of a KMeans algorithm.

    This algorithm keeps N majority samples by fitting the KMeans algorithm
    with N cluster to the majority class and using the coordinates of the N
    cluster centroids as the new majority samples.
    """

    def __init__(self, ratio=1, random_state=None, verbose=True, **kwargs):
        """
        :param kwargs:
            Arguments the user might want to pass to the KMeans object from
            scikit-learn.
        :param ratio:
            The number of cluster to fit with respect to the number of samples
            in the minority class.
            N_clusters = int(ratio * N_minority_samples) = N_maj_undersampled.
        :param random_state:
            Seed.
        """
        UnbalancedDataset.__init__(self, ratio=ratio,
                                   random_state=random_state,
                                   verbose=verbose)
        self.kwargs = kwargs

    def resample(self):
        """
        Replace each majority class by its KMeans cluster centroids and
        return the under-sampled (underx, undery).
        """
        # Create the clustering object
        from sklearn.cluster import KMeans
        kmeans = KMeans(random_state=self.rs)
        kmeans.set_params(**self.kwargs)
        # Start with the minority class
        underx = self.x[self.y == self.minc]
        undery = self.y[self.y == self.minc]
        # Loop over the other classes under picking at random
        for key in self.ucd.keys():
            # If the minority class is up, skip it.
            if key == self.minc:
                continue
            # Set the number of clusters to be no more than the number of
            # samples
            if self.ratio * self.ucd[self.minc] > self.ucd[key]:
                n_clusters = self.ucd[key]
            else:
                n_clusters = int(self.ratio * self.ucd[self.minc])
            # Set the number of clusters and find the centroids
            kmeans.set_params(n_clusters=n_clusters)
            kmeans.fit(self.x[self.y == key])
            centroids = kmeans.cluster_centers_
            # Concatenate to the minority class
            underx = concatenate((underx, centroids), axis=0)
            undery = concatenate((undery, ones(n_clusters) * key), axis=0)
        if self.verbose:
            print("Under-sampling performed: " + str(Counter(undery)))
        return underx, undery
class NearMiss(UnbalancedDataset):
    """
    An implementation of NearMiss.

    See the original paper: NearMiss - "kNN Approach to Unbalanced Data
    Distributions: A Case Study involving Information Extraction" by Zhang
    et al. for more details.
    """

    def __init__(self, ratio=1., random_state=None,
                 version=1, size_ngh=3, ver3_samp_ngh=3,
                 verbose=True, **kwargs):
        """
        :param version:
            Version of the NearMiss to use. Possible values
            are 1, 2 or 3. See the original paper for details
            about these different versions.
        :param size_ngh:
            Size of the neighbourhood to consider to compute the
            average distance to the minority point samples.
        :param ver3_samp_ngh:
            NearMiss-3 algorithm start by a phase of re-sampling. This
            parameter correspond to the number of neighbours selected
            create the sub_set in which the selection will be performed.
        :param **kwargs:
            Parameter to use for the Nearest Neighbours.
        """
        # Passes the relevant parameters back to the parent class.
        UnbalancedDataset.__init__(self, ratio=ratio,
                                   random_state=random_state,
                                   verbose=verbose)
        # Assign the parameter of the element of this class
        # Check that the version asked is implemented
        if not (version == 1 or version == 2 or version == 3):
            raise ValueError('UnbalancedData.NearMiss: there is only 3 '
                             'versions available with parameter version=1/2/3')
        self.version = version
        self.size_ngh = size_ngh
        self.ver3_samp_ngh = ver3_samp_ngh
        self.kwargs = kwargs

    def resample(self):
        """
        Select majority samples by their average distance to the minority
        class (per the configured NearMiss version) and return
        (underx, undery).
        """
        # Start with the minority class
        underx = self.x[self.y == self.minc]
        undery = self.y[self.y == self.minc]
        # For each element of the current class, find the set of NN
        # of the minority class
        from sklearn.neighbors import NearestNeighbors
        # Call the constructor of the NN
        nn_obj = NearestNeighbors(n_neighbors=self.size_ngh, **self.kwargs)
        # Fit the minority class since that we want to know the distance
        # to these point
        nn_obj.fit(self.x[self.y == self.minc])
        # Loop over the other classes under picking at random
        for key in self.ucd.keys():
            # If the minority class is up, skip it
            if key == self.minc:
                continue
            # Set the ratio to be no more than the number of samples available
            if self.ratio * self.ucd[self.minc] > self.ucd[key]:
                num_samples = self.ucd[key]
            else:
                num_samples = int(self.ratio * self.ucd[self.minc])
            # Get the samples corresponding to the current class
            sub_samples_x = self.x[self.y == key]
            sub_samples_y = self.y[self.y == key]
            if self.version == 1:
                # NearMiss-1: distance to the size_ngh closest minority points.
                # Find the NN
                dist_vec, idx_vec = nn_obj.kneighbors(sub_samples_x,
                                                      n_neighbors=self.size_ngh)
                # Select the right samples
                sel_x, sel_y = self.__SelectionDistBased__(dist_vec,
                                                           num_samples,
                                                           key,
                                                           sel_strategy='nearest')
            elif self.version == 2:
                # NearMiss-2: distance computed over ALL minority points.
                # Find the NN
                dist_vec, idx_vec = nn_obj.kneighbors(sub_samples_x,
                                                      n_neighbors=self.y[self.y == self.minc].size)
                # Select the right samples
                sel_x, sel_y = self.__SelectionDistBased__(dist_vec,
                                                           num_samples,
                                                           key,
                                                           sel_strategy='nearest')
            elif self.version == 3:
                # NearMiss-3: pre-select the majority neighbours of minority
                # points, then keep the farthest of those.
                # We need a new NN object to fit the current class
                nn_obj_cc = NearestNeighbors(n_neighbors=self.ver3_samp_ngh,
                                             **self.kwargs)
                nn_obj_cc.fit(sub_samples_x)
                # Find the set of NN to the minority class
                dist_vec, idx_vec = nn_obj_cc.kneighbors(self.x[self.y == self.minc])
                # Create the subset containing the samples found during the NN
                # search. Linearize the indexes and remove the double values
                idx_vec = np.unique(idx_vec.reshape(-1))
                # Create the subset
                sub_samples_x = sub_samples_x[idx_vec, :]
                sub_samples_y = sub_samples_y[idx_vec]
                # Compute the NN considering the current class
                dist_vec, idx_vec = nn_obj.kneighbors(sub_samples_x,
                                                      n_neighbors=self.size_ngh)
                sel_x, sel_y = self.__SelectionDistBased__(dist_vec,
                                                           num_samples,
                                                           key,
                                                           sel_strategy='farthest')
            underx = concatenate((underx, sel_x), axis=0)
            undery = concatenate((undery, sel_y), axis=0)
        if self.verbose:
            print("Under-sampling performed: " + str(Counter(undery)))
        return underx, undery

    def __SelectionDistBased__(self,
                               dist_vec,
                               num_samples,
                               key,
                               sel_strategy='nearest'):
        """Keep ``num_samples`` samples of class ``key`` ranked by their
        summed distance to their size_ngh farthest neighbours."""
        # Compute the distance considering the farthest neighbour
        dist_avg_vec = np.sum(dist_vec[:, -self.size_ngh:], axis=1)
        # Sort the list of distance and get the index
        if sel_strategy == 'nearest':
            sort_way = False
        elif sel_strategy == 'farthest':
            sort_way = True
        else:
            raise ValueError('Unbalanced.NearMiss: the sorting can be done '
                             'only with nearest or farthest data points.')
        sorted_idx = sorted(range(len(dist_avg_vec)),
                            key=dist_avg_vec.__getitem__,
                            reverse=sort_way)
        # Select the desired number of samples
        sel_idx = sorted_idx[:num_samples]
        return self.x[self.y == key][sel_idx], self.y[self.y == key][sel_idx]
class CondensedNearestNeighbour(UnbalancedDataset):
    """
    An implementation of Condensend Neareat Neighbour.

    See the original paper: CNN - "Addressing the Curse of Imbalanced Training
    Set: One-Sided Selection" by Khubat et al. for more details.
    """

    def __init__(self, random_state=None,
                 size_ngh=1, n_seeds_S=1, verbose=True,
                 **kwargs):
        """
        :param size_ngh:
            Size of the neighbourhood to consider to compute the
            average distance to the minority point samples.
        :param n_seeds_S:
            Number of samples to extract in order to build the set S.
        :param **kwargs:
            Parameter to use for the Neareast Neighbours.
        """
        # Passes the relevant parameters back to the parent class.
        UnbalancedDataset.__init__(self, random_state=random_state,
                                   verbose=verbose)
        # Assign the parameter of the element of this class
        self.size_ngh = size_ngh
        self.n_seeds_S = n_seeds_S
        self.kwargs = kwargs

    def resample(self):
        """
        Run CNN: keep the minority class plus the majority samples a k-NN
        trained on the condensed set C misclassifies; return (underx, undery).
        """
        # Start with the minority class
        underx = self.x[self.y == self.minc]
        undery = self.y[self.y == self.minc]
        # Import the K-NN classifier
        from sklearn.neighbors import KNeighborsClassifier
        # Loop over the other classes under picking at random
        for key in self.ucd.keys():
            # If the minority class is up, skip it
            if key == self.minc:
                continue
            # Randomly get one sample from the majority class
            # NOTE(review): random.sample over a numpy array relies on
            # sequence semantics; on Python 3 this is fragile for ndarray
            # inputs -- verify, and sample indices instead if needed.
            maj_sample = sample(self.x[self.y == key],
                                self.n_seeds_S)
            # Create the set C
            C_x = np.append(self.x[self.y == self.minc],
                            maj_sample,
                            axis=0)
            C_y = np.append(self.y[self.y == self.minc],
                            [key] * self.n_seeds_S)
            # Create the set S
            S_x = self.x[self.y == key]
            S_y = self.y[self.y == key]
            # Create a k-NN classifier
            knn = KNeighborsClassifier(n_neighbors=self.size_ngh,
                                       **self.kwargs)
            # Fit C into the knn
            knn.fit(C_x, C_y)
            # Classify on S
            pred_S_y = knn.predict(S_x)
            # Find the misclassified S_y
            sel_x = np.squeeze(S_x[np.nonzero(pred_S_y != S_y), :])
            sel_y = S_y[np.nonzero(pred_S_y != S_y)]
            underx = concatenate((underx, sel_x), axis=0)
            undery = concatenate((undery, sel_y), axis=0)
        if self.verbose:
            print("Under-sampling performed: " + str(Counter(undery)))
        return underx, undery
class OneSidedSelection(UnbalancedDataset):
    """
    An implementation of One-Sided Selection.

    See the original paper: OSS - "Addressing the Curse of Imbalanced Training
    Set: One-Sided Selection" by Khubat et al. for more details.
    """

    def __init__(self, random_state=None,
                 size_ngh=1, n_seeds_S=1, verbose=True,
                 **kwargs):
        """
        :param size_ngh
            Size of the neighbourhood to consider to compute the
            average distance to the minority point samples.

        :param n_seeds_S
            Number of samples to extract in order to build the set S.

        :param **kwargs
            Parameter to use for the Nearest Neighbours.
        """

        # Passes the relevant parameters back to the parent class.
        UnbalancedDataset.__init__(self, random_state=random_state,
                                   verbose=verbose)

        # Assign the parameter of the element of this class
        self.size_ngh = size_ngh
        self.n_seeds_S = n_seeds_S
        self.kwargs = kwargs

    def resample(self):
        """Apply CNN-style under-sampling, then remove majority Tomek links;
        returns the resampled (x, y) arrays."""

        # Start with the minority class: it is kept untouched.
        underx = self.x[self.y == self.minc]
        undery = self.y[self.y == self.minc]

        # Import the K-NN classifier
        from sklearn.neighbors import KNeighborsClassifier

        # Loop over the other classes, condensing each one
        for key in self.ucd.keys():

            # If the minority class is up, skip it
            if key == self.minc:
                continue

            # Randomly draw the seed samples from the current majority class
            maj_sample = sample(self.x[self.y == key],
                                self.n_seeds_S)

            # Create the set C: minority samples + the random majority seeds
            C_x = np.append(self.x[self.y == self.minc],
                            maj_sample,
                            axis=0)
            C_y = np.append(self.y[self.y == self.minc],
                            [key] * self.n_seeds_S)

            # Create the set S: every sample of the current majority class
            S_x = self.x[self.y == key]
            S_y = self.y[self.y == key]

            # Create a k-NN classifier trained on C
            knn = KNeighborsClassifier(n_neighbors=self.size_ngh,
                                       **self.kwargs)

            # Fit C into the knn
            knn.fit(C_x, C_y)

            # Classify on S
            pred_S_y = knn.predict(S_x)

            # Keep the misclassified samples of S.  Boolean masking keeps a
            # 2-D array even when 0 or 1 sample is selected; the previous
            # np.squeeze(S_x[np.nonzero(...), :]) collapsed a single selected
            # row to 1-D, which made the concatenate below fail.
            mask = pred_S_y != S_y
            underx = concatenate((underx, S_x[mask]), axis=0)
            undery = concatenate((undery, S_y[mask]), axis=0)

        # Find the nearest neighbour of every point in the condensed set
        from sklearn.neighbors import NearestNeighbors
        nn = NearestNeighbors(n_neighbors=2)
        nn.fit(underx)
        nns = nn.kneighbors(underx, return_distance=False)[:, 1]

        # Send the information to is_tomek function to get boolean vector back
        if self.verbose:
            print("Looking for majority Tomek links...")
        links = self.is_tomek(undery, nns, self.minc, self.verbose)

        if self.verbose:
            print("Under-sampling "
                  "performed: " + str(Counter(undery[logical_not(links)])))

        # Return data set without majority Tomek links.
        return underx[logical_not(links)], undery[logical_not(links)]
class NeighbourhoodCleaningRule(UnbalancedDataset):
    """
    An implementation of Neighbourhood Cleaning Rule.

    See the original paper: NCL - "Improving identification of difficult small
    classes by balancing class distribution" by Laurikkala et al. for more
    details.
    """

    def __init__(self, random_state=None,
                 size_ngh=3, verbose=True, **kwargs):
        """
        :param size_ngh
            Size of the neighbourhood to consider in order to make
            the comparison between each samples and their NN.

        :param **kwargs
            Parameter to use for the Nearest Neighbours.
        """

        # Passes the relevant parameters back to the parent class.
        UnbalancedDataset.__init__(self, random_state=random_state,
                                   verbose=verbose)

        # Assign the parameter of the element of this class
        self.size_ngh = size_ngh
        self.kwargs = kwargs

    def resample(self):
        """Remove samples whose neighbourhood disagrees with their own label
        and return the cleaned (x, y) arrays."""

        # Start with the minority class
        underx = self.x[self.y == self.minc]
        undery = self.y[self.y == self.minc]

        # Import the k-NN classifier
        from sklearn.neighbors import NearestNeighbors

        # Create a k-NN to fit the whole data
        nn_obj = NearestNeighbors(n_neighbors=self.size_ngh)

        # Fit the whole dataset
        nn_obj.fit(self.x)

        idx_to_exclude = []
        # Loop over every class (including the minority class)
        for key in self.ucd.keys():

            # Get the sample of the current class
            sub_samples_x = self.x[self.y == key]

            # Get the indices of those samples in the full dataset
            idx_sub_sample = np.nonzero(self.y == key)[0]

            # Find the NN for the current class
            nnhood_idx = nn_obj.kneighbors(sub_samples_x,
                                           return_distance=False)

            # True where a neighbour carries the same label as the class
            nnhood_label = (self.y[nnhood_idx] == key)

            # A sample is "noisy" when not all its neighbours agree with it
            nnhood_bool = np.logical_not(np.all(nnhood_label, axis=1))

            if key == self.minc:
                # For the minority class, exclude neighbours flagged inside
                # the disagreeing neighbourhoods (selection kept exactly as
                # in the original implementation).
                idx_to_exclude += nnhood_idx[np.nonzero(nnhood_label[np.nonzero(nnhood_bool)])].tolist()
            else:
                # For majority classes, exclude the noisy samples themselves
                idx_to_exclude += idx_sub_sample[np.nonzero(nnhood_bool)].tolist()

        # Build the mask of samples to keep
        sel_idx = np.ones(self.y.shape)
        sel_idx[idx_to_exclude] = 0
        keep = sel_idx != 0

        # Boolean masking keeps a 2-D array even when a single sample is
        # selected; the previous np.squeeze(self.x[np.nonzero(...), :])
        # collapsed that case to 1-D and broke the concatenate below.
        underx = concatenate((underx, self.x[keep]), axis=0)
        undery = concatenate((undery, self.y[keep]), axis=0)

        if self.verbose:
            print("Under-sampling performed: " + str(Counter(undery)))

        return underx, undery
| mit |
fengzhyuan/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
# Explicit public API of ``sklearn.datasets``: the names exported by
# ``from sklearn.datasets import *``.
__all__ = ['clear_data_home',
           'dump_svmlight_file',
           'fetch_20newsgroups',
           'fetch_20newsgroups_vectorized',
           'fetch_lfw_pairs',
           'fetch_lfw_people',
           'fetch_mldata',
           'fetch_olivetti_faces',
           'fetch_species_distributions',
           'fetch_california_housing',
           'fetch_covtype',
           'fetch_rcv1',
           'get_data_home',
           'load_boston',
           'load_diabetes',
           'load_digits',
           'load_files',
           'load_iris',
           'load_lfw_pairs',
           'load_lfw_people',
           'load_linnerud',
           'load_mlcomp',
           'load_sample_image',
           'load_sample_images',
           'load_svmlight_file',
           'load_svmlight_files',
           'make_biclusters',
           'make_blobs',
           'make_circles',
           'make_classification',
           'make_checkerboard',
           'make_friedman1',
           'make_friedman2',
           'make_friedman3',
           'make_gaussian_quantiles',
           'make_hastie_10_2',
           'make_low_rank_matrix',
           'make_moons',
           'make_multilabel_classification',
           'make_regression',
           'make_s_curve',
           'make_sparse_coded_signal',
           'make_sparse_spd_matrix',
           'make_sparse_uncorrelated',
           'make_spd_matrix',
           'make_swiss_roll',
           'mldata_filename']
| bsd-3-clause |
ojgarciab/JdeRobot | src/stable/components/refereeViewer/refereeViewer.py | 2 | 7496 | #!/usr/bin/python
#This program paints a graph distance, using the parameter given by refereeViewer.cfg
#VisorPainter class re-paints on a pyplot plot and updates new data.
#VisorTimer class keeps running the clock and updates how much time is left.
#Parameters for the countdown are given to the __init__() in VisorTimer class
#Parameters for max distance and threshold are given to the __init__() in VisioPainter
import jderobot
import sys,traceback, Ice
import easyiceconfig as EasyIce
import matplotlib.pyplot as plt
import numpy as np
import random
import threading
import math
from datetime import timedelta,datetime,time,date
#Install matplotlib with apt-get install python-maplotlib
import matplotlib as mpl
#Turns off the default tooldbar
mpl.rcParams['toolbar'] = 'None'
class Pose:
    """Tracks the 3D poses of the cat and mouse robots through Ice proxies
    and exposes the euclidean distance between them."""

    def __init__(self, argv=sys.argv):
        # NOTE(review): ``argv`` is accepted but ``sys.argv`` is used
        # directly below; kept for backward compatibility with callers.
        self.lock = threading.Lock()
        self.dist = 0
        self.ic = None
        try:
            self.ic = EasyIce.initialize(sys.argv)
            self.properties = self.ic.getProperties()
            self.basePoseAr = self.ic.propertyToProxy("Referee.Cat.Pose3D.Proxy")
            self.poseProxy = jderobot.Pose3DPrx.checkedCast(self.basePoseAr)
            print(self.poseProxy)
            if not self.basePoseAr:
                # Was ``raise Runtime(...)``: ``Runtime`` is undefined and
                # would itself raise a NameError.
                raise RuntimeError("Cat Pose3D -> Invalid proxy")

            self.baseRedPoseAr = self.ic.propertyToProxy("Referee.Mouse.Pose3D.Proxy")
            self.poseRedProxy = jderobot.Pose3DPrx.checkedCast(self.baseRedPoseAr)
            print(self.poseRedProxy)
            if not self.baseRedPoseAr:
                raise RuntimeError("Mouse Pose3D -> Invalid proxy")
        except Exception:
            # Best effort: log the failure and keep the object alive so the
            # caller decides what to do (original behaviour).
            traceback.print_exc()

    def update(self):
        """Refresh both poses from the proxies and return the distance."""
        self.lock.acquire()
        try:
            self.poseAr = self.poseProxy.getPose3DData()
            self.poseRed = self.poseRedProxy.getPose3DData()
        finally:
            # Always release the lock, even if a proxy call raises.
            self.lock.release()
        return self.getDistance()

    def getDistance(self):
        """Euclidean distance between the two poses, rounded to 4 decimals."""
        v_d = (pow(self.poseRed.x - self.poseAr.x, 2) +
               pow(self.poseRed.y - self.poseAr.y, 2) +
               pow(self.poseRed.z - self.poseAr.z, 2))
        self.dist = round(abs(math.sqrt(v_d)), 4)
        return self.dist

    def finish(self):
        """Destroy the Ice communicator, logging (not raising) any error."""
        if self.ic:
            # Clean up
            try:
                self.ic.destroy()
            except Exception:
                traceback.print_exc()
class VisorPainter:
    """Paints the cat/mouse distance against time on a pyplot figure.

    Segments below ``threshold`` are drawn green and add their duration to
    the score; segments above are red.  The ``update_*`` methods are driven
    by the timers of ``VisorTimer`` through the module globals ``vt`` and
    ``pose``.
    """

    # threshold is the line where points get a different colour
    def __init__(self, threshold=7.0, max_d=20):
        self.fig, self.ax = plt.subplots()
        self.d = []   # distance samples
        self.t = []   # time stamps in seconds from start
        self.score = 0.0
        self.th = threshold
        self.max_dist = max_d
        self.suptitle = self.fig.suptitle('Timer is ready', fontsize=20)
        self.fig.subplots_adjust(top=0.86)
        self.score_text = self.ax.text((120.95), self.max_dist + 1.5,
                                       'Score: ' + str(self.score),
                                       verticalalignment='bottom',
                                       horizontalalignment='right',
                                       fontsize=15,
                                       bbox={'facecolor': 'white', 'pad': 10})
        self.drawThreshold()
        self.ax.xaxis.tick_top()
        self.ax.set_xlabel('Time')
        self.ax.xaxis.set_label_position('top')
        self.ax.set_ylabel('Distance')

    # Sets time and distance axes.
    def setAxes(self, xaxis=120, yaxis=None):
        if yaxis is None:
            yaxis = self.max_dist
        if xaxis != 120:
            # keep the score label glued to the right edge of the plot
            self.score_text.set_x((xaxis + 2.95))
        self.ax.set_xlim(0.0, xaxis)
        self.ax.set_ylim(yaxis, 0)  # distance axis grows downwards

    # Draws the threshold line
    def drawThreshold(self):
        plt.axhline(y=self.th)

    # Draws a single sample as a coloured point; green adds 1 to the score.
    # Not in use (update_data draws lines instead).  The original body
    # referenced undefined names ``t``/``d`` while the signature took
    # ``t_list``/``d_list``; the signature now matches the body.
    def drawPoint(self, t, d):
        if d <= self.th:
            self.score += 1
            plt.plot([t], [d], 'go', animated=True)
        else:
            plt.plot([t], [d], 'ro', animated=True)

    # Decides whether the last segment is green or red.  If it crosses the
    # threshold, it is split at the intersection and both halves are drawn
    # recursively.
    def drawLine(self, t_list, d_list):
        d_prev = d_list[len(d_list) - 2]
        d_last = d_list[len(d_list) - 1]
        if (d_prev <= self.th) and (d_last <= self.th):
            self.drawGreenLine(t_list[len(t_list) - 2:len(t_list)],
                               d_list[len(d_list) - 2:len(d_list)])
        elif (d_prev >= self.th) and (d_last >= self.th):
            self.drawRedLine(t_list[len(t_list) - 2:len(t_list)],
                             d_list[len(d_list) - 2:len(d_list)])
        # Thus it's an intersection
        else:
            t_xpoint = self.getIntersection(t_list[len(t_list) - 2],
                                            t_list[len(t_list) - 1],
                                            d_list[len(d_list) - 2],
                                            d_list[len(d_list) - 1])
            # Auxiliary half-segments split at the threshold crossing
            line1 = [[t_list[len(t_list) - 2], t_xpoint],
                     [d_list[len(d_list) - 2], self.th]]
            line2 = [[t_xpoint, t_list[len(t_list) - 1]],
                     [self.th, d_list[len(d_list) - 1]]]
            self.drawLine(line1[0], line1[1])
            self.drawLine(line2[0], line2[1])

    # Calculates the time at which the segment through (t1, d1)-(t2, d2)
    # crosses the threshold line.
    def getIntersection(self, t1, t2, d1, d2):
        return t2 + (((t2 - t1) * (self.th - d2)) / (d2 - d1))

    def drawGreenLine(self, t_line, d_line):
        # time spent below the threshold counts towards the score
        self.score += (t_line[1] - t_line[0])
        plt.plot(t_line, d_line, 'g-')

    def drawRedLine(self, t_line, d_line):
        plt.plot(t_line, d_line, 'r-')

    # Updates the score label (capped at the total game duration).
    def update_score(self):
        if self.score <= vt.delta_t.total_seconds():
            self.score_text.set_text(str('Score: %.2f secs' % self.score))
        else:
            self.score_text.set_text('Score: ' +
                                     str(vt.delta_t.total_seconds()) +
                                     ' secs')

    # Updates the countdown shown as the figure title.
    def update_title(self):
        if vt.timeLeft() <= vt.zero_t:
            vt.stopClkTimer()
            self.suptitle.set_text(str(vt.zero_t.total_seconds()))
            self.ax.figure.canvas.draw()
        else:
            self.suptitle.set_text(str(vt.timeLeft())[:-4])
            self.ax.figure.canvas.draw()

    # Pulls a new distance sample and draws it into the graph.
    # The first sample is stamped at 0.0 seconds.
    def update_data(self, first=False):
        dist = pose.update()
        if first:
            self.t.insert(len(self.t), 0.0)
        else:
            self.t.insert(len(self.t), (vt.delta_t - vt.diff).total_seconds())
        # Clip the distance to the top of the plot
        if dist > self.max_dist:
            self.d.insert(len(self.d), self.max_dist)
        else:
            self.d.insert(len(self.d), dist)
        # self.drawPoint(self.t[len(self.t)-1], self.d[len(self.d)-1])
        if len(self.t) >= 2 and len(self.d) >= 2:
            self.drawLine(self.t, self.d)
            self.update_score()
        if vt.timeLeft() <= vt.zero_t:
            # Game over: stop sampling and save the final graph.
            vt.stopDataTimer()
            self.update_score()
            self.ax.figure.canvas.draw()
            self.fig.savefig('Result_' + str(datetime.now()) + '.png',
                             bbox_inches='tight')
#https://github.com/RoboticsURJC/JdeRobot
#VisorPainter End
#
class VisorTimer:
    """Owns the countdown clock and the data-acquisition timer.

    Default delta time: 2 minutes and 0 seconds.
    Default clock interval: 100 ms; data interval: 330 ms.
    """

    def __init__(self, vp, delta_t_m=2, delta_t_s=0,
                 clock_timer_step=100, data_timer_step=330):
        # Keep a reference to the painter instead of relying on the module
        # global ``vp``: behaviour is unchanged for this script, but the
        # class now works with any painter instance.
        self.vp = vp
        self.delta_t = timedelta(minutes=delta_t_m, seconds=delta_t_s)
        self.zero_t = timedelta(minutes=0, seconds=0, milliseconds=0)
        self.final_t = datetime.now() + self.delta_t
        self.diff = self.final_t - datetime.now()
        vp.setAxes(xaxis=self.delta_t.seconds)
        # Creates the two matplotlib timer objects.
        self.clock_timer = vp.fig.canvas.new_timer(interval=clock_timer_step)
        self.data_timer = vp.fig.canvas.new_timer(interval=data_timer_step)
        # add_callback tells each timer which function to call.
        self.clock_timer.add_callback(vp.update_title)
        self.data_timer.add_callback(vp.update_data)

    def startTimer(self):
        """Start the clock and seed the graph with the first data sample."""
        self.clock_timer.start()
        self.vp.update_data(first=True)
        self.data_timer.start()

    def stopClkTimer(self):
        """Stop the countdown clock."""
        self.clock_timer.remove_callback(self.vp.update_title)
        self.clock_timer.stop()

    def stopDataTimer(self):
        """Stop the data acquisition."""
        self.data_timer.remove_callback(self.vp.update_data)
        self.data_timer.stop()

    def timeLeft(self):
        """Return the remaining time as a timedelta (may be negative)."""
        self.diff = self.final_t - datetime.now()
        return self.diff
#
#VisorTimer End
#
# Main
status = 0  # process exit code: 0 on clean run, 1 on any failure
try:
    # Globals ``pose``, ``vp`` and ``vt`` are read directly by the
    # VisorPainter/VisorTimer callback methods above.
    pose = Pose(sys.argv)
    pose.update()
    vp = VisorPainter()
    vt = VisorTimer(vp)
    vp.suptitle.set_text(str(vt.delta_t))
    vt.startTimer()
    plt.show()  # blocks until the window is closed
    pose.finish()
except:
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
    # consider ``except Exception`` — confirm intended behaviour.
    traceback.print_exc()
    status = 1
sys.exit(status)
| gpl-3.0 |
zygmuntz/Python-ELM | random_layer.py | 2 | 19019 | #-*- coding: utf8
# Author: David C. Lambert [dcl -at- panix -dot- com]
# Copyright(c) 2013
# License: Simple BSD
"""The :mod:`random_layer` module
implements Random Layer transformers.
Random layers are arrays of hidden unit activations that are
random functions of input activation values (dot products for simple
activation functions, distances from prototypes for radial basis
functions).
They are used in the implementation of Extreme Learning Machines (ELMs),
but can be used as a general input mapping.
"""
from abc import ABCMeta, abstractmethod
from math import sqrt
import numpy as np
import scipy.sparse as sp
from scipy.spatial.distance import cdist, pdist, squareform
from sklearn.metrics import pairwise_distances
from sklearn.utils import check_random_state, atleast2d_or_csr
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.base import BaseEstimator, TransformerMixin
# Public transformer classes exported by this module.
__all__ = ['RandomLayer',
           'MLPRandomLayer',
           'RBFRandomLayer',
           'GRBFRandomLayer',
           ]
class BaseRandomLayer(BaseEstimator, TransformerMixin):
    """Abstract base class for random hidden-layer transformers."""
    __metaclass__ = ABCMeta

    # name -> callable table of built-in transfer functions; concrete
    # subclasses populate it
    _internal_activation_funcs = dict()

    @classmethod
    def activation_func_names(cls):
        """Get list of internal activation function names"""
        return cls._internal_activation_funcs.keys()

    # take n_hidden and random_state, init components_ and
    # input_activations_
    def __init__(self, n_hidden=20, random_state=0, activation_func=None,
                 activation_args=None):
        self.n_hidden = n_hidden
        self.random_state = random_state
        self.activation_func = activation_func
        self.activation_args = activation_args

        self.components_ = dict()
        self.input_activations_ = None

        # keyword args for internally defined funcs
        self._extra_args = dict()

    @abstractmethod
    def _generate_components(self, X):
        """Generate components of hidden layer given X"""

    @abstractmethod
    def _compute_input_activations(self, X):
        """Compute input activations given X"""

    def _compute_hidden_activations(self, X):
        """Compute input activations for X and pass them through the
        configured transfer function."""
        self._compute_input_activations(X)
        raw_acts = self.input_activations_

        if callable(self.activation_func):
            # user-supplied callable, possibly with keyword arguments
            kwargs = self.activation_args if self.activation_args else {}
            return self.activation_func(raw_acts, **kwargs)

        # otherwise look the transfer function up by name
        transfer = self._internal_activation_funcs[self.activation_func]
        return transfer(raw_acts, **self._extra_args)

    def fit(self, X, y=None):
        """Generate a random hidden layer.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training set: only the shape is used to generate random component
            values for hidden units

        y : is not used: placeholder to allow for usage in a Pipeline.

        Returns
        -------
        self
        """
        self._generate_components(atleast2d_or_csr(X))
        return self

    def transform(self, X, y=None):
        """Generate the random hidden layer's activations given X as input.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            Data to transform

        y : is not used: placeholder to allow for usage in a Pipeline.

        Returns
        -------
        X_new : numpy array of shape [n_samples, n_components]
        """
        X = atleast2d_or_csr(X)

        if self.components_ is None:
            raise ValueError('No components initialized')

        return self._compute_hidden_activations(X)
class RandomLayer(BaseRandomLayer):
    """RandomLayer is a transformer that creates a feature mapping of the
    inputs that corresponds to a layer of hidden units with randomly
    generated components.

    The transformed values are a specified function of input activations
    that are a weighted combination of dot product (multilayer perceptron)
    and distance (rbf) activations:

      input_activation = alpha * mlp_activation + (1-alpha) * rbf_activation

      mlp_activation(x) = dot(x, weights) + bias
      rbf_activation(x) = rbf_width * ||x - center||/radius

    alpha and rbf_width are specified by the user

    weights and biases are taken from normal distribution of
    mean 0 and sd of 1

    centers are taken uniformly from the bounding hyperrectangle
    of the inputs, and radii are max(||x-c||)/sqrt(n_centers*2)

    The input activation is transformed by a transfer function that defaults
    to numpy.tanh if not specified, but can be any callable that returns an
    array of the same shape as its argument (the input activation array, of
    shape [n_samples, n_hidden]).  Functions provided are 'sine', 'tanh',
    'tribas', 'inv_tribas', 'sigmoid', 'hardlim', 'softlim', 'gaussian',
    'multiquadric', 'inv_multiquadric' and 'reclinear'.

    Parameters
    ----------
    `n_hidden` : int, optional (default=20)
        Number of units to generate

    `alpha` : float, optional (default=0.5)
        Mixing coefficient for distance and dot product input activations:
        activation = alpha*mlp_activation + (1-alpha)*rbf_width*rbf_activation

    `rbf_width` : float, optional (default=1.0)
        multiplier on rbf_activation

    `user_components`: dictionary, optional (default=None)
        dictionary containing values for components that would otherwise be
        randomly generated.  Valid key/value pairs are as follows:
           'radii'  : array-like of shape [n_hidden]
           'centers': array-like of shape [n_hidden, n_features]
           'biases' : array-like of shape [n_hidden]
           'weights': array-like of shape [n_features, n_hidden]

    `activation_func` : {callable, string} optional (default='tanh')
        Function used to transform input activation

        It must be one of 'tanh', 'sine', 'tribas', 'inv_tribas',
        'sigmoid', 'hardlim', 'softlim', 'gaussian', 'multiquadric',
        'inv_multiquadric', 'reclinear' or a callable.  If None is given,
        'tanh' will be used.

        If a callable is given, it will be used to compute the activations.

    `activation_args` : dictionary, optional (default=None)
        Supplies keyword arguments for a callable activation_func

    `random_state`  : int, RandomState instance or None (default=None)
        Control the pseudo random number generator used to generate the
        hidden unit weights at fit time.

    Attributes
    ----------
    `input_activations_` : numpy array of shape [n_samples, n_hidden]
        Array containing dot(x, hidden_weights) + bias for all samples

    `components_` : dictionary containing two keys:
        `bias_weights_`   : numpy array of shape [n_hidden]
        `hidden_weights_` : numpy array of shape [n_features, n_hidden]

    See Also
    --------
    """
    # triangular activation function
    _tribas = (lambda x: np.clip(1.0 - np.fabs(x), 0.0, 1.0))

    # inverse triangular activation function
    _inv_tribas = (lambda x: np.clip(np.fabs(x), 0.0, 1.0))

    # sigmoid activation function
    _sigmoid = (lambda x: 1.0/(1.0 + np.exp(-x)))

    # hard limit activation function
    _hardlim = (lambda x: np.array(x > 0.0, dtype=float))

    # soft limit: clip to [0, 1]
    _softlim = (lambda x: np.clip(x, 0.0, 1.0))

    # gaussian RBF
    _gaussian = (lambda x: np.exp(-pow(x, 2.0)))

    # multiquadric RBF
    _multiquadric = (lambda x:
                     np.sqrt(1.0 + pow(x, 2.0)))

    # inverse multiquadric RBF
    _inv_multiquadric = (lambda x:
                         1.0/(np.sqrt(1.0 + pow(x, 2.0))))

    # rectified linear: max(0, x)
    _reclinear = (lambda x: np.maximum(0, x))

    # internal activation function table
    _internal_activation_funcs = {'sine': np.sin,
                                  'tanh': np.tanh,
                                  'tribas': _tribas,
                                  'inv_tribas': _inv_tribas,
                                  'sigmoid': _sigmoid,
                                  'softlim': _softlim,
                                  'hardlim': _hardlim,
                                  'gaussian': _gaussian,
                                  'multiquadric': _multiquadric,
                                  'inv_multiquadric': _inv_multiquadric,
                                  'reclinear': _reclinear,
                                  }

    def __init__(self, n_hidden=20, alpha=0.5, random_state=None,
                 activation_func='tanh', activation_args=None,
                 user_components=None, rbf_width=1.0):

        super(RandomLayer, self).__init__(n_hidden=n_hidden,
                                          random_state=random_state,
                                          activation_func=activation_func,
                                          activation_args=activation_args)

        # validate named transfer functions early
        if (isinstance(self.activation_func, str)):
            func_names = self._internal_activation_funcs.keys()
            if (self.activation_func not in func_names):
                msg = "unknown activation function '%s'" % self.activation_func
                raise ValueError(msg)

        self.alpha = alpha
        self.rbf_width = rbf_width
        self.user_components = user_components

        # alpha == 0.0 -> pure RBF, alpha == 1.0 -> pure MLP
        self._use_mlp_input = (self.alpha != 0.0)
        self._use_rbf_input = (self.alpha != 1.0)

    def _get_user_components(self, key):
        """Look for given user component; None when absent"""
        try:
            return self.user_components[key]
        except (TypeError, KeyError):
            return None

    def _compute_radii(self):
        """Generate RBF radii"""
        # use supplied radii if present
        radii = self._get_user_components('radii')

        # compute radii as max pairwise center distance / sqrt(2*n_centers)
        if (radii is None):
            centers = self.components_['centers']

            n_centers = centers.shape[0]
            max_dist = np.max(pairwise_distances(centers))
            radii = np.ones(n_centers) * max_dist/sqrt(2.0 * n_centers)

        self.components_['radii'] = radii

    def _compute_centers(self, X, sparse, rs):
        """Generate RBF centers"""
        # use supplied centers if present
        centers = self._get_user_components('centers')

        # use points taken uniformly from the bounding
        # hyperrectangle
        if (centers is None):
            n_features = X.shape[1]

            if (sparse):
                # Per-column min/max over the *stored* (non-zero) entries,
                # clipped to +/-1e10 sentinels.  List comprehensions are
                # used instead of np.array(map(...)): on Python 3 the
                # latter silently builds a useless 0-d object array (and
                # xrange no longer exists); the comprehension form behaves
                # identically on Python 2.
                cols = [X.getcol(i) for i in range(n_features)]

                min_dtype = X.dtype.type(1.0e10)
                min_Xs = np.array([np.minimum(min_dtype, np.min(col.data))
                                   for col in cols])

                max_dtype = X.dtype.type(-1.0e10)
                max_Xs = np.array([np.maximum(max_dtype, np.max(col.data))
                                   for col in cols])
            else:
                min_Xs = X.min(axis=0)
                max_Xs = X.max(axis=0)

            spans = max_Xs - min_Xs
            ctrs_size = (self.n_hidden, n_features)
            centers = min_Xs + spans * rs.uniform(0.0, 1.0, ctrs_size)

        self.components_['centers'] = centers

    def _compute_biases(self, rs):
        """Generate MLP biases"""
        # use supplied biases if present
        biases = self._get_user_components('biases')
        if (biases is None):
            b_size = self.n_hidden
            biases = rs.normal(size=b_size)

        self.components_['biases'] = biases

    def _compute_weights(self, X, rs):
        """Generate MLP weights"""
        # use supplied weights if present
        weights = self._get_user_components('weights')
        if (weights is None):
            n_features = X.shape[1]
            hw_size = (n_features, self.n_hidden)
            weights = rs.normal(size=hw_size)

        self.components_['weights'] = weights

    def _generate_components(self, X):
        """Generate components of hidden layer given X"""
        rs = check_random_state(self.random_state)
        if (self._use_mlp_input):
            self._compute_biases(rs)
            self._compute_weights(X, rs)

        if (self._use_rbf_input):
            self._compute_centers(X, sp.issparse(X), rs)
            self._compute_radii()

    def _compute_input_activations(self, X):
        """Compute input activations given X"""

        n_samples = X.shape[0]

        # weighted MLP (dot product) part
        mlp_acts = np.zeros((n_samples, self.n_hidden))
        if (self._use_mlp_input):
            b = self.components_['biases']
            w = self.components_['weights']
            mlp_acts = self.alpha * (safe_sparse_dot(X, w) + b)

        # weighted RBF (distance) part
        rbf_acts = np.zeros((n_samples, self.n_hidden))
        if (self._use_rbf_input):
            radii = self.components_['radii']
            centers = self.components_['centers']
            scale = self.rbf_width * (1.0 - self.alpha)
            rbf_acts = scale * cdist(X, centers)/radii

        self.input_activations_ = mlp_acts + rbf_acts
class MLPRandomLayer(RandomLayer):
    """RandomLayer restricted to MLP (dot-product) activations: the mixing
    coefficient alpha is pinned to 1.0."""

    def __init__(self, n_hidden=20, random_state=None,
                 activation_func='tanh', activation_args=None,
                 weights=None, biases=None):
        super(MLPRandomLayer, self).__init__(
            n_hidden=n_hidden,
            random_state=random_state,
            activation_func=activation_func,
            activation_args=activation_args,
            user_components={'weights': weights, 'biases': biases},
            alpha=1.0)
class RBFRandomLayer(RandomLayer):
    """RandomLayer restricted to RBF (distance) activations: the mixing
    coefficient alpha is pinned to 0.0."""

    def __init__(self, n_hidden=20, random_state=None,
                 activation_func='gaussian', activation_args=None,
                 centers=None, radii=None, rbf_width=1.0):
        super(RBFRandomLayer, self).__init__(
            n_hidden=n_hidden,
            random_state=random_state,
            activation_func=activation_func,
            activation_args=activation_args,
            user_components={'centers': centers, 'radii': radii},
            rbf_width=rbf_width,
            alpha=0.0)
class GRBFRandomLayer(RBFRandomLayer):
    """Random Generalized RBF Hidden Layer transformer

    Creates a layer of radial basis function units where:

       f(a), s.t. a = ||x-c||/r

    with c the unit center and f() is exp(-gamma * a^tau), where tau and r
    are computed based on [1]

    Parameters
    ----------
    `n_hidden` : int, optional (default=20)
        Number of units to generate, ignored if centers are provided

    `grbf_lambda` : float, optional (default=0.05)
        GRBF shape parameter

    `gamma` : {int, float} optional (default=1.0)
        Width multiplier for GRBF distance argument

    `centers` : array of shape (n_hidden, n_features), optional (default=None)
        If provided, overrides internal computation of the centers

    `radii` : array of shape (n_hidden), optional (default=None)
        If provided, overrides internal computation of the radii

    `use_exemplars` : bool, optional (default=False)
        If True, uses random examples from the input to determine the RBF
        centers, ignored if centers are provided

    `random_state` : int or RandomState instance, optional (default=None)
        Control the pseudo random number generator used to generate the
        centers at fit time, ignored if centers are provided

    Attributes
    ----------
    `components_` : dictionary containing two keys:
        `radii_`   : numpy array of shape [n_hidden]
        `centers_` : numpy array of shape [n_hidden, n_features]

    `input_activations_` : numpy array of shape [n_samples, n_hidden]
        Array containing ||x-c||/r for all samples

    See Also
    --------
    ELMRegressor, ELMClassifier, SimpleELMRegressor, SimpleELMClassifier,
    SimpleRandomLayer

    References
    ----------
    .. [1] Fernandez-Navarro, et al, "MELM-GRBF: a modified version of the
              extreme learning machine for generalized radial basis function
              neural networks", Neurocomputing 74 (2011), 2502-2510
    """
    # GRBF transfer function, kept verbatim from the reference
    # implementation (note the double exponential).
    _grbf = (lambda acts, taus: np.exp(np.exp(-pow(acts, taus))))

    _internal_activation_funcs = {'grbf': _grbf}

    def __init__(self, n_hidden=20, grbf_lambda=0.001,
                 centers=None, radii=None, random_state=None):

        super(GRBFRandomLayer, self).__init__(n_hidden=n_hidden,
                                              activation_func='grbf',
                                              centers=centers, radii=radii,
                                              random_state=random_state)

        self.grbf_lambda = grbf_lambda
        # the following are filled in by _compute_centers
        self.dN_vals = None
        self.dF_vals = None
        self.tau_vals = None

    # get centers from superclass, then derive tau values per ref [1]
    def _compute_centers(self, X, sparse, rs):
        """Generate centers, then compute tau, dF and dN vals"""
        super(GRBFRandomLayer, self)._compute_centers(X, sparse, rs)

        centers = self.components_['centers']
        # pairwise center distances, each row sorted ascending
        dists = np.sort(squareform(pdist(centers)))

        self.dF_vals = dists[:, -1]          # distance to the farthest center
        self.dN_vals = dists[:, 1]/100.0     # scaled distance to the nearest
        # self.dN_vals = 0.0002 * np.ones(self.dF_vals.shape)

        numerator = np.log(np.log(self.grbf_lambda) /
                           np.log(1.0 - self.grbf_lambda))
        denominator = np.log(self.dF_vals/self.dN_vals)
        self.tau_vals = numerator/denominator

        self._extra_args['taus'] = self.tau_vals

    # get radii according to ref [1]
    def _compute_radii(self):
        """Generate radii"""
        scale = pow(-np.log(self.grbf_lambda), 1.0/self.tau_vals)
        self.components_['radii'] = self.dF_vals/scale
| bsd-3-clause |
mattgiguere/scikit-learn | sklearn/ensemble/forest.py | 2 | 59479 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
# Public estimators exported by this module.
__all__ = ["RandomForestClassifier",
           "RandomForestRegressor",
           "ExtraTreesClassifier",
           "ExtraTreesRegressor",
           "RandomTreesEmbedding"]
# Largest value usable as a per-tree random seed (see BaseForest.fit).
MAX_INT = np.iinfo(np.int32).max
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
                          verbose=0, class_weight=None):
    """Private function used to fit a single tree in parallel."""
    if verbose > 1:
        print("building tree %d of %d" % (tree_idx + 1, n_trees))
    if not forest.bootstrap:
        # No resampling requested: fit on the full training set as-is.
        tree.fit(X, y, sample_weight=sample_weight, check_input=False)
        return tree
    # Bootstrap case: sampling with replacement is emulated by scaling each
    # sample's weight by the number of times it was drawn.
    n_samples = X.shape[0]
    if sample_weight is None:
        weights = np.ones((n_samples,), dtype=np.float64)
    else:
        weights = sample_weight.copy()
    rng = check_random_state(tree.random_state)
    drawn = rng.randint(0, n_samples, n_samples)
    counts = bincount(drawn, minlength=n_samples)
    weights *= counts
    if class_weight == 'subsample':
        # Re-balance class weights on the bootstrap sample itself.
        weights *= compute_sample_weight('auto', y, drawn)
    tree.fit(X, y, sample_weight=weights, check_input=False)
    # Remember which samples were in-bag; the OOB machinery reads this.
    tree.indices_ = counts > 0.
    return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
                                    _LearntSelectorMixin)):
    """Base class for forests of trees.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(BaseForest, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)
        # scikit-learn convention: store constructor parameters verbatim,
        # defer all validation/conversion to fit().
        self.bootstrap = bootstrap
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
        self.warm_start = warm_start
        self.class_weight = class_weight
    def apply(self, X):
        """Apply trees in the forest to X, return leaf indices.

        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        X_leaves : array_like, shape = [n_samples, n_estimators]
            For each datapoint x in X and for each tree in the forest,
            return the index of the leaf x ends up in.
        """
        X = self._validate_X_predict(X)
        # Threading backend: per-tree work runs in compiled code and X is
        # shared read-only, so threads avoid inter-process copies.
        results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                           backend="threading")(
            delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
            for tree in self.estimators_)
        # Transpose from [n_estimators, n_samples] to [n_samples, n_estimators].
        return np.array(results).T
    def fit(self, X, y, sample_weight=None):
        """Build a forest of trees from the training set (X, y).

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.
        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        Returns
        -------
        self : object
            Returns self.
        """
        # Validate or convert input data
        X = check_array(X, dtype=DTYPE, accept_sparse="csc")
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        # Remap output
        n_samples, self.n_features_ = X.shape
        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn("A column-vector y was passed when a 1d array was"
                 " expected. Please change the shape of y to "
                 "(n_samples,), for example using ravel().",
                 DataConversionWarning, stacklevel=2)
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        y, expanded_class_weight = self._validate_y_class_weight(y)
        # The tree code requires C-contiguous float64 targets.
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        if expanded_class_weight is not None:
            # Fold class weights into the per-sample weights.
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Check parameters
        self._validate_estimator()
        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")
        random_state = check_random_state(self.random_state)
        if not self.warm_start:
            # Free allocated memory, if any
            self.estimators_ = []
        n_more_estimators = self.n_estimators - len(self.estimators_)
        if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'len(estimators_)=%d when warm_start==True'
                             % (self.n_estimators, len(self.estimators_)))
        elif n_more_estimators == 0:
            warn("Warm-start fitting without increasing n_estimators does not "
                 "fit new trees.")
        else:
            if self.warm_start and len(self.estimators_) > 0:
                # We draw from the random state to get the random state we
                # would have got if we hadn't used a warm_start.
                random_state.randint(MAX_INT, size=len(self.estimators_))
            trees = []
            for i in range(n_more_estimators):
                tree = self._make_estimator(append=False)
                # Each tree gets its own integer seed drawn from the forest's
                # random state, so runs are reproducible per-tree.
                tree.set_params(random_state=random_state.randint(MAX_INT))
                trees.append(tree)
            # Parallel loop: we use the threading backend as the Cython code
            # for fitting the trees is internally releasing the Python GIL
            # making threading always more efficient than multiprocessing in
            # that case.
            trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(
                delayed(_parallel_build_trees)(
                    t, self, X, y, sample_weight, i, len(trees),
                    verbose=self.verbose, class_weight=self.class_weight)
                for i, t in enumerate(trees))
            # Collect newly grown trees
            self.estimators_.extend(trees)
        if self.oob_score:
            self._set_oob_score(X, y)
        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self
    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""
    def _validate_y_class_weight(self, y):
        """Validate ``y``; return ``(y, expanded_class_weight or None)``."""
        # Default implementation
        return y, None
    def _validate_X_predict(self, X):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before exploiting the model.")
        # Delegate to the first tree: all trees were fitted on the same data
        # layout, so one validation suffices.
        return self.estimators_[0]._validate_X_predict(X, check_input=True)
    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
        feature).

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before `feature_importances_`.")
        # Mean of the per-tree importance vectors.
        all_importances = Parallel(n_jobs=self.n_jobs,
                                   backend="threading")(
            delayed(getattr)(tree, 'feature_importances_')
            for tree in self.estimators_)
        return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
                                          ClassifierMixin)):
    """Base class for forest of trees-based classifiers.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(ForestClassifier, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)
    def _set_oob_score(self, X, y):
        """Compute out-of-bag score.

        For every tree, accumulate class-probability votes on the samples
        that tree did NOT see (its out-of-bag samples), then derive the
        OOB decision function and accuracy from those votes.
        """
        n_classes_ = self.n_classes_
        n_samples = y.shape[0]
        oob_decision_function = []
        oob_score = 0.0
        predictions = []
        for k in range(self.n_outputs_):
            predictions.append(np.zeros((n_samples, n_classes_[k])))
        sample_indices = np.arange(n_samples)
        for estimator in self.estimators_:
            # FIX: `np.bool` was a deprecated alias for the builtin `bool`
            # and is removed in NumPy >= 1.24; the builtin is identical.
            mask = np.ones(n_samples, dtype=bool)
            # estimator.indices_ marks in-bag samples; flip those off so
            # mask selects the out-of-bag rows.
            mask[estimator.indices_] = False
            mask_indices = sample_indices[mask]
            p_estimator = estimator.predict_proba(X[mask_indices, :],
                                                  check_input=False)
            if self.n_outputs_ == 1:
                p_estimator = [p_estimator]
            for k in range(self.n_outputs_):
                predictions[k][mask_indices, :] += p_estimator[k]
        for k in range(self.n_outputs_):
            if (predictions[k].sum(axis=1) == 0).any():
                warn("Some inputs do not have OOB scores. "
                     "This probably means too few trees were used "
                     "to compute any reliable oob estimates.")
            decision = (predictions[k] /
                        predictions[k].sum(axis=1)[:, np.newaxis])
            oob_decision_function.append(decision)
            oob_score += np.mean(y[:, k] ==
                                 np.argmax(predictions[k], axis=1), axis=0)
        if self.n_outputs_ == 1:
            self.oob_decision_function_ = oob_decision_function[0]
        else:
            self.oob_decision_function_ = oob_decision_function
        # Average accuracy across outputs.
        self.oob_score_ = oob_score / self.n_outputs_
    def _validate_y_class_weight(self, y):
        """Encode classes per output and expand class_weight to sample weights."""
        y = np.copy(y)
        expanded_class_weight = None
        if self.class_weight is not None:
            # Keep the original labels: compute_sample_weight needs them,
            # while y below is re-encoded to 0..n_classes-1.
            y_original = np.copy(y)
        self.classes_ = []
        self.n_classes_ = []
        for k in range(self.n_outputs_):
            classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes_k)
            self.n_classes_.append(classes_k.shape[0])
        if self.class_weight is not None:
            valid_presets = ('auto', 'subsample')
            if isinstance(self.class_weight, six.string_types):
                if self.class_weight not in valid_presets:
                    raise ValueError('Valid presets for class_weight include '
                                     '"auto" and "subsample". Given "%s".'
                                     % self.class_weight)
                if self.warm_start:
                    warn('class_weight presets "auto" or "subsample" are '
                         'not recommended for warm_start if the fitted data '
                         'differs from the full dataset. In order to use '
                         '"auto" weights, use compute_class_weight("auto", '
                         'classes, y). In place of y you can use a large '
                         'enough sample of the full training set target to '
                         'properly estimate the class frequency '
                         'distributions. Pass the resulting weights as the '
                         'class_weight parameter.')
            # 'subsample' with bootstrap is handled per-tree inside
            # _parallel_build_trees; every other case is expanded here once.
            if self.class_weight != 'subsample' or not self.bootstrap:
                if self.class_weight == 'subsample':
                    class_weight = 'auto'
                else:
                    class_weight = self.class_weight
                expanded_class_weight = compute_sample_weight(class_weight,
                                                              y_original)
        return y, expanded_class_weight
    def predict(self, X):
        """Predict class for X.

        The predicted class of an input sample is a vote by the trees in
        the forest, weighted by their probability estimates. That is,
        the predicted class is the one with highest mean probability
        estimate across the trees.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)
        else:
            n_samples = proba[0].shape[0]
            predictions = np.zeros((n_samples, self.n_outputs_))
            for k in range(self.n_outputs_):
                # Map encoded argmax indices back to the original labels.
                predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
                                                                    axis=1),
                                                          axis=0)
            return predictions
    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the mean predicted class probabilities of the trees in the forest. The
        class probability of a single tree is the fraction of samples of the same
        class in a leaf.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_helper)(e, 'predict_proba', X,
                                      check_input=False)
            for e in self.estimators_)
        # Reduce: sum the per-tree probabilities, then normalise.
        proba = all_proba[0]
        if self.n_outputs_ == 1:
            for j in range(1, len(all_proba)):
                proba += all_proba[j]
            proba /= len(self.estimators_)
        else:
            for j in range(1, len(all_proba)):
                for k in range(self.n_outputs_):
                    proba[k] += all_proba[j][k]
            for k in range(self.n_outputs_):
                # NOTE(review): divides by self.n_estimators here but by
                # len(self.estimators_) in the single-output branch; the two
                # agree after a successful fit() — confirm before relying on
                # this with a partially-built ensemble.
                proba[k] /= self.n_estimators
        return proba
    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the trees in the
        forest.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])
            return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
    """Base class for forest of trees-based regressors.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        super(ForestRegressor, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
    def predict(self, X):
        """Predict regression target for X.

        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # Parallel loop
        all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_helper)(e, 'predict', X, check_input=False)
            for e in self.estimators_)
        # Reduce: the forest prediction is the mean of the tree predictions.
        y_hat = sum(all_y_hat) / len(self.estimators_)
        return y_hat
    def _set_oob_score(self, X, y):
        """Compute out-of-bag scores.

        Each sample's OOB prediction is the mean prediction of the trees
        that did not see it; oob_score_ is the R^2 of those predictions,
        averaged over outputs.
        """
        n_samples = y.shape[0]
        predictions = np.zeros((n_samples, self.n_outputs_))
        n_predictions = np.zeros((n_samples, self.n_outputs_))
        sample_indices = np.arange(n_samples)
        for estimator in self.estimators_:
            # FIX: `np.bool` was a deprecated alias for the builtin `bool`
            # and is removed in NumPy >= 1.24; the builtin is identical.
            mask = np.ones(n_samples, dtype=bool)
            # estimator.indices_ marks in-bag samples; flip those off so
            # mask selects the out-of-bag rows.
            mask[estimator.indices_] = False
            mask_indices = sample_indices[mask]
            p_estimator = estimator.predict(X[mask_indices, :], check_input=False)
            if self.n_outputs_ == 1:
                p_estimator = p_estimator[:, np.newaxis]
            predictions[mask_indices, :] += p_estimator
            n_predictions[mask_indices, :] += 1
        if (n_predictions == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few trees were used "
                 "to compute any reliable oob estimates.")
            # Avoid division by zero for samples never left out-of-bag.
            n_predictions[n_predictions == 0] = 1
        predictions /= n_predictions
        self.oob_prediction_ = predictions
        if self.n_outputs_ == 1:
            self.oob_prediction_ = \
                self.oob_prediction_.reshape((n_samples, ))
        self.oob_score_ = 0.0
        for k in range(self.n_outputs_):
            self.oob_score_ += r2_score(y[:, k],
                                        predictions[:, k])
        self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
    """A random forest classifier.

    Fits ``n_estimators`` decision trees on (by default) bootstrap samples
    of the training set and averages their probability estimates, which
    improves accuracy and controls over-fitting.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        Number of trees in the forest.
    criterion : string, optional (default="gini")
        Split-quality measure: "gini" (Gini impurity) or "entropy"
        (information gain). Tree-specific.
    max_features : int, float, string or None, optional (default="auto")
        Number of features examined when looking for the best split. An
        int is used as-is; a float means ``int(max_features * n_features)``;
        "auto" and "sqrt" mean ``sqrt(n_features)``; "log2" means
        ``log2(n_features)``; None means all features. The search never
        stops before at least one valid partition of the node samples is
        found, even if that requires inspecting more than ``max_features``
        features. Tree-specific.
    max_depth : integer or None, optional (default=None)
        Maximum tree depth. With None, nodes are expanded until all leaves
        are pure or hold fewer than ``min_samples_split`` samples. Ignored
        when ``max_leaf_nodes`` is not None. Tree-specific.
    min_samples_split : integer, optional (default=2)
        Minimum number of samples required to split an internal node.
        Tree-specific.
    min_samples_leaf : integer, optional (default=1)
        Minimum number of samples a new leaf may hold; candidate splits
        producing smaller leaves are discarded. Tree-specific.
    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the input samples required at a leaf
        node. Tree-specific.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees best-first (by relative impurity reduction) with at
        most this many leaves; overrides ``max_depth`` when not None.
        Tree-specific.
    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.
    oob_score : bool
        Whether to estimate the generalization error on out-of-bag
        samples.
    n_jobs : integer, optional (default=1)
        Number of parallel jobs for both `fit` and `predict`; -1 uses the
        number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, random number generator, or None for the global
        ``np.random`` instance.
    verbose : int, optional (default=0)
        Verbosity of the tree-building process.
    warm_start : bool, optional (default=False)
        When True, reuse the previous call's trees and add more
        estimators; otherwise fit a whole new forest.
    class_weight : dict, list of dicts, "auto", "subsample" or None, optional
        Weights per class as ``{class_label: weight}`` (one dict per
        output column for multi-output y; weights are then multiplied
        across outputs). "auto" weighs classes inversely proportional to
        their frequencies in y; "subsample" does the same but on each
        tree's bootstrap sample. These weights are multiplied with
        ``sample_weight`` when that is passed to ``fit``.

    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The fitted sub-estimators.
    classes_ : array of shape = [n_classes] or a list of such arrays
        Class labels (a list of arrays for multi-output problems).
    n_classes_ : int or list
        Number of classes (a list for multi-output problems).
    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important).
    oob_score_ : float
        Training-set score from the out-of-bag estimate.
    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Out-of-bag decision function on the training set; entries may be
        NaN for samples that were never out-of-bag (small forests).

    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    See also
    --------
    DecisionTreeClassifier, ExtraTreesClassifier
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(RandomForestClassifier, self).__init__(
            base_estimator=DecisionTreeClassifier(),
            n_estimators=n_estimators,
            # Names of forest attributes copied onto every sub-tree.
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap, oob_score=oob_score, n_jobs=n_jobs,
            random_state=random_state, verbose=verbose,
            warm_start=warm_start, class_weight=class_weight)
        # Tree hyper-parameters must also live on the forest itself so that
        # get_params()/set_params() (and estimator_params above) find them.
        self.criterion = criterion
        self.max_features = max_features
        self.max_depth = max_depth
        self.max_leaf_nodes = max_leaf_nodes
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
class RandomForestRegressor(ForestRegressor):
    """A random forest regressor.

    Fits ``n_estimators`` decision trees on (by default) bootstrap samples
    of the training set and averages their predictions, which improves
    accuracy and controls over-fitting.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        Number of trees in the forest.
    criterion : string, optional (default="mse")
        Split-quality measure; only "mse" (mean squared error) is
        supported. Tree-specific.
    max_features : int, float, string or None, optional (default="auto")
        Number of features examined when looking for the best split. An
        int is used as-is; a float means ``int(max_features * n_features)``;
        "auto" and None mean all ``n_features``; "sqrt" means
        ``sqrt(n_features)``; "log2" means ``log2(n_features)``. The
        search never stops before at least one valid partition of the node
        samples is found, even if that requires inspecting more than
        ``max_features`` features. Tree-specific.
    max_depth : integer or None, optional (default=None)
        Maximum tree depth. With None, nodes are expanded until all leaves
        are pure or hold fewer than ``min_samples_split`` samples. Ignored
        when ``max_leaf_nodes`` is not None. Tree-specific.
    min_samples_split : integer, optional (default=2)
        Minimum number of samples required to split an internal node.
        Tree-specific.
    min_samples_leaf : integer, optional (default=1)
        Minimum number of samples a new leaf may hold; candidate splits
        producing smaller leaves are discarded. Tree-specific.
    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the input samples required at a leaf
        node. Tree-specific.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees best-first (by relative impurity reduction) with at
        most this many leaves; overrides ``max_depth`` when not None.
        Tree-specific.
    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.
    oob_score : bool
        Whether to estimate the generalization error on out-of-bag
        samples.
    n_jobs : integer, optional (default=1)
        Number of parallel jobs for both `fit` and `predict`; -1 uses the
        number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, random number generator, or None for the global
        ``np.random`` instance.
    verbose : int, optional (default=0)
        Verbosity of the tree-building process.
    warm_start : bool, optional (default=False)
        When True, reuse the previous call's trees and add more
        estimators; otherwise fit a whole new forest.

    Attributes
    ----------
    estimators_ : list of DecisionTreeRegressor
        The fitted sub-estimators.
    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important).
    oob_score_ : float
        Training-set score from the out-of-bag estimate.
    oob_prediction_ : array of shape = [n_samples]
        Out-of-bag predictions on the training set.

    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    See also
    --------
    DecisionTreeRegressor, ExtraTreesRegressor
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        super(RandomForestRegressor, self).__init__(
            base_estimator=DecisionTreeRegressor(),
            n_estimators=n_estimators,
            # Names of forest attributes copied onto every sub-tree.
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap, oob_score=oob_score, n_jobs=n_jobs,
            random_state=random_state, verbose=verbose,
            warm_start=warm_start)
        # Tree hyper-parameters must also live on the forest itself so that
        # get_params()/set_params() (and estimator_params above) find them.
        self.criterion = criterion
        self.max_features = max_features
        self.max_depth = max_depth
        self.max_leaf_nodes = max_leaf_nodes
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and use averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "auto", "subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "auto" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data.
The "subsample" mode is the same as "auto" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and use averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
    def _set_oob_score(self, X, y):
        # The embedding is unsupervised (trees are fit to random targets),
        # so an out-of-bag score is meaningless; fail loudly.
        raise NotImplementedError("OOB score not supported by tree embedding")
    def fit(self, X, y=None, sample_weight=None):
        """Fit estimator.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            The input samples. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csc_matrix`` for maximum efficiency.
        y : ignored
            Present only for API compatibility; the embedding is
            unsupervised and generates its own random targets.
        sample_weight : array-like of shape = [n_samples] or None
            Per-sample weights forwarded to the underlying forest fit.
        Returns
        -------
        self : object
            Returns self.
        """
        # All work is delegated to fit_transform; its transformed output
        # is discarded here.
        self.fit_transform(X, y, sample_weight=sample_weight)
        return self
    def fit_transform(self, X, y=None, sample_weight=None):
        """Fit estimator and transform dataset.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Input data used to build forests. Use ``dtype=np.float32`` for
            maximum efficiency.
        y : ignored
            Present only for API compatibility; replaced internally by
            uniform random targets.
        sample_weight : array-like of shape = [n_samples] or None
            Per-sample weights forwarded to the underlying forest fit.
        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        # ensure_2d=False because there are actually unit test checking we fail
        # for 1d.
        X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        # "Totally random" trees: fit against uniform noise so the shared
        # supervised forest machinery can be reused unchanged.
        rnd = check_random_state(self.random_state)
        y = rnd.uniform(size=X.shape[0])
        super(RandomTreesEmbedding, self).fit(X, y,
                                              sample_weight=sample_weight)
        # One-hot encode each sample's per-tree leaf index into the final
        # (sparse or dense, per self.sparse_output) embedding.
        self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
        return self.one_hot_encoder_.fit_transform(self.apply(X))
    def transform(self, X):
        """Transform dataset.
        Requires a prior call to ``fit``/``fit_transform`` (which sets
        ``self.one_hot_encoder_``).
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Input data to be transformed. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csr_matrix`` for maximum efficiency.
        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        # apply() yields each sample's leaf index per tree; the fitted
        # encoder maps those indices to the one-hot embedding.
        return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/series/test_replace.py | 7 | 8612 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
import pandas._libs.lib as lib
import pandas.util.testing as tm
from .common import TestData
class TestSeriesReplace(TestData):
    """Tests for ``Series.replace``: scalar/list/dict arguments, inplace
    operation, dtype upcasting, and bool/string/number interaction."""
    def test_replace(self):
        N = 100
        ser = pd.Series(np.random.randn(N))
        ser[0:4] = np.nan
        ser[6:10] = 0
        # replace list with a single value
        ser.replace([np.nan], -1, inplace=True)
        exp = ser.fillna(-1)
        tm.assert_series_equal(ser, exp)
        rs = ser.replace(0., np.nan)
        ser[ser == 0.] = np.nan
        tm.assert_series_equal(rs, ser)
        ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
                        dtype=object)
        ser[:5] = np.nan
        ser[6:10] = 'foo'
        ser[20:30] = 'bar'
        # replace list with a single value
        rs = ser.replace([np.nan, 'foo', 'bar'], -1)
        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -1).all()
        assert (rs[20:30] == -1).all()
        assert (pd.isnull(ser[:5])).all()
        # replace with different values
        rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -2).all()
        assert (rs[20:30] == -3).all()
        assert (pd.isnull(ser[:5])).all()
        # replace with different values with 2 lists
        rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
        tm.assert_series_equal(rs, rs2)
        # replace inplace
        ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
        assert (ser[:5] == -1).all()
        assert (ser[6:10] == -1).all()
        assert (ser[20:30] == -1).all()
        ser = pd.Series([np.nan, 0, np.inf])
        tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
        ser = pd.Series([np.nan, 0, 'foo', 'bar', np.inf, None, lib.NaT])
        tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
        filled = ser.copy()
        filled[4] = 0
        tm.assert_series_equal(ser.replace(np.inf, 0), filled)
        ser = pd.Series(self.ts.index)
        tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
        # malformed: to_replace and value lists of different lengths
        pytest.raises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0])
        # make sure that we aren't just masking a TypeError because bools don't
        # implement indexing
        with tm.assert_raises_regex(TypeError, 'Cannot compare types .+'):
            ser.replace([1, 2], [np.nan, 0])
        ser = pd.Series([0, 1, 2, 3, 4])
        result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
        tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
    def test_replace_gh5319(self):
        # API change from 0.12?
        # GH 5319: replace without a value forward-fills
        ser = pd.Series([0, np.nan, 2, 3, 4])
        expected = ser.ffill()
        result = ser.replace([np.nan])
        tm.assert_series_equal(result, expected)
        ser = pd.Series([0, np.nan, 2, 3, 4])
        expected = ser.ffill()
        result = ser.replace(np.nan)
        tm.assert_series_equal(result, expected)
        # GH 5797: replace works on datetime64 Series
        ser = pd.Series(pd.date_range('20130101', periods=5))
        expected = ser.copy()
        expected.loc[2] = pd.Timestamp('20120101')
        result = ser.replace({pd.Timestamp('20130103'):
                              pd.Timestamp('20120101')})
        tm.assert_series_equal(result, expected)
        result = ser.replace(pd.Timestamp('20130103'),
                             pd.Timestamp('20120101'))
        tm.assert_series_equal(result, expected)
    def test_replace_with_single_list(self):
        ser = pd.Series([0, 1, 2, 3, 4])
        result = ser.replace([1, 2, 3])
        tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
        s = ser.copy()
        s.replace([1, 2, 3], inplace=True)
        tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
        # make sure things don't get corrupted when fillna call fails
        s = ser.copy()
        with pytest.raises(ValueError):
            s.replace([1, 2, 3], inplace=True, method='crash_cymbal')
        tm.assert_series_equal(s, ser)
    def test_replace_mixed_types(self):
        s = pd.Series(np.arange(5), dtype='int64')
        def check_replace(to_rep, val, expected):
            # verify both the returned copy and the inplace path agree
            sc = s.copy()
            r = s.replace(to_rep, val)
            sc.replace(to_rep, val, inplace=True)
            tm.assert_series_equal(expected, r)
            tm.assert_series_equal(expected, sc)
        # MUST upcast to float
        e = pd.Series([0., 1., 2., 3., 4.])
        tr, v = [3], [3.0]
        check_replace(tr, v, e)
        # MUST upcast to float
        e = pd.Series([0, 1, 2, 3.5, 4])
        tr, v = [3], [3.5]
        check_replace(tr, v, e)
        # casts to object
        e = pd.Series([0, 1, 2, 3.5, 'a'])
        tr, v = [3, 4], [3.5, 'a']
        check_replace(tr, v, e)
        # again casts to object
        e = pd.Series([0, 1, 2, 3.5, pd.Timestamp('20130101')])
        tr, v = [3, 4], [3.5, pd.Timestamp('20130101')]
        check_replace(tr, v, e)
        # casts to object
        e = pd.Series([0, 1, 2, 3.5, True], dtype='object')
        tr, v = [3, 4], [3.5, True]
        check_replace(tr, v, e)
        # test an object with dates + floats + integers + strings
        dr = pd.date_range('1/1/2001', '1/10/2001',
                           freq='D').to_series().reset_index(drop=True)
        result = dr.astype(object).replace(
            [dr[0], dr[1], dr[2]], [1.0, 2, 'a'])
        expected = pd.Series([1.0, 2, 'a'] + dr[3:].tolist(), dtype=object)
        tm.assert_series_equal(result, expected)
    def test_replace_bool_with_string_no_op(self):
        s = pd.Series([True, False, True])
        result = s.replace('fun', 'in-the-sun')
        tm.assert_series_equal(s, result)
    def test_replace_bool_with_string(self):
        # nonexistent elements
        s = pd.Series([True, False, True])
        result = s.replace(True, '2u')
        expected = pd.Series(['2u', False, '2u'])
        tm.assert_series_equal(expected, result)
    def test_replace_bool_with_bool(self):
        s = pd.Series([True, False, True])
        result = s.replace(True, False)
        expected = pd.Series([False] * len(s))
        tm.assert_series_equal(expected, result)
    def test_replace_with_dict_with_bool_keys(self):
        s = pd.Series([True, False, True])
        with tm.assert_raises_regex(TypeError, 'Cannot compare types .+'):
            s.replace({'asdf': 'asdb', True: 'yes'})
    def test_replace2(self):
        N = 100
        ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
                        dtype=object)
        ser[:5] = np.nan
        ser[6:10] = 'foo'
        ser[20:30] = 'bar'
        # replace list with a single value
        rs = ser.replace([np.nan, 'foo', 'bar'], -1)
        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -1).all()
        assert (rs[20:30] == -1).all()
        assert (pd.isnull(ser[:5])).all()
        # replace with different values
        rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -2).all()
        assert (rs[20:30] == -3).all()
        assert (pd.isnull(ser[:5])).all()
        # replace with different values with 2 lists
        rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
        tm.assert_series_equal(rs, rs2)
        # replace inplace
        ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
        assert (ser[:5] == -1).all()
        assert (ser[6:10] == -1).all()
        assert (ser[20:30] == -1).all()
    def test_replace_with_empty_dictlike(self):
        # GH 15289: empty dict/Series of replacements is a no-op
        s = pd.Series(list('abcd'))
        tm.assert_series_equal(s, s.replace(dict()))
        tm.assert_series_equal(s, s.replace(pd.Series([])))
    def test_replace_string_with_number(self):
        # GH 15743: string '2' must not match the int 2
        s = pd.Series([1, 2, 3])
        result = s.replace('2', np.nan)
        expected = pd.Series([1, 2, 3])
        tm.assert_series_equal(expected, result)
    def test_replace_unicode_with_number(self):
        # GH 15743: same for a unicode literal
        s = pd.Series([1, 2, 3])
        result = s.replace(u'2', np.nan)
        expected = pd.Series([1, 2, 3])
        tm.assert_series_equal(expected, result)
    def test_replace_mixed_types_with_string(self):
        # Testing mixed
        s = pd.Series([1, 2, 3, '4', 4, 5])
        result = s.replace([2, '4'], np.nan)
        expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
        tm.assert_series_equal(expected, result)
| mit |
sagemathinc/cocalc | src/smc_sagews/smc_sagews/sage_server.py | 4 | 88115 | #!/usr/bin/env python
"""
sage_server.py -- unencrypted forking TCP server.
Note: I wrote functionality so this can run as root, create accounts on the fly,
and serve sage as those accounts. Doing this is horrendous from a security point of
view, and I'm definitely not doing this.
None of that functionality is actually used in https://cocalc.com!
For debugging, this may help:
killemall sage_server.py && sage --python sage_server.py -p 6000
"""
# NOTE: This file is GPL'd
# because it imports the Sage library. This file is not directly
# imported by anything else in CoCalc; the Python process it runs is
# used over a TCP connection.
#########################################################################################
# Copyright (C) 2016, Sagemath Inc.
# #
# Distributed under the terms of the GNU General Public License (GPL), version 2+ #
# #
# http://www.gnu.org/licenses/ #
#########################################################################################
# Add the path that contains this file to the Python load path, so we
# can import other files from there.
from __future__ import absolute_import
import six
import os, sys, time, operator
import __future__ as future
from functools import reduce
def is_string(s):
    """Return True when s is a text type (str, plus unicode under Python 2)."""
    text_types = six.string_types
    return isinstance(s, text_types)
def unicode8(s):
    """Best-effort conversion of ``s`` to a text string.

    On Python 2, returns a UTF-8 encoded str; on Python 3, decodes bytes
    as UTF-8 and stringifies everything else.  Never raises: callers use
    this to format arbitrary objects for logging, so on any failure it
    falls back to ``str(s)`` and finally to returning ``s`` unchanged.
    """
    try:
        if six.PY2:
            return str(s).encode('utf-8')
        # Python 3: only bytes-like objects can be decoded.  The original
        # called str(s, 'utf-8') on everything and relied on the resulting
        # TypeError for non-bytes input; test explicitly instead.
        if isinstance(s, bytes):
            return s.decode('utf-8')
        return str(s)
    except Exception:
        try:
            return str(s)
        except Exception:
            return s
LOGFILE = os.path.realpath(__file__)[:-3] + ".log"
PID = os.getpid()
from datetime import datetime
def log(*args):
    """Append a timestamped, PID-tagged message to LOGFILE.

    Each positional argument is converted with unicode8() and joined with
    spaces.  Never raises: on any failure the error is printed and ignored,
    since logging must not take down the server.
    """
    try:
        mesg = "%s (%s): %s\n" % (PID, datetime.utcnow().strftime(
            '%Y-%m-%d %H:%M:%S.%f')[:-3], ' '.join([unicode8(x)
                                                    for x in args]))
        # Open per call and close promptly via the context manager (the
        # original opened the file and never closed it, leaking an fd per
        # log call); append mode keeps interleaved writers safe enough.
        with open(LOGFILE, 'a') as debug_log:
            debug_log.write(mesg)
            debug_log.flush()
    except Exception as err:
        print(("an error writing a log message (ignoring) -- %s" % err, args))
# used for clearing pylab figure
pylab = None
# Maximum number of distinct (non-once) output messages per cell; when this number is
# exceeded, an exception is raised; this reduces the chances of the user creating
# a huge unusable worksheet.
MAX_OUTPUT_MESSAGES = 256
# stdout, stderr, html, etc. that exceeds this many characters will be truncated to avoid
# killing the client.
MAX_STDOUT_SIZE = MAX_STDERR_SIZE = MAX_CODE_SIZE = MAX_HTML_SIZE = MAX_MD_SIZE = MAX_TEX_SIZE = 40000
MAX_OUTPUT = 150000
# Standard imports.
import json, resource, shutil, signal, socket, struct, \
tempfile, time, traceback, pwd, re
# for "3x^2 + 4xy - 5(1+x) - 3 abc4ok", this pattern matches "3x", "5(" and "4xy" but not "abc4ok"
# to understand it, see https://regex101.com/ or https://www.debuggex.com/
RE_POSSIBLE_IMPLICIT_MUL = re.compile(r'(?:(?<=[^a-zA-Z])|^)(\d+[a-zA-Z\(]+)')
try:
from . import sage_parsing, sage_salvus
except:
import sage_parsing, sage_salvus
uuid = sage_salvus.uuid
reload_attached_files_if_mod_smc_available = True
def reload_attached_files_if_mod_smc():
    """Re-load any %attach'ed files that changed on disk since last check.

    No-op when ``sage.repl.attach`` has never been imported (nothing was
    attached yet) or when the attach machinery is unavailable, in which
    case the module-level availability flag is cleared so later calls
    return immediately.
    """
    # CRITICAL: do NOT import sage.repl.attach!! That will import IPython, wasting several seconds and
    # killing the user experience for no reason.
    try:
        import sage.repl
        sage.repl.attach
    except:
        # nothing to do -- attach has not been used and is not yet available.
        return
    global reload_attached_files_if_mod_smc_available
    if not reload_attached_files_if_mod_smc_available:
        return
    try:
        from sage.repl.attach import load_attach_path, modified_file_iterator
    except:
        # remember the failure so we do not retry (and re-print) every call
        print("sage_server: attach not available")
        reload_attached_files_if_mod_smc_available = False
        return
    # see sage/src/sage/repl/attach.py reload_attached_files_if_modified()
    for filename, mtime in modified_file_iterator():
        basename = os.path.basename(filename)
        timestr = time.strftime('%T', mtime)
        log('reloading attached file {0} modified at {1}'.format(
            basename, timestr))
        from .sage_salvus import load
        load(filename)
# Determine the info object, if available. There's no good reason
# it wouldn't be available, unless a user explicitly deleted it, but
# we may as well try to be robust to this, especially if somebody
# were to try to use this server outside of cloud.sagemath.com.
_info_path = os.path.join(os.environ['SMC'], 'info.json')
if os.path.exists(_info_path):
try:
INFO = json.loads(open(_info_path).read())
except:
# This will fail, e.g., if info.json is invalid (maybe a blank file).
# We definitely don't want sage server startup to be completely broken
# in this case, so we fall back to "no info".
INFO = {}
else:
INFO = {}
if 'base_url' not in INFO:
INFO['base_url'] = ''
# Configure logging
#logging.basicConfig()
#log = logging.getLogger('sage_server')
#log.setLevel(logging.INFO)
# A CoffeeScript version of this function is in misc_node.coffee.
import hashlib
def uuidsha1(data):
    """Return a deterministic UUID4-shaped string derived from SHA1(data).

    The hex digits of the SHA1 digest are poured into the canonical
    'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx' template; the 'y' position is
    forced into the RFC-4122 variant range (8, 9, a or b).
    """
    digest = hashlib.sha1()
    digest.update(data)
    hexdigest = digest.hexdigest()
    template = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'
    out = []
    pos = 0
    for ch in template:
        if ch == 'x':
            out.append(hexdigest[pos])
            pos += 1
        elif ch == 'y':
            # keep the low 2 bits of the hex digit, force the 0x8 bit on
            out.append(hex((int(hexdigest[pos], 16) & 0x3) | 0x8)[-1])
            pos += 1
        else:
            out.append(ch)
    return ''.join(out)
# A tcp connection with support for sending various types of messages, especially JSON.
class ConnectionJSON(object):
    """A TCP connection that frames JSON messages and binary blobs.

    Wire format: a 4-byte big-endian length header, then a one-character
    type tag -- 'j' for a JSON payload, 'b' for a blob (whose first 36
    bytes are the blob's sha1-derived uuid) -- followed by the payload.
    """
    def __init__(self, conn):
        # avoid common mistake -- conn is supposed to be from socket.socket...
        assert not isinstance(conn, ConnectionJSON)
        self._conn = conn
    def close(self):
        # closes the underlying socket
        self._conn.close()
    def _send(self, s):
        # Frame and send one message: 4-byte big-endian length + payload.
        if six.PY3 and type(s) == str:
            s = s.encode('utf8')
        length_header = struct.pack(">L", len(s))
        # py3: TypeError: can't concat str to bytes
        self._conn.send(length_header + s)
    def send_json(self, m):
        """Serialize m as JSON and send it tagged 'j'; returns payload length."""
        m = json.dumps(m)
        # json.dumps escapes a NUL as the six characters \u0000, so this
        # catches NUL bytes anywhere in the serialized message.
        if '\\u0000' in m:
            raise RuntimeError("NULL bytes not allowed")
        log("sending message '", truncate_text(m, 256), "'")
        self._send('j' + m)
        return len(m)
    def send_blob(self, blob):
        """Send a binary blob tagged 'b'; returns its sha1-derived uuid."""
        if six.PY3 and type(blob) == str:
            # unicode objects must be encoded before hashing
            blob = blob.encode('utf8')
        s = uuidsha1(blob)
        if six.PY3 and type(blob) == bytes:
            # we convert all to bytes first, to avoid unnecessary conversions
            self._send(('b' + s).encode('utf8') + blob)
        else:
            # old sage py2 code
            self._send('b' + s + blob)
        return s
    def send_file(self, filename):
        """Read the whole file and send its contents as a blob."""
        log("sending file '%s'" % filename)
        f = open(filename, 'rb')
        data = f.read()
        f.close()
        return self.send_blob(data)
    def _recv(self, n):
        # Receive up to n bytes, retrying a bounded number of times when
        # the call is interrupted by a signal (errno 4 == EINTR).
        #print("_recv(%s)"%n)
        # see http://stackoverflow.com/questions/3016369/catching-blocking-sigint-during-system-call
        for i in range(20):
            try:
                #print "blocking recv (i = %s), pid=%s"%(i, os.getpid())
                r = self._conn.recv(n)
                #log("n=%s; received: '%s' of len %s"%(n,r, len(r)))
                return r
            except OSError as e:
                #print("socket.error, msg=%s"%msg)
                if e.errno != 4:
                    raise
        raise EOFError
    def recv(self):
        """Receive one framed message; returns ('json', obj) or ('blob', data).

        Raises EOFError when the peer closes mid-message and ValueError on
        an unknown type tag.
        """
        n = self._recv(4)
        if len(n) < 4:
            raise EOFError
        n = struct.unpack('>L', n)[0]  # big endian 32 bits
        # keep reading until the full n-byte payload has arrived
        s = self._recv(n)
        while len(s) < n:
            t = self._recv(n - len(s))
            if len(t) == 0:
                raise EOFError
            s += t
        if six.PY3:
            # bystream to string, in particular s[0] will be e.g. 'j' and not 106
            #log("ConnectionJSON::recv s=%s... (type %s)" % (s[:5], type(s)))
            # is s always of type bytes?
            if type(s) == bytes:
                s = s.decode('utf8')
        if s[0] == 'j':
            try:
                return 'json', json.loads(s[1:])
            except Exception as msg:
                log("Unable to parse JSON '%s'" % s[1:])
                raise
        elif s[0] == 'b':
            return 'blob', s[1:]
        raise ValueError("unknown message type '%s'" % s[0])
def truncate_text(s, max_size):
    """Return ``(text, truncated)``: s cut to max_size chars plus a
    '[...]' marker when it was too long, otherwise s unchanged."""
    if len(s) <= max_size:
        return s, False
    return s[:max_size] + "[...]", True
def truncate_text_warn(s, max_size, name):
    r"""
    Truncate text if too long and format a warning message.
    INPUT:
    - ``s`` -- string to be truncated
    - ``max-size`` - integer truncation limit
    - ``name`` - string, name of limiting parameter
    OUTPUT:
    a triple:
    - string -- possibly truncated input string
    - boolean -- true if input string was truncated
    - string -- warning message if input string was truncated
    """
    size = len(s)
    if size <= max_size:
        return s, False, ''
    warning = (
        "WARNING: Output: %s truncated by %s to %s."
        " Type 'smc?' to learn how to raise the output limit." %
        (size, name, max_size))
    return s[:max_size] + "[...]", True, warning
class Message(object):
    """
    Factory for the JSON protocol messages exchanged with the hub.

    Every method returns a plain dict with at least an 'event' key; most
    simply bundle their keyword arguments (via locals()) into the message.
    """

    def _new(self, event, props=None):
        """Build a message dict for *event*, copying props (minus 'self')."""
        # BUGFIX: default was a shared mutable dict ({}); use None instead.
        m = {'event': event}
        if props is not None:
            for key, val in props.items():
                if key != 'self':
                    m[key] = val
        return m

    def start_session(self):
        return self._new('start_session')

    def session_description(self, pid):
        return self._new('session_description', {'pid': pid})

    def send_signal(self, pid, signal=signal.SIGINT):
        return self._new('send_signal', locals())

    def terminate_session(self, done=True):
        return self._new('terminate_session', locals())

    def execute_code(self, id, code, preparse=True):
        return self._new('execute_code', locals())

    def execute_javascript(self, code, obj=None, coffeescript=False):
        return self._new('execute_javascript', locals())

    def output(
            self,
            id,
            stdout=None,
            stderr=None,
            code=None,
            html=None,
            javascript=None,
            coffeescript=None,
            interact=None,
            md=None,
            tex=None,
            d3=None,
            file=None,
            raw_input=None,
            obj=None,
            once=None,
            hide=None,
            show=None,
            events=None,
            clear=None,
            delete_last=None,
            done=False  # CRITICAL: done must be specified for multi-response; this is assumed by sage_session.coffee; otherwise response assumed single.
    ):
        """
        Build an 'output' message, truncating oversized text fields to the
        limits configured in sage_server (MAX_*_SIZE).  If any field was
        truncated, a warning is appended to the message's stderr.
        """
        m = self._new('output')
        m['id'] = id
        t = truncate_text_warn
        # BUGFIX: the original overwrote did_truncate/tmsg on every call of
        # t(), so a later non-truncated field silently erased an earlier
        # truncation warning.  Accumulate instead.
        did_truncate = False
        tmsg = ''
        from . import sage_server  # we do this so that the user can customize the MAX's below.
        if code is not None:
            code['source'], trunc, msg = t(code['source'],
                                           sage_server.MAX_CODE_SIZE,
                                           'MAX_CODE_SIZE')
            if trunc:
                did_truncate, tmsg = True, msg
            m['code'] = code
        if stderr is not None and len(stderr) > 0:
            m['stderr'], trunc, msg = t(stderr, sage_server.MAX_STDERR_SIZE,
                                        'MAX_STDERR_SIZE')
            if trunc:
                did_truncate, tmsg = True, msg
        if stdout is not None and len(stdout) > 0:
            m['stdout'], trunc, msg = t(stdout, sage_server.MAX_STDOUT_SIZE,
                                        'MAX_STDOUT_SIZE')
            if trunc:
                did_truncate, tmsg = True, msg
        if html is not None and len(html) > 0:
            m['html'], trunc, msg = t(html, sage_server.MAX_HTML_SIZE,
                                      'MAX_HTML_SIZE')
            if trunc:
                did_truncate, tmsg = True, msg
        if md is not None and len(md) > 0:
            m['md'], trunc, msg = t(md, sage_server.MAX_MD_SIZE,
                                    'MAX_MD_SIZE')
            if trunc:
                did_truncate, tmsg = True, msg
        if tex is not None and len(tex) > 0:
            tex['tex'], trunc, msg = t(tex['tex'], sage_server.MAX_TEX_SIZE,
                                       'MAX_TEX_SIZE')
            if trunc:
                did_truncate, tmsg = True, msg
            m['tex'] = tex
        if javascript is not None: m['javascript'] = javascript
        if coffeescript is not None: m['coffeescript'] = coffeescript
        if interact is not None: m['interact'] = interact
        if d3 is not None: m['d3'] = d3
        if obj is not None: m['obj'] = json.dumps(obj)
        if file is not None: m['file'] = file  # = {'filename':..., 'uuid':...}
        if raw_input is not None: m['raw_input'] = raw_input
        if done is not None: m['done'] = done
        if once is not None: m['once'] = once
        if hide is not None: m['hide'] = hide
        if show is not None: m['show'] = show
        if events is not None: m['events'] = events
        if clear is not None: m['clear'] = clear
        if delete_last is not None: m['delete_last'] = delete_last
        if did_truncate:
            # surface the (last) truncation warning to the user via stderr
            if 'stderr' in m:
                m['stderr'] += '\n' + tmsg
            else:
                m['stderr'] = '\n' + tmsg
        return m

    def introspect_completions(self, id, completions, target):
        m = self._new('introspect_completions', locals())
        m['id'] = id
        return m

    def introspect_docstring(self, id, docstring, target):
        m = self._new('introspect_docstring', locals())
        m['id'] = id
        return m

    def introspect_source_code(self, id, source_code, target):
        m = self._new('introspect_source_code', locals())
        m['id'] = id
        return m
# Singleton used throughout this module to construct protocol messages.
message = Message()
# Login name of the account running this server (read from the environment).
whoami = os.environ['USER']
def client1(port, hostname):
    """
    Minimal interactive command-line client for manually exercising the
    server: connects to (hostname, port), starts a session, then reads
    input at a sage prompt, sends it for execution, and prints the output
    messages as they stream back.  Ctrl-C opens a second, temporary
    connection to deliver an interrupt signal to the remote session.
    """
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((hostname, int(port)))
    conn = ConnectionJSON(conn)
    conn.send_json(message.start_session())
    typ, mesg = conn.recv()
    pid = mesg['pid']
    print(("PID = %s" % pid))
    id = 0
    while True:
        try:
            code = sage_parsing.get_input('sage [%s]: ' % id)
            if code is None:  # EOF
                break
            conn.send_json(message.execute_code(code=code, id=id))
            # drain output messages until this cell reports done
            while True:
                typ, mesg = conn.recv()
                if mesg['event'] == 'terminate_session':
                    return
                elif mesg['event'] == 'output':
                    if 'stdout' in mesg:
                        sys.stdout.write(mesg['stdout'])
                        sys.stdout.flush()
                    if 'stderr' in mesg:
                        print(('! ' +
                               '\n! '.join(mesg['stderr'].splitlines())))
                    if 'done' in mesg and mesg['id'] >= id:
                        break
            id += 1
        except KeyboardInterrupt:
            print("Sending interrupt signal")
            # a fresh connection is required since the main one is busy
            conn2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn2.connect((hostname, int(port)))
            conn2 = ConnectionJSON(conn2)
            conn2.send_json(message.send_signal(pid))
            del conn2
            id += 1
    conn.send_json(message.terminate_session())
    print("\nExiting Sage client.")
class BufferedOutputStream(object):
    """
    File-like stream that buffers writes and forwards them in batches to a
    callback ``f(text, done=...)``.  The buffer is flushed when it exceeds
    flush_size characters or when flush_interval seconds have elapsed
    since the last flush.
    """

    def __init__(self, f, flush_size=4096, flush_interval=.1):
        self._f = f
        self._buf = ''
        self._flush_size = flush_size
        self._flush_interval = flush_interval
        self.reset()

    def reset(self):
        # restart the flush-interval clock
        self._last_flush_time = time.time()

    def fileno(self):
        return 0

    def write(self, output):
        # CRITICAL: we need output to valid PostgreSQL TEXT, so no null bytes
        # This is not going to silently corrupt anything -- it's just output that
        # is destined to be *rendered* in the browser. This is only a partial
        # solution to a more general problem, but it is safe.
        try:
            self._buf += output.replace('\x00', '')
        except UnicodeDecodeError:
            # py2 fallback: output was a byte string that needed decoding
            self._buf += output.decode('utf-8').replace('\x00', '')
        #self.flush()
        t = time.time()
        if ((len(self._buf) >= self._flush_size)
                or (t - self._last_flush_time >= self._flush_interval)):
            self.flush()
            self._last_flush_time = t

    def flush(self, done=False):
        if not self._buf and not done:
            # no point in sending an empty message
            return
        try:
            self._f(self._buf, done=done)
        except UnicodeDecodeError:
            if six.PY2:  # str doesn't have errors option in python2!
                self._f(unicode(self._buf, errors='replace'), done=done)
            else:
                self._f(str(self._buf, errors='replace'), done=done)
        self._buf = ''

    def isatty(self):
        return False
# This will *have* to be re-done using Cython for speed.
class Namespace(dict):
    """
    A dict that fires registered callbacks when keys are set or deleted.

    Callbacks are registered with ``on('change'|'del', key, f)``; a key of
    None registers a wildcard callback invoked (with the key) for every
    change/deletion.
    """

    def __init__(self, x):
        self._on_change = {}
        self._on_del = {}
        dict.__init__(self, x)

    def on(self, event, x, f):
        """Register callback f for 'change' or 'del' events on key x."""
        if event == 'change':
            self._on_change.setdefault(x, []).append(f)
        elif event == 'del':
            self._on_del.setdefault(x, []).append(f)

    def remove(self, event, x, f):
        """
        Deregister callback f for key x; a no-op if f is not registered.

        BUGFIX: the original called ``v.find(f)`` -- Python lists have no
        ``find`` method, so remove() always raised AttributeError.  Use
        ``list.remove`` instead.
        """
        if event == 'change':
            handlers = self._on_change
        elif event == 'del':
            handlers = self._on_del
        else:
            return
        if x in handlers:
            v = handlers[x]
            try:
                v.remove(f)
            except ValueError:
                pass  # f was not registered for x; nothing to do
            if len(v) == 0:
                del handlers[x]

    def __setitem__(self, x, y):
        dict.__setitem__(self, x, y)
        try:
            if x in self._on_change:
                for f in self._on_change[x]:
                    f(y)
            if None in self._on_change:
                for f in self._on_change[None]:
                    f(x, y)
        except Exception as mesg:
            # callbacks must never break assignment; just report the error
            print(mesg)

    def __delitem__(self, x):
        try:
            if x in self._on_del:
                for f in self._on_del[x]:
                    f()
            if None in self._on_del:
                for f in self._on_del[None]:
                    f(x)
        except Exception as mesg:
            print(mesg)
        dict.__delitem__(self, x)

    def set(self, x, y, do_not_trigger=None):
        """Set x=y, firing change callbacks except those in do_not_trigger."""
        dict.__setitem__(self, x, y)
        if x in self._on_change:
            if do_not_trigger is None:
                do_not_trigger = []
            for f in self._on_change[x]:
                if f not in do_not_trigger:
                    f(y)
        if None in self._on_change:
            for f in self._on_change[None]:
                f(x, y)
class TemporaryURL:
    """
    Lightweight handle pairing a blob URL with its time-to-live.

    Attributes:
        url -- the URL string (also what str() returns).
        ttl -- time to live in seconds (0 means permanently available).
    """

    def __init__(self, url, ttl):
        self.url = url
        self.ttl = ttl

    def __str__(self):
        return self.url

    def __repr__(self):
        return repr(self.url)
# Global namespace shared by all executed cells; Salvus.__init__ installs
# itself here under the keys 'salvus' and 'smc'.
namespace = Namespace({})
class Salvus(object):
    """
    Cell execution state object and wrapper for access to special CoCalc Server functionality.
    An instance of this object is created each time you execute a cell. It has various methods
    for sending different types of output messages, links to files, etc. Type 'help(smc)' for
    more details.
    OUTPUT LIMITATIONS -- There is an absolute limit on the number of messages output for a given
    cell, and also the size of the output message for each cell. You can access or change
    those limits dynamically in a worksheet as follows by viewing or changing any of the
    following variables::
        sage_server.MAX_STDOUT_SIZE   # max length of each stdout output message
        sage_server.MAX_STDERR_SIZE   # max length of each stderr output message
        sage_server.MAX_MD_SIZE       # max length of each md (markdown) output message
        sage_server.MAX_HTML_SIZE     # max length of each html output message
        sage_server.MAX_TEX_SIZE      # max length of tex output message
        sage_server.MAX_OUTPUT_MESSAGES   # max number of messages output for a cell.
    And::
        sage_server.MAX_OUTPUT        # max total character output for a single cell; computation
                                      # terminated/truncated if sum of above exceeds this.
    """
    Namespace = Namespace
    # Code textually prepended/appended to every cell (see cell_prefix/cell_postfix).
    # Stored on the class so the setting persists across cells.
    _prefix = ''
    _postfix = ''
    # Default cell mode (see default_mode); 'sage' means no prefix decorator.
    _default_mode = 'sage'
    # __future__ features enabled so far, shared by all cells (see python_future_feature).
    _py_features = {}
def _flush_stdio(self):
"""
Flush the standard output streams. This should be called before sending any message
that produces output.
"""
sys.stdout.flush()
sys.stderr.flush()
    def __repr__(self):
        # Empty repr so that evaluating the bare object in a cell prints nothing.
        return ''
    def __init__(self, conn, id, data=None, cell_id=None, message_queue=None):
        """
        Create per-cell execution state.

        - ``conn`` -- connection object used to send output messages
        - ``id`` -- id of the execute_code message this cell answers
        - ``data``, ``cell_id`` -- optional metadata from the client
        - ``message_queue`` -- queue of incoming messages (consumed e.g. by file())

        Side effects: installs this instance into the global ``namespace``
        under 'salvus' and 'smc', and onto ``sage.all.salvus``.
        """
        self._conn = conn
        self._num_output_messages = 0
        self._total_output_length = 0
        self._output_warning_sent = False
        self._id = id
        self._done = True  # done=self._done when last execute message is sent; e.g., set self._done = False to not close cell on code term.
        self.data = data
        self.cell_id = cell_id
        self.namespace = namespace
        self.message_queue = message_queue
        self.code_decorators = []  # gets reset if there are code decorators
        # Alias: someday remove all references to "salvus" and instead use smc.
        # For now this alias is easier to think of and use.
        namespace['smc'] = namespace[
            'salvus'] = self  # beware of circular ref?
        # Monkey patch in our "require" command.
        namespace['require'] = self.require
        # Make the salvus object itself available when doing "from sage.all import *".
        import sage.all
        sage.all.salvus = self
    def _send_output(self, *args, **kwds):
        """
        Send one output message for this cell, enforcing the global limits
        on message count (MAX_OUTPUT_MESSAGES) and total output size
        (MAX_OUTPUT).  Once a limit is exceeded a warning message is sent
        and KeyboardInterrupt is raised to abort the computation; any
        later call also raises immediately (_output_warning_sent).
        """
        if self._output_warning_sent:
            raise KeyboardInterrupt
        mesg = message.output(*args, **kwds)
        if not mesg.get('once', False):
            # 'once' messages are not counted against the per-cell limit
            self._num_output_messages += 1
        from . import sage_server
        if self._num_output_messages > sage_server.MAX_OUTPUT_MESSAGES:
            self._output_warning_sent = True
            err = "\nToo many output messages: %s (at most %s per cell -- type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
                self._num_output_messages, sage_server.MAX_OUTPUT_MESSAGES)
            self._conn.send_json(
                message.output(stderr=err, id=self._id, once=False, done=True))
            raise KeyboardInterrupt
        n = self._conn.send_json(mesg)
        self._total_output_length += n
        if self._total_output_length > sage_server.MAX_OUTPUT:
            self._output_warning_sent = True
            err = "\nOutput too long: %s -- MAX_OUTPUT (=%s) exceeded (type 'smc?' to learn how to raise this limit): attempting to terminate..." % (
                self._total_output_length, sage_server.MAX_OUTPUT)
            self._conn.send_json(
                message.output(stderr=err, id=self._id, once=False, done=True))
            raise KeyboardInterrupt
def obj(self, obj, done=False):
self._send_output(obj=obj, id=self._id, done=done)
return self
    def link(self, filename, label=None, foreground=True, cls=''):
        """
        Output a clickable link to a file somewhere in this project.  The filename
        path must be relative to the current working directory of the Python process.
        The simplest way to use this is
            salvus.link("../name/of/file")    # any relative path to any file
        This creates a link, which when clicked on, opens that file in the foreground.
        If the filename is the name of a directory, clicking will instead
        open the file browser on that directory:
            salvus.link("../name/of/directory")   # clicking on the resulting link opens a directory
        If you would like a button instead of a link, pass cls='btn'.  You can use any of
        the standard Bootstrap button classes, e.g., btn-small, btn-large, btn-success, etc.
        If you would like to change the text in the link (or button) to something
        besides the default (filename), just pass arbitrary HTML to the label= option.
        INPUT:
        - filename -- a relative path to a file or directory
        - label -- (default: the filename) html label for the link
        - foreground -- (default: True); if True, opens link in the foreground
        - cls -- (default: '') optional CSS classes, such as 'btn'.
        EXAMPLES:
        Use as a line decorator::
            %salvus.link name/of/file.foo
        Make a button::
            salvus.link("foo/bar/", label="The Bar Directory", cls='btn')
        Make two big blue buttons with plots in them::
            plot(sin, 0, 20).save('sin.png')
            plot(cos, 0, 20).save('cos.png')
            for img in ['sin.png', 'cos.png']:
                salvus.link(img, label="<img width='150px' src='%s'>"%salvus.file(img, show=False), cls='btn btn-large btn-primary')
        """
        # make the path relative to $HOME, which is what the client expects
        path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
        if label is None:
            label = filename
        id = uuid()
        # emit an empty anchor, then wire up its label and click handler in JS
        self.html("<a class='%s' style='cursor:pointer'; id='%s'></a>" %
                  (cls, id))
        s = "$('#%s').html(obj.label).click(function() {%s; return false;});" % (
            id, self._action(path, foreground))
        self.javascript(s,
                        obj={
                            'label': label,
                            'path': path,
                            'foreground': foreground
                        },
                        once=False)
def _action(self, path, foreground):
if os.path.isdir(path):
if foreground:
action = "worksheet.project_page.open_directory(obj.path);"
else:
action = "worksheet.project_page.set_current_path(obj.path);"
else:
action = "worksheet.project_page.open_file({'path':obj.path, 'foreground': obj.foreground});"
return action
def open_tab(self, filename, foreground=True):
"""
Open a new file (or directory) document in another tab.
See the documentation for salvus.link.
"""
path = os.path.abspath(filename)[len(os.environ['HOME']) + 1:]
self.javascript(self._action(path, foreground),
obj={
'path': path,
'foreground': foreground
},
once=True)
def close_tab(self, filename):
"""
Close an open file tab. The filename is relative to the current working directory.
"""
self.javascript("worksheet.project_page.close_file(obj)",
obj=filename,
once=True)
    def threed(
            self,
            g,  # sage Graphic3d object.
            width=None,
            height=None,
            frame=True,  # True/False or {'color':'black', 'thickness':.4, 'labels':True, 'fontsize':14, 'draw':True,
            # 'xmin':?, 'xmax':?, 'ymin':?, 'ymax':?, 'zmin':?, 'zmax':?}
            background=None,
            foreground=None,
            spin=False,
            aspect_ratio=None,
            frame_aspect_ratio=None,  # synonym for aspect_ratio
            done=False,
            renderer=None,  # None, 'webgl', or 'canvas'
    ):
        """
        Render a Sage 3d graphics object in this cell: the scene is
        serialized to JSON, stored as a blob (it can be large), and an
        output message referencing the stored '<uuid>.sage3d' file is sent.
        """
        from .graphics import graphics3d_to_jsonable, json_float as f
        # process options, combining ones set explicitly above with ones inherited from 3d scene
        opts = {
            'width': width,
            'height': height,
            'background': background,
            'foreground': foreground,
            'spin': spin,
            'aspect_ratio': aspect_ratio,
            'renderer': renderer
        }
        extra_kwds = {} if g._extra_kwds is None else g._extra_kwds
        # clean up and normalize aspect_ratio option
        if aspect_ratio is None:
            if frame_aspect_ratio is not None:
                aspect_ratio = frame_aspect_ratio
            elif 'frame_aspect_ratio' in extra_kwds:
                aspect_ratio = extra_kwds['frame_aspect_ratio']
            elif 'aspect_ratio' in extra_kwds:
                aspect_ratio = extra_kwds['aspect_ratio']
        if aspect_ratio is not None:
            if aspect_ratio == 1 or aspect_ratio == "automatic":
                aspect_ratio = None
            elif not (isinstance(aspect_ratio,
                                 (list, tuple)) and len(aspect_ratio) == 3):
                raise TypeError(
                    "aspect_ratio must be None, 1 or a 3-tuple, but it is '%s'"
                    % (aspect_ratio, ))
            else:
                aspect_ratio = [f(x) for x in aspect_ratio]
        opts['aspect_ratio'] = aspect_ratio
        # inherit any remaining unset options from the graphics object itself
        for k in [
                'spin',
                'height',
                'width',
                'background',
                'foreground',
                'renderer',
        ]:
            if k in extra_kwds and not opts.get(k, None):
                opts[k] = extra_kwds[k]
        if not isinstance(opts['spin'], bool):
            opts['spin'] = f(opts['spin'])
        opts['width'] = f(opts['width'])
        opts['height'] = f(opts['height'])
        # determine the frame
        b = g.bounding_box()
        xmin, xmax, ymin, ymax, zmin, zmax = b[0][0], b[1][0], b[0][1], b[1][
            1], b[0][2], b[1][2]
        fr = opts['frame'] = {
            'xmin': f(xmin),
            'xmax': f(xmax),
            'ymin': f(ymin),
            'ymax': f(ymax),
            'zmin': f(zmin),
            'zmax': f(zmax)
        }
        if isinstance(frame, dict):
            # explicit frame dict entries override the computed bounding box
            for k in list(fr.keys()):
                if k in frame:
                    fr[k] = f(frame[k])
            fr['draw'] = frame.get('draw', True)
            fr['color'] = frame.get('color', None)
            fr['thickness'] = f(frame.get('thickness', None))
            fr['labels'] = frame.get('labels', None)
            if 'fontsize' in frame:
                fr['fontsize'] = int(frame['fontsize'])
        elif isinstance(frame, bool):
            fr['draw'] = frame
        # convert the Sage graphics object to a JSON object that can be rendered
        scene = {'opts': opts, 'obj': graphics3d_to_jsonable(g)}
        # Store that object in the database, rather than sending it directly as an output message.
        # We do this since obj can easily be quite large/complicated, and managing it as part of the
        # document is too slow and doesn't scale.
        blob = json.dumps(scene, separators=(',', ':'))
        uuid = self._conn.send_blob(blob)
        # flush output (so any text appears before 3d graphics, in case they are interleaved)
        self._flush_stdio()
        # send message pointing to the 3d 'file', which will get downloaded from database
        self._send_output(id=self._id,
                          file={
                              'filename': unicode8("%s.sage3d" % uuid),
                              'uuid': uuid
                          },
                          done=done)
    def d3_graph(self, g, **kwds):
        # Render a Sage graph object with the client-side d3 "graph" viewer;
        # kwds are forwarded to the JSON conversion.
        from .graphics import graph_to_d3_jsonable
        self._send_output(id=self._id,
                          d3={
                              "viewer": "graph",
                              "data": graph_to_d3_jsonable(g, **kwds)
                          })
    def file(self,
             filename,
             show=True,
             done=False,
             download=False,
             once=False,
             events=None,
             raw=False,
             text=None):
        """
        Display or provide a link to the given file.  Raises a RuntimeError if this
        is not possible, e.g, if the file is too large.
        If show=True (the default), the browser will show the file,
        or provide a clickable link to it if there is no way to show it.
        If text is also given that will be used instead of the path to the file.
        If show=False, this function returns an object T such that
        T.url (or str(t)) is a string of the form "/blobs/filename?uuid=the_uuid"
        that can be used to access the file even if the file is immediately
        deleted after calling this function (the file is stored in a database).
        Also, T.ttl is the time to live (in seconds) of the object.  A ttl of
        0 means the object is permanently available.
        raw=False (the default):
            If you use the URL
               /blobs/filename?uuid=the_uuid&download
            then the server will include a header that tells the browser to
            download the file to disk instead of displaying it.  Only relatively
            small files can be made available this way.  However, they remain
            available (for a day) even *after* the file is deleted.
            NOTE: It is safe to delete the file immediately after this
            function (salvus.file) returns.
        raw=True:
            Instead, the URL is to the raw file, which is served directly
            from the project:
               /project-id/raw/path/to/filename
            This will only work if the file is not deleted; however, arbitrarily
            large files can be streamed this way.
        This function creates an output message {file:...}; if the user saves
        a worksheet containing this message, then any referenced blobs are made
        permanent in the database.
        The uuid is based on the Sha-1 hash of the file content (it is computed using the
        function sage_server.uuidsha1).  Any two files with the same content have the
        same Sha1 hash.
        """
        filename = unicode8(filename)
        if raw:
            # serve straight from the project -- no blob storage involved
            info = self.project_info()
            path = os.path.abspath(filename)
            home = os.environ['HOME'] + '/'
            if path.startswith(home):
                path = path[len(home):]
            else:
                raise ValueError(
                    "can only send raw files in your home directory")
            url = os.path.join('/', info['base_url'].strip('/'),
                               info['project_id'], 'raw', path.lstrip('/'))
            if show:
                self._flush_stdio()
                self._send_output(id=self._id,
                                  once=once,
                                  file={
                                      'filename': filename,
                                      'url': url,
                                      'show': show,
                                      'text': text
                                  },
                                  events=events,
                                  done=done)
                return
            else:
                return TemporaryURL(url=url, ttl=0)
        # blob path: upload the file content, then wait for the hub's
        # save_blob acknowledgement matching this blob's sha1/uuid.
        file_uuid = self._conn.send_file(filename)
        mesg = None
        while mesg is None:
            self.message_queue.recv()
            for i, (typ, m) in enumerate(self.message_queue.queue):
                if typ == 'json' and m.get('event') == 'save_blob' and m.get(
                        'sha1') == file_uuid:
                    mesg = m
                    del self.message_queue[i]
                    break
        if 'error' in mesg:
            raise RuntimeError("error saving blob -- %s" % mesg['error'])
        self._flush_stdio()
        self._send_output(id=self._id,
                          once=once,
                          file={
                              'filename': filename,
                              'uuid': file_uuid,
                              'show': show,
                              'text': text
                          },
                          events=events,
                          done=done)
        if not show:
            info = self.project_info()
            url = "%s/blobs/%s?uuid=%s" % (info['base_url'], filename,
                                           file_uuid)
            if download:
                url += '?download'
            return TemporaryURL(url=url, ttl=mesg.get('ttl', 0))
    def python_future_feature(self, feature=None, enable=None):
        """
        Allow users to enable, disable, and query the features in the python __future__ module.

        - no arguments: return the sorted list of currently enabled features;
        - feature only: return whether that feature is enabled;
        - feature and enable: turn the named feature on or off.

        Raises ValueError if enable is given without feature, and
        RuntimeError if feature is not a valid __future__ feature name.
        """
        if feature is None:
            if enable is not None:
                raise ValueError(
                    "enable may not be specified when feature is None")
            return sorted(Salvus._py_features.keys())
        attr = getattr(future, feature, None)
        if (feature not in future.all_feature_names) or (
                attr is None) or not isinstance(attr, future._Feature):
            raise RuntimeError("future feature %.50r is not defined" %
                               (feature, ))
        if enable is None:
            return feature in Salvus._py_features
        if enable:
            Salvus._py_features[feature] = attr
        else:
            # disabling a feature that is not enabled is a no-op
            try:
                del Salvus._py_features[feature]
            except KeyError:
                pass
def default_mode(self, mode=None):
"""
Set the default mode for cell evaluation. This is equivalent
to putting %mode at the top of any cell that does not start
with %. Use salvus.default_mode() to return the current mode.
Use salvus.default_mode("") to have no default mode.
This is implemented using salvus.cell_prefix.
"""
if mode is None:
return Salvus._default_mode
Salvus._default_mode = mode
if mode == "sage":
self.cell_prefix("")
else:
self.cell_prefix("%" + mode)
def cell_prefix(self, prefix=None):
"""
Make it so that the given prefix code is textually
prepending to the input before evaluating any cell, unless
the first character of the cell is a %.
To append code at the end, use cell_postfix.
INPUT:
- ``prefix`` -- None (to return prefix) or a string ("" to disable)
EXAMPLES:
Make it so every cell is timed:
salvus.cell_prefix('%time')
Make it so cells are typeset using latex, and latex comments are allowed even
as the first line.
salvus.cell_prefix('%latex')
%sage salvus.cell_prefix('')
Evaluate each cell using GP (Pari) and display the time it took:
salvus.cell_prefix('%time\n%gp')
%sage salvus.cell_prefix('') # back to normal
"""
if prefix is None:
return Salvus._prefix
else:
Salvus._prefix = prefix
def cell_postfix(self, postfix=None):
"""
Make it so that the given code is textually
appended to the input before evaluating a cell.
To prepend code at the beginning, use cell_prefix.
INPUT:
- ``postfix`` -- None (to return postfix) or a string ("" to disable)
EXAMPLES:
Print memory usage after evaluating each cell:
salvus.cell_postfix('print("%s MB used"%int(get_memory_usage()))')
Return to normal
salvus.set_cell_postfix('')
"""
if postfix is None:
return Salvus._postfix
else:
Salvus._postfix = postfix
    def execute(self, code, namespace=None, preparse=True, locals=None):
        """
        Execute one cell's worth of code in the given namespace: split it
        into blocks, optionally run each through the Sage preparser,
        handle ?/?? introspection, track __future__ features, and report
        errors (with line numbers and hints) on the cell's stderr.
        """
        ascii_warn = False
        code_error = False  # NOTE(review): never set below; kept as-is
        if sys.getdefaultencoding() == 'ascii':
            # flag non-ascii input so a hint can be shown if execution fails
            for c in code:
                if ord(c) >= 128:
                    ascii_warn = True
                    break
        if namespace is None:
            namespace = self.namespace
        # clear pylab figure (takes a few microseconds)
        if pylab is not None:
            pylab.clf()
        # compile flags accumulated from previously enabled __future__ features
        compile_flags = reduce(operator.or_,
                               (feature.compiler_flag
                                for feature in Salvus._py_features.values()),
                               0)
        #code = sage_parsing.strip_leading_prompts(code) # broken -- wrong on "def foo(x):\n print(x)"
        blocks = sage_parsing.divide_into_blocks(code)
        try:
            import sage.repl
            # CRITICAL -- we do NOT import sage.repl.interpreter!!!!!!!
            # That would waste several seconds importing ipython and much more, which is just dumb.
            # The only reason this is needed below is if the user has run preparser(False), which
            # would cause sage.repl.interpreter to be imported at that point (as preparser is
            # lazy imported.)
            sage_repl_interpreter = sage.repl.interpreter
        except:
            pass  # expected behavior usually, since sage.repl.interpreter usually not imported (only used by command line...)
        import sage.misc.session
        for start, stop, block in blocks:
            # if import sage.repl.interpreter fails, sag_repl_interpreter is unreferenced
            try:
                do_pp = getattr(sage_repl_interpreter, '_do_preparse', True)
            except:
                do_pp = True
            if preparse and do_pp:
                block = sage_parsing.preparse_code(block)
            sys.stdout.reset()
            sys.stderr.reset()
            try:
                b = block.rstrip()
                # get rid of comments at the end of the line -- issue #1835
                #from ushlex import shlex
                #s = shlex(b)
                #s.commenters = '#'
                #s.quotes = '"\''
                #b = ''.join(s)
                # e.g. now a line like 'x = test? # bar' becomes 'x=test?'
                if b.endswith('??'):
                    # source-code introspection
                    p = sage_parsing.introspect(b,
                                                namespace=namespace,
                                                preparse=False)
                    self.code(source=p['result'], mode="python")
                elif b.endswith('?'):
                    # docstring introspection
                    p = sage_parsing.introspect(b,
                                                namespace=namespace,
                                                preparse=False)
                    self.code(source=p['result'], mode="text/x-rst")
                else:
                    reload_attached_files_if_mod_smc()
                    if execute.count < 2:
                        execute.count += 1
                        if execute.count == 2:
                            # this fixup has to happen after first block has executed (os.chdir etc)
                            # but before user assigns any variable in worksheet
                            # sage.misc.session.init() is not called until first call of show_identifiers
                            # BUGFIX: be careful to *NOT* assign to _!! see https://github.com/sagemathinc/cocalc/issues/1107
                            block2 = "sage.misc.session.state_at_init = dict(globals());sage.misc.session._dummy=sage.misc.session.show_identifiers();\n"
                            exec(compile(block2, '', 'single'), namespace,
                                 locals)
                            b2a = """
if 'SAGE_STARTUP_FILE' in os.environ and os.path.isfile(os.environ['SAGE_STARTUP_FILE']):
    try:
        load(os.environ['SAGE_STARTUP_FILE'])
    except:
        sys.stdout.flush()
        sys.stderr.write('\\nException loading startup file: {}\\n'.format(os.environ['SAGE_STARTUP_FILE']))
        sys.stderr.flush()
        raise
"""
                            exec(compile(b2a, '', 'exec'), namespace, locals)
                    features = sage_parsing.get_future_features(
                        block, 'single')
                    if features:
                        compile_flags = reduce(
                            operator.or_, (feature.compiler_flag
                                           for feature in features.values()),
                            compile_flags)
                    exec(
                        compile(block + '\n',
                                '',
                                'single',
                                flags=compile_flags), namespace, locals)
                    if features:
                        # remember the features for subsequent cells
                        Salvus._py_features.update(features)
                sys.stdout.flush()
                sys.stderr.flush()
            except:
                if ascii_warn:
                    sys.stderr.write(
                        '\n\n*** WARNING: Code contains non-ascii characters    ***\n'
                    )
                    for c in '\u201c\u201d':
                        if c in code:
                            sys.stderr.write(
                                '*** Maybe the character < %s > should be replaced by < " > ? ***\n'
                                % c)
                            break
                    sys.stderr.write('\n\n')
                if six.PY2:
                    from exceptions import SyntaxError, TypeError
                # py3: all standard errors are available by default via "builtin", not available here for some reason ...
                if six.PY3:
                    from builtins import SyntaxError, TypeError
                exc_type, _, _ = sys.exc_info()
                if exc_type in [SyntaxError, TypeError]:
                    from .sage_parsing import strip_string_literals
                    code0, _, _ = strip_string_literals(code)
                    implicit_mul = RE_POSSIBLE_IMPLICIT_MUL.findall(code0)
                    if len(implicit_mul) > 0:
                        implicit_mul_list = ', '.join(
                            str(_) for _ in implicit_mul)
                        # we know there is a SyntaxError and there could be an implicit multiplication
                        sys.stderr.write(
                            '\n\n*** WARNING: Code contains possible implicit multiplication ***\n'
                        )
                        sys.stderr.write(
                            '*** Check if any of [ %s ] need a "*" sign for multiplication, e.g. 5x should be 5*x ! ***\n\n'
                            % implicit_mul_list)
                sys.stdout.flush()
                sys.stderr.write('Error in lines %s-%s\n' %
                                 (start + 1, stop + 1))
                traceback.print_exc()
                sys.stderr.flush()
                break
    def execute_with_code_decorators(self,
                                     code_decorators,
                                     code,
                                     preparse=True,
                                     namespace=None,
                                     locals=None):
        """
        salvus.execute_with_code_decorators is used when evaluating
        code blocks that are set to any non-default code_decorator.

        Each decorator may transform the code (callable / .before hook),
        consume it entirely (.eval hook, whose printed result replaces the
        code), or do nothing (the sage module itself); whatever code
        remains is then executed via self.execute, and .after hooks run.
        """
        import sage  # used below as a code decorator
        if is_string(code_decorators):
            code_decorators = [code_decorators]
        if preparse:
            code_decorators = list(
                map(sage_parsing.preparse_code, code_decorators))
        # the decorator expressions are evaluated in the cell namespace
        code_decorators = [
            eval(code_decorator, self.namespace)
            for code_decorator in code_decorators
        ]
        # The code itself may want to know exactly what code decorators are in effect.
        # For example, r.eval can do extra things when being used as a decorator.
        self.code_decorators = code_decorators
        for i, code_decorator in enumerate(code_decorators):
            # eval is for backward compatibility
            if not hasattr(code_decorator, 'eval') and hasattr(
                    code_decorator, 'before'):
                code_decorators[i] = code_decorator.before(code)
        for code_decorator in reversed(code_decorators):
            # eval is for backward compatibility
            if hasattr(code_decorator, 'eval'):
                print(code_decorator.eval(
                    code, locals=self.namespace))  # removed , end=' '
                code = ''
            elif code_decorator is sage:
                # special case -- the sage module (i.e., %sage) should do nothing.
                pass
            else:
                code = code_decorator(code)
            if code is None:
                code = ''
        if code != '' and is_string(code):
            self.execute(code,
                         preparse=preparse,
                         namespace=namespace,
                         locals=locals)
        for code_decorator in code_decorators:
            if not hasattr(code_decorator, 'eval') and hasattr(
                    code_decorator, 'after'):
                code_decorator.after(code)
def html(self, html, done=False, once=None):
"""
Display html in the output stream.
EXAMPLE:
salvus.html("<b>Hi</b>")
"""
self._flush_stdio()
self._send_output(html=unicode8(html),
id=self._id,
done=done,
once=once)
def md(self, md, done=False, once=None):
"""
Display markdown in the output stream.
EXAMPLE:
salvus.md("**Hi**")
"""
self._flush_stdio()
self._send_output(md=unicode8(md), id=self._id, done=done, once=once)
    def pdf(self, filename, **kwds):
        # Thin wrapper: delegate displaying the PDF file to sage_salvus.show_pdf.
        sage_salvus.show_pdf(filename, **kwds)
def tex(self, obj, display=False, done=False, once=None, **kwds):
"""
Display obj nicely using TeX rendering.
INPUT:
- obj -- latex string or object that is automatically be converted to TeX
- display -- (default: False); if True, typeset as display math (so centered, etc.)
"""
self._flush_stdio()
tex = obj if is_string(obj) else self.namespace['latex'](obj, **kwds)
self._send_output(tex={
'tex': tex,
'display': display
},
id=self._id,
done=done,
once=once)
return self
    def start_executing(self):
        # Tell the client this cell is now running (output message with done=False).
        self._send_output(done=False, id=self._id)
    def clear(self, done=False):
        # Ask the client to clear all output of this cell.
        self._send_output(clear=True, id=self._id, done=done)
    def delete_last_output(self, done=False):
        # Ask the client to delete the most recent output message of this cell.
        self._send_output(delete_last=True, id=self._id, done=done)
def stdout(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard output stream of the compute cell.
INPUT:
- output -- string or object
"""
stdout = output if is_string(output) else unicode8(output)
self._send_output(stdout=stdout, done=done, id=self._id, once=once)
return self
def stderr(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard error stream of the compute cell.
INPUT:
- output -- string or object
"""
stderr = output if is_string(output) else unicode8(output)
self._send_output(stderr=stderr, done=done, id=self._id, once=once)
return self
def code(
self,
source, # actual source code
mode=None, # the syntax highlight codemirror mode
filename=None, # path of file it is contained in (if applicable)
lineno=-1, # line number where source starts (0-based)
done=False,
once=None):
"""
Send a code message, which is to be rendered as code by the client, with
appropriate syntax highlighting, maybe a link to open the source file, etc.
"""
source = source if is_string(source) else unicode8(source)
code = {
'source': source,
'filename': filename,
'lineno': int(lineno),
'mode': mode
}
self._send_output(code=code, done=done, id=self._id, once=once)
return self
    def _execute_interact(self, id, vals):
        # Invoke the registered interact function for this interact id with
        # the control values sent by the client; unknown ids (e.g. after a
        # server restart) just ask the user to re-evaluate the cell.
        if id not in sage_salvus.interacts:
            print("(Evaluate this cell to use this interact.)")
            #raise RuntimeError("Error: No interact with id %s"%id)
        else:
            sage_salvus.interacts[id](vals)
    def interact(self, f, done=False, once=None, **kwds):
        # Wrap f in an InteractCell, send its JSON description so the client
        # renders the controls, and return an InteractFunction proxy for
        # programmatic access to the interact's state.
        I = sage_salvus.InteractCell(f, **kwds)
        self._flush_stdio()
        self._send_output(interact=I.jsonable(),
                          id=self._id,
                          done=done,
                          once=once)
        return sage_salvus.InteractFunction(I)
def javascript(self,
               code,
               once=False,
               coffeescript=False,
               done=False,
               obj=None):
    """
    Execute the given Javascript code as part of the output
    stream. This same code will be executed (at exactly this
    point in the output stream) every time the worksheet is
    rendered.

    See the docs for the top-level javascript function for more details.

    INPUT:
    - code -- a string
    - once -- boolean (default: False); if True the Javascript is
      only executed once, not every time the cell is loaded. This
      is what you would use if you call salvus.stdout, etc. Use
      once=False, e.g., if you are using javascript to make a DOM
      element draggable (say). WARNING: If once=True, then the
      javascript is likely to get executed before other output to
      a given cell is even rendered.
    - coffeescript -- boolean (default: False); if True, the input
      code is first converted from CoffeeScript to Javascript.

    At least the following Javascript objects are defined in the
    scope in which the code is evaluated::

    - cell -- jQuery wrapper around the current compute cell
    - salvus.stdout, salvus.stderr, salvus.html, salvus.tex -- all
      allow you to write additional output to the cell
    - worksheet - jQuery wrapper around the current worksheet DOM object
    - obj -- the optional obj argument, which is passed via JSON serialization
    """
    # Default to an empty payload so the client always receives a JSON object.
    if obj is None:
        obj = {}
    self._send_output(javascript={
        'code': code,
        'coffeescript': coffeescript
    },
                      id=self._id,
                      done=done,
                      obj=obj,
                      once=once)
def coffeescript(self, *args, **kwds):
    """
    This is the same as salvus.javascript, but with coffeescript=True.

    See the docs for the top-level javascript function for more details.
    """
    self.javascript(*args, **dict(kwds, coffeescript=True))
def raw_input(self,
              prompt='',
              default='',
              placeholder='',
              input_width=None,
              label_width=None,
              done=False,
              type=None):  # done is ignored here
    """Show an input box in the output cell and block until the user submits
    a value; return that value.

    If *type* is given: type='sage' evaluates the reply with sage_eval,
    otherwise *type* is called on the reply (falling back to type(str(value))
    for converters that choke on unicode).  Raises KeyboardInterrupt when
    some other message arrives instead of the expected 'sage_raw_input'.
    """
    self._flush_stdio()
    m = {'prompt': unicode8(prompt)}
    if input_width is not None:
        m['input_width'] = unicode8(input_width)
    if label_width is not None:
        m['label_width'] = unicode8(label_width)
    if default:
        m['value'] = unicode8(default)
    if placeholder:
        m['placeholder'] = unicode8(placeholder)
    self._send_output(raw_input=m, id=self._id)
    # Block until the client (or anything else) sends the next message.
    typ, mesg = self.message_queue.next_mesg()
    log("handling raw input message ", truncate_text(unicode8(mesg), 400))
    if typ == 'json' and mesg['event'] == 'sage_raw_input':
        # everything worked out perfectly
        self.delete_last_output()
        m['value'] = mesg['value']  # as unicode!
        m['submitted'] = True
        # Re-emit the prompt in its submitted state so it persists in output.
        self._send_output(raw_input=m, id=self._id)
        value = mesg['value']
        if type is not None:
            if type == 'sage':
                value = sage_salvus.sage_eval(value)
            else:
                try:
                    value = type(value)
                except TypeError:
                    # Some things in Sage are clueless about unicode for some reason...
                    # Let's at least try, in case the unicode can convert to a string.
                    value = type(str(value))
        return value
    else:
        raise KeyboardInterrupt(
            "raw_input interrupted by another action: event='%s' (expected 'sage_raw_input')"
            % mesg['event'])
def _check_component(self, component):
    """Validate that *component* names a cell part that can be shown/hidden."""
    if component != 'input' and component != 'output':
        raise ValueError("component must be 'input' or 'output'")
def hide(self, component):
    """
    Hide the given component ('input' or 'output') of the cell.
    """
    self._check_component(component)
    # NOTE(review): self._id is passed positionally here, while every other
    # call site uses id=self._id -- confirm _send_output's first positional
    # parameter is the message id.
    self._send_output(self._id, hide=component)
def show(self, component):
    """
    Show the given component ('input' or 'output') of the cell.
    """
    self._check_component(component)
    # NOTE(review): self._id is passed positionally here, while every other
    # call site uses id=self._id -- confirm _send_output's first positional
    # parameter is the message id.
    self._send_output(self._id, show=component)
def notify(self, **kwds):
    """
    Display a graphical notification using the alert_message Javascript function.

    INPUTS:
    - `type: "default"` - Type of the notice. "default", "warning", "info", "success", or "error".
    - `title: ""` - The notice's title.
    - `message: ""` - The notice's text.
    - `timeout: ?` - Delay in seconds before the notice is automatically removed.

    EXAMPLE:
    salvus.notify(type="warning", title="This warning", message="This is a quick message.", timeout=3)
    """
    obj = {}
    for k, v in kwds.items():
        if k == 'text':  # backward compat
            k = 'message'
        elif k == 'type' and v == 'notice':  # backward compat
            v = 'default'
        obj[k] = sage_salvus.jsonable(v)
        if k == 'delay':  # backward compat
            # NOTE(review): obj['delay'] stays set alongside the derived
            # 'timeout' key -- presumably harmless; confirm the client
            # ignores the extra key.
            obj['timeout'] = v / 1000.0  # units are in seconds now.
    self.javascript("alert_message(obj)", once=True, obj=obj)
def execute_javascript(self, code, coffeescript=False, obj=None):
    """
    Tell the browser to execute javascript. Basically the same as
    salvus.javascript with once=True (the default), except this
    isn't tied to a particular cell. There is a worksheet object
    defined in the scope of the evaluation.

    See the docs for the top-level javascript function for more details.
    """
    # obj=None serializes to the JSON literal "null"; the client-side code
    # receives that as its obj argument.
    self._conn.send_json(
        message.execute_javascript(code,
                                   coffeescript=coffeescript,
                                   obj=json.dumps(obj,
                                                  separators=(',', ':'))))
def execute_coffeescript(self, *args, **kwds):
    """
    This is the same as salvus.execute_javascript, but with coffeescript=True.

    See the docs for the top-level javascript function for more details.
    """
    self.execute_javascript(*args, **dict(kwds, coffeescript=True))
def _cython(self, filename, **opts):
    """
    Return module obtained by compiling the Cython code in the
    given file.

    INPUT:
    - filename -- name of a Cython file
    - all other options are passed to sage.misc.cython.cython unchanged,
      except for use_cache which defaults to True (instead of False)

    OUTPUT:
    - a module
    """
    opts.setdefault('use_cache', True)
    import sage.misc.cython
    modname, path = sage.misc.cython.cython(filename, **opts)
    # Make the build directory importable just long enough to import it.
    sys.path.insert(0, path)
    try:
        return __import__(modname)
    finally:
        del sys.path[0]
def _import_code(self, content, **opts):
    """Write *content* to a uniquely named temporary .py file in the current
    directory, import it, and return the resulting module.

    The temporary source file (and any legacy .pyc next to it) is always
    removed, even when writing or importing fails.
    """
    # Pick a module name that does not collide with an existing file.
    while True:
        py_file_base = uuid().replace('-', '_')
        if not os.path.exists(py_file_base + '.py'):
            break
    try:
        # BUG FIX: close the file handle deterministically.
        with open(py_file_base + '.py', 'w') as f:
            f.write(content)
        sys.path.insert(0, os.path.abspath('.'))
        try:
            mod = __import__(py_file_base)
        finally:
            del sys.path[0]
    finally:
        # BUG FIX: on Python 3 bytecode is cached under __pycache__, so a
        # sibling .pyc usually does not exist; the old unconditional unlink
        # raised FileNotFoundError here.  Clean up best-effort instead.
        for leftover in (py_file_base + '.py', py_file_base + '.pyc'):
            try:
                os.unlink(leftover)
            except OSError:
                pass
    return mod
def _sage(self, filename, **opts):
    """Preparse the .sage file *filename* (with a full `from sage.all
    import *` preamble) and import it as a module via _import_code."""
    import sage.misc.preparser
    # BUG FIX: close the source file instead of leaking the handle.
    with open(filename) as f:
        raw = f.read()
    content = "from sage.all import *\n" + sage.misc.preparser.preparse_file(raw)
    return self._import_code(content, **opts)
def _spy(self, filename, **opts):
    """Preparse the .spy file *filename* (importing only a minimal set of
    Sage names) and import it as a module via _import_code."""
    import sage.misc.preparser
    # BUG FIX: close the source file instead of leaking the handle.
    with open(filename) as f:
        raw = f.read()
    content = ("from sage.all import Integer, RealNumber, PolynomialRing\n" +
               sage.misc.preparser.preparse_file(raw))
    return self._import_code(content, **opts)
def _py(self, filename, **opts):
    # NOTE(review): __import__ expects a dotted module name, but require()
    # passes the filename including its '.py' extension -- confirm callers
    # strip the extension, otherwise this imports the wrong name.
    return __import__(filename)
def require(self, filename, **opts):
    """Load *filename* and return the resulting module, dispatching on the
    file extension: .pyx/.spyx (Cython), .sage, .spy, or .py."""
    if not os.path.exists(filename):
        raise ValueError("file '%s' must exist" % filename)
    ext = os.path.splitext(filename)[1]
    loaders = {
        '.pyx': self._cython,
        '.spyx': self._cython,
        '.sage': self._sage,
        '.spy': self._spy,
        '.py': self._py,
    }
    loader = loaders.get(ext)
    if loader is None:
        raise NotImplementedError("require file of type %s not implemented" %
                                  ext)
    return loader(filename, **opts)
def typeset_mode(self, on=True):
    """Enable (or disable) typeset/LaTeX rendering of output; delegates to
    sage_salvus.typeset_mode."""
    sage_salvus.typeset_mode(on)
def project_info(self):
    """
    Return a dictionary with information about the project in which this code is running.

    EXAMPLES::

        sage: salvus.project_info()
        {"stdout":"{u'project_id': u'...', u'location': {u'username': u'teaAuZ9M', u'path': u'.', u'host': u'localhost', u'port': 22}, u'base_url': u'/...'}\n"}
    """
    # INFO is presumably a module-level dict populated at server startup --
    # TODO confirm where it is initialized.
    return INFO
# Copy docstrings from the sage_salvus implementations onto the corresponding
# Salvus methods.  On Python 2 the unbound-method wrapper requires assigning
# through __func__; on Python 3 functions can be patched directly.
if six.PY2:
    Salvus.pdf.__func__.__doc__ = sage_salvus.show_pdf.__doc__
    Salvus.raw_input.__func__.__doc__ = sage_salvus.raw_input.__doc__
    Salvus.clear.__func__.__doc__ = sage_salvus.clear.__doc__
    Salvus.delete_last_output.__func__.__doc__ = sage_salvus.delete_last_output.__doc__
else:
    Salvus.pdf.__doc__ = sage_salvus.show_pdf.__doc__
    Salvus.raw_input.__doc__ = sage_salvus.raw_input.__doc__
    Salvus.clear.__doc__ = sage_salvus.clear.__doc__
    Salvus.delete_last_output.__doc__ = sage_salvus.delete_last_output.__doc__
def execute(conn, id, code, data, cell_id, preparse, message_queue):
    """Run one block of worksheet *code* in the global namespace, routing
    stdout/stderr through a Salvus output object bound to cell *id*.

    The real sys.stdout/sys.stderr are always restored in the finally
    clause, and exactly one 'done' message is flushed (unless the Salvus
    object disabled it via _done).
    """
    salvus = Salvus(conn=conn,
                    id=id,
                    data=data,
                    message_queue=message_queue,
                    cell_id=cell_id)
    #salvus.start_executing() # with our new mainly client-side execution this isn't needed; not doing this makes evaluation roundtrip around 100ms instead of 200ms too, which is a major win.
    try:
        # initialize the salvus output streams
        streams = (sys.stdout, sys.stderr)
        sys.stdout = BufferedOutputStream(salvus.stdout)
        sys.stderr = BufferedOutputStream(salvus.stderr)
        try:
            # initialize more salvus functionality
            sage_salvus.set_salvus(salvus)
            namespace['sage_salvus'] = sage_salvus
        except:
            traceback.print_exc()
        # Wrap the user code with the configured prefix/postfix (e.g. set by
        # a default mode); a leading % magic suppresses the prefix.
        if salvus._prefix:
            if not code.startswith("%"):
                code = salvus._prefix + '\n' + code
        if salvus._postfix:
            code += '\n' + salvus._postfix
        salvus.execute(code, namespace=namespace, preparse=preparse)
    finally:
        # there must be exactly one done message, unless salvus._done is False.
        if sys.stderr._buf:
            if sys.stdout._buf:
                sys.stdout.flush()
            sys.stderr.flush(done=salvus._done)
        else:
            sys.stdout.flush(done=salvus._done)
        # Restore the real stdout/stderr captured above.
        (sys.stdout, sys.stderr) = streams
# execute.count goes from 0 to 2
# used for show_identifiers()
execute.count = 0
def drop_privileges(id, home, transient, username):
    """Switch this (forked) child process to run as uid/gid *id* with
    HOME=*home*, and repoint the relevant environment variables there.

    NOTE(review): *id* is used as both the uid and the gid -- confirm that
    matches the intended account layout.
    """
    gid = id
    uid = id
    if transient:
        os.chown(home, uid, gid)
    os.setgid(gid)
    os.setuid(uid)
    os.environ['DOT_SAGE'] = home
    # Rewrite MPLCONFIGDIR under the new home, dropping the first 5
    # characters of the old value -- presumably a '/tmp/'-style prefix;
    # TODO confirm.
    mpl = os.environ['MPLCONFIGDIR']
    os.environ['MPLCONFIGDIR'] = home + mpl[5:]
    os.environ['HOME'] = home
    os.environ['IPYTHON_DIR'] = home
    os.environ['USERNAME'] = username
    os.environ['USER'] = username
    os.chdir(home)
    # Monkey patch the Sage library and anything else that does not
    # deal well with changing user. This sucks, but it is work that
    # simply must be done because we're not importing the library from
    # scratch (which would take a long time).
    import sage.misc.misc
    sage.misc.misc.DOT_SAGE = home + '/.sage/'
class MessageQueue(list):
    """FIFO of messages read from a connection.

    recv() pulls one message off the connection and stores it; next_mesg()
    hands back the oldest stored message, falling back to a blocking read
    from the connection when nothing is queued.
    """

    def __init__(self, conn):
        self.queue = []
        self.conn = conn

    def __repr__(self):
        return "Sage Server Message Queue"

    def __getitem__(self, i):
        return self.queue[i]

    def __delitem__(self, i):
        del self.queue[i]

    def next_mesg(self):
        """
        Remove oldest message from the queue and return it.
        If the queue is empty, wait for a message to arrive
        and return it (does not place it in the queue).
        """
        if not self.queue:
            return self.conn.recv()
        return self.queue.pop()

    def recv(self):
        """
        Wait until one message is received and enqueue it.
        Also returns the mesg.
        """
        mesg = self.conn.recv()
        self.queue.insert(0, mesg)
        return mesg
def session(conn):
    """
    This is run by the child process that is forked off on each new
    connection. It drops privileges, then handles the complete
    compute session.

    INPUT:

    - ``conn`` -- the TCP connection
    """
    mq = MessageQueue(conn)
    pid = os.getpid()
    # seed the random number generator(s)
    import sage.all
    sage.all.set_random_seed()
    import random
    random.seed(sage.all.initial_seed())
    # get_memory_usage is not aware of being forked...
    import sage.misc.getusage
    sage.misc.getusage._proc_status = "/proc/%s/status" % os.getpid()
    cnt = 0
    # Main message loop: dispatch on the event type of each incoming message.
    while True:
        try:
            typ, mesg = mq.next_mesg()
            #print('INFO:child%s: received message "%s"'%(pid, mesg))
            log("handling message ", truncate_text(unicode8(mesg), 400))
            event = mesg['event']
            if event == 'terminate_session':
                return
            elif event == 'execute_code':
                try:
                    execute(conn=conn,
                            id=mesg['id'],
                            code=mesg['code'],
                            data=mesg.get('data', None),
                            cell_id=mesg.get('cell_id', None),
                            preparse=mesg.get('preparse', True),
                            message_queue=mq)
                except Exception as err:
                    log("ERROR -- exception raised '%s' when executing '%s'" %
                        (err, mesg['code']))
            elif event == 'introspect':
                try:
                    # check for introspect from jupyter cell
                    prefix = Salvus._default_mode
                    if 'top' in mesg:
                        top = mesg['top']
                        log('introspect cell top line %s' % top)
                        if top.startswith("%"):
                            prefix = top[1:]
                    try:
                        # see if prefix is the name of a jupyter kernel function
                        kc = eval(prefix + "(get_kernel_client=True)",
                                  namespace, locals())
                        kn = eval(prefix + "(get_kernel_name=True)", namespace,
                                  locals())
                        log("jupyter introspect prefix %s kernel %s" %
                            (prefix, kn))  # e.g. "p2", "python2"
                        jupyter_introspect(conn=conn,
                                           id=mesg['id'],
                                           line=mesg['line'],
                                           preparse=mesg.get('preparse', True),
                                           kc=kc)
                    except:
                        # Not a jupyter-mode cell: fall back to the regular
                        # Sage introspection path.
                        import traceback
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        lines = traceback.format_exception(
                            exc_type, exc_value, exc_traceback)
                        log(lines)
                        introspect(conn=conn,
                                   id=mesg['id'],
                                   line=mesg['line'],
                                   preparse=mesg.get('preparse', True))
                except:
                    pass
            else:
                raise RuntimeError("invalid message '%s'" % mesg)
        except:
            # When hub connection dies, loop goes crazy.
            # Unfortunately, just catching SIGINT doesn't seem to
            # work, and leads to random exits during a
            # session. Howeer, when connection dies, 10000 iterations
            # happen almost instantly. Ugly, but it works.
            cnt += 1
            if cnt > 10000:
                sys.exit(0)
            else:
                pass
def jupyter_introspect(conn, id, line, preparse, kc):
    """Ask the jupyter kernel (via kernel client *kc*) for tab-completions
    of *line* and send the resulting introspect_completions message back
    over *conn*.  Errors are logged, never raised."""
    import jupyter_client
    from queue import Empty

    try:
        salvus = Salvus(conn=conn, id=id)
        msg_id = kc.complete(line)
        shell = kc.shell_channel
        iopub = kc.iopub_channel

        # handle iopub responses: drain until the kernel reports idle for
        # our request.
        while True:
            try:
                msg = iopub.get_msg(timeout=1)
                msg_type = msg['msg_type']
                content = msg['content']
            except Empty:
                # shouldn't happen
                log("jupyter iopub channel empty")
                break

            if msg['parent_header'].get('msg_id') != msg_id:
                continue

            log("jupyter iopub recv %s %s" % (msg_type, str(content)))

            if msg_type == 'status' and content['execution_state'] == 'idle':
                break

        # handle shell responses: wait for the complete_reply to our request.
        while True:
            try:
                msg = shell.get_msg(timeout=10)
                msg_type = msg['msg_type']
                content = msg['content']
            except:
                # shouldn't happen
                log("jupyter shell channel empty")
                break

            if msg['parent_header'].get('msg_id') != msg_id:
                continue

            log("jupyter shell recv %s %s" % (msg_type, str(content)))

            if msg_type == 'complete_reply' and content['status'] == 'ok':
                # jupyter kernel returns matches like "xyz.append" and smc wants just "append"
                matches = content['matches']
                offset = content['cursor_end'] - content['cursor_start']
                completions = [s[offset:] for s in matches]
                mesg = message.introspect_completions(id=id,
                                                      completions=completions,
                                                      target=line[-offset:])
                conn.send_json(mesg)
                break
    except:
        log("jupyter completion exception: %s" % sys.exc_info()[0])
def introspect(conn, id, line, preparse):
    """Handle a (non-jupyter) introspection request for *line*: completions,
    ?-help docstring, or ??-source, answered back over *conn*."""
    salvus = Salvus(
        conn=conn, id=id
    )  # so salvus.[tab] works -- note that Salvus(...) modifies namespace.
    z = sage_parsing.introspect(line, namespace=namespace, preparse=preparse)
    if z['get_completions']:
        mesg = message.introspect_completions(id=id,
                                              completions=z['result'],
                                              target=z['target'])
    elif z['get_help']:
        mesg = message.introspect_docstring(id=id,
                                            docstring=z['result'],
                                            target=z['expr'])
    elif z['get_source']:
        mesg = message.introspect_source_code(id=id,
                                              source_code=z['result'],
                                              target=z['expr'])
    # NOTE(review): if none of the three flags is set, mesg is unbound and
    # the next line raises NameError -- presumably sage_parsing.introspect
    # always sets exactly one; confirm.
    conn.send_json(mesg)
def handle_session_term(signum, frame):
    """SIGCHLD handler: reap every finished child process without blocking."""
    while True:
        try:
            child, exit_status = os.waitpid(-1, os.WNOHANG)
        except:
            return
        if child == 0:
            return
# Shared secret used to authenticate incoming client connections; read
# lazily by unlock_conn().  COCALC_SECRET_TOKEN overrides the default
# location under $SMC.
secret_token = None
if 'COCALC_SECRET_TOKEN' in os.environ:
    secret_token_path = os.environ['COCALC_SECRET_TOKEN']
else:
    secret_token_path = os.path.join(os.environ['SMC'], 'secret_token')
def unlock_conn(conn):
    """Handshake: require the client to send the shared secret token.

    On success sends b'y' and returns True; on any failure sends b'n' plus
    an explanation, closes the connection and returns False.
    """
    global secret_token
    if secret_token is None:
        try:
            with open(secret_token_path) as f:
                secret_token = f.read().strip()
        except:
            conn.send(six.b('n'))
            conn.send(
                six.
                b("Unable to accept connection, since Sage server doesn't yet know the secret token; unable to read from '%s'"
                  % secret_token_path))
            conn.close()
            # BUG FIX: previously execution fell through with secret_token
            # still None and crashed on len(secret_token) below.
            return False
    expected = six.b(secret_token)
    n = len(expected)
    token = six.b('')
    while len(token) < n:
        token += conn.recv(n)
        # BUG FIX: compare bytes with bytes; the old code compared the
        # received bytes against a str slice, which is never equal on
        # Python 3 and aborted multi-recv handshakes.
        if token != expected[:len(token)]:
            break  # definitely not right -- don't try anymore
    if token != expected:
        log("token='%s'; secret_token='%s'" % (token, secret_token))
        conn.send(six.b('n'))  # no -- invalid login
        conn.send(six.b("Invalid secret token."))
        conn.close()
        return False
    else:
        conn.send(six.b('y'))  # yes -- valid login
        return True
def serve_connection(conn):
    """Handle one freshly accepted socket in a forked child: authenticate,
    then either forward a signal or start a full compute session."""
    global PID
    PID = os.getpid()

    # First the client *must* send the secret shared token. If they
    # don't, we return (and the connection will have been destroyed by
    # unlock_conn).
    log("Serving a connection")
    log("Waiting for client to unlock the connection...")
    # TODO -- put in a timeout (?)
    if not unlock_conn(conn):
        log("Client failed to unlock connection. Dumping them.")
        return
    log("Connection unlocked.")

    try:
        # Wrap the raw socket in the JSON message protocol.
        conn = ConnectionJSON(conn)
        typ, mesg = conn.recv()
        log("Received message %s" % mesg)
    except Exception as err:
        log("Error receiving message: %s (connection terminated)" % str(err))
        raise

    if mesg['event'] == 'send_signal':
        if mesg['pid'] == 0:
            log("invalid signal mesg (pid=0)")
        else:
            log("Sending a signal")
            os.kill(mesg['pid'], mesg['signal'])
        return
    if mesg['event'] != 'start_session':
        log("Received an unknown message event = %s; terminating session." %
            mesg['event'])
        return

    log("Starting a session")
    desc = message.session_description(os.getpid())
    log("child sending session description back: %s" % desc)
    conn.send_json(desc)
    session(conn=conn)
def serve(port, host, extra_imports=False):
    """Main accept loop of the Sage server.

    Binds host:port, pre-imports the Sage library into the module-level
    namespace (via the nested init_library), then forks one child per
    incoming connection; each child runs serve_connection.  The listening
    socket uses a 5s timeout so terminated children are reaped regularly.
    """
    #log.info('opening connection on port %s', port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # check for children that have finished every few seconds, so
    # we don't end up with zombies.
    s.settimeout(5)
    s.bind((host, port))
    log('Sage server %s:%s' % (host, port))

    # Enabling the following signal completely breaks subprocess pexpect in many cases, which is
    # obviously totally unacceptable.
    #signal.signal(signal.SIGCHLD, handle_session_term)

    def init_library():
        # One-time (expensive) setup: import sage.all, monkey-patch it for
        # worksheet use, and populate the shared namespace dict.
        tm = time.time()
        log("pre-importing the sage library...")

        # FOR testing purposes.
        ##log("fake 40 second pause to slow things down for testing....")
        ##time.sleep(40)
        ##log("done with pause")

        # Actually import sage now. This must happen after the interact
        # import because of library interacts.
        log("import sage...")
        import sage.all
        log("imported sage.")

        # Monkey patching interact using the new and improved Salvus
        # implementation of interact.
        sage.all.interact = sage_salvus.interact

        # Monkey patch the html command.
        try:
            # need the following for sage_server to start with sage-8.0
            # or `import sage.interacts.library` will fail (not really important below, as we don't do that).
            import sage.repl.user_globals
            sage.repl.user_globals.set_globals(globals())
            log("initialized user_globals")
        except RuntimeError:
            # may happen with sage version < 8.0
            log("user_globals.set_globals failed, continuing", sys.exc_info())

        sage.all.html = sage.misc.html.html = sage_salvus.html

        # CRITICAL: look, we are just going to not do this, and have sage.interacts.library
        # be broken. It's **really slow** to do this, and I don't think sage.interacts.library
        # ever ended up going anywhere! People use wiki.sagemath.org/interact instead...
        #import sage.interacts.library
        #sage.interacts.library.html = sage_salvus.html

        # Set a useful figsize default; the matplotlib one is not notebook friendly.
        import sage.plot.graphics
        sage.plot.graphics.Graphics.SHOW_OPTIONS['figsize'] = [8, 4]

        # Monkey patch latex.eval, so that %latex works in worksheets
        sage.misc.latex.latex.eval = sage_salvus.latex0

        # Plot, integrate, etc., -- so startup time of worksheets is minimal.
        cmds = [
            'from sage.all import *', 'from sage.calculus.predefined import x',
            'import pylab'
        ]
        if extra_imports:
            cmds.extend([
                'import scipy', 'import sympy',
                "plot(sin).save('%s/a.png'%os.environ['SMC'], figsize=2)",
                'integrate(sin(x**2),x)'
            ])
        tm0 = time.time()
        for cmd in cmds:
            log(cmd)
            exec(cmd, namespace)
        global pylab
        pylab = namespace['pylab']  # used for clearing

        log('imported sage library and other components in %s seconds' %
            (time.time() - tm))

        # Expose the salvus interact helpers in the worksheet namespace.
        for k, v in sage_salvus.interact_functions.items():
            namespace[k] = v
            # See above -- not doing this, since it is REALLY SLOW to import.
            # This does mean that some old code that tries to use interact might break (?).
            #namespace[k] = sagenb.notebook.interact.__dict__[k] = v

        namespace['_salvus_parsing'] = sage_parsing

        # Re-export the %-magic and helper functions from sage_salvus.
        for name in [
                'anaconda', 'asy', 'attach', 'auto', 'capture', 'cell',
                'clear', 'coffeescript', 'cython', 'default_mode',
                'delete_last_output', 'dynamic', 'exercise', 'fork', 'fortran',
                'go', 'help', 'hide', 'hideall', 'input', 'java', 'javascript',
                'julia', 'jupyter', 'license', 'load', 'md', 'mediawiki',
                'modes', 'octave', 'pandoc', 'perl', 'plot3d_using_matplotlib',
                'prun', 'python_future_feature', 'py3print_mode', 'python',
                'python3', 'r', 'raw_input', 'reset', 'restore', 'ruby',
                'runfile', 'sage_eval', 'scala', 'scala211',
                'script', 'search_doc', 'search_src', 'sh', 'show',
                'show_identifiers', 'singular_kernel', 'time', 'timeit',
                'typeset_mode', 'var', 'wiki'
        ]:
            namespace[name] = getattr(sage_salvus, name)

        namespace['sage_server'] = sys.modules[
            __name__]  # http://stackoverflow.com/questions/1676835/python-how-do-i-get-a-reference-to-a-module-inside-the-module-itself

        # alias pretty_print_default to typeset_mode, since sagenb has/uses that.
        namespace['pretty_print_default'] = namespace['typeset_mode']
        # and monkey patch it
        sage.misc.latex.pretty_print_default = namespace[
            'pretty_print_default']

        sage_salvus.default_namespace = dict(namespace)
        log("setup namespace with extra functions")

        # Sage's pretty_print and view are both ancient and a mess
        sage.all.pretty_print = sage.misc.latex.pretty_print = namespace[
            'pretty_print'] = namespace['view'] = namespace['show']

        # this way client code can tell it is running as a Sage Worksheet.
        namespace['__SAGEWS__'] = True

    log("Initialize sage library.")
    init_library()

    t = time.time()
    s.listen(128)
    i = 0

    children = {}
    log("Starting server listening for connections")
    try:
        while True:
            i += 1
            #print i, time.time()-t, 'cps: ', int(i/(time.time()-t))
            # do not use log.info(...) in the server loop; threads = race conditions that hang server every so often!!
            try:
                # Reap any terminated children and close their connections.
                if children:
                    for pid in list(children.keys()):
                        if os.waitpid(pid, os.WNOHANG) != (0, 0):
                            log("subprocess %s terminated, closing connection"
                                % pid)
                            conn.close()
                            del children[pid]

                try:
                    conn, addr = s.accept()
                    log("Accepted a connection from", addr)
                except:
                    # this will happen periodically since we did s.settimeout above, so
                    # that we wait for children above periodically.
                    continue
            except socket.error:
                continue

            child_pid = os.fork()
            if child_pid:  # parent
                log("forked off child with pid %s to handle this connection" %
                    child_pid)
                children[child_pid] = conn
            else:
                # child
                global PID
                PID = os.getpid()
                log("child process, will now serve this new connection")
                serve_connection(conn)

        # end while
    except Exception as err:
        log("Error taking connection: ", err)
        traceback.print_exc(file=open(LOGFILE, 'a'))
        #log.error("error: %s %s", type(err), str(err))

    finally:
        log("closing socket")
        #s.shutdown(0)
        s.close()
def run_server(port, host, pidfile, logfile=None):
    """Start the Sage server on host:port.

    If *pidfile* is given, write our pid there (removed again on shutdown);
    if *logfile* is given, it overrides the module-level LOGFILE.
    """
    global LOGFILE
    if logfile:
        LOGFILE = logfile
    if pidfile:
        pid = str(os.getpid())
        print("os.getpid() = %s" % pid)
        # BUG FIX: close the pidfile handle instead of leaking it.
        with open(pidfile, 'w') as f:
            f.write(pid)

    log("run_server: port=%s, host=%s, pidfile='%s', logfile='%s'" %
        (port, host, pidfile, LOGFILE))
    try:
        serve(port, host)
    finally:
        if pidfile:
            os.unlink(pidfile)
if __name__ == "__main__":
    # Command-line entry point: parse options, then run either the test
    # client (-c) or the server (optionally daemonized with -d).
    import argparse
    parser = argparse.ArgumentParser(description="Run Sage server")
    parser.add_argument(
        "-p",
        dest="port",
        type=int,
        default=0,
        help=
        "port to listen on (default: 0); 0 = automatically allocated; saved to $SMC/data/sage_server.port"
    )
    parser.add_argument(
        "-l",
        dest='log_level',
        type=str,
        default='INFO',
        help=
        "log level (default: INFO) useful options include WARNING and DEBUG")
    parser.add_argument("-d",
                        dest="daemon",
                        default=False,
                        action="store_const",
                        const=True,
                        help="daemon mode (default: False)")
    parser.add_argument(
        "--host",
        dest="host",
        type=str,
        default='127.0.0.1',
        help="host interface to bind to -- default is 127.0.0.1")
    parser.add_argument("--pidfile",
                        dest="pidfile",
                        type=str,
                        default='',
                        help="store pid in this file")
    parser.add_argument(
        "--logfile",
        dest="logfile",
        type=str,
        default='',
        help="store log in this file (default: '' = don't log to a file)")
    parser.add_argument("-c",
                        dest="client",
                        default=False,
                        action="store_const",
                        const=True,
                        help="run in test client mode number 1 (command line)")
    parser.add_argument("--hostname",
                        dest="hostname",
                        type=str,
                        default='',
                        help="hostname to connect to in client mode")
    parser.add_argument("--portfile",
                        dest="portfile",
                        type=str,
                        default='',
                        help="write port to this file")

    args = parser.parse_args()

    if args.daemon and not args.pidfile:
        print(("%s: must specify pidfile in daemon mode" % sys.argv[0]))
        sys.exit(1)

    if args.log_level:
        # Log-level handling is currently disabled.
        pass
        #level = getattr(logging, args.log_level.upper())
        #log.setLevel(level)

    if args.client:
        client1(
            port=args.port if args.port else int(open(args.portfile).read()),
            hostname=args.hostname)
        sys.exit(0)

    if not args.port:
        # Ask the OS for a free port by binding port 0, then release it.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', 0))  # pick a free port
        args.port = s.getsockname()[1]
        del s

    if args.portfile:
        open(args.portfile, 'w').write(str(args.port))

    pidfile = os.path.abspath(args.pidfile) if args.pidfile else ''
    logfile = os.path.abspath(args.logfile) if args.logfile else ''
    if logfile:
        LOGFILE = logfile
        open(LOGFILE, 'w')  # for now we clear it on restart...
        log("setting logfile to %s" % LOGFILE)

    main = lambda: run_server(port=args.port, host=args.host, pidfile=pidfile)
    if args.daemon and args.pidfile:
        from . import daemon
        daemon.daemonize(args.pidfile)
        main()
    else:
        main()
| agpl-3.0 |
stephenliu1989/HK_DataMiner | hkdataminer/utils/plot_.py | 1 | 23288 | __author__ = 'stephen'
import numpy as np
import scipy.io
import scipy.sparse
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.pylab as pylab
from .utils import get_subindices
import matplotlib.ticker as mtick
from collections import Counter
from sklearn.neighbors.kde import KernelDensity
from scipy import stats
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_cluster(labels, phi_angles, psi_angles, name, outliers=-1, step=1, potential=False):
    '''
    Scatter-plot each cluster in a different color and save <name>.png.

    :param labels: the assignments after clustering or lumping
    :param phi_angles: the phi angles
    :param psi_angles: the psi angles
    :param name: the name of the result pictures
    :param outliers: outliers default is -1 (these points are not drawn)
    :param step: plot only every step-th cluster id
    :param potential: if True, format axes for the 2D-potential data set
                      instead of Alanine Dipeptide dihedrals
    :return: None
    '''
    clusters = np.unique(labels)
    plt.rc("font", size=10)
    if step > 1:
        clusters = clusters[0:len(clusters):step]
    # NOTE(review): colors_jet is computed but unused (the per-cluster color
    # argument below is commented out).
    colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(clusters)+1))
    if potential is False:  # plot Alanine Dipeptide
        for i in clusters:
            if i != outliers:
                point = np.where(labels == i)
                plt.plot(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7)#, color=colors_jet[i])
            #else:
            #    point = np.where(labels == i)
            #    plt.plot(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7, color='black') # , color=colors_jet[i])
        plt.title("Alanine Dipeptide " + name + " states", fontsize=10)
        # plt.xlim([-180, 180])
        # plt.ylim([-180, 180])
        # plt.xticks([-110, -60, 0, 60, 120])
        # plt.yticks([-120, -60, 0, 60, 120])
    else:  # if plot 2D potential
        plt.figure(figsize=(10, 10))
        for i in clusters:
            if i != outliers:
                plt.plot(phi_angles[np.where(labels == i)],
                         psi_angles[np.where(labels == i)], '.', markersize=1.0, alpha=0.7)  #markersize=20.0, color=colors_jet[i])
                #plt.plot(phi_angles[np.where(labels == i)],
                #         psi_angles[np.where(labels == i)],
                #         '.', color=colors_jet[i], label='State %d' % i)
        #plt.title("2D potential " + name + " states", fontsize=20)
        plt.xlim([-75, 75])
        plt.ylim([-75, 75])
        plt.xticks([-50, 0, 50])
        plt.yticks([-50, 0, 50])
    plt.xlabel(r"$\phi$", fontsize=25)
    plt.ylabel(r"$\psi$", fontsize=25)
    # Save the result figure
    plt.savefig('./'+name+'.png', dpi=400)
    plt.close()
    #plt.show()
def plot_each_cluster(labels, phi_angles, psi_angles, name, outliers=-1, step=1):
    '''
    Save one scatter plot per cluster ("<name> state_<i>.png").

    :param labels: the assignments after clustering or lumping
    :param phi_angles: the phi angles
    :param psi_angles: the psi angles
    :param name: the name of the result pictures
    :param outliers: outliers default is -1 (skipped)
    :param step: plot only every step-th cluster id
    :return: None
    '''
    clusters = np.unique(labels)
    if step > 1:
        clusters = clusters[0:len(clusters):step]
    colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(clusters)+1))
    for i in np.unique(clusters):
        if i != outliers:
            plt.plot(phi_angles[np.where(labels == i)],
                     psi_angles[np.where(labels == i)],
                     'x', color=colors_jet[i], label='State %d' % i)
            #plt.title("Alanine Dipeptide " + name + " state_" + str(i))
            plt.xlabel(r"$\phi$")
            plt.ylabel(r"$\psi$")
            plt.xlim([-180, 180])
            plt.ylim([-180, 180])
            plt.xticks([-120, -60, 0, 60, 120])
            plt.yticks([-120, -60, 0, 60, 120])
            # Save the result figure
            plt.savefig('./'+ name + " state_" + str(i)+'.png', dpi = 400)
            plt.close()
            #plt.show()
def contour_cluster(labels, phi_angles, psi_angles, name, outliers=-1):
    '''
    Contour-plot a Gaussian KDE of the points of a cluster and save <name>.png.

    :param labels: the assignments after clustering or lumping
    :param phi_angles: the phi angles
    :param psi_angles: the psi angles
    :param name: the name of the result pictures
    :param outliers: outliers default is -1
    :return: None

    NOTE(review): only the cluster with label 1 is actually plotted (the
    outlier test is commented out) -- looks like leftover debugging; confirm.
    '''
    # lables_array = np.array(labels)
    # colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(lables_array)+1))
    for i in np.unique(labels):
        #if i != outliers:
        if i == 1:
            print("i=", i)
            x = phi_angles[np.where(labels == i)]
            y = psi_angles[np.where(labels == i)]
            # Subsample to at most 1000 points to keep the KDE tractable.
            indices = get_subindices(assignments=x, state=None, samples=1000)
            x = x[indices]
            y = y[indices]
            X, Y= np.meshgrid(x, y)
            positions = np.vstack([X.ravel(), Y.ravel()])
            values = np.vstack([x, y])
            kernel = stats.gaussian_kde(values)
            Z = np.reshape(kernel(positions).T, X.shape)
            #kde = KernelDensity(kernel='gaussian', bandwidth=0.2)
            #kde_results = kde.score_samples([x,y])
            #X, Y, Z = np.meshgrid(x, y, kde_results)
            #Z = np.reshape(kernel([x,y]).T, x.shape)

            #Z1 = mlab.bivariate_normal(X, Y, 5.0, 5.0, 0.0, 0.0)
            #Z2 = mlab.bivariate_normal(X, Y, 7.5, 2.5, 5, 5)
            # difference of Gaussians
            #Z = 10.0 * (Z2 - Z1)
            #step = Z.max()-Z.min()/10
            #print "Z min:",Z.min(), "Z.max:", Z.max(), "step:", step
            #levels = np.arange(Z.min(), Z.min(), Z.max())
            #print levels
            plt.contour(X, Y, Z, origin='lower') #, linewidths=Z.min(), levels=levels)

    plt.title("Alanine Dipeptide " + name + " states")
    plt.xlabel(r"$\phi$")
    plt.ylabel(r"$\psi$")
    plt.xlim([-180, 180])
    plt.ylim([-180, 180])
    # Save the result figure
    plt.savefig('./'+name+'.png', dpi=400)
    plt.close()
    #plt.show()
def plot_matrix(tProb_=None, name=None):
    '''
    if labels is not None:
        n_states = len(set(labels)) - (1 if -1 in labels else 0)
        print 'n_states=', n_states
        #diagC = tProb_.diagonal()
        length = len(labels)
        print "length=", length
        Cmn = scipy.sparse.lil_matrix(n_states, n_states, dtype=np.float32)
        Cmn = np.zeros((n_states, n_states))
        print "size of tProb", tProb_.shape
        if scipy.sparse.issparse(tProb_):
            tProb_ = tProb_.todense()
        for i in xrange(length):
            for j in xrange(length):
                Cmn[labels[i], labels[j]] += tProb_[i, j]
        #for i in xrange(n_states):
            #Cmn[i,i] += diagC[i]
        #    for j in xrange(n_states):
        #        Cmn[i, j] += Cmn[j, i]
        #        Cmn[j, i] = Cmn[i, j]
        for j in xrange(n_states):
            sum_row = np.sum(Cmn[j,:])
            if sum_row is not 0:
                Cmn[j,:] /= sum_row
        pylab.matshow(Cmn, cmap=plt.cm.OrRd)
    else:
    '''
    # Render the transition-probability matrix as a heat map and save
    # <name>Matrix.png.  The triple-quoted block above is disabled legacy
    # code for aggregating by labels.
    pylab.matshow(tProb_, cmap=plt.cm.OrRd)
    plt.colorbar()
    #pylab.show()
    plt.savefig('./' + name + 'Matrix.png', dpi=400)
    plt.close()
def plot_block_matrix(labels, tProb_, name='BlockMatrix'):
    """Plot 1 - P with rows/columns permuted so that states sharing a label
    are adjacent (revealing block structure), and save <name>.png."""
    print("Plot Block Matrix")
    order = np.argsort(labels)
    #print order
    reordered = tProb_[:, order]
    reordered = reordered[order, :]
    reordered = 1 - reordered
    #print reordered
    pylab.matshow(reordered, cmap=plt.cm.OrRd)
    plt.colorbar()
    plt.savefig('./' + name + '.png', dpi=400)
    #pylab.show()
    plt.close()
def plot_cluster_size_distribution(populations, name='Populations'):
    """Bar-plot a cluster-size distribution and save <name>_Distribution.png.

    populations -- array-like of per-decade fractions; plotted as percentages
    with x-tick labels $10^1$, $10^2$, ...
    """
    fig = plt.figure(1, (10, 6))
    distrib = fig.add_subplot(1, 1, 1)
    fmt = '%.0f%%'  # Format you want the ticks, e.g. '40%'
    xticks = mtick.FormatStrFormatter(fmt)
    distrib.yaxis.set_major_formatter(xticks)
    plt.rc("font", size=30)
    plt.title('Cluster size distributions', fontsize=20)
    distrib.grid(True)
    X = range(len(populations))
    X_xtick = ['']
    # BUG FIX: xrange does not exist on Python 3; use range.
    for i in range(1, len(populations) + 1):
        xx = '$10^' + str(i) + '$'
        X_xtick.append(xx)
    print(X_xtick)
    #plt.xticks(X , ('$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$'))
    plt.xticks(np.arange(len(populations) + 1), X_xtick)
    plt.ylabel(r"Probability")
    plt.ylim([0, 100])
    print("X:", X)
    distrib.bar(X, populations * 100, facecolor='black', edgecolor='white', width=1.0)  #facecolor='#f78181',
    plt.savefig('./' + name + '_Distribution.png', dpi=400)
    plt.close()
    #plt.show()
def plot_compare_cluster_size_distribution(populations_1, populations_2, name='Populations'):
    """Side-by-side bar chart comparing two cluster-size distributions.

    Parameters
    ----------
    populations_1, populations_2 : array-like of float
        Per-decade fractions (see plot_cluster_size_distribution); both are
        drawn as percentages, the second offset by one bar width.
    name : str
        Basename of the output image ('./<name>_Distribution.png').
    """
    fig = plt.figure(1, (10, 8))
    distrib = fig.add_subplot(1, 1, 1)
    fmt = '%.0f%%'  # Format you want the ticks, e.g. '40%'
    xticks = mtick.FormatStrFormatter(fmt)
    distrib.yaxis.set_major_formatter(xticks)
    bar_width = 0.45
    plt.rc("font", size=20)
    distrib.grid(True)
    X = np.arange(len(populations_1))
    # range() replaces the Py2-only xrange(): same semantics, Py2/Py3 safe.
    X_xtick = ['']
    for i in range(1, len(populations_1) + 1):
        xx = '$10^' + str(i) + '$'
        X_xtick.append(xx)
    print(X_xtick)
    print("X:", X)
    distrib.bar(X, populations_1 * 100, facecolor='black', edgecolor='white', width=bar_width, label="kNN Density Peaks 3645 states")
    # Second data set, offset by one bar width so the bars sit side by side.
    X_xtick = ['']
    for i in range(1, len(populations_2) + 1):
        xx = '$10^' + str(i) + '$'
        X_xtick.append(xx)
    print(X_xtick)
    print("X:", X)
    distrib.bar(X + bar_width, populations_2 * 100, facecolor='gray', edgecolor='white', width=bar_width, label="kNN Density Peaks 117 states")
    plt.xticks(np.arange(len(populations_1) + 1 + bar_width), X_xtick)
    plt.ylabel(r"Probability")
    plt.ylim([0, 60])
    plt.legend()
    plt.savefig('./' + name + '_Distribution.png', dpi=400)
    plt.close()
#From Wang Wei's code
def plot_landscape(labels=None, phi_angles=None, psi_angles=None, phi_ctr=None, psi_ctr=None, name='Energy_Landscape', bins=80, potential=False):
    """Plot a free-energy landscape from phi/psi angles, plus a bar chart of
    how the data distribute over the discrete energy levels.

    Parameters
    ----------
    labels : array-like or None
        When None the cluster centers (and the level distribution) are not
        drawn on the landscape.
    phi_angles, psi_angles : array-like
        Dihedral angles of every frame, in degrees (roughly [-180, 180]).
    phi_ctr, psi_ctr : array-like
        Angles of the cluster-center frames, plotted as red dots.
    name : str
        Basename for the two output images.
    bins : int
        Number of 2-D histogram bins per axis.
    potential : bool
        When True use the smaller axis limits of the potential test case.
    """
    H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
    # Convert counts to free energy -ln(H/maxH); empty bins get level 9.
    plt.rc("font", size=25)
    maxH = np.max(H)
    for i in range(len(H)):
        for j in range(len(H)):
            if H[i][j] == 0:
                H[i][j] = 9
            else:
                H[i][j] = -np.log(H[i][j] / maxH)
    extent = [np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
    plt.figure(figsize=(12, 12))
    plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray)
    # Initialize up front so the distribution figure below is well defined
    # even when labels is None (the original raised NameError in that case).
    distribution = np.zeros(10, dtype=np.float64)
    # Plot cluster centers on the landscape and histogram the energy levels.
    if labels is not None:
        plt.plot(phi_ctr, psi_ctr, '.', markersize=10, color='r')
        for i in range(0, len(phi_angles)):
            # Clamp to the last bin for angles at/above the top edge.
            if psi_angles[i] > 179.0:
                index_x = np.where(xedges > 179.0)[0][0] - 1
            else:
                index_x = np.where(xedges > psi_angles[i])[0][0] - 1
            if phi_angles[i] > 179.0:
                index_y = np.where(yedges > 179.0)[0][0] - 1
            else:
                index_y = np.where(yedges > phi_angles[i])[0][0] - 1
            index_distrib = int(H[index_x][index_y])
            distribution[index_distrib] += 1
        distribution /= len(phi_angles)
        print(distribution)
    plt.xlabel('$\phi$', fontsize=20)
    plt.ylabel('$\Psi$', fontsize=20)
    cbar = plt.colorbar(shrink=0.77)
    cbar.set_label("$k_B T$", size=20)
    cbar.ax.tick_params(labelsize=20)
    if potential is False:
        plt.xlim([-180, 180])
        plt.ylim([-180, 180])
        plt.xticks([-120, -60, 0, 60, 120])
        plt.yticks([-120, -60, 0, 60, 120])
    else:
        plt.xlim([-75, 75])
        plt.ylim([-75, 75])
        plt.xticks([-50, 0, 50])
        plt.yticks([-50, 0, 50])
    plt.savefig('./' + name + '.png', dpi=400)
    plt.close()
    # Second figure: distribution of the data over the free-energy levels.
    fig = plt.figure(1, (10, 6))
    plt.rc("font", size=15)
    distrib = fig.add_subplot(1, 1, 1)
    distrib.grid(True)
    fmt = '%.0f%%'  # Format you want the ticks, e.g. '40%'
    xticks = mtick.FormatStrFormatter(fmt)
    distrib.yaxis.set_major_formatter(xticks)
    plt.title('Cluster Centers on Free energy landscape distribution', fontsize=20)
    plt.xlabel("$k_B T$")
    plt.ylabel(r"Probability")
    plt.ylim([0, 100])
    plt.xticks(np.arange(11), ('', '1', '', '3', '', '5', '', '7', '', '9', ''))
    distrib.bar(np.arange(10), distribution * 100, facecolor='black', edgecolor='white', width=1.0)
    plt.savefig('./' + name + '_Distribution.png', dpi=400)
    plt.close()
def plot_compare_distribution(labels_1=None, labels_2=None, phi_angles=None, psi_angles=None, phi_ctr_1=None, psi_ctr_1=None, phi_ctr_2=None, psi_ctr_2=None, name='Energy_Landscape', bins=36, potential=False):
    """Compare how two sets of cluster centers distribute over the
    free-energy levels of the phi/psi landscape.

    The landscape itself is only used to assign an integer energy level
    (0..9, empty bins forced to 9) to each center; the output is a grouped
    bar chart saved to './<name>_Distribution.png'.

    Parameters
    ----------
    labels_1, labels_2 : unused here; kept for interface compatibility.
    phi_angles, psi_angles : array-like
        Dihedral angles of every frame (degrees).
    phi_ctr_1, psi_ctr_1, phi_ctr_2, psi_ctr_2 : array-like
        Center angles for the two clusterings being compared.
    """
    H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
    # Convert counts to free energy -ln(H/maxH); empty bins get level 9.
    plt.rc("font", size=25)
    maxH = np.max(H)
    for i in range(len(H)):
        for j in range(len(H)):
            if H[i][j] == 0:
                H[i][j] = 9
            else:
                H[i][j] = -np.log(H[i][j] / maxH)

    def center_distribution(phi_ctr, psi_ctr):
        # Fraction of centers per integer energy level 0..9.
        distribution = np.zeros(10, dtype=np.float64)
        for k in range(0, len(phi_ctr)):
            # Clamp to the last bin for angles at/above the top edge.
            if psi_ctr[k] > 179.0:
                index_x = np.where(xedges > 179.0)[0][0] - 1
            else:
                index_x = np.where(xedges > psi_ctr[k])[0][0] - 1
            if phi_ctr[k] > 179.0:
                index_y = np.where(yedges > 179.0)[0][0] - 1
            else:
                index_y = np.where(yedges > phi_ctr[k])[0][0] - 1
            distribution[int(H[index_x][index_y])] += 1
        return distribution / len(phi_ctr)

    # The original duplicated this loop for each center set; factored into
    # the local helper above (same arithmetic, same prints).
    distribution_1 = center_distribution(phi_ctr_1, psi_ctr_1)
    print(distribution_1)
    distribution_2 = center_distribution(phi_ctr_2, psi_ctr_2)
    print(distribution_2)
    plt.xlabel('$\phi$', fontsize=20)
    plt.ylabel('$\Psi$', fontsize=20)
    # Grouped bar chart of the two distributions.
    fig = plt.figure(1, (10, 6))
    plt.rc("font", size=15)
    distrib = fig.add_subplot(1, 1, 1)
    distrib.grid(True)
    fmt = '%.0f%%'  # Format you want the ticks, e.g. '40%'
    xticks = mtick.FormatStrFormatter(fmt)
    distrib.yaxis.set_major_formatter(xticks)
    n_groups = 10
    index = np.arange(n_groups)
    bar_width = 0.45
    distrib.bar(index, distribution_1 * 100, facecolor='black', edgecolor='white', width=bar_width, label="kNN Density Peaks 3645 states")
    distrib.bar(index + bar_width, distribution_2 * 100, facecolor='gray', edgecolor='white', width=bar_width, label="kNN Density Peaks 117 states")
    plt.xlabel("$k_B T$")
    plt.ylabel(r"Fraction number of clusters")
    plt.ylim([0, 50])
    plt.xticks(index + bar_width, ('', '1', '', '3', '', '5', '', '7', '', '9', ''))
    plt.legend()
    plt.savefig('./' + name + '_Distribution.png', dpi=400)
    plt.close()
def plot_landscape_barrier(labels=None, selected=1, phi_angles=None, psi_angles=None, phi_ctr=None, psi_ctr=None, name='Energy_Landscape', bins=36, potential=False, outliers=-1):
    """Free-energy landscape with the member frames of selected clusters
    overlaid in color and a barrier line drawn at phi = -103.

    Parameters
    ----------
    labels : array-like or None
        Per-frame cluster labels; None suppresses the center overlay.
    selected : int or sequence of int
        Cluster label(s) whose frames are drawn.  The original `i in
        selected` raised TypeError for the scalar default; scalars are now
        promoted to 1-element arrays (np.atleast_1d), sequences unchanged.
    outliers : int
        Label treated as noise and never drawn.
    """
    H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
    # Convert counts to free energy -ln(H/maxH); empty bins get level 9.
    plt.rc("font", size=25)
    maxH = np.max(H)
    for i in range(len(H)):
        for j in range(len(H)):
            if H[i][j] == 0:
                H[i][j] = 9
            else:
                H[i][j] = -np.log(H[i][j] / maxH)
    extent = [np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
    plt.figure(figsize=(12, 12))
    plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray)
    # Overlay the member frames of each selected (non-outlier) cluster.
    selected = np.atleast_1d(selected)
    colors = ['y', 'b', 'tomato', 'm', 'g', 'c', 'yellowgreen']
    color_index = 0
    clusters = np.unique(labels)
    for i in clusters:
        if i != outliers:
            if i in selected:
                point = np.where(labels == i)
                plt.plot(phi_angles[point], psi_angles[point], '2', alpha=0.20, color=colors[color_index])
                color_index += 1
    # Plot cluster centers on the landscape and histogram the energy levels.
    if labels is not None:
        plt.plot(phi_ctr, psi_ctr, '*', markersize=10, color='r')
        distribution = np.zeros(10, dtype=np.float64)
        # range() replaces the Py2-only xrange().
        for i in range(0, len(phi_ctr)):
            # Clamp to the last bin for angles at/above the top edge.
            if psi_ctr[i] > 179.0:
                index_x = np.where(xedges > 179.0)[0][0] - 1
            else:
                index_x = np.where(xedges > psi_ctr[i])[0][0] - 1
            if phi_ctr[i] > 179.0:
                index_y = np.where(yedges > 179.0)[0][0] - 1
            else:
                index_y = np.where(yedges > phi_ctr[i])[0][0] - 1
            index_distrib = int(H[index_x][index_y])
            distribution[index_distrib] += 1
        distribution /= len(phi_ctr)
        print(distribution)
    plt.xlabel('$\phi$', fontsize=20)
    plt.ylabel('$\Psi$', fontsize=20)
    cbar = plt.colorbar(shrink=0.77)
    cbar.set_label("$k_B T$", size=20)
    cbar.ax.tick_params(labelsize=20)
    plt.xlim([-180, 180])
    plt.ylim([-180, 180])
    plt.xticks([-120, -60, 0, 60, 120])
    plt.yticks([-120, -60, 0, 60, 120])
    plt.plot([-103, -103], [30, 180], 'w')  # plot the barrier
    plt.savefig('./' + name + '.png', dpi=400)
    plt.close()
def calculate_population(labels, name='Populations'):
    """Compute the cluster-size distribution (per decade) and plot it.

    Parameters
    ----------
    labels : array-like of int
        Per-frame cluster assignment; states are assumed to be labeled
        0..max(labels).
    name : str
        Basename passed on to plot_cluster_size_distribution.
    """
    print("Calculating and plotting population...")
    counts = list(Counter(labels).values())
    total_states = np.max(labels) + 1
    total_frames = len(labels)
    frames_magnitude = int(np.ceil(np.log10(total_frames)))
    print("states", total_states, "frames", total_frames)
    populations = np.zeros(frames_magnitude + 1)
    for i in counts:
        if i > 0:
            log_i = np.log10(i)
            # int(): np.ceil returns a float, and float indices are an
            # error in modern numpy (and were always only implicitly cast).
            magnitude = int(np.ceil(log_i))
            populations[magnitude] += 1
    print("Populations Probability:")
    # range() replaces the Py2-only xrange().
    for i in range(len(populations)):
        populations[i] = populations[i] / total_states
        print("10 ^", i, "to", "10 ^", i + 1, ":", populations[i] * 100, "%")
    name += '_Populations'
    print("name:", name)
    plot_cluster_size_distribution(populations=populations, name=name)
    print("Done.")
def compare_population(labels_1, labels_2, name='Compare_Populations'):
    """Compute two cluster-size distributions and plot them side by side.

    Parameters
    ----------
    labels_1, labels_2 : array-like of int
        Per-frame cluster assignments for the two clusterings.
    name : str
        Basename passed on to plot_compare_cluster_size_distribution.
    """
    print("Calculating and plotting population...")

    def population_histogram(labels):
        # Fraction of states per decade of cluster size (was duplicated
        # inline for each label set; identical arithmetic and prints).
        counts = list(Counter(labels).values())
        total_states = np.max(labels) + 1
        total_frames = len(labels)
        frames_magnitude = int(np.ceil(np.log10(total_frames)))
        print("states", total_states, "frames", total_frames)
        populations = np.zeros(frames_magnitude + 1)
        for i in counts:
            if i > 0:
                log_i = np.log10(i)
                # int(): float indices are an error in modern numpy.
                magnitude = int(np.ceil(log_i))
                populations[magnitude] += 1
        print("Populations Probability:")
        # range() replaces the Py2-only xrange().
        for i in range(len(populations)):
            populations[i] = populations[i] / total_states
            print("10 ^", i, "to", "10 ^", i + 1, ":", populations[i] * 100, "%")
        return populations

    populations_1 = population_histogram(labels_1)
    populations_2 = population_histogram(labels_2)
    name += '_Populations'
    print("name:", name)
    plot_compare_cluster_size_distribution(populations_1=populations_1, populations_2=populations_2, name=name)
    print("Done.")
def calculate_landscape(labels, centers, phi_angles, psi_angles, potential=False, name='Energy_Landscape'):
    """Extract the cluster-center angles/labels and draw the landscape.

    ``centers`` indexes into the per-frame arrays; the selected values are
    forwarded to plot_landscape together with all the frame angles.
    """
    print("Calculating and plotting Landscape...")
    center_phi = phi_angles[centers]
    center_psi = psi_angles[centers]
    center_labels = labels[centers]
    name = name + '_Energy_Landscape'
    print("name:", name)
    plot_landscape(labels=center_labels, phi_angles=phi_angles,
                   psi_angles=psi_angles, phi_ctr=center_phi,
                   psi_ctr=center_psi, potential=potential, name=name)
    print("Done")
| apache-2.0 |
impactlab/eemeter | eemeter/io/serializers.py | 1 | 11692 | import pandas as pd
import numpy as np
import pytz
import warnings
class BaseSerializer(object):
    """Base class for converting between lists of record dicts and pandas
    DataFrames indexed by datetime.

    Subclasses declare ``sort_key`` (the record field used for chronological
    sorting), ``required_fields`` and ``datetime_fields``, and implement
    ``yield_records`` / ``to_records``.
    """

    sort_key = None          # record field used to sort chronologically
    required_fields = []     # fields every record must contain
    datetime_fields = []     # fields that must be timezone-aware datetimes

    def _sort_records(self, records):
        """Return ``records`` sorted by ``self.sort_key``.

        Raises AttributeError when the subclass did not define ``sort_key``
        and ValueError when a record is missing that key.
        """
        if self.sort_key is None:
            message = (
                'Must supply cls.sort_key in class definition.'
            )
            raise AttributeError(message)
        try:
            sorted_records = sorted(records, key=lambda x: x[self.sort_key])
        except KeyError:
            message = (
                'Sorting failed due to missing key {} in record.'
                .format(self.sort_key)
            )
            raise ValueError(message)
        return sorted_records

    def _validated_tuples_to_dataframe(self, validated_tuples):
        """Build a DataFrame (columns: value float, estimated bool) from
        (datetime, value, estimated) tuples; empty input gives an empty
        DataFrame with the same columns."""
        if not validated_tuples:  # idiomatic emptiness test (was `== []`)
            dts, values, estimateds = [], [], []
        else:
            dts, values, estimateds = zip(*validated_tuples)

        df = pd.DataFrame(
            {"value": values, "estimated": estimateds},
            index=pd.DatetimeIndex(dts),
            columns=["value", "estimated"],
        )
        df.value = df.value.astype(float)
        df.estimated = df.estimated.astype(bool)
        return df

    def _validate_record_start_end(self, record, start, end):
        """Raise ValueError unless ``start`` is strictly before ``end``."""
        if start >= end:
            message = (
                'Record "start" must be earlier than record "end": {}\n'
                '{} >= {}.'.format(record, start, end)
            )
            raise ValueError(message)

    def to_dataframe(self, records):
        """
        Returns a dataframe of records.
        """
        sorted_records = self._sort_records(records)
        validated_tuples = list(self.yield_records(sorted_records))
        return self._validated_tuples_to_dataframe(validated_tuples)

    def yield_records(self, sorted_records):
        """
        Yields validated (start (datetime), value (float), estimated (bool))
        tuples of data.
        """
        raise NotImplementedError('`yield_records()` must be implemented.')

    def validate_record(self, record):
        """Check that required fields exist and datetime fields are
        timezone-aware; raise ValueError otherwise."""
        # make sure required fields are available
        for field in self.required_fields:
            if field not in record:
                message = (
                    'Record missing "{}" field:\n{}'
                    .format(field, record)
                )
                raise ValueError(message)

        # make sure dates/datetimes are tz aware
        for field in self.datetime_fields:
            dt = record[field]
            if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
                message = (
                    'Record field ("{}": {}) is not timezone aware:\n{}'
                    .format(field, dt, record)
                )
                raise ValueError(message)

    def to_records(self, dataframe):
        """Inverse of ``to_dataframe``; implemented by subclasses."""
        raise NotImplementedError('`to_records()` must be implemented.')
class ArbitrarySerializer(BaseSerializer):
    '''
    Arbitrary data at arbitrary non-overlapping intervals.
    Often used for monthly billing data. Records must all have
    the "start" key and the "end" key. Overlaps are not allowed and
    gaps will be filled with NaN.

    For example:

    .. code-block:: python

        >>> records = [
        ...     {
        ...         "start": datetime(2013, 12, 30, tzinfo=pytz.utc),
        ...         "end": datetime(2014, 1, 28, tzinfo=pytz.utc),
        ...         "value": 1180,
        ...     },
        ...     {
        ...         "start": datetime(2014, 1, 28, tzinfo=pytz.utc),
        ...         "end": datetime(2014, 2, 27, tzinfo=pytz.utc),
        ...         "value": 1211,
        ...         "estimated": True,
        ...     },
        ...     {
        ...         "start": datetime(2014, 2, 28, tzinfo=pytz.utc),
        ...         "end": datetime(2014, 3, 30, tzinfo=pytz.utc),
        ...         "value": 985,
        ...     },
        ... ]
        ...
        >>> serializer = ArbitrarySerializer()
        >>> df = serializer.to_dataframe(records)
        >>> df
                                    value estimated
        2013-12-30 00:00:00+00:00  1180.0     False
        2014-01-28 00:00:00+00:00  1211.0      True
        2014-02-27 00:00:00+00:00     NaN     False
        2014-02-28 00:00:00+00:00   985.0     False
        2014-03-30 00:00:00+00:00     NaN     False
    '''
    sort_key = "start"
    required_fields = ["start", "end", "value"]
    datetime_fields = ["start", "end"]

    def validate_record(self, record):
        """Base validation plus the start < end check."""
        super(ArbitrarySerializer, self)\
            .validate_record(record)
        self._validate_record_start_end(record, record["start"], record["end"])

    def yield_records(self, sorted_records):
        """Yield (start, value, estimated) tuples; a NaN row is inserted for
        each gap between records, overlapping records are skipped with a
        warning, and a final NaN row carries the last end datetime."""
        previous_end_datetime = None
        for record in sorted_records:
            self.validate_record(record)

            start = record["start"]
            end = record["end"]
            value = record["value"]
            estimated = record.get("estimated", False)

            if previous_end_datetime is None or start == previous_end_datetime:
                # normal record
                yield (start, value, estimated)
                previous_end_datetime = end
            elif start > previous_end_datetime:
                # blank record fills the gap
                yield (previous_end_datetime, np.nan, False)
                # normal record
                yield (start, value, estimated)
                previous_end_datetime = end
            else:  # start < previous_end_datetime
                message = 'Skipping overlapping record: '\
                          'start ({}) < previous end ({})'\
                          .format(start, previous_end_datetime)
                warnings.warn(message)

        # final record carries last datetime, but only if there was a record
        if previous_end_datetime is not None:
            yield (previous_end_datetime, np.nan, False)

    def to_records(self, df):
        """Inverse of ``to_dataframe``: rebuild start/end records from
        consecutive index pairs (the last row only supplies an end)."""
        records = []
        for s, e, v, est in zip(df.index, df.index[1:],
                                df.value, df.estimated):
            records.append({
                # Timestamp.to_datetime() was removed in pandas 1.0;
                # to_pydatetime() is the long-supported equivalent.
                "start": pytz.UTC.localize(s.to_pydatetime()),
                "end": pytz.UTC.localize(e.to_pydatetime()),
                "value": v,
                "estimated": bool(est),
            })
        return records
class ArbitraryStartSerializer(BaseSerializer):
    '''
    Arbitrary start data at arbitrary non-overlapping intervals.
    Records must all have the "start" key. The last data point
    will be ignored unless an end date is provided for it.

    This is useful for data dated to future energy use, e.g. billing for
    delivered fuels.

    For example:

    .. code-block:: python

        >>> records = [
        ...     {
        ...         "start": datetime(2013, 12, 30, tzinfo=pytz.utc),
        ...         "value": 1180,
        ...     },
        ...     {
        ...         "start": datetime(2014, 1, 28, tzinfo=pytz.utc),
        ...         "value": 1211,
        ...         "estimated": True,
        ...     },
        ...     {
        ...         "start": datetime(2014, 2, 28, tzinfo=pytz.utc),
        ...         "value": 985,
        ...     },
        ... ]
        ...
        >>> serializer = ArbitraryStartSerializer()
        >>> df = serializer.to_dataframe(records)
        >>> df
                                    value estimated
        2013-12-30 00:00:00+00:00  1180.0     False
        2014-01-28 00:00:00+00:00  1211.0      True
        2014-02-28 00:00:00+00:00     NaN     False
    '''
    sort_key = "start"
    required_fields = ["start", "value"]
    datetime_fields = ["start"]

    def yield_records(self, sorted_records):
        """Yield (start, value, estimated) tuples; the final record's value
        is replaced by NaN unless it carries an explicit "end" cap."""
        n = len(sorted_records)
        for i, record in enumerate(sorted_records):
            self.validate_record(record)

            start = record["start"]
            value = record["value"]
            estimated = record.get("estimated", False)

            if i < n - 1:  # all except last record
                yield (start, value, estimated)
            else:  # last record
                end = record.get("end", None)
                if end is None:
                    # can't use the value of this record, no end date
                    yield (start, np.nan, False)
                else:
                    self._validate_record_start_end(record, start, end)
                    # provide an end date cap
                    if pd.notnull(value):
                        yield (start, value, estimated)
                        yield (end, np.nan, False)
                    else:
                        yield (start, np.nan, False)

    def to_records(self, df):
        """Inverse of ``to_dataframe``: one "start" record per row."""
        records = []
        for i, row in df.iterrows():
            records.append({
                # Timestamp.to_datetime() was removed in pandas 1.0;
                # to_pydatetime() is the long-supported equivalent.
                "start": pytz.UTC.localize(i.to_pydatetime()),
                "value": row.value,
                "estimated": bool(row.estimated),
            })
        return records
class ArbitraryEndSerializer(BaseSerializer):
    '''
    Arbitrary end data at arbitrary non-overlapping intervals.
    Records must all have the "end" key. The first data point
    will be ignored unless a start date is provided for it.

    This is useful for data dated to past energy use, e.g. electricity
    or natural gas bills.

    For example:

    .. code-block:: python

        >>> records = [
        ...     {
        ...         "end": datetime(2013, 12, 30, tzinfo=pytz.utc),
        ...         "value": 1180,
        ...     },
        ...     {
        ...         "end": datetime(2014, 1, 28, tzinfo=pytz.utc),
        ...         "value": 1211,
        ...         "estimated": True,
        ...     },
        ...     {
        ...         "end": datetime(2014, 2, 28, tzinfo=pytz.utc),
        ...         "value": 985,
        ...     },
        ... ]
        ...
        >>> serializer = ArbitraryEndSerializer()
        >>> df = serializer.to_dataframe(records)
        >>> df
                                    value estimated
        2013-12-30 00:00:00+00:00  1211.0      True
        2014-01-28 00:00:00+00:00   985.0     False
        2014-02-28 00:00:00+00:00     NaN     False
    '''
    sort_key = "end"
    required_fields = ["end", "value"]
    datetime_fields = ["end"]

    def yield_records(self, sorted_records):
        """Yield (start, value, estimated) tuples: each record's value is
        dated to the previous record's end.  The first record is dropped
        unless it carries an explicit "start"; a final NaN row carries the
        last end datetime."""
        previous_end_datetime = None
        for record in sorted_records:
            self.validate_record(record)

            end = record["end"]
            value = record["value"]
            estimated = record.get("estimated", False)

            if previous_end_datetime is None:
                # first record, might have start
                start = record.get("start", None)
                if start is not None:
                    self._validate_record_start_end(record, start, end)
                    yield (start, value, estimated)
            else:
                yield (previous_end_datetime, value, estimated)
            previous_end_datetime = end

        if previous_end_datetime is not None:
            yield (previous_end_datetime, np.nan, False)

    def to_records(self, df):
        """Inverse of ``to_dataframe``: the first row becomes a NaN "end"
        record, then one record per remaining row."""
        records = []
        if df.shape[0] > 0:
            records.append({
                # Timestamp.to_datetime() was removed in pandas 1.0;
                # to_pydatetime() is the long-supported equivalent.
                "end": pytz.UTC.localize(df.index[0].to_pydatetime()),
                "value": np.nan,
                "estimated": False,
            })
            for e, v, est in zip(df.index[1:], df.value, df.estimated):
                records.append({
                    "end": pytz.UTC.localize(e.to_pydatetime()),
                    "value": v,
                    "estimated": bool(est),
                })
        return records
| mit |
UNR-AERIAL/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
# Use only diabetes features 0 and 1; hold out the last 20 samples for test.
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
# Fit ordinary least squares on the two selected features.
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
    # Render one 3-D view of the two-feature OLS fit: the training points
    # plus the fitted regression plane, seen from elevation `elev` and
    # azimuth `azim`.  `clf` must already be fitted on X_train's 2 features;
    # y_train is read from module scope.
    fig = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, elev=elev, azim=azim)

    ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
    # Evaluate the plane on the 4 corners of a small square in feature space
    # and draw it as a (2, 2) surface.
    ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
                    np.array([[-.1, .15], [-.1, .15]]),
                    clf.predict(np.array([[-.1, -.1, .15, .15],
                                          [-.1, .15, -.1, .15]]).T
                                ).reshape((2, 2)),
                    alpha=.5)
    ax.set_xlabel('X_1')
    ax.set_ylabel('X_2')
    ax.set_zlabel('Y')
    # Hide tick labels for a cleaner figure.
    # NOTE(review): ax.w_*axis is deprecated/removed in modern matplotlib
    # (use ax.xaxis etc.) -- confirm the target matplotlib version.
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
jseabold/statsmodels | statsmodels/tsa/statespace/tests/test_save.py | 3 | 4402 | """
Tests of save / load / remove_data state space functionality.
"""
import pickle
import os
import tempfile
import pytest
from statsmodels import datasets
from statsmodels.tsa.statespace import (sarimax, structural, varmax,
dynamic_factor)
from numpy.testing import assert_allclose
current_path = os.path.dirname(os.path.abspath(__file__))
macrodata = datasets.macrodata.load_pandas().data
@pytest.fixture()
def temp_filename():
    """Yield a temporary filename; clean it up (best effort) after the test."""
    fd, filename = tempfile.mkstemp()
    yield filename
    try:
        os.close(fd)
        os.unlink(filename)
    except Exception:
        # Best-effort cleanup only: don't fail the test on cleanup problems.
        # Fix: the original string had no {filename} placeholder, so
        # .format(filename=...) was a no-op and the message never named
        # the file.
        print("Couldn't close or delete file "
              "{filename}.".format(filename=filename))
def test_sarimax(temp_filename):
    """Save/load round-trip for SARIMAX results preserves the estimates."""
    model = sarimax.SARIMAX(macrodata['realgdp'].values, order=(4, 1, 0))
    result = model.smooth(model.start_params)
    result.summary()
    result.save(temp_filename)
    reloaded = sarimax.SARIMAXResults.load(temp_filename)
    for attr in ('params', 'bse', 'llf'):
        assert_allclose(getattr(result, attr), getattr(reloaded, attr))
def test_sarimax_pickle():
    """Pickling the unfitted SARIMAX model must not change the fit results."""
    model = sarimax.SARIMAX(macrodata['realgdp'].values, order=(4, 1, 0))
    restored = pickle.loads(pickle.dumps(model))
    result = model.smooth(model.start_params)
    restored_result = restored.smooth(model.start_params)
    for attr in ('params', 'bse', 'llf'):
        assert_allclose(getattr(result, attr), getattr(restored_result, attr))
def test_structural(temp_filename):
    """Save/load round-trip for UnobservedComponents results."""
    model = structural.UnobservedComponents(
        macrodata['realgdp'].values, 'llevel')
    result = model.smooth(model.start_params)
    result.summary()
    result.save(temp_filename)
    reloaded = structural.UnobservedComponentsResults.load(temp_filename)
    for attr in ('params', 'bse', 'llf'):
        assert_allclose(getattr(result, attr), getattr(reloaded, attr))
def test_structural_pickle():
    """Pickling the unfitted UnobservedComponents model preserves results."""
    model = structural.UnobservedComponents(
        macrodata['realgdp'].values, 'llevel')
    restored = pickle.loads(pickle.dumps(model))
    result = model.smooth(model.start_params)
    restored_result = restored.smooth(restored.start_params)
    for attr in ('params', 'bse', 'llf'):
        assert_allclose(getattr(result, attr), getattr(restored_result, attr))
def test_dynamic_factor(temp_filename):
    """Save/load round-trip for DynamicFactor results."""
    data = macrodata[['realgdp', 'realcons']].diff().iloc[1:].values
    model = dynamic_factor.DynamicFactor(data, k_factors=1, factor_order=1)
    result = model.smooth(model.start_params)
    result.summary()
    result.save(temp_filename)
    reloaded = dynamic_factor.DynamicFactorResults.load(temp_filename)
    for attr in ('params', 'bse', 'llf'):
        assert_allclose(getattr(result, attr), getattr(reloaded, attr))
def test_dynamic_factor_pickle(temp_filename):
    # NOTE(review): despite the name, this test builds a VARMAX model rather
    # than a DynamicFactor one -- confirm whether that is intentional or a
    # copy/paste slip from test_varmax.
    mod = varmax.VARMAX(
        macrodata[['realgdp', 'realcons']].diff().iloc[1:].values,
        order=(1, 0))
    # Pickle round-trip of the unfitted model; fit results must match.
    pkl_mod = pickle.loads(pickle.dumps(mod))

    res = mod.smooth(mod.start_params)
    pkl_res = pkl_mod.smooth(mod.start_params)

    assert_allclose(res.params, pkl_res.params)
    assert_allclose(res.bse, pkl_res.bse)
    assert_allclose(res.llf, pkl_res.llf)

    # Also exercise the save/load path with the same fitted results.
    res.summary()
    res.save(temp_filename)
    res2 = varmax.VARMAXResults.load(temp_filename)
    assert_allclose(res.params, res2.params)
    assert_allclose(res.bse, res2.bse)
    assert_allclose(res.llf, res2.llf)
def test_varmax(temp_filename):
    """Save/load round-trip for VARMAX results."""
    data = macrodata[['realgdp', 'realcons']].diff().iloc[1:].values
    model = varmax.VARMAX(data, order=(1, 0))
    result = model.smooth(model.start_params)
    result.summary()
    result.save(temp_filename)
    reloaded = varmax.VARMAXResults.load(temp_filename)
    for attr in ('params', 'bse', 'llf'):
        assert_allclose(getattr(result, attr), getattr(reloaded, attr))
def test_varmax_pickle(temp_filename):
    # NOTE(review): despite the "_pickle" name this test never pickles --
    # it duplicates test_varmax's save/load round-trip exactly; confirm
    # whether a pickle.loads(pickle.dumps(...)) step was intended.
    mod = varmax.VARMAX(
        macrodata[['realgdp', 'realcons']].diff().iloc[1:].values,
        order=(1, 0))
    res = mod.smooth(mod.start_params)

    res.summary()
    res.save(temp_filename)
    res2 = varmax.VARMAXResults.load(temp_filename)
    assert_allclose(res.params, res2.params)
    assert_allclose(res.bse, res2.bse)
    assert_allclose(res.llf, res2.llf)
def test_existing_pickle():
    """A results pickle written by statsmodels 0.9 must still load."""
    legacy_path = os.path.join(current_path, 'results', 'sm-0.9-sarimax.pkl')
    loaded = sarimax.SARIMAXResults.load(legacy_path)
    assert isinstance(loaded, sarimax.SARIMAXResultsWrapper)
| bsd-3-clause |
RocketRedNeck/PythonPlayground | pidSim.py | 1 | 18070 | # -*- coding: utf-8 -*-
"""
pidSim.py
A simulation of a vision control to steering PID loop accounting for communication and
processing latency and variation; demonstrates the impact of variation
to successful control when the control variable (CV) has direct influence on
the process variable (PV)
This allows students to experiment with how different elements in the scaling
of a control loop affect performance, this focusing efforts on successful
design.
The model consists of a PID processing software with an asynchronous alignment
with a camera frame which is also asynchronous to image processing software.
Communication latency and jitter are planned as well as image processing impacts.
A plot at the end shows a sample over some specified duration.
The initial conditions of the file represents a case that won't work well until
it is correct by improvements in the constants and image processing rates
Copyright (c) 2016 - RocketRedNeck.com RocketRedNeck.net
RocketRedNeck and MIT Licenses
RocketRedNeck hereby grants license for others to copy and modify this source code for
whatever purpose other's deem worthy as long as RocketRedNeck is given credit where
where credit is due and you leave RocketRedNeck out of it for all other nefarious purposes.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
****************************************************************************************************
"""
import matplotlib.pyplot as plot
import numpy as np
# --- Simulation time base ----------------------------------------------------
tmax_sec = 5.0  # Total simulated duration [s]
dt_sec = 0.001  # Simulation time step [s] (1 ms resolution)
ts_sec = np.arange(0.0, tmax_sec, 0.001)
nmax = ts_sec.__len__() # round(tmax_sec/dt_sec)
ns = range(0, nmax)  # Discrete simulation step indices
# --- Controller and plant constants ------------------------------------------
kp = 0.3 # Proportional gain
ki = 0.03 # Integral gain
kd = 0.0 # Derivative gain
kg = 1.0 # Plant (Process) gain
tau_sec = 0.1  # First-order plant time constant [s]
# --- Per-step state histories (one slot per simulation step) -----------------
sp = np.zeros(nmax) # Will initialize after first image processed
err = np.zeros(nmax)  # Control error history
intErr = np.zeros(nmax)  # Integrated error history
derrdt = np.zeros(nmax)  # Error-rate estimate history
lastErr = 0.0  # NOTE(review): appears unused in the loop below — confirm
G = np.zeros(nmax) # Process output to be measured
exp = np.exp(-dt_sec/tau_sec)  # Discrete decay factor for the first-order plant
# Model of the pid task via a java util.timer
# We add a random normal variation for task wakeup since the util.timer
# can only assure that the task wakes up no earlier than scheduled.
# Empirical measurement of the task latency is required for accurate
# modeling, but for now we can just assume about a 10% average
pidPeriod_sec = 0.02;
pidPeriod_index = round(pidPeriod_sec / dt_sec)
pidStart_index = 0 # "time" that PID computation started
pidDuration_sec = 0.001 # Time to complete PID calculation (models software latency)
pidDuration_index = round(pidDuration_sec / dt_sec)
pidEnd_index = pidStart_index + pidDuration_index # "time" that PID computation ended
pidMinJitter_sec = 0.000 # Minimum Random task jitter
pidMinJitter_index = round(pidMinJitter_sec / dt_sec)
pidMaxJitter_sec = 0.000 # Maximum Random task jitter
pidMaxJitter_index = round(pidMaxJitter_sec / dt_sec)
# The min/max jitter range is treated as a 3-sigma spread of a normal
# distribution (mean and std-dev expressed in time-step indices)
pidMeanJitter_index = round((pidMaxJitter_index + pidMinJitter_index)/2)
pidStdDevJitter_index = round((pidMaxJitter_index - pidMinJitter_index) / 3)
cvPid = np.zeros(nmax) # Initial value of cv coming from PID calculation
# The first communication link is assumed to be a CAN bus
# The bus overhead is assumed to be a total fixed time
# not exceeding about 1 ms for up to four (4) messages going to four (4)
# separate motors (including any increases for bit stuffing); in other words
# we assume something like 100 bits per message all mastered from the same
# location on a 1 Mbps bus.
# The underlying software is assumed to be some queue processing task that
# wakes upon a posted message. A complete review of the path is needed to
# assess whether the task that actually receives the posted message awakens
# immediately (higher priority) or must time slice with all other concurrent
# tasks. If communication tasking is forced to wait for an available cycle
# it is possible that an indeterminate delay may occur at the post-to-wire
# boundary; also, the communication tasking must post all messages queued
# to the wire in close sequence otherwise the motors will be out of phase
# We can inject an estimate of communication jitter as a whole using a
# simple normal distribution
comm0Start_index = 0 # "time" that first communication bus starts
comm0Delay_sec = 0.001 # Time to complete communication (MUST BE LESS THAN PID PERIOD)
comm0Delay_index = round(comm0Delay_sec / dt_sec)
comm0End_index = comm0Start_index + comm0Delay_index
comm0MinJitter_sec = 0.000
comm0MinJitter_index = round(comm0MinJitter_sec / dt_sec)
comm0MaxJitter_sec = 0.000
comm0MaxJitter_index = round(comm0MaxJitter_sec / dt_sec)
# Min/max range treated as a 3-sigma spread (see PID jitter model above)
comm0MeanJitter_index = round((comm0MaxJitter_index + comm0MinJitter_index)/2)
comm0StdDevJitter_index = round((comm0MaxJitter_index - comm0MinJitter_index) / 3)
cvComm0 = np.zeros(nmax) # cv value delayed for first communication bus
# --- Camera model: fixed frame rate, asynchronous to everything else ---------
camOffset_sec = 0.0 # Offset to represent asynchronous camera start
camOffset_index = round(camOffset_sec / dt_sec)
camStart_index = camOffset_index # "time" that camera runs
camRate_Hz = 30 # Camera frame rate
camPeriod_sec = 1.0/camRate_Hz
camPeriod_index = round(camPeriod_sec / dt_sec)
camEnd_index = camStart_index + camPeriod_index
camImage_index = round((camStart_index + camEnd_index) / 2) # Time associated with center of image
pvCam = np.zeros(nmax) # process variable delayed for camera framing
# The second communication bus is polled by the imaging software
# The time that the imaging software starts is asynchronous to the
# other system components, and it will not execute again until the
# image processing completes (which itself has some variation)
comm1Start_index = 0 # "time" that second communication bus starts
comm1Delay_sec = 0.020 # Time to complete communication
comm1Delay_index = round(comm1Delay_sec / dt_sec)
comm1End_index = comm1Start_index + comm1Delay_index
comm1MinJitter_sec = 0.000
comm1MinJitter_index = round(comm1MinJitter_sec / dt_sec)
comm1MaxJitter_sec = 0.000
comm1MaxJitter_index = round(comm1MaxJitter_sec / dt_sec)
# Min/max range treated as a 3-sigma spread (see PID jitter model above)
comm1MeanJitter_index = round((comm1MaxJitter_index + comm1MinJitter_index)/2)
comm1StdDevJitter_index = round((comm1MaxJitter_index - comm1MinJitter_index) / 3)
pvComm1 = np.zeros(nmax) # pv value delayed for second communication bus
# Image processing consists of a bounded, but variable process
# The content of the image and the operating environment will cause the
# associated software to vary; we will use empirical estimates for a current
# approach and will assume the variation has a normal distribution with a
# 3-sigma distribution between the upper and lower limits
pvImageStart_index = 0
pvImageMaxRate_Hz = 5.0  # Fastest observed image-processing rate
pvImageMinRate_Hz = 3.0  # Slowest observed image-processing rate
pvImageRateSigma = 3     # Treat the min/max duration range as this many sigma
pvImageMaxDuration_sec = 1.0 / pvImageMinRate_Hz
pvImageMinDuration_sec = 1.0 / pvImageMaxRate_Hz
pvImageMaxDuration_index = round(pvImageMaxDuration_sec / dt_sec)
pvImageMinDuration_index = round(pvImageMinDuration_sec / dt_sec)
pvImageMeanDuration_index = round((pvImageMinDuration_index + pvImageMaxDuration_index)/2)
pvImageStdDevDuration_index = round((pvImageMaxDuration_index - pvImageMinDuration_index) / pvImageRateSigma)
pvImageEnd_index = pvImageStart_index + 2*pvImageMaxDuration_index
pvImage = np.zeros(nmax)
# Final communication link between image processing and the PID
comm2Start_index = 2*pvImageMaxDuration_index # "time" that third communication bus starts (always after image processing)
comm2Delay_sec = 0.020 # Time to complete communication
comm2Delay_index = round(comm2Delay_sec / dt_sec)
# BUG FIX: the initial end index was computed from comm1Delay_index (a
# copy/paste error). Use comm2's own delay, matching how the loop recomputes
# it. (Both delays happen to be 0.020 s, so results are unchanged, but the
# expression is now correct if either delay is edited independently.)
comm2End_index = comm2Start_index + comm2Delay_index
comm2Jitter_sec = 0.0 # Later we will add a "random" jitter that delays communication
comm2Jitter_index = round(comm2Jitter_sec / dt_sec)
pvComm2 = np.zeros(nmax) # pv value delayed for third communication bus
pvFinal = np.zeros(nmax) # Final pv seen by the PID after all delays
for n in ns:
    # Only run the PID calculation on a period boundary
    # i.e., this branch represents the task scheduled on a boundary
    # When jitter is enabled we will occasionally add a delay
    # representing a late task start (independent of measurement jitter)
    # We assume here that the task is delayed and not immediately preempted
    # and thus able to make full use of its time slice
    if (pidStdDevJitter_index == 0):
        pidJitter_index = 0
    else:
        pidJitter_index = round(np.random.normal(pidMeanJitter_index, pidStdDevJitter_index))
    if ((n % (pidPeriod_index + pidJitter_index)) == 0):
        #print("@ " + str(n) + " pid start")
        pidStart_index = n
        pidEnd_index = pidStart_index + pidDuration_index
        # Once we get going, we can compute the error as the
        # difference of the setpoint and the latest output
        # of the process variable (delivered after all sensor and
        # communication delays)
        if (n > 0):
            err[n] = sp[n] - pvFinal[n-1]
            # Assume we currently have no way of directly measuring derr
            # so we use the err measurement to estimate the error rate
            # In this sense, the error rate is an average over the
            # previous interval of time since we last looked, thus the
            # error rate is in the past
            derrdt[n] = (err[n] - err[n-1]) / pidPeriod_sec
            # Integrate the error (i.e., add it up)
            intErr[n] = intErr[n-1] + err[n]
            # Compute the control variable by summing the PID parts
            # When the pidEnd_index is reached, the output will be
            # forwarded to the communication sequence
            cvPid[n] = (kp * err[n]) + (ki * intErr[n]) + (kd * derrdt[n])
    elif (n > 0): # Previous output is held until the next task wakeup time
        err[n] = err[n-1]
        derrdt[n] = derrdt[n-1]
        intErr[n] = intErr[n-1]
        cvPid[n] = cvPid[n-1]
    # Initiate communication delay
    if (n == pidEnd_index):
        #print("@ " + str(n) + " pid end = " + str(cvPid[n]))
        comm0Start_index = n
        if (comm0StdDevJitter_index == 0):
            comm0Jitter_index = 0
        else:
            comm0Jitter_index = round(np.random.normal(comm0MeanJitter_index, comm0StdDevJitter_index))
        comm0End_index = comm0Start_index + comm0Delay_index + comm0Jitter_index
    # When communication delay has been met, move the information along
    if (n == comm0End_index):
        cvComm0[comm0End_index] = cvPid[comm0Start_index]
        #print("@ " + str(n) + " comm0 end = " + str(cvComm0[comm0End_index]))
    elif (n > 0): # Otherwise, just hold the previous command
        cvComm0[n] = cvComm0[n-1]
    # Currently just model the motor, gears, and kinematics as a simple
    # time constant without limits
    # We will likely improve this fidelity later by adding limiting
    # The kinematics (physics) runs "continuously" so we update it
    # every time step
    G[n] = (kg * cvComm0[n] * (1.0 - exp)) + (G[n-1] * exp)
    # Next is the sensor delay, communication, processing, and communication
    # on the return path
    # The process output will be sensed by a camera and delivered at the
    # camera frame rate; the frame interval is asynchronous to all other
    # processing periods.
    # We currently assume insignificant jitter in the camera rate
    # We also are neglecting any blur occurring due to motion
    #
    # However, we will pick a point midway in the frame to represent
    # the time of the relevant image data; depending on the simulation
    # time step and modeled frame rate for the camera can cause a jitter
    # of up to a time step
    if ((n % camPeriod_index) == camOffset_index):
        #print("@ " + str(n) + " camera start")
        camStart_index = n
        camEnd_index = camStart_index + camPeriod_index
        camImage_index = round((camStart_index + camEnd_index)/2) # Midpoint in time
        # This is a point in time associated with the center pixel of
        # the image. For now we will just assume that the item we will measure in the
        # image is at the same point in time as the image center.
        # Reality is that the difference is small and only important for
        # very rapid target motion
    # While the center point of the image time is important for averaging
    # state on the image data, the frame will not be deliverable until the
    # entire frame is ready for the next communication boundary (when the frame
    # can be fetched)
    if (n == (camEnd_index-1)):
        pvCam[camStart_index:camEnd_index] = G[camImage_index]
        #print("@ " + str(n) + " camera = " + str(G[camImage_index]))
    # Image processing is assumed to operate as fast as it can
    # but will have asynchronous start and duration will vary based on
    # image content with a well defined lower and upper limit.
    #
    # The modeling is a small communication delay followed by a variable
    # image processing delay; we will model a small normal distribution in
    # time but will not model imaging errors
    if (n == comm1Start_index):
        #print("@ " + str(n) + " COMM1 start")
        if (comm1StdDevJitter_index == 0):
            comm1Jitter_index = 0
        else:
            comm1Jitter_index = round(np.random.normal(comm1MeanJitter_index, comm1StdDevJitter_index))
        comm1End_index = comm1Start_index + comm1Delay_index + comm1Jitter_index
    # Whichever image frame is available will now be forwarded
    # We back up one camera period from when communication starts because the
    # image information won't be available while a frame is being sampled
    # The information is placed in the outgoing comm1 buffer at the end of
    # communication, effectively delaying the image information and keeping
    # the boundaries asynchronous to the resolution of the time step.
    if (n == comm1End_index):
        if (comm1Start_index >= camPeriod_index):
            pvComm1[comm1End_index] = pvCam[comm1Start_index - camPeriod_index]
        else:
            pvComm1[comm1End_index] = pvCam[comm1Start_index]
        #print("@ " + str(n) + " COMM1 end = " + str(pvComm1[comm1End_index]))
        # Now that communication has completed, the image processing
        # can start; here we represent a variable processing latency
        # as a normal distribution between a min and max time assumed
        # to be 3-sigma limit
        # This is not a precise model of the statistical variation
        # of actual image processing, but rather just enough variation
        # to observe the impact to a control loop (if any)
        pvImageStart_index = comm1End_index
        if (pvImageStdDevDuration_index == 0):
            pvImageJitter_index = pvImageMeanDuration_index
        else:
            pvImageJitter_index = round(np.random.normal(pvImageMeanDuration_index, pvImageStdDevDuration_index))
        pvImageEnd_index = pvImageStart_index + pvImageJitter_index
    elif (n > 0):
        pvComm1[n] = pvComm1[n-1]
    # When image processing is complete, we can begin to send the result
    # to the final communication link and then restart the second comm link
    # to read the camera again
    if (n == pvImageEnd_index):
        pvImage[pvImageEnd_index] = pvComm1[comm1End_index]
        #print("@ " + str(n) + " IMAGE PROCESSING end = " + str(pvImage[pvImageEnd_index]))
        comm2Start_index = pvImageEnd_index
    elif (n > 0):
        pvImage[n] = pvImage[n-1]
    if (n == comm2Start_index):
        comm2End_index = comm2Start_index + comm2Delay_index
        #print("@ " + str(n) + " COMM2 start --> end = " + str(comm2End_index))
    if (n == comm2End_index):
        pvComm2[comm2End_index] = pvImage[comm2Start_index]
        #print("@ " + str(n) + " COMM2 end = " + str(pvComm2[comm2End_index]))
        comm1Start_index = comm2End_index + 1 # Restart image processing immediately
        # Enforce causality
        # We delay the awareness of the set point until after the first
        # image is processed and communicated; it is only at that moment
        # the system becomes aware of the error
        if (n < nmax-1):
            sp[n+1] = 1.0
    elif (n > 0):
        pvComm2[n] = pvComm2[n-1]
        if (n < nmax-1):
            sp[n+1] = sp[n]
    # The PID reads this fully-delayed measurement on its next period
    pvFinal[n] = pvComm2[n]
# --- Plot the simulated signals over the full run ----------------------------
plot.figure(1)
plot.cla()
plot.grid()
plot.plot(ts_sec,sp,label='sp')
plot.plot(ts_sec,err,label='err')
#plot.plot(ts_sec,cvPid,label='cvPid')
#plot.plot(ts_sec,cvComm0,'o',label='cvComm0')
plot.plot(ts_sec,G,label='G')
plot.plot(ts_sec,pvCam,label='CameraFrame'),  # NOTE(review): stray trailing comma (harmless no-op)
plot.plot(ts_sec,pvComm1,label='CamComm+ImageProcessing')
plot.plot(ts_sec,pvImage,label='NetworkTableStart')
plot.plot(ts_sec,pvComm2,label='NetworkTableEnd')
#plot.plot(ts_sec,pvFinal,label='pvFinal')
#plot.legend()
# Legend placed in a strip above the axes so it does not cover the traces
plot.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
            ncol=2, mode="expand", borderaxespad=0.)
| mit |
ozancaglayan/python-emotiv | utils/ssvep-frequencies.py | 2 | 3298 | #!/usr/bin/env python
import os
import sys
import numpy as np
from scipy import fftpack, signal
from scipy.io import loadmat
from matplotlib import pylab as plt
from emotiv import utils
# NOTE(review): this script uses Python 2 syntax (print statements and
# "except IndexError, ie") and will not run under Python 3 as-is.
if __name__ == '__main__':
    # Optional extra CLI arguments select specific EEG channels to plot
    if len(sys.argv) > 2:
        ch = list(sys.argv[2:])
    else:
        ch = None
    try:
        folder = sys.argv[1]
        # Load resting-state and SSVEP recordings saved as MATLAB .mat files
        resting_sig = loadmat(os.path.join(folder, "eeg-resting"))
        ssvep_sig = loadmat(os.path.join(folder, "eeg-ssvep"))
    except IndexError, ie:
        print "Usage: %s <data-folder> [channel]" % sys.argv[0]
    else:
        # Frequency axis for an FFT of the full recording (128 Hz sampling)
        time_step = 1/128.0
        sample_count = ssvep_sig['SEQ'].size
        sample_freq = fftpack.fftfreq(sample_count, d=time_step)
        pidxs = np.where(sample_freq > 0)  # keep positive frequencies only
        freqs = sample_freq[pidxs]
        # Report any dropped packets detected from the sequence counters
        resting_drops = utils.check_packet_drops(resting_sig['SEQ'][0,:])
        ssvep_drops = utils.check_packet_drops(ssvep_sig['SEQ'][0,:])
        if ssvep_drops:
            print "SSVEP drops: %s" % ssvep_drops
        if resting_drops:
            print "Resting drops: %s" % resting_drops
        # One column of subplots per channel (requested or all available)
        if ch:
            fig, axarr = plt.subplots(5, len(ch), sharex=False)
            axarr = np.array(axarr).reshape((-1, len(ch)))
            channels = ch
        else:
            # 3 for matlab meta data arrays, and 1 for SEQ
            fig, axarr = plt.subplots(5, len(ssvep_sig) - 4, sharex=False)
            channels = [c for c in ssvep_sig.keys() if not c.startswith(("__", "SEQ"))]
        for index, channel in enumerate(channels):
            # Remove DC offset before comparing spectra
            rest = signal.detrend(resting_sig[channel][0,:], type='constant')
            ssvep = signal.detrend(ssvep_sig[channel][0,:], type='constant')
            #rest = resting_sig[channel][0,:]
            #ssvep = ssvep_sig[channel][0,:]
            diff_signal = ssvep - rest
            rest_fft = fftpack.fft(rest)
            ssvep_fft = fftpack.fft(ssvep)
            diff_fft = fftpack.fft(diff_signal)
            # Find powers
            rest_power = np.abs(rest_fft)[pidxs]
            ssvep_power = np.abs(ssvep_fft)[pidxs]
            sig_diff_power = np.abs(diff_fft)[pidxs]
            # Normalize each spectrum to its own maximum (percent scale)
            rest_power = (rest_power / rest_power.max()) * 100
            ssvep_power = (ssvep_power / ssvep_power.max()) * 100
            sig_diff_power = (sig_diff_power / sig_diff_power.max()) * 100
            """
            freqs, rest_power = signal.welch(rest, fs=128, scaling='spectrum')
            freqs, ssvep_power = signal.welch(ssvep, fs=128)
            freqs, sig_diff_power = signal.welch(diff_signal, fs=128)
            """
            # Row 0: raw signals; rows 1-4: power spectra and differences
            axarr[0, index].plot(ssvep)
            axarr[0, index].plot(rest, color='r')
            axarr[0, index].set_title("EEG(%s)" % channel)
            axarr[1, index].plot(freqs, rest_power)
            axarr[1, index].set_title("Resting(%s)" % channel)
            axarr[2, index].plot(freqs, ssvep_power)
            axarr[2, index].set_title("SSVEP(%s)" % channel)
            axarr[3, index].plot(freqs, ssvep_power - rest_power)
            axarr[3, index].set_title("Difference(%s)" % channel)
            axarr[4, index].plot(freqs, sig_diff_power)
            axarr[4, index].set_title("Signal Difference(%s)" % channel)
        fig.tight_layout()
        plt.show()
| gpl-3.0 |
serggrom/python-data-mining | DM_4_NP.py | 1 | 5279 | import matplotlib.pyplot as plt
import numpy as np
from numpy.random import randn
from numpy.linalg import inv, qr
from random import normalvariate
import random
# --- NumPy demo: array creation, dtypes, and slice semantics -----------------
# (Scratch/tutorial script; most outputs are left commented out.)
data = [6, 7.5, 8, 0, 1]
arr = np.array(data)
#print(arr)
data2 = [[1, 2, 3, 4], [5, 6, 7, 8]]
arr2 = np.array(data2)
#print(arr2)
#print(arr2.ndim)
#print(arr2.shape)
#print(arr.dtype, ' and ', arr2.dtype)
#print(np.zeros((3, 6)))
#print(np.empty((2, 3, 2)))
#print(np.arange(15))
arr3 = np.array([3.7, -1.2, -2.6, 0.5, 12.9, 10.1])
#print(arr3)
#print(arr3.astype(np.int32))
# NOTE(review): np.string_ is a legacy alias; removed in NumPy 2.0 — confirm
# target NumPy version before running.
numeric_strings = np.array(['1.25', '-9.6', '42'], dtype=np.string_)
#print(numeric_strings.astype(float))
int_array = np.arange(10)
calibers = np.array([.22, .270, .357, .380, .44, .50], dtype=np.float64)
#print(int_array.astype(calibers.dtype))
arr4 = np.array([[1., 2., 3.], [4., 5., 6.]])
#print(arr4)
#print(arr4-arr4)
#print(1 / arr4)
#print(arr4 ** 0.5)
arr6 = np.arange(10)
arr6[5:8] = 12
#print(arr6)
# Slices are views: writing through arr6_slice mutates arr6 itself
arr6_slice = arr6[5:8]
arr6_slice[1] = 12345
#print(arr6)
arr6_slice[:] = 64
#print(arr6)
# --- NumPy demo: multi-dim indexing, boolean masks, fancy indexing, ufuncs ---
arr2d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
#print(arr2d[2])
#print(arr2d[0, 2])
arr3d = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
#print(arr3d)
old_values = arr3d[0].copy()
#print(arr3d[0])
arr3d[0] = old_values
#print(arr3d)
#print(arr3d[1, 0])
#print(arr6[1:6])
#print(arr2d[:2])
#print(arr2d[2, :1])
#print(arr2d[:, :1])
names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
data = randn(7, 4)
#print(names)
#print(data)
#print(names == 'Bob')
#print(data[names == 'Bob'])
#print(data[names == 'Bob', 2:])
#print(data[~(names == 'Bob')])
mask = (names == 'Bob') | (names == 'Will')
#print(mask)
#print(data[mask])
#data[names != 'Joe'] = 7
#print(data)
arr7 = np.empty((8, 4))
for i in range(8):
    arr7[i] = i
#print(arr7)
# Fancy indexing with integer lists copies rows in the given order
#print(arr7[[4, 3, 0, 6]])
#print(arr7[[-3, -5, -7]])
arr7 = np.arange(32).reshape((8, 4))
#print(arr7[[1, 5, 7, 2], [0, 3, 1, 2]])
#print(arr7[[1, 5, 7, 2]][:, [0, 3, 1, 2]])
#print(arr7[np.ix_([1, 5, 7, 2], [0, 3, 1, 2])])
arr8 = np.arange(15).reshape((3, 5))
#print(arr8)
#print(arr8.T)
arr8 = np.random.randn(6, 3)
#print(np.dot(arr8.T, arr8))
arr8 = np.arange(16).reshape((2, 2, 4))
#print(arr8)
#print(arr8.transpose((1, 0, 2)))
#print(arr8.swapaxes(1, 2))
arr9 = np.arange(10)
#print(np.sqrt(arr9))
#print(np.exp(arr9))
x = randn(8)
y = randn(8)
#print(x)
#print(y)
#print(np.maximum(x, y))
arr9 = randn(7) * 5
#print(arr9)
#print(np.modf(arr9))
# meshgrid example: distance-from-origin surface over a 2-D grid
points = np.arange(-5, 5, 0.01)
xs, ys = np.meshgrid(points, points)
#print(ys)
z = np.sqrt(xs ** 2 + ys **2)
#print(z)
#plt.imshow(z, cmap=plt.cm.gray)
#plt.colorbar()
#plt.title("Image plot of $\sqrt(x^2 + y^2)$ for a grid values")
#plt.show()
# --- NumPy demo: where, statistics, sorting, set ops, file I/O, linalg -------
xarr = np.array([1.1, 1.2, 1.3, 1.4, 1.5])
yarr = np.array([2.1, 2.2, 2.3, 2.4, 2.5])
cond = np.array([True, False, True, True, False])
#result = [(x if c else y)
#          for x, y, c in zip(xarr, yarr, cond)]
#print(result)
result = np.where(cond, xarr, yarr)
#print(result)
arr10 = randn(4, 4)
#print(arr10)
arr10 = np.where(arr10 > 0, 2, -2)
#print(arr10)
#arr10 = np.where(arr10 > 0, 2, arr10)
'''
result = []
for i in range(n):
    if cond1[i] and cond2[i]:
        result.append(0)
    elif cond1[i]:
        result.append(1)
    elif cond2[i]:
        result.append(2)
    else:
        result.append(3)
'''
arr11 = np.random.randn(5, 4)
#print(arr10.mean())
#print(np.mean(arr10))
#print(arr11.sum())
#print(arr11)
#print(arr11.mean(axis=1))
#print(arr11.sum(0))
arrays = np.array([[0, 1, 2], [3, 4, 5], [7, 8, 9]])
#print(arrays.cumsum())
#print(arrays.cumprod(1))
arr12 = randn(100)
pos = (arr12 > 0).sum()
#print(pos)
bools = np.array([False, False, True, False])
#print(bools.any())
#print(bools.all())
arr13 = randn(8)
#print(arr13)
arr13.sort()
arr13 = randn(5, 3)
#print(arr13)
arr13.sort(1)
#print(arr13)
# Empirical 5% quantile via sorting
large_arr = randn(1000)
large_arr.sort()
quantil = large_arr[int(0.05 * len(large_arr))]
#print(quantil)
names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Joe', 'Joe'])
#print(np.unique(names))
ints = np.array([3, 3, 3, 2, 2, 1, 1, 4, 4])
#print(np.unique(ints))
#print(sorted(set(names)))
values = np.array([6, 0, 0, 3, 2, 5, 6])
srt = np.in1d(values, [2, 3, 6])
#print(srt)
# NOTE: these lines write 'some_array.npy' and 'array_archive.npz' to the
# current working directory as a side effect of importing/running this script
arr14 = np.arange(10)
np.save('some_array', arr14)
#print(np.load('some_array.npy'))
np.savez('array_archive.npz', a=arr14, b=arr14)
arch = np.load('array_archive.npz')
#print(arch['b'])
x = np.array([[1., 2., 3.], [4., 5., 6.]])
y = np.array([[6., 23.], [-1., 7.], [8, 9]])
z = x.dot(y)
#print(z)
one = np.ones(3)
z = np.dot(x, one)
#print(z)
X = randn(5, 5)
mat = X.T.dot(X)
#print(inv(X))
#print(mat.dot(inv(mat)))
q, r = qr(mat)
#print(r)
samples = np.random.normal(size=(4, 4))
x_samples = randn(1000)
#print(samples)
N = 1000000
#samples.reshape(0, 16)
#%timeit samples = normalvariate(0, 1) for _ in xrange(N)
#plt.plot(x_samples)
#plt.show()
#np.random.seed(12345)
# Simple 1-D random walk of +/-1 steps
position = 0
walk = [position]
steps = 1000
for i in range(steps):
    step = 1 if random.randint(0, 1) else -1
    position += step
    walk.append(position)
#plt.plot(walk)
#plt.show()
| gpl-3.0 |
timberhill/blablaplot | blablaplot.py | 1 | 6659 | #!/usr/bin/python
from numpy import loadtxt, asarray
from numpy.random import normal as gaussian_noise
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import warnings
"""
Here you register new characters in format:
'<char>' : (<width>, <height>, '<filename>'),
"""
charlist = {
'a' : (0.7, 1.0, 'a'),
'b' : (0.7, 1.0, 'b'),
'c' : (0.7, 1.0, 'c'),
'd' : (0.7, 1.0, 'd'),
'e' : (0.7, 1.0, 'e'),
'f' : (0.7, 1.0, 'f'),
'g' : (0.7, 1.0, 'g'),
'h' : (0.7, 1.0, 'h'),
'i' : (0.4, 1.0, 'i'),
'j' : (0.4, 1.0, 'j'),
'k' : (0.7, 1.0, 'k'),
'l' : (0.7, 1.0, 'l'),
'm' : (0.7, 1.0, 'm'),
'n' : (0.7, 1.0, 'n'),
'o' : (0.7, 1.0, 'o'),
'p' : (0.7, 1.0, 'p'),
'q' : (0.7, 1.0, 'q'),
'r' : (0.7, 1.0, 'r'),
's' : (0.7, 1.0, 's'),
't' : (0.7, 1.0, 't'),
'u' : (0.7, 1.0, 'u'),
'v' : (0.7, 1.0, 'v'),
'w' : (0.7, 1.0, 'w'),
'x' : (0.7, 1.0, 'x'),
'y' : (0.7, 1.0, 'y'),
'z' : (0.7, 1.0, 'z'),
'0' : (0.7, 1.0, '0'),
'1' : (0.5, 1.0, '1'),
'2' : (0.7, 1.0, '2'),
'3' : (0.7, 1.0, '3'),
'4' : (0.7, 1.0, '4'),
'5' : (0.7, 1.0, '5'),
'6' : (0.7, 1.0, '6'),
'7' : (0.7, 1.0, '7'),
'8' : (0.7, 1.0, '8'),
'9' : (0.7, 1.0, '9'),
' ' : (0.7, 0.0, 'space'),
'?' : (0.7, 1.0, 'questionmark'),
'!' : (0.2, 1.0, 'exclamationmark'),
',' : (0.1, 0.1, 'comma'),
'.' : (0.2, 0.1, 'fullstop'),
'&' : (0.6, 1.0, 'ampersand'),
'$' : (0.5, 1.0, 'dollar'),
'@' : (0.7, 1.0, 'at'),
'(' : (0.3, 1.0, 'brackets_open'),
')' : (0.3, 1.0, 'brackets_close'),
'#' : (0.7, 1.0, 'hash'),
'%' : (0.7, 1.0, 'percent'),
}
class Character(object):
    """
    A single plottable character, loaded as a point cloud from ``chars/``.

    ARGUMENTS
        char     - single character (first one is chosen)
        filename - basename of the data file inside ``chars/`` (without the
                   ``.dat`` extension); defaults to the character itself
        size     - size of the letter (width, height)
        jitter   - accepted for API compatibility but not applied here
                   (TextyPloty applies jitter after composing the full text)

    ATTRIBUTES
        self.xs, self.ys - arrays with letter points
    """

    def __init__(self, char, filename='', size=(1.0, 1.0), jitter=0.0):
        if len(char) < 1:
            raise Exception('Empty string is passed to Character() constructor.')
        self.char = char[0]
        if len(filename) > 0:
            self.filename = filename
        else:
            # BUG FIX: the original default branch built the string
            # 'chars/' + self.char + '.dat' without assigning it, leaving
            # self.filename unset. _getPoints() adds the 'chars/' prefix and
            # '.dat' suffix itself, so the default filename is the bare char.
            self.filename = self.char
        self._getPoints()
        self.resize(size=size)

    def _getPoints(self):
        """Load the character's point cloud from its data file and sort it."""
        xs, ys = loadtxt('chars/' + self.filename + '.dat', unpack=True)
        self.xs = asarray(xs)
        self.ys = asarray(ys)
        self._sort()

    def _sort(self):
        """Sort points by x (then y) so the glyph is ordered left to right."""
        points = zip(self.xs, self.ys)
        sorted_points = sorted(points)
        self.xs = asarray([point[0] for point in sorted_points])
        self.ys = asarray([point[1] for point in sorted_points])

    def resize(self, size=(1.0, 1.0)):
        """Rescale the point cloud to span exactly ``size`` = (width, height),
        anchored at the origin."""
        self.size = size
        if len(self.xs) < 1:
            self._getPoints()
        xmin = min(self.xs)
        xmax = max(self.xs)
        ymin = min(self.ys)
        ymax = max(self.ys)
        for i in range(0, len(self.xs)):
            self.xs[i] = self.size[0] * (self.xs[i] - xmin) / (xmax - xmin)
            self.ys[i] = self.size[1] * (self.ys[i] - ymin) / (ymax - ymin)
class TextyPloty(object):
    """
    Composes a text string into a scatter-plot point cloud.

    ARGUMENTS
        jitter  - to randomize points locations, represents sigma for gaussian noise
        spacing - distance between letters
        offset  - offset from zero point in format (x, y)
        scale   - scale/size of the letters
        func    - function to add text to
    """

    def __init__(self, jitter=0.0, spacing=0.1, offset=(0.0, 0.0), scale=(1.0, 1.0), func=None):
        self.jitter = jitter
        self.spacing = spacing
        self.offset = offset
        self.scale = scale
        self.func = func
        self.charlist = charlist

    def get(self, text):
        """
        Build the point cloud for *text*.

        ARGUMENTS
            text - string to plot
        RETURNS
            xs, ys - points coordinates
        """
        xs, ys = [], []
        xoffset = self.offset[0]
        for char in text:
            if char == ' ':
                xoffset += self.charlist[char][0] * self.scale[0]
            elif char == '\t':
                # BUG FIX: '\t' is not a key of charlist, so indexing with
                # char raised KeyError. A tab advances by four space widths.
                xoffset += self.charlist[' '][0] * self.scale[0] * 4
            elif char in self.charlist:
                width, height, fname = self.charlist[char]
                # Pass only (width, height) as the size; the original passed
                # the whole (width, height, filename) triple, which resize()
                # tolerated but which was misleading.
                charobj = Character(char=char, filename=fname, size=(width, height))
                xs.extend(self.scale[0] * charobj.xs + xoffset)
                ys.extend(self.scale[1] * charobj.ys + self.offset[1])
                xoffset += self.charlist[char][0] * self.scale[0]
            else:
                warnings.warn('Could not find file with "' + char + '" character. Skipping...', Warning)
            xoffset += self.spacing * self.scale[0]
        # Optionally ride the points on top of a baseline function y = f(x)
        if self.func != None:
            for i in range(0,len(xs)):
                ys[i] += self.func(xs[i])
        # Optionally add gaussian noise to the y coordinates
        if self.jitter > 0:
            noise = gaussian_noise(0.0, self.jitter*self.scale[1], (len(ys)))
            ys = [x+y for x, y in zip(ys, noise)]
        return asarray(xs), asarray(ys)
class ResidualsPlot(object):
    """
    Two-panel matplotlib figure: data + fitted curve on top, residuals
    (data minus fit) below, sharing the same x axis.
    """
    # NOTE(review): the mutable default arguments (data=([],[]), xs_fit=[],
    # ratio=[4, 1]) are shared between calls — safe only if never mutated.
    def __init__(self, data=([],[]), datastyle='k.', xs_fit=[], func=None, fitstyle='r-', \
        xlabel='', ylabel='', reslabel='', ratio=[4, 1], figsize=(10,6), axis=None, res_axis=None, \
        fitlabel='fit', datalabel='points'):
        self.plt_instance = plt
        self.xs = data[0]            # data point x coordinates
        self.ys = data[1]            # data point y coordinates
        self.datastyle = datastyle   # matplotlib style string for the data
        self.xs_fit = xs_fit         # x grid on which the fit curve is drawn
        self.func = func             # fit function y = func(x)
        self.ys_fit = self.func(self.xs_fit)
        self.fitstyle = fitstyle     # matplotlib style string for the fit
        self.xlabel = xlabel
        self.ylabel = ylabel
        self.reslabel = reslabel     # y label of the residuals panel
        self.ratio = ratio           # height ratio [top panel, residuals panel]
        self.figsize = figsize
        self.axis = axis             # optional (x1, x2, y1, y2) for top panel
        self.res_axis = res_axis     # optional (x1, x2, y1, y2) for residuals
        self.fitlabel = fitlabel
        self.datalabel = datalabel

    def draw(self):
        """Build (or rebuild) the figure; alias of redraw()."""
        self.redraw()

    def redraw(self):
        """Recreate the two stacked axes and replot data, fit and residuals."""
        self.plt_instance = plt
        self.plt_instance.figure(figsize=self.figsize)
        self.gridspec_instance = gridspec.GridSpec(2, 1, height_ratios=self.ratio)
        self.gridspec_instance.update(hspace=0.00)  # panels share the x axis edge
        self.ax0 = self.plt_instance.subplot(self.gridspec_instance[0])
        self.ax1 = self.plt_instance.subplot(self.gridspec_instance[1])
        self.ys_res = self.ys - self.func(self.xs)
        # set axis ranges (auto-range from the fit/residuals when not given)
        if self.axis == None:
            self.ax0.axis([min(self.xs_fit) * 1.1, max(self.xs_fit)*1.1, min(self.ys_fit) * 1.1, max(self.ys_fit) * 1.1])
        elif len(self.axis) != 4:
            raise Exception('ResidualsPlot: axis should contain 4 numbers: (x1, x2, y1, y2)')
        else:
            self.ax0.axis(self.axis)
        if self.res_axis == None:
            self.ax1.axis([min(self.xs_fit) * 1.1, max(self.xs_fit)*1.1, min(self.ys_res) * 1.1, max(self.ys_res)*1.1])
        elif len(self.res_axis) != 4:
            raise Exception('ResidualsPlot: res_axis should contain 4 numbers: (x1, x2, y1, y2)')
        else:
            self.ax1.axis(self.res_axis)
        # set axis labels
        self.ax0.set_ylabel(self.ylabel)
        self.ax1.set_ylabel(self.reslabel)
        self.ax1.set_xlabel(self.xlabel)
        # first subplot: datapoints and fit
        self.ax0.plot(self.xs_fit, self.ys_fit, self.fitstyle, label=self.fitlabel)
        self.ax0.plot(self.xs, self.ys, self.datastyle, label=self.datalabel)
        # second subplot: residuals around the zero line
        self.ax1.plot([min(self.xs), max(self.xs)], [0,0], self.fitstyle)
        self.ax1.plot(self.xs, self.ys_res, self.datastyle)
        self.ax0.legend(loc="upper right")

    def show(self):
        """Display the current figure in an interactive window."""
        self.plt_instance.show()

    def savefig(self, name='plot.pdf'):
        """Save the current figure to *name* (format inferred from extension)."""
        self.plt_instance.savefig(name)
| mit |
smeerten/jellyfish | Jellyfish.py | 1 | 31810 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017-2021 Wouter Franssen and Bas van Meerten
# This file is part of Jellyfish.
#
# Jellyfish is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Jellyfish is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Jellyfish. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import numpy as np
from PyQt5 import QtGui, QtCore, QtWidgets
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from spectrumFrame import Plot1DFrame
import engine as en
NSTEPS = 1000  # NOTE(review): presumably a point count used by simulation code later in this file — confirm
def safeEval(inp, *args):
    """Evaluate *inp* as a Python expression, returning None on any failure.

    Extra positional arguments are accepted (and ignored) so the function can
    be connected directly to Qt signals that pass additional values.

    WARNING: this relies on eval(), so it must only ever be fed trusted input
    (here: expressions typed into the GUI by the local user), never data from
    an external source.
    """
    try:
        return eval(inp)
    except Exception:
        return None
class PlotFrame(Plot1DFrame):
    """1-D spectrum display: wires up mouse interaction and tracks view limits."""

    def __init__(self, root, fig, canvas):
        super(PlotFrame, self).__init__(root, fig, canvas)
        # Route matplotlib mouse events to the handlers supplied by the
        # Plot1DFrame base class (pan/zoom behaviour).
        self.canvas.mpl_connect('button_press_event', self.buttonPress)
        self.canvas.mpl_connect('button_release_event', self.buttonRelease)
        self.canvas.mpl_connect('motion_notify_event', self.pan)
        self.canvas.mpl_connect('scroll_event', self.scroll)
        self.canvas.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.canvas.setFocus()
        # None means "view limits not initialised yet"; showFid() triggers a
        # plotReset() the first time it runs.
        self.xmaxlim = None
        self.xminlim = None
        self.ymaxlim = None
        self.yminlim = None

    def setData(self, xdata, ydata):
        """Store the spectrum to draw (x axis values and complex intensities)."""
        self.xdata = xdata
        self.ydata = ydata

    def plotReset(self, xReset=True, yReset=True): # set the plot limits to min and max values
        miny = min(np.real(self.ydata))
        maxy = max(np.real(self.ydata))
        differ = 0.05 * (maxy - miny) # 5% margin so all datapoints stay visible
        if yReset:
            self.yminlim = miny - differ
            self.ymaxlim = maxy + differ
        axMult = 1.0
        if xReset:
            self.xminlim = min(self.xdata * axMult)
            self.xmaxlim = max(self.xdata * axMult)
        # x axis is drawn reversed (max on the left), as is conventional for
        # chemical-shift axes
        self.ax.set_xlim(self.xmaxlim, self.xminlim)
        self.ax.set_ylim(self.yminlim, self.ymaxlim)

    def showFid(self):
        """Redraw the real part of the spectrum using the current view limits."""
        self.ax.cla()
        self.ax.plot(self.xdata, np.real(self.ydata))
        if self.xmaxlim is None:
            self.plotReset()
        self.ax.set_xlim(self.xmaxlim, self.xminlim)
        self.ax.set_ylim(self.yminlim, self.ymaxlim)
        self.ax.set_xlabel('Shift [ppm]')
        self.canvas.draw()
class SettingsFrame(QtWidgets.QWidget):
def __init__(self, parent):
super(SettingsFrame, self).__init__(parent)
self.father = parent
grid = QtWidgets.QGridLayout(self)
grid.addWidget(QtWidgets.QLabel("B0 [T]:"), 0, 0, QtCore.Qt.AlignHCenter)
self.B0Setting = QtWidgets.QLineEdit(self)
self.B0Setting.setAlignment(QtCore.Qt.AlignHCenter)
self.B0Setting.setText(str(self.father.B0))
self.B0Setting.returnPressed.connect(self.ApplySettings)
grid.addWidget(self.B0Setting, 0, 1)
self.LbType = QtWidgets.QComboBox()
self.LbType.addItems(['Line Width [Hz]:', 'Line Width [ppm]:'])
self.LbType.currentIndexChanged.connect(self.ChangeLbSetting)
grid.addWidget(self.LbType, 1, 0)
self.LbSetting = QtWidgets.QLineEdit(self)
self.LbSetting.setAlignment(QtCore.Qt.AlignHCenter)
self.LbSetting.setText(str(self.father.Lb))
self.LbSetting.returnPressed.connect(lambda: self.ApplySettings(False, False))
grid.addWidget(self.LbSetting, 1, 1)
grid.addWidget(QtWidgets.QLabel("# Points [x1024]:"), 0, 2, QtCore.Qt.AlignHCenter)
self.NumPointsSetting = QtWidgets.QLineEdit(self)
self.NumPointsSetting.setAlignment(QtCore.Qt.AlignHCenter)
self.NumPointsSetting.setText(str(int(self.father.NumPoints/1024)))
self.NumPointsSetting.returnPressed.connect(lambda: self.ApplySettings(False, False))
grid.addWidget(self.NumPointsSetting, 0, 3)
grid.addWidget(QtWidgets.QLabel("Ref Nucleus:"), 1, 2, QtCore.Qt.AlignHCenter)
self.RefNucleusSettings = QtWidgets.QComboBox()
self.RefNucleusSettings.addItems(en.ABBREVLIST)
self.RefNucleusSettings.setCurrentIndex(0)
self.RefNucleusSettings.currentIndexChanged.connect(self.ApplySettings)
grid.addWidget(self.RefNucleusSettings, 1, 3)
grid.addWidget(QtWidgets.QLabel("x Min [ppm]:"), 0, 4, QtCore.Qt.AlignHCenter)
self.XminSetting = QtWidgets.QLineEdit(self)
self.XminSetting.setAlignment(QtCore.Qt.AlignHCenter)
self.XminSetting.setText(str(self.father.Limits[0]))
self.XminSetting.returnPressed.connect(lambda: self.ApplySettings(True, False))
grid.addWidget(self.XminSetting, 0, 5)
grid.addWidget(QtWidgets.QLabel("x Max [ppm]:"), 1, 4, QtCore.Qt.AlignHCenter)
self.XmaxSetting = QtWidgets.QLineEdit(self)
self.XmaxSetting.setAlignment(QtCore.Qt.AlignHCenter)
self.XmaxSetting.setText(str(self.father.Limits[1]))
self.XmaxSetting.returnPressed.connect(lambda: self.ApplySettings(True, False))
grid.addWidget(self.XmaxSetting, 1, 5)
grid.setColumnStretch(10, 1)
grid.setRowStretch(10, 1)
def ChangeLbSetting(self):
if self.LbType.currentIndex() == 0: #From ppm
self.LbSetting.setText(str(safeEval(self.LbSetting.text()) * (self.father.RefFreq * 1e-6)))
else:
self.LbSetting.setText(str(safeEval(self.LbSetting.text()) / (self.father.RefFreq * 1e-6)))
    def ApplySettings(self, ResetAxis=False, recalc=True):
        """Push all settings fields into the main window and re-simulate.

        Args:
            ResetAxis: when True, the plot axes are reset after simulating.
            recalc: when False, only the spectrum is re-rendered without
                recomputing frequencies/intensities.
        """
        self.father.B0 = safeEval(self.B0Setting.text())
        self.father.RefNucleus = en.ABBREVLIST[self.RefNucleusSettings.currentIndex()]
        # Update the reference frequency *before* reading Lb: the ppm->Hz
        # conversion below depends on the new RefFreq.
        self.father.SetRefFreq()
        if self.LbType.currentIndex() == 0:
            # Field already in Hz.
            self.father.Lb = safeEval(self.LbSetting.text())
        else:
            # Field in ppm: convert to Hz using the reference frequency.
            self.father.Lb = safeEval(self.LbSetting.text()) * (self.father.RefFreq * 1e-6)
        self.father.NumPoints = safeEval(self.NumPointsSetting.text()) * 1024
        self.father.Limits[0] = float(self.XminSetting.text())
        self.father.Limits[1] = float(self.XmaxSetting.text())
        self.father.sim(ResetAxis, recalc=recalc)
class SpinsysFrame(QtWidgets.QWidget):
    """Editor panel for the spin system and its interactive parameter sliders.

    Maintains parallel lists of Qt widgets per spin (``spinSysWidgets``) and
    per slider (``sliderWidgets``/``sliderTypes``), plus the J-coupling
    matrix. Any edit funnels through ``parseSpinSys``, which pushes the spin
    system to ``self.father`` (the main window) and triggers a simulation.
    """
    def __init__(self, parent):
        """Build the spin table, coupling/slider controls and lay them out."""
        super(SpinsysFrame, self).__init__(parent)
        self.father = parent
        # J-coupling matrix; grown/shrunk as spins are added/removed.
        self.Jmatrix = np.array([])
        self.grid = QtWidgets.QGridLayout(self)
        self.spinGroup = QtWidgets.QGroupBox('Spin System:')
        self.spinFrame = QtWidgets.QGridLayout()
        self.spinGroup.setLayout(self.spinFrame)
        self.grid.addWidget(self.spinGroup, 0, 0, 1, 5)
        self.StrongToggle = QtWidgets.QCheckBox('Strong coupling')
        self.StrongToggle.setChecked(True)
        self.StrongToggle.stateChanged.connect(self.changeStrong)
        self.spinFrame.addWidget(self.StrongToggle, 1, 0, 1, 6)
        self.addButton = QtWidgets.QPushButton("Add isotope")
        self.addButton.clicked.connect(self.addIsotopeManager)
        self.spinFrame.addWidget(self.addButton, 2, 0, 1, 6)
        self.setJButton = QtWidgets.QPushButton("Set J-couplings")
        self.setJButton.clicked.connect(self.setJManager)
        self.spinFrame.addWidget(self.setJButton, 3, 0, 1, 6)
        # Column headers for the per-spin widget rows added by addSpin().
        self.spinFrame.addWidget(QtWidgets.QLabel("#:"), 5, 0, QtCore.Qt.AlignHCenter)
        self.spinFrame.addWidget(QtWidgets.QLabel("Type:"), 5, 1, QtCore.Qt.AlignHCenter)
        self.spinFrame.addWidget(QtWidgets.QLabel("Shift [ppm]:"), 5, 2, QtCore.Qt.AlignHCenter)
        self.spinFrame.addWidget(QtWidgets.QLabel("Multiplicity:"), 5, 3, QtCore.Qt.AlignHCenter)
        self.spinFrame.addWidget(QtWidgets.QLabel("Detect:"), 5, 4, QtCore.Qt.AlignHCenter)
        self.spinFrame.addWidget(QtWidgets.QLabel("Remove:"), 5, 5, QtCore.Qt.AlignHCenter)
        # Parallel lists: index i in each list is the widget for spin row i.
        self.spinSysWidgets = {'Number':[], 'Isotope':[], 'Shift':[], 'Multi':[], 'Detect':[], 'Remove':[]}
        # Parallel slider bookkeeping: 'Type' is 'B0'/'Shift'/'J' (or None
        # once removed); 'Spins' holds the 1-based spin indices involved.
        self.sliderTypes = {'Type':[], 'Spins':[]}
        self.sliderWidgets = {'Label':[], 'Slider':[], 'Remove':[]}
        self.Nspins = 0
        self.sliderGroup = QtWidgets.QGroupBox('Sliders:')
        self.sliderFrame = QtWidgets.QGridLayout()
        self.sliderGroup.setLayout(self.sliderFrame)
        self.addSliderButton = QtWidgets.QPushButton("Add slider")
        self.addSliderButton.clicked.connect(self.addSliderManager)
        # Row 100 keeps the button below all dynamically added slider rows.
        self.sliderFrame.addWidget(self.addSliderButton, 100, 0, 1, 6)
        self.grid.addWidget(self.sliderGroup, 1, 0, 1, 5)
        self.grid.setColumnStretch(200, 1)
        self.grid.setRowStretch(200, 1)
    def changeStrong(self, state):
        """Toggle strong-coupling treatment and re-simulate."""
        if state:
            self.father.StrongCoupling = True
        else:
            self.father.StrongCoupling = False
        self.parseSpinSys(False)
    def addSpin(self, Isotope, Shift, Multiplicity, Detect, Sim=True):
        """Append one spin row (widgets + Jmatrix growth).

        Args:
            Isotope: isotope label shown in the table.
            Shift: chemical shift [ppm] placed in the editable field.
            Multiplicity: number of equivalent spins.
            Detect: whether this spin contributes to the detected spectrum.
            Sim: when True, re-parse and simulate immediately.
        """
        self.Nspins += 1
        self.spinSysWidgets['Number'].append(QtWidgets.QLabel(str(self.Nspins)))
        self.spinSysWidgets['Isotope'].append(QtWidgets.QLabel(Isotope))
        self.spinFrame.addWidget(self.spinSysWidgets['Isotope'][-1], 5 + self.Nspins, 1)
        self.spinFrame.addWidget(self.spinSysWidgets['Number'][-1], 5 + self.Nspins, 0)
        self.spinSysWidgets['Shift'].append(QtWidgets.QLineEdit())
        self.spinSysWidgets['Shift'][-1].setAlignment(QtCore.Qt.AlignHCenter)
        self.spinSysWidgets['Shift'][-1].setText(str(Shift))
        self.spinSysWidgets['Shift'][-1].returnPressed.connect(self.parseSpinSys)
        self.spinFrame.addWidget(self.spinSysWidgets['Shift'][-1], 5 + self.Nspins, 2)
        self.spinSysWidgets['Multi'].append(QtWidgets.QSpinBox())
        self.spinSysWidgets['Multi'][-1].setValue(Multiplicity)
        self.spinSysWidgets['Multi'][-1].setMinimum(1)
        self.spinSysWidgets['Multi'][-1].valueChanged.connect(lambda: self.parseSpinSys())
        self.spinFrame.addWidget(self.spinSysWidgets['Multi'][-1], 5 + self.Nspins, 3)
        self.spinSysWidgets['Detect'].append(QtWidgets.QCheckBox())
        self.spinSysWidgets['Detect'][-1].setChecked(Detect)
        self.spinSysWidgets['Detect'][-1].stateChanged.connect(lambda: self.parseSpinSys())
        self.spinFrame.addWidget(self.spinSysWidgets['Detect'][-1], 5 + self.Nspins, 4)
        self.spinSysWidgets['Remove'].append(QtWidgets.QPushButton("X"))
        # Immediately-invoked outer lambda binds the current spin number n,
        # avoiding Python's late-binding closure pitfall.
        self.spinSysWidgets['Remove'][-1].clicked.connect((lambda n: lambda: self.removeSpin(n))(self.Nspins))
        self.spinFrame.addWidget(self.spinSysWidgets['Remove'][-1], 5 + self.Nspins, 5)
        # Grow the J-matrix by one row/column, preserving existing couplings.
        temp = np.zeros((self.Nspins, self.Nspins))
        temp[:-1, :-1] = self.Jmatrix
        self.Jmatrix = temp
        if Sim:
            self.parseSpinSys(True)
    def setJManager(self):
        """Open the J-coupling dialog and apply its result unless cancelled."""
        dialog = setJWindow(self, self.Jmatrix)
        if dialog.exec_():
            if dialog.closed:
                return
            else:
                self.Jmatrix = dialog.Jmatrix
                self.parseSpinSys()
    def addIsotopeManager(self):
        """Open the add-isotope dialog and append the new spin on accept."""
        dialog = addIsotopeWindow(self)
        if dialog.exec_():
            if dialog.closed:
                return
            else:
                self.addSpin(dialog.Isotope, dialog.Shift, dialog.Multi, True)
    def addSliderManager(self):
        """Open the slider dialog and create a B0/shift/J slider on accept."""
        dialog = addSliderWindow(self, self.Nspins)
        if dialog.exec_():
            if dialog.closed:
                return
            else:
                num = len(self.sliderWidgets['Slider']) + 1
                self.sliderWidgets['Slider'].append(QtWidgets.QSlider(QtCore.Qt.Horizontal))
                # Slider positions are integers; NSTEPS scales them to the
                # real-valued parameter range.
                self.sliderWidgets['Slider'][-1].setRange(dialog.min * NSTEPS, dialog.max * NSTEPS)
                self.sliderWidgets['Remove'].append(QtWidgets.QPushButton("X"))
                self.sliderWidgets['Remove'][-1].clicked.connect((lambda n: lambda: self.removeSlider(n))(num))
                if dialog.type == 0: #If B0
                    self.sliderWidgets['Slider'][-1].valueChanged.connect(self.setB0)
                    self.sliderWidgets['Label'].append(QtWidgets.QLabel('B<sub>0</sub>:'))
                    self.sliderWidgets['Slider'][-1].setValue(self.father.B0*NSTEPS)
                    self.sliderTypes['Spins'].append([None])
                    self.sliderTypes['Type'].append('B0')
                if dialog.type == 1: #If shift
                    spin = dialog.spin1
                    self.sliderWidgets['Slider'][-1].valueChanged.connect((lambda n, x: lambda: self.setShift(n, x))(spin, len(self.sliderWidgets['Slider'])))
                    self.sliderWidgets['Label'].append(QtWidgets.QLabel('Shift (#' + str(spin) + ')'))
                    self.sliderWidgets['Slider'][-1].setValue(safeEval(self.spinSysWidgets['Shift'][spin-1].text()) * NSTEPS)
                    self.sliderTypes['Spins'].append([spin])
                    self.sliderTypes['Type'].append('Shift')
                if dialog.type == 2: #If J
                    spin = dialog.spin1
                    spin2 = dialog.spin2
                    self.sliderWidgets['Slider'][-1].valueChanged.connect((lambda n, m, x: lambda: self.setJ(n, m, x))(spin, spin2, len(self.sliderWidgets['Slider'])))
                    self.sliderWidgets['Label'].append(QtWidgets.QLabel('J (' + str(spin) + ',' + str(spin2) + ')'))
                    self.sliderWidgets['Slider'][-1].setValue(self.Jmatrix[spin - 1, spin2 - 1] * NSTEPS)
                    self.sliderTypes['Spins'].append([spin, spin2])
                    self.sliderTypes['Type'].append('J')
                self.sliderFrame.addWidget(self.sliderWidgets['Label'][-1], 100 + num, 0)
                self.sliderFrame.addWidget(self.sliderWidgets['Slider'][-1], 100 + num, 1, 1, 4)
                self.sliderFrame.addWidget(self.sliderWidgets['Remove'][-1], 100 + num, 5)
    def setB0(self, B0):
        """Slider callback: forward the (integer) slider value as B0 [T]."""
        self.father.setB0(float(B0)/NSTEPS)
    def setShift(self, spinNum, widgetNum):
        """Slider callback: write the slider value into the spin's shift field."""
        self.spinSysWidgets['Shift'][spinNum - 1].setText(str(float(self.sliderWidgets['Slider'][widgetNum - 1].value()) / NSTEPS))
        self.parseSpinSys()
    def setJ(self, spin1Num, spin2Num, widgetNum):
        """Slider callback: write the slider value into the J-matrix entry."""
        J = float(self.sliderWidgets['Slider'][widgetNum - 1].value()) / NSTEPS
        self.Jmatrix[spin1Num - 1, spin2Num - 1] = J
        self.parseSpinSys()
    def removeSlider(self, index):
        """Detach a slider row's widgets; slots are None-ed, not re-indexed."""
        for var in self.sliderWidgets.keys():
            # NOTE(review): widgets were added to sliderFrame but removed via
            # self.grid; setParent(None) is what actually detaches them —
            # confirm removeWidget on the wrong layout is intentional/no-op.
            self.grid.removeWidget(self.sliderWidgets[var][index - 1])
            self.sliderWidgets[var][index - 1].setParent(None)
            self.sliderWidgets[var][index - 1] = None
        self.sliderTypes['Type'][index - 1] = None
    def removeSpin(self, index):
        """Remove spin `index` (1-based), rebuilding the table and sliders.

        All spin rows are torn down and re-added from a backup so that row
        numbering stays contiguous; sliders referencing the removed spin are
        deleted and the remaining slider callbacks are re-bound to the
        shifted spin indices.
        """
        backup = self.spinSysWidgets.copy()
        for spin in range(self.Nspins):
            for var in self.spinSysWidgets.keys():
                self.grid.removeWidget(self.spinSysWidgets[var][spin])
                self.spinSysWidgets[var][spin].setParent(None)
        removeSliders = []
        for sliderVal in range(len(self.sliderWidgets['Slider'])):
            if self.sliderTypes['Type'][sliderVal] == 'Shift':
                sliderSpinTmp = self.sliderTypes['Spins'][sliderVal][0]
                if sliderSpinTmp == index:
                    removeSliders.append(sliderVal)
                elif sliderSpinTmp > index:
                    # Spin indices above the removed one shift down by one;
                    # re-bind the callback with the new index.
                    self.sliderWidgets['Slider'][sliderVal].valueChanged.disconnect()
                    self.sliderWidgets['Slider'][sliderVal].valueChanged.connect((lambda n, x: lambda: self.setShift(n, x))(sliderSpinTmp - 1, sliderVal + 1))
                    self.sliderWidgets['Label'][sliderVal].setText('Shift (#' + str(sliderSpinTmp - 1) + ')')
                    self.sliderTypes['Spins'][sliderVal] = [sliderSpinTmp - 1]
            elif self.sliderTypes['Type'][sliderVal] == 'J':
                SpinTmp = self.sliderTypes['Spins'][sliderVal]
                SpinBool = [a > index for a in SpinTmp]
                if index in SpinTmp:
                    removeSliders.append(sliderVal)
                elif SpinBool[0] or SpinBool[1]: #If change is needed
                    # Decrement whichever spin indices lie above the removed
                    # spin (True subtracts 1, False subtracts 0).
                    Spins = [None, None]
                    for i in range(len(SpinTmp)):
                        Spins[i] = SpinTmp[i] - SpinBool[i]
                    self.sliderWidgets['Slider'][sliderVal].valueChanged.disconnect()
                    self.sliderWidgets['Slider'][sliderVal].valueChanged.connect((lambda n, m, x: lambda: self.setJ(n, m, x))(Spins[0], Spins[1], sliderVal + 1))
                    self.sliderTypes['Spins'][sliderVal] = Spins
                    self.sliderWidgets['Label'][sliderVal].setText('J (' + str(Spins[0]) + ',' + str(Spins[1]) + ')')
        #Remove sliders via emitting their remove signal
        sliderDelIndex = 0
        for iii in removeSliders:
            self.sliderWidgets['Remove'][iii - sliderDelIndex].click()
            sliderDelIndex += 1
        self.Nspins = 0
        # Drop the removed spin's row and column from the J-matrix.
        Jtemp = self.Jmatrix
        Jtemp = np.delete(Jtemp, index - 1, 0)
        Jtemp = np.delete(Jtemp, index - 1, 1)
        self.Jmatrix = np.array([])
        self.spinSysWidgets = {'Number':[], 'Isotope':[], 'Shift':[], 'Multi':[], 'Detect':[], 'Remove':[]}
        for spin in range(len(backup['Shift'])):
            if spin != index - 1:
                self.addSpin(backup['Isotope'][spin].text(), float(backup['Shift'][spin].text()), backup['Multi'][spin].value(), backup['Detect'][spin].checkState(), Sim=False)
        self.Jmatrix = Jtemp
        del backup
        self.parseSpinSys()
    def parseSpinSys(self, ResetAxis=False):
        """Collect the spin table into SpinList and trigger a simulation."""
        self.father.SpinList = []
        NSpins = len(self.spinSysWidgets['Isotope'])
        SpinList = []
        for Spin in range(NSpins):
            # Per spin: [isotope, shift(ppm), multiplicity, detect-state].
            SpinList.append([self.spinSysWidgets['Isotope'][Spin].text(), safeEval(self.spinSysWidgets['Shift'][Spin].text()),
                             self.spinSysWidgets['Multi'][Spin].value(), self.spinSysWidgets['Detect'][Spin].checkState()])
        self.father.Jmatrix = self.Jmatrix
        self.father.SpinList = SpinList
        # ResetAxis is reused for both the x-axis reset and the y-axis reset.
        self.father.sim(ResetAxis, ResetAxis)
class addIsotopeWindow(QtWidgets.QDialog):
    """Modal dialog for adding one isotope to the spin system.

    After ``exec_()`` the caller reads ``Isotope``, ``Shift`` and ``Multi``;
    ``closed`` is True when the dialog was cancelled.
    """

    def __init__(self, parent):
        super(addIsotopeWindow, self).__init__(parent)
        self.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.Tool)
        self.father = parent
        # Results read by the caller after exec_().
        self.Isotope = ''
        self.Shift = 0
        self.Multi = 0
        self.closed = False
        self.setWindowTitle("Add Isotope")
        layout = QtWidgets.QGridLayout(self)
        for column, caption in enumerate(["Type:", "Shift [ppm]:", "Multiplicity:"]):
            layout.addWidget(QtWidgets.QLabel(caption), 0, column, QtCore.Qt.AlignHCenter)
        self.typeSetting = QtWidgets.QComboBox()
        self.typeSetting.addItems(en.ABBREVLIST)
        self.typeSetting.setCurrentIndex(0)
        layout.addWidget(self.typeSetting, 1, 0)
        self.shiftSetting = QtWidgets.QLineEdit()
        self.shiftSetting.setText(str(0))
        layout.addWidget(self.shiftSetting, 1, 1)
        self.multiSettings = QtWidgets.QSpinBox()
        self.multiSettings.setValue(1)
        self.multiSettings.setMinimum(1)
        layout.addWidget(self.multiSettings, 1, 2)
        cancelButton = QtWidgets.QPushButton("&Cancel")
        cancelButton.clicked.connect(self.closeEvent)
        layout.addWidget(cancelButton, 13, 0)
        okButton = QtWidgets.QPushButton("&Ok")
        okButton.clicked.connect(self.applyAndClose)
        layout.addWidget(okButton, 13, 2)
        self.show()
        self.setFixedSize(self.size())

    def closeEvent(self, *args):
        """Flag the dialog as cancelled and tear it down."""
        self.closed = True
        self.accept()
        self.deleteLater()

    def applyAndClose(self):
        """Collect the entered settings and close with acceptance."""
        self.Isotope = en.ABBREVLIST[self.typeSetting.currentIndex()]
        self.Shift = safeEval(self.shiftSetting.text())
        self.Multi = self.multiSettings.value()
        self.accept()
        self.deleteLater()
class setJWindow(QtWidgets.QDialog):
    """Dialog for entering J-couplings [Hz] between spin pairs.

    Only the strict upper triangle of the matrix is editable. After
    ``exec_()`` the caller reads ``Jmatrix`` (updated on Ok) and ``closed``
    (True when the dialog was cancelled).
    """

    def __init__(self, parent, Jmatrix):
        super(setJWindow, self).__init__(parent)
        self.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.Tool)
        self.setWindowTitle("Set J-couplings")
        self.father = parent
        self.closed = False
        self.Jmatrix = Jmatrix
        self.numSpins = Jmatrix.shape[0]
        grid = QtWidgets.QGridLayout(self)
        # One QLineEdit per (spin, subspin) with subspin > spin; the rest of
        # the table stays None.
        self.jInputWidgets = [[None] * self.numSpins for _ in range(self.numSpins)]
        grid.addWidget(QtWidgets.QLabel('<b>Spin #</b>'), 0, 0, QtCore.Qt.AlignHCenter)
        for spin in range(self.numSpins):
            grid.addWidget(QtWidgets.QLabel('<b>' + str(spin + 1) + '</b>'), spin + 1, 0, QtCore.Qt.AlignHCenter)
            grid.addWidget(QtWidgets.QLabel('<b>' + str(spin + 1) + '</b>'), 0, spin + 1, QtCore.Qt.AlignHCenter)
            for subspin in range(self.numSpins):
                if subspin > spin:
                    self.jInputWidgets[spin][subspin] = QtWidgets.QLineEdit()
                    self.jInputWidgets[spin][subspin].setText(str(self.Jmatrix[spin, subspin]))
                    self.jInputWidgets[spin][subspin].setAlignment(QtCore.Qt.AlignHCenter)
                    grid.addWidget(self.jInputWidgets[spin][subspin], spin + 1, subspin + 1)
        grid.setColumnMinimumWidth(1, 50)
        cancelButton = QtWidgets.QPushButton("&Cancel")
        cancelButton.clicked.connect(self.closeEvent)
        grid.addWidget(cancelButton, self.numSpins + 5, 0)
        okButton = QtWidgets.QPushButton("&Ok")
        okButton.clicked.connect(self.applyAndClose)
        grid.addWidget(okButton, self.numSpins + 5, self.numSpins + 5)
        self.show()
        self.setFixedSize(self.size())

    def closeEvent(self, *args):
        """Mark the dialog as cancelled and tear it down."""
        self.closed = True
        self.accept()
        self.deleteLater()

    def applyAndClose(self):
        """Copy validated inputs into Jmatrix and close.

        If any field fails to parse the dialog stays open so the user can
        correct it.
        """
        for spin in range(self.numSpins):
            for subspin in range(self.numSpins):
                if subspin > spin:
                    val = safeEval(self.jInputWidgets[spin][subspin].text())
                    # safeEval returns None for unparsable input; compare with
                    # `is None` (identity), not `== None`.
                    if val is None:
                        return
                    self.Jmatrix[spin, subspin] = val
        self.accept()
        self.deleteLater()
class addSliderWindow(QtWidgets.QDialog):
    """Dialog for configuring a new parameter slider (B0, shift or J).

    After ``exec_()`` the caller reads ``type`` (0=B0, 1=shift, 2=J),
    ``min``/``max`` (slider range) and ``spin1``/``spin2`` (1-based spin
    indices); ``closed`` is True when the dialog was cancelled.
    """

    def __init__(self, parent, numSpins):
        super(addSliderWindow, self).__init__(parent)
        self.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.Tool)
        self.setWindowTitle("Add slider")
        self.father = parent
        self.closed = False
        self.numSpins = numSpins
        # Defaults; overwritten in applyAndClose when the user accepts.
        self.type = 0
        self.min = 0
        self.max = 10
        self.spin1 = 0
        self.spin2 = 0
        grid = QtWidgets.QGridLayout(self)
        grid.addWidget(QtWidgets.QLabel('Type:'), 0, 0, QtCore.Qt.AlignHCenter)
        self.typeSetting = QtWidgets.QComboBox()
        self.typeSetting.addItems(['B0 [T]', 'Shift [ppm]', 'J-coupling [Hz]'])
        self.typeSetting.currentIndexChanged.connect(self.typeChanged)
        grid.addWidget(self.typeSetting, 1, 0, QtCore.Qt.AlignHCenter)
        grid.addWidget(QtWidgets.QLabel('Minimum:'), 2, 0, QtCore.Qt.AlignHCenter)
        grid.addWidget(QtWidgets.QLabel('Maximum:'), 2, 1, QtCore.Qt.AlignHCenter)
        self.minInput = QtWidgets.QLineEdit()
        self.minInput.setText(str(self.min))
        grid.addWidget(self.minInput, 3, 0, QtCore.Qt.AlignHCenter)
        self.maxInput = QtWidgets.QLineEdit()
        self.maxInput.setText(str(self.max))
        grid.addWidget(self.maxInput, 3, 1, QtCore.Qt.AlignHCenter)
        # Spin selectors stay hidden until a type that needs them is chosen
        # (see typeChanged).
        self.spin1Label = QtWidgets.QLabel('Spin:')
        grid.addWidget(self.spin1Label, 4, 0, QtCore.Qt.AlignHCenter)
        self.spin1Label.hide()
        self.spin1Value = QtWidgets.QSpinBox()
        self.spin1Value.setValue(1)
        self.spin1Value.setMinimum(1)
        self.spin1Value.setMaximum(self.numSpins)
        grid.addWidget(self.spin1Value, 5, 0)
        self.spin1Value.hide()
        self.spin2Label = QtWidgets.QLabel('Spin #2:')
        grid.addWidget(self.spin2Label, 4, 1, QtCore.Qt.AlignHCenter)
        self.spin2Label.hide()
        self.spin2Value = QtWidgets.QSpinBox()
        self.spin2Value.setValue(1)
        self.spin2Value.setMinimum(1)
        self.spin2Value.setMaximum(self.numSpins)
        grid.addWidget(self.spin2Value, 5, 1)
        self.spin2Value.hide()
        cancelButton = QtWidgets.QPushButton("&Cancel")
        cancelButton.clicked.connect(self.closeEvent)
        grid.addWidget(cancelButton, 10, 0)
        okButton = QtWidgets.QPushButton("&Ok")
        okButton.clicked.connect(self.applyAndClose)
        grid.addWidget(okButton, 10, 1)
        grid.setRowStretch(9, 1)
        self.show()

    def typeChanged(self):
        """Show/hide the spin selectors to match the chosen slider type."""
        self.type = self.typeSetting.currentIndex()
        if self.type == 0:
            # B0: no spins involved.
            self.spin1Label.hide()
            self.spin1Value.hide()
            self.spin2Label.hide()
            self.spin2Value.hide()
        elif self.type == 1:
            # Shift: one spin.
            self.spin1Label.show()
            self.spin1Value.show()
            self.spin2Label.hide()
            self.spin2Value.hide()
        elif self.type == 2:
            # J-coupling: a pair of spins.
            self.spin1Label.show()
            self.spin1Value.show()
            self.spin2Label.show()
            self.spin2Value.show()

    def closeEvent(self, *args):
        """Mark the dialog as cancelled and tear it down."""
        self.closed = True
        self.accept()
        self.deleteLater()

    def applyAndClose(self):
        """Validate input and close with acceptance; stay open when invalid."""
        self.min = safeEval(self.minInput.text())
        self.max = safeEval(self.maxInput.text())
        self.spin1 = self.spin1Value.value()
        self.spin2 = self.spin2Value.value()
        if self.type == 2:
            if self.spin1 == self.spin2:
                # A J-coupling needs two distinct spins.
                return
            if self.spin2 < self.spin1:
                # Normalize to spin1 < spin2 (upper triangle of the J-matrix).
                self.spin1, self.spin2 = (self.spin2, self.spin1)
        # safeEval returns None for unparsable input; use identity comparison.
        if self.min is None or self.max is None:
            return
        if self.min > self.max:
            self.min, self.max = (self.max, self.min)
        self.accept()
        self.deleteLater()
class MainProgram(QtWidgets.QMainWindow):
    """Jellyfish main window: menu bar, matplotlib canvas, settings and
    spin-system panels, plus the simulation state (B0, Lb, limits, spins).
    """
    def __init__(self, root):
        """Build menus, canvas and child frames, and set default parameters."""
        super(MainProgram, self).__init__()
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.main_widget = QtWidgets.QWidget(self)
        self.mainFrame = QtWidgets.QGridLayout(self.main_widget)
        self.main_widget.setFocus()
        self.setCentralWidget(self.main_widget)
        self.menubar = self.menuBar()
        self.filemenu = QtWidgets.QMenu('&File', self)
        self.menubar.addMenu(self.filemenu)
        self.savefigAct = self.filemenu.addAction('Export Figure', self.saveFigure, QtGui.QKeySequence.Print)
        self.savefigAct.setToolTip('Export as Figure')
        self.savedatAct = self.filemenu.addAction('Export Data (ASCII)', self.saveASCII)
        self.savedatAct.setToolTip('Export as text')
        self.saveMatAct = self.filemenu.addAction('Export as ssNake .mat', self.saveSsNake)
        self.saveMatAct.setToolTip('Export as ssNake data')
        self.saveSimpAct = self.filemenu.addAction('Export as Simpson', self.saveSimpson)
        self.saveSimpAct.setToolTip('Export as Simpson spectrum')
        self.quitAct = self.filemenu.addAction('&Quit', self.fileQuit, QtGui.QKeySequence.Quit)
        self.quitAct.setToolTip('Close Jellyfish')
        self.aboutmenu = QtWidgets.QMenu('About', self)
        self.menubar.addMenu(self.aboutmenu)
        self.manualAct = self.aboutmenu.addAction('Manual', self.openManual)
        self.manualAct.setToolTip('Open Manual')
        # Matplotlib canvas embedded in the Qt layout.
        self.fig = Figure()
        self.canvas = FigureCanvas(self.fig)
        self.ax = self.fig.gca()
        self.mainFrame.addWidget(self.canvas, 0, 0)
        self.mainFrame.setColumnStretch(0, 1)
        self.mainFrame.setRowStretch(0, 1)
        # Default simulation parameters.
        self.B0 = 14.1 # Tesla
        self.Lb = 10 # Hz
        self.NumPoints = 1024 * 32
        self.Limits = np.array([-2.0, 8.0]) # ppm
        self.RefNucleus = '1H'
        self.RefFreq = 0
        # Derive RefFreq from RefNucleus and B0 before anything uses it.
        self.SetRefFreq()
        self.Jmatrix = None
        self.StrongCoupling = True
        self.SpinList = []
        self.Int = []
        self.Freq = []
        self.Jmatrix = np.zeros((len(self.SpinList), len(self.SpinList)))
        # Child frames take `self` as parent and read/write the state above.
        self.PlotFrame = PlotFrame(self, self.fig, self.canvas)
        self.settingsFrame = SettingsFrame(self)
        self.mainFrame.addWidget(self.settingsFrame, 1, 0)
        self.spinsysFrame = SpinsysFrame(self)
        self.mainFrame.addWidget(self.spinsysFrame, 0, 1, 2, 1)
        #self.sim()
    def openManual(self):
        """Open the bundled PDF manual with the platform's default viewer."""
        file = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + 'Documentation' + os.path.sep + 'Manual.pdf'
        if sys.platform.startswith('linux'):
            os.system("xdg-open " + '"' + file + '"')
        elif sys.platform.startswith('darwin'):
            os.system("open " + '"' + file + '"')
        elif sys.platform.startswith('win'):
            os.startfile(file)
    def setB0(self, B0):
        """Set B0 via the settings field so all dependent values update."""
        self.settingsFrame.B0Setting.setText(str(B0))
        self.settingsFrame.ApplySettings()
    def SetRefFreq(self):
        """Recompute the reference frequency [Hz] from RefNucleus and B0."""
        index = en.ABBREVLIST.index(self.RefNucleus)
        self.RefFreq = en.FREQRATIOLIST[index] * en.GAMMASCALE * 1e6 * self.B0
    def saveFigure(self):
        """Export the current figure as a PNG chosen via a file dialog."""
        f = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', 'Spectrum.png', filter='(*.png)')
        # PyQt5 returns a (filename, filter) tuple; PyQt4 returns a string.
        if type(f) is tuple:
            f = f[0]
        if f:
            dpi = 150
            self.fig.savefig(f, format='png', dpi=dpi)
    def saveASCII(self):
        """Export the spectrum as two-column (axis, real part) text."""
        f = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', 'Spectrum.txt', filter='(*.txt)')
        if type(f) is tuple:
            f = f[0]
        if f:
            data = np.zeros((len(self.Axis), 2))
            data[:, 0] = self.Axis
            data[:, 1] = np.real(self.Spectrum)
            np.savetxt(f, data)
    def saveSsNake(self):
        """Export the spectrum in ssNake's MATLAB format."""
        f = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', 'Spectrum.mat', filter='(*.mat)')
        if type(f) is tuple:
            f = f[0]
        if f:
            en.saveMatlabFile(self.Spectrum, self.Limits, self.RefFreq, self.Axis, f)
    def saveSimpson(self):
        """Export the spectrum in Simpson's .spe format."""
        f = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', 'Spectrum.spe', filter='(*.spe)')
        if type(f) is tuple:
            f = f[0]
        if f:
            en.saveSimpsonFile(self.Spectrum, self.Limits, self.RefFreq, f)
    def fileQuit(self):
        """Close the main window (and thus the application)."""
        self.close()
    def sim(self, ResetXAxis=False, ResetYAxis=False, recalc=True):
        """Run (or re-render) the simulation and update the plot.

        Args:
            ResetXAxis: reset the plot's x-axis after drawing.
            ResetYAxis: reset the plot's y-axis after drawing.
            recalc: when False, reuse the previous frequencies/intensities
                and only regenerate the spectrum (e.g. after a Lb change).
        """
        if len(self.SpinList) > 0:
            if recalc:
                spinSysList = en.expandSpinsys(self.SpinList, self.Jmatrix)
                self.Freq, self.Int = en.getFreqInt(spinSysList, self.B0, self.StrongCoupling)
            self.Spectrum, self.Axis, self.RefFreq = en.MakeSpectrum(self.Int, self.Freq, self.Limits, self.RefFreq, self.Lb, self.NumPoints)
        else:
            # No spins: show an empty (zero) spectrum across the limits.
            self.Axis = self.Limits
            self.Spectrum = np.array([0, 0])
        self.PlotFrame.setData(self.Axis, self.Spectrum)
        if ResetXAxis:
            self.PlotFrame.plotReset(xReset=True, yReset=False)
        if ResetYAxis:
            self.PlotFrame.plotReset(xReset=False, yReset=True)
        self.PlotFrame.showFid()
if __name__ == '__main__':
    # Start the Qt application; the process exit status propagates from the
    # event loop's exec_() return value.
    root = QtWidgets.QApplication(sys.argv)
    mainProgram = MainProgram(root)
    mainProgram.setWindowTitle(u"Jellyfish \u2014 J-coupling simulations")
    mainProgram.show()
    sys.exit(root.exec_())
| gpl-3.0 |
girving/tensorflow | tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py | 16 | 13781 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TensorFlow Eager Execution Example: RNN Colorbot.
This example builds, trains, and evaluates a multi-layer RNN that can be
run with eager execution enabled. The RNN is trained to map color names to
their RGB values: it takes as input a one-hot encoded character sequence and
outputs a three-tuple (R, G, B) (scaled by 1/255).
For example, say we'd like the RNN Colorbot to generate the RGB values for the
color white. To represent our query in a form that the Colorbot could
understand, we would create a sequence of five 256-long vectors encoding the
ASCII values of the characters in "white". The first vector in our sequence
would be 0 everywhere except for the ord("w")-th position, where it would be
1, the second vector would be 0 everywhere except for the
ord("h")-th position, where it would be 1, and similarly for the remaining three
vectors. We refer to such indicator vectors as "one-hot encodings" of
characters. After consuming these vectors, a well-trained Colorbot would output
the three tuple (1, 1, 1), since the RGB values for white are (255, 255, 255).
We are of course free to ask the colorbot to generate colors for any string we'd
like, such as "steel gray," "tensorflow orange," or "green apple," though
your mileage may vary as your queries increase in creativity.
This example shows how to:
1. read, process, (one-hot) encode, and pad text data via the
Datasets API;
2. build a trainable model;
3. implement a multi-layer RNN using Python control flow
constructs (e.g., a for loop);
4. train a model using an iterative gradient-based method; and
The data used in this example is licensed under the Creative Commons
Attribution-ShareAlike License and is available at
https://en.wikipedia.org/wiki/List_of_colors:_A-F
https://en.wikipedia.org/wiki/List_of_colors:_G-M
https://en.wikipedia.org/wiki/List_of_colors:_N-Z
This example was adapted from
https://github.com/random-forests/tensorflow-workshop/tree/master/extras/colorbot
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import os
import sys
import time
import urllib
import six
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
layers = tf.keras.layers
def parse(line):
  """Parse one colors-CSV line ("name,r,g,b") into (rgb, chars, length).

  Returns:
    rgb: float32 tensor of the three channel values scaled into [0, 1].
    chars: one-hot (depth 256) encoding of the color name's bytes.
    length: int64 scalar, the number of characters in the name.
  """
  fields = tf.string_split([line], ",").values
  # Channels arrive as byte values 0-255; normalize to [0, 1].
  rgb = tf.string_to_number(fields[1:], out_type=tf.float32) / 255.
  # One-hot encode each byte of the color name for the RNN input.
  name = fields[0]
  chars = tf.one_hot(tf.decode_raw(name, tf.uint8), depth=256)
  # The true sequence length, needed later to pick the right RNN state.
  length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
  return rgb, chars, length
def maybe_download(filename, work_directory, source_url):
  """Download the data from source_url, unless it's already here.

  Args:
    filename: string, name of the file in the directory.
    work_directory: string, path to working directory.
    source_url: url to download from if file doesn't exist.

  Returns:
    Path to resulting file.
  """
  # Resolve urlretrieve through six so the lookup works on both Python 2
  # (urllib.urlretrieve) and Python 3 (urllib.request.urlretrieve); the bare
  # `import urllib` at the top of the file does not guarantee that the
  # `urllib.request` submodule is loaded on Python 3.
  from six.moves.urllib.request import urlretrieve
  if not tf.gfile.Exists(work_directory):
    tf.gfile.MakeDirs(work_directory)
  filepath = os.path.join(work_directory, filename)
  if not tf.gfile.Exists(filepath):
    temp_file_name, _ = urlretrieve(source_url)
    tf.gfile.Copy(temp_file_name, filepath)
    with tf.gfile.GFile(filepath) as f:
      size = f.size()
    print("Successfully downloaded", filename, size, "bytes.")
  return filepath
def load_dataset(data_dir, url, batch_size):
  """Loads the colors data at path into a PaddedDataset.

  Downloads the CSV at `url` into `data_dir` if needed, skips its
  (color_name, r, g, b) header row, parses and shuffles the remaining rows,
  and groups them into padded batches so variable-length names batch
  together.
  """
  path = maybe_download(os.path.basename(url), data_dir, url)
  dataset = (tf.data.TextLineDataset(path)
             .skip(1)
             .map(parse)
             .shuffle(buffer_size=10000)
             .padded_batch(batch_size,
                           padded_shapes=([None], [None, None], [])))
  return dataset
# pylint: disable=not-callable
class RNNColorbot(tf.keras.Model):
  """Multi-layer (LSTM) RNN that regresses on real-valued vector labels.

  Input is a padded batch of one-hot character sequences plus their true
  lengths; output is one label_dimension-long vector per sequence.
  """
  def __init__(self, rnn_cell_sizes, label_dimension, keep_prob):
    """Constructs an RNNColorbot.

    Args:
      rnn_cell_sizes: list of integers denoting the size of each LSTM cell in
        the RNN; rnn_cell_sizes[i] is the size of the i-th layer cell
      label_dimension: the length of the labels on which to regress
      keep_prob: (1 - dropout probability); dropout is applied to the outputs of
        each LSTM layer
    """
    super(RNNColorbot, self).__init__(name="")
    self.label_dimension = label_dimension
    self.keep_prob = keep_prob
    # Track the cells with tf.contrib.checkpoint.List so their variables are
    # captured by the model's checkpointing machinery.
    self.cells = tf.contrib.checkpoint.List(
        [tf.nn.rnn_cell.BasicLSTMCell(size) for size in rnn_cell_sizes])
    self.relu = layers.Dense(
        label_dimension, activation=tf.nn.relu, name="relu")
  def call(self, inputs, training=False):
    """Implements the RNN logic and prediction generation.

    Args:
      inputs: A tuple (chars, sequence_length), where chars is a batch of
        one-hot encoded color names represented as a Tensor with dimensions
        [batch_size, time_steps, 256] and sequence_length holds the length
        of each character sequence (color name) as a Tensor with dimension
        [batch_size].
      training: whether the invocation is happening during training

    Returns:
      A tensor of dimension [batch_size, label_dimension] that is produced by
      passing chars through a multi-layer RNN and applying a ReLU to the final
      hidden state.
    """
    (chars, sequence_length) = inputs
    # Transpose the first and second dimensions so that chars is of shape
    # [time_steps, batch_size, dimension].
    chars = tf.transpose(chars, [1, 0, 2])
    # The outer loop cycles through the layers of the RNN; the inner loop
    # executes the time steps for a particular layer.
    batch_size = int(chars.shape[1])
    for l in range(len(self.cells)):
      cell = self.cells[l]
      outputs = []
      state = cell.zero_state(batch_size, tf.float32)
      # Unstack the inputs to obtain a list of batches, one for each time step.
      chars = tf.unstack(chars, axis=0)
      for ch in chars:
        output, state = cell(ch, state)
        outputs.append(output)
      # The outputs of this layer are the inputs of the subsequent layer.
      chars = tf.stack(outputs, axis=0)
      if training:
        # Dropout between layers (and after the last layer) during training.
        chars = tf.nn.dropout(chars, self.keep_prob)
    # Extract the correct output (i.e., hidden state) for each example. All the
    # character sequences in this batch were padded to the same fixed length so
    # that they could be easily fed through the above RNN loop. The
    # `sequence_length` vector tells us the true lengths of the character
    # sequences, letting us obtain for each sequence the hidden state that was
    # generated by its non-padding characters.
    batch_range = [i for i in range(batch_size)]
    indices = tf.stack([sequence_length - 1, batch_range], axis=1)
    hidden_states = tf.gather_nd(chars, indices)
    return self.relu(hidden_states)
def loss(labels, predictions):
  """Mean squared error between predictions and their target labels."""
  squared_error = tf.square(predictions - labels)
  return tf.reduce_mean(squared_error)
def test(model, eval_data):
  """Computes the average loss on eval_data, which should be a Dataset."""
  mean_loss = tfe.metrics.Mean("loss")
  for labels, chars, sequence_length in tfe.Iterator(eval_data):
    # Inference mode: no dropout.
    preds = model((chars, sequence_length), training=False)
    mean_loss(loss(labels, preds))
  print("eval/loss: %.6f\n" % mean_loss.result())
  with tf.contrib.summary.always_record_summaries():
    tf.contrib.summary.scalar("loss", mean_loss.result())
def train_one_epoch(model, optimizer, train_data, log_interval=10):
  """Trains model for one pass over train_data using optimizer.

  Loss summaries are recorded every `log_interval` global steps, and the
  batch loss is printed every `log_interval` batches.
  """
  tf.train.get_or_create_global_step()
  def model_loss(labels, chars, sequence_length):
    # Forward pass in training mode (dropout active); log the batch loss.
    predictions = model((chars, sequence_length), training=True)
    loss_value = loss(labels, predictions)
    tf.contrib.summary.scalar("loss", loss_value)
    return loss_value
  for (batch, (labels, chars, sequence_length)) in enumerate(
      tfe.Iterator(train_data)):
    with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):
      # Bind this batch's tensors so the optimizer can re-evaluate the loss.
      batch_model_loss = functools.partial(model_loss, labels, chars,
                                           sequence_length)
      optimizer.minimize(
          batch_model_loss, global_step=tf.train.get_global_step())
      if log_interval and batch % log_interval == 0:
        print("train/batch #%d\tloss: %.6f" % (batch, batch_model_loss()))
# Colorbot training and evaluation CSVs (color_name, r, g, b rows) hosted in
# the tensorflow-workshop repository archive.
SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv"
SOURCE_TEST_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/test.csv"
def main(_):
    """Downloads data, trains the colorbot, then runs an interactive prompt."""
    data_dir = os.path.join(FLAGS.dir, "data")
    train_data = load_dataset(
        data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size)
    eval_data = load_dataset(
        data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size)

    # label_dimension=3: the model predicts an (R, G, B) triple.
    model = RNNColorbot(
        rnn_cell_sizes=FLAGS.rnn_cell_sizes,
        label_dimension=3,
        keep_prob=FLAGS.keep_probability)
    optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)

    if FLAGS.no_gpu or tfe.num_gpus() <= 0:
        print(tfe.num_gpus())
        device = "/cpu:0"
    else:
        device = "/gpu:0"
    print("Using device %s." % device)

    log_dir = os.path.join(FLAGS.dir, "summaries")
    tf.gfile.MakeDirs(log_dir)
    # Separate writers so train and eval summaries land in separate subdirs.
    train_summary_writer = tf.contrib.summary.create_file_writer(
        os.path.join(log_dir, "train"), flush_millis=10000)
    test_summary_writer = tf.contrib.summary.create_file_writer(
        os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")

    with tf.device(device):
        for epoch in range(FLAGS.num_epochs):
            start = time.time()
            with train_summary_writer.as_default():
                train_one_epoch(model, optimizer, train_data, FLAGS.log_interval)
            end = time.time()
            print("train/time for epoch #%d: %.2f" % (epoch, end - start))
            with test_summary_writer.as_default():
                test(model, eval_data)

    print("Colorbot is ready to generate colors!")
    # Interactive loop: read a color name, predict its RGB value.
    while True:
        try:
            color_name = six.moves.input(
                "Give me a color name (or press enter to exit): ")
        except EOFError:
            return
        if not color_name:
            return

        _, chars, length = parse(color_name)
        with tf.device(device):
            (chars, length) = (tf.identity(chars), tf.identity(length))
        # Add a batch dimension of 1 for the single query.
        chars = tf.expand_dims(chars, 0)
        length = tf.expand_dims(length, 0)
        preds = tf.unstack(model((chars, length), training=False)[0])

        # Predictions cannot be negative, as they are generated by a ReLU layer;
        # they may, however, be greater than 1.
        clipped_preds = tuple(min(float(p), 1.0) for p in preds)
        rgb = tuple(int(p * 255) for p in clipped_preds)
        print("rgb:", rgb)
        data = [[clipped_preds]]
        if HAS_MATPLOTLIB:
            plt.imshow(data)
            plt.title(color_name)
            plt.show()
if __name__ == "__main__":
    # Command-line configuration; unrecognized flags are forwarded to tfe.run.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dir",
        type=str,
        default="/tmp/rnn_colorbot/",
        help="Directory to download data files and save logs.")
    parser.add_argument(
        "--log_interval",
        type=int,
        default=10,
        metavar="N",
        help="Log training loss every log_interval batches.")
    parser.add_argument(
        "--num_epochs", type=int, default=20, help="Number of epochs to train.")
    parser.add_argument(
        "--rnn_cell_sizes",
        type=int,
        nargs="+",
        default=[256, 128],
        help="List of sizes for each layer of the RNN.")
    parser.add_argument(
        "--batch_size",
        type=int,
        default=64,
        help="Batch size for training and eval.")
    parser.add_argument(
        "--keep_probability",
        type=float,
        default=0.5,
        help="Keep probability for dropout between layers.")
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=0.01,
        help="Learning rate to be used during training.")
    parser.add_argument(
        "--no_gpu",
        action="store_true",
        default=False,
        help="Disables GPU usage even if a GPU is available.")

    FLAGS, unparsed = parser.parse_known_args()
    tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
WMD-group/MacroDensity | examples/PlanarAverage.py | 1 | 1084 | #! /usr/bin/env python
import macrodensity as md
import math
import numpy as np
import matplotlib.pyplot as plt

# Input settings: a VASP LOCPOT file and the repeat length used as the
# macroscopic-averaging window (same length units as the lattice —
# presumably Angstrom; confirm against the LOCPOT being analysed).
input_file = 'LOCPOT'
lattice_vector = 4.75
output_file = 'planar.dat'

# No need to alter anything after here
#------------------------------------------------------------------
# Get the potential
# This section should not be altered
#------------------------------------------------------------------
vasp_pot, NGX, NGY, NGZ, Lattice = md.read_vasp_density(input_file)
vector_a,vector_b,vector_c,av,bv,cv = md.matrix_2_abc(Lattice)
# Grid spacing along each lattice direction.
resolution_x = vector_a/NGX
resolution_y = vector_b/NGY
resolution_z = vector_c/NGZ
grid_pot, electrons = md.density_2_grid(vasp_pot,NGX,NGY,NGZ)
#------------------------------------------------------------------
## POTENTIAL
planar = md.planar_average(grid_pot,NGX,NGY,NGZ)
## MACROSCOPIC AVERAGE
macro = md.macroscopic_average(planar,lattice_vector,resolution_z)

# Plot both averages, save the figure, and write the planar average to disk.
plt.plot(planar)
plt.plot(macro)
plt.savefig('Planar.eps')
plt.show()
np.savetxt(output_file,planar)
##------------------------------------------------------------------
| mit |
rohanisaac/spectra | spectra/calibrate.py | 1 | 18376 | """
Calibrate spectrum with neon data
"""
from .peaks import find_peaks
from .fitting import fit_data, line_fit, poly_fit, fit_data_bg, fit_peaks, peak_table
from .array_help import find_nearest_tolerance
from .convert import rwn2wl, wl2rwn, rwn2wn, wl2wn
from .normalize import normalize
from .read_files import read_horiba
import peakutils as pu
import numpy as np
import matplotlib.pyplot as plt
import os
import statsmodels.api as sm
def neon_peaks(source="neon"):
    """Return the reference emission-line positions for *source*.

    Reads the tab-delimited file ``atomic_lines/<source>.txt`` shipped next
    to this module and returns its contents as a numpy array.
    """
    package_dir = os.path.dirname(os.path.realpath(__file__))
    source_path = os.path.join(package_dir, "atomic_lines", "%s.txt" % source)
    return np.genfromtxt(source_path, delimiter="\t")
def calibrate(x, found_peaks, source="neon", tolerance=1):
    """
    Uses a known set of reference peaks to calibrate a spectrum.

    Parameters
    ----------
    x (float array)
        x-data from a particular source (should be in nm) to be calibrated
    found_peaks (float array)
        peaks that have been found in the y data corresponding to the
        uncalibrated x data
    source (string)
        base name of the reference file in ``atomic_lines/`` to match against
    tolerance (float)
        tolerance when searching for matching peaks

    Returns
    -------
    calibrated-x (float array)
        x data that has been calibrated by a linear fit of matched peaks
    """
    source_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "atomic_lines", "%s.txt" % source
    )
    source_peaks = np.genfromtxt(source_path, delimiter="\t")
    # Pair each found peak with the nearest reference line within tolerance.
    fpeaks = []
    speaks = []
    for f in found_peaks:
        nearest = find_nearest_tolerance(f, source_peaks, tolerance=tolerance)
        if nearest is not None:
            print(f, nearest)
            fpeaks.append(f)
            speaks.append(nearest)
    if len(set(speaks)) < len(speaks):  # duplicates in data
        print("Duplicate matches, reduce tolerance")
    # Linear map: measured positions -> reference positions.
    slope, inter, _ = line_fit(np.array(fpeaks), np.array(speaks), errors=False)
    print("corrected: %sx + %s" % (slope, inter))
    return (x * slope) + inter
def find_laser_wavelength(
    neon_file,
    laser_file,
    tolerance=1,
    offset=0,
    neon_thres=0.1,
    plots=False,
    plot_error_scale=1000,
):
    """
    Estimate the true laser wavelength (and its error) from a neon reference.

    1. Load in both files
    2. Find peaks in Neon (list, threshold > 10, min dist > 20)
    3. Find peak in laser (single, >50%)
    4. Fit peaks in Neon (lmfit)
    5. Fit peaks in Laser (lmfit)
    6. Find expected neon peaks that are closest to the measured peaks
    7. Fit (OLS) the expected vs the measured (with errors)
    8. Convert the Measured Laser peak (with error) with (slope/intercept + errors) to True peak
    """
    # 1
    print("= Loading peaks =")
    neon = read_horiba(neon_file, x="nm")
    laser = read_horiba(laser_file, x="nm")
    # 2, 3
    neon_peaks = pu.indexes(neon["Intensity"], thres=neon_thres, min_dist=20)
    laser_peak = pu.indexes(laser["Intensity"], thres=0.5, min_dist=20)
    print(neon_peaks, laser_peak)
    # Error checks
    if len(neon_peaks) < 3:
        print("May be too few peaks for current system. Check data/settings")
    if len(laser_peak) > 1:
        print("Too many laser peaks found. Check data/settings.")
    # 4
    print("\n= Fitting neon peaks =")
    # Fit neon data to this model
    outn = fit_data_bg(
        neon["Wavelength_nm"], neon["Intensity"], neon_peaks, width=1.0, bg_ord=0
    )
    meas_neon_peaks = []
    meas_neon_peaks_err = []
    # Collect fitted peak centers (parameters ending in "x0") and errors.
    for key in outn.params:
        if key.endswith("x0"):
            val = outn.params[key]
            print(val)
            meas_neon_peaks.append(val.value)
            meas_neon_peaks_err.append(val.stderr)
    # 5
    print("\n= Fitting laser peaks =")
    # Fit laser data to this model
    outl = fit_data_bg(
        laser["Wavelength_nm"], laser["Intensity"], laser_peak, bg_ord=0, width=1.0
    )
    for key in outl.params:
        if key.endswith("x0"):
            val = outl.params[key]
            print(val)
            meas_laser_peak = val.value
            meas_laser_peak_err = val.stderr
    # 6
    print("\n= Finding matching peaks =")
    source = "neon"
    source_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "atomic_lines", "%s.txt" % source
    )
    ref_neon_peaks = np.genfromtxt(source_path, delimiter="\t")
    # usable peaks
    mes_peaks = []
    mes_peaks_err = []
    ref_peaks = []
    # loop through measured peaks and try to find a match
    for p, e in zip(meas_neon_peaks, meas_neon_peaks_err):
        # include offset
        nearest = find_nearest_tolerance(
            p - offset, ref_neon_peaks, tolerance=tolerance
        )
        if nearest is not None:
            print(p, nearest)
            mes_peaks.append(p)
            mes_peaks_err.append(e)
            ref_peaks.append(nearest)
    if len(set(ref_peaks)) < len(ref_peaks):  # duplicates in data
        print("Duplicate matches, reduce tolerance or add more neon references")
    # 7
    print("\n= Fitting data =")
    # Fit with the predicting the reference with the measured
    f2 = poly_fit(np.array(mes_peaks), np.array(ref_peaks), order=1)
    print(f2.summary())
    # 8
    print("\n= Making prediction =")
    # Make a prediction based on data
    # put into same form as input
    laser_meas = np.array([1, meas_laser_peak])
    laser_pred = f2.get_prediction(
        exog=laser_meas, weights=1.0 / (meas_laser_peak_err ** 2)
    )
    laser_pred_center = laser_pred.predicted_mean[0]
    # find the relative error
    # NOTE(review): alpha=0.05 below is a 95% (roughly two-sigma) confidence
    # interval; an earlier comment claimed a 68% (one-sigma, alpha=0.32)
    # interval was intended — confirm which confidence level is wanted.
    laser_pred_error = np.max(
        np.abs(laser_pred.conf_int(alpha=0.05) - laser_pred_center)
    )
    # plots
    if plots:
        fig, ax = plt.subplots(1, 2, figsize=(12, 4))
        ax[0].plot(neon["Wavelength_nm"], normalize(neon["Intensity"]))
        ax[0].plot(laser["Wavelength_nm"], normalize(laser["Intensity"]))
        ax[0].plot(
            neon["Wavelength_nm"][neon_peaks],
            normalize(neon["Intensity"])[neon_peaks],
            "rx",
            label="Neon peaks",
        )
        ax[0].plot(
            laser["Wavelength_nm"][laser_peak],
            normalize(laser["Intensity"])[laser_peak],
            "b+",
            label="Laser peak",
        )
        ax[0].set_xlabel("Wavelength (nm)")
        ax[0].legend()
        # scale up errors to see them
        x = np.array(mes_peaks)
        xe = np.array(mes_peaks_err)
        yf = np.array(ref_peaks)
        ax[1].errorbar(x, yf, xerr=plot_error_scale * xe, fmt="rx", label="Neon peaks")
        ax[1].plot(x, f2.fittedvalues, "b", label="Data")
        ax[1].errorbar(
            meas_laser_peak,
            laser_pred_center,
            yerr=laser_pred_error * plot_error_scale,
            fmt="k+",
            label="Laser peak estimate",
        )
        ax[1].set_xlabel("Measured values (nm)")
        ax[1].set_ylabel("True values (nm)")
        ax[1].legend()
    print("%4.4f ± %4.4f" % (laser_pred_center, laser_pred_error))
    return laser_pred_center, laser_pred_error
def find_best_offset(
    neon_file, tolerance=20, neon_thres=0.1, offset_range=30, plot=False
):
    """
    Scan candidate x-offsets and return the one that best aligns the measured
    neon peaks with the reference neon lines.

    1. Load the neon file
    2. Find peaks in Neon (threshold, min dist > 20)
    3. Fit peaks in Neon (lmfit)
    4. For each offset in +/- offset_range, sum the squared residuals between
       shifted measured peaks and their nearest reference lines
    5. Return the offset with the smallest total
    """
    # 1
    print("= Loading peaks =")
    neon = read_horiba(neon_file, x="nm")
    # 2, 3
    neon_peaks = pu.indexes(neon["Intensity"], thres=neon_thres, min_dist=20)
    print(neon_peaks)
    # Error checks
    if len(neon_peaks) < 3:
        print("May be too few peaks for current system. Check data/settings")
    # 4
    print("\n= Fitting neon peaks =")
    # Fit neon data to this model
    outn = fit_data_bg(
        neon["Wavelength_nm"], neon["Intensity"], neon_peaks, width=1.0, bg_ord=0
    )
    meas_neon_peaks = []
    meas_neon_peaks_err = []
    for key in outn.params:
        if key.endswith("x0"):
            val = outn.params[key]
            print(val)
            meas_neon_peaks.append(val.value)
            meas_neon_peaks_err.append(val.stderr)
    # 6
    print("\n= Searching for best offset value =")
    source = "neon"
    source_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "atomic_lines", "%s.txt" % source
    )
    ref_neon_peaks = np.genfromtxt(source_path, delimiter="\t")
    offsets = np.arange(-1 * offset_range, offset_range, 0.1)
    totals = []
    # for each offset
    for offset in offsets:
        # loop through measured peaks and try to find a match
        # usable peaks
        total = 0
        for p, e in zip(meas_neon_peaks, meas_neon_peaks_err):
            # include offset
            nearest = find_nearest_tolerance(
                p - offset, ref_neon_peaks, tolerance=tolerance
            )
            if nearest is not None:
                # NOTE(review): this accumulates *squared* residuals although
                # the plot's y-label says "absolute difference" — confirm
                # which is intended (the argmin is the same either way when
                # the matched set is identical).
                total += (nearest - (p - offset)) ** 2
        totals.append(total)
    if plot:
        plt.semilogy(offsets, totals)
        plt.xlabel("Offset value")
        plt.ylabel("Total absolute difference (smaller is better)")
    best_offset = offsets[np.argmin(np.array(totals))]
    print("%0.1f" % best_offset)
    return best_offset
def find_best_offset2(
    neon_file,
    laser_wl=633,
    source="neon",
    tolerance=20,
    neon_thres=0.1,
    offset_range=30,
    plot=False,
):
    """
    Like find_best_offset, but for data recorded on a relative-wavenumber
    axis: converts to wavelength using laser_wl first.

    1. Load in Neon file, convert wavenumber (1/cm) to wavelength (nm)
    2. Find peaks in Neon (list, threshold > 10, min dist > 20)
    3. Fit peaks in Neon (lmfit)
    4. For each offset, sum squared residuals against the reference lines
    5. Return the offset with the smallest total
    """
    # 1
    print("= Loading peaks =")
    neon = read_horiba(neon_file)
    neon_y = neon["Intensity"]
    neon_x = neon["Relative_Wavenumber"]
    # Convert relative wavenumber -> wavelength (nm) for matching in nm.
    neon_x_nm = rwn2wl(neon_x, laser_wl)
    # 2
    neon_peaks = pu.indexes(neon_y, thres=neon_thres, min_dist=20)
    print(neon_peaks)
    # Error checks
    if len(neon_peaks) < 3:
        print("May be too few peaks for current system. Check data/settings")
    # 4
    print("\n= Fitting neon peaks =")
    # Fit neon data to this model
    outn = fit_data_bg(neon_x_nm, neon_y, neon_peaks, width=1.0, bg_ord=0)
    meas_neon_peaks = []
    meas_neon_peaks_err = []
    for key in outn.params:
        if key.endswith("x0"):
            val = outn.params[key]
            print(val)
            meas_neon_peaks.append(val.value)
            meas_neon_peaks_err.append(val.stderr)
    # 6
    print("\n= Searching for best offset value =")
    source_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "atomic_lines", "%s.txt" % source
    )
    ref_neon_peaks = np.genfromtxt(source_path, delimiter="\t")
    offsets = np.arange(-1 * offset_range, offset_range, 0.1)
    totals = []
    # for each offset
    for offset in offsets:
        # loop through measured peaks and try to find a match
        # usable peaks
        total = 0
        for p, e in zip(meas_neon_peaks, meas_neon_peaks_err):
            # include offset
            nearest = find_nearest_tolerance(
                p - offset, ref_neon_peaks, tolerance=tolerance
            )
            if nearest is not None:
                # Sum of squared residuals for this candidate offset.
                total += (nearest - (p - offset)) ** 2
        totals.append(total)
    if plot:
        plt.semilogy(offsets, totals)
        plt.xlabel("Offset value")
        plt.ylabel("Total absolute difference (smaller is better)")
    best_offset = offsets[np.argmin(np.array(totals))]
    print("%0.1f" % best_offset)
    return best_offset
def calibrate_x_data2(
    neon_file,
    laser_wl,
    tolerance=1,
    offset=0,
    neon_thres=0.1,
    plots=False,
    plot_error_scale=1000,
):
    """
    Produce a calibrated relative-wavenumber x-axis from a neon spectrum.

    1. Load in neon file (assume data file has the same x-data)
    2. Find peaks in Neon (list, threshold > 10, min dist > 20)
    3. Fit peaks in Neon (lmfit)
    4. Finding matching peaks
    5. Fit (OLS) the expected vs the measured (with errors)
    6. Generating new x-values
    """
    # 1
    print("= Loading peaks =")
    neon = read_horiba(neon_file)
    neon_y = neon["Intensity"]
    x_old = neon["Relative_Wavenumber"]
    # Work in wavelength (nm) for matching against the neon line list.
    x_old_nm = rwn2wl(x_old, laser_wl)
    # 2
    neon_peaks = pu.indexes(neon_y, thres=neon_thres, min_dist=20)
    print(neon_peaks)
    # Error checks
    if len(neon_peaks) < 3:
        print("May be too few peaks for current system. Check data/settings")
    # 3
    print("\n= Fitting neon peaks =")
    # Fit neon data to this model
    outn = fit_data_bg(x_old_nm, neon_y, neon_peaks, width=1.0, bg_ord=0)
    meas_neon_peaks = []
    meas_neon_peaks_err = []
    for key in outn.params:
        if key.endswith("x0"):
            val = outn.params[key]
            print(val)
            meas_neon_peaks.append(val.value)
            meas_neon_peaks_err.append(val.stderr)
    # 4
    print("\n= Finding matching peaks =")
    source = "neon"
    source_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "atomic_lines", "%s.txt" % source
    )
    ref_neon_peaks = np.genfromtxt(source_path, delimiter="\t")
    # usable peaks
    mes_peaks = []
    mes_peaks_err = []
    ref_peaks = []
    # loop through measured peaks and try to find a match
    for p, e in zip(meas_neon_peaks, meas_neon_peaks_err):
        # include offset
        nearest = find_nearest_tolerance(
            p - offset, ref_neon_peaks, tolerance=tolerance
        )
        if nearest is not None:
            print(p, nearest)
            mes_peaks.append(p)
            mes_peaks_err.append(e)
            ref_peaks.append(nearest)
    if len(set(ref_peaks)) < len(ref_peaks):  # duplicates in data
        print("Duplicate matches, reduce tolerance or add more neon references")
    # 5
    print("\n= Fitting data =")
    # Fit with the predicting the reference with the measured
    f2 = poly_fit(np.array(mes_peaks), np.array(ref_peaks), order=1)
    print(f2.summary())
    # 6
    print("\n= Generating new x-values =")
    # NOTE(review): this tests the number of *detected* peaks, not the number
    # of *matched* pairs (ref_peaks); a detected peak with no reference match
    # would make the single-offset branch unreachable — confirm intent.
    if len(neon_peaks) == 1:
        # No fitting, just an offset
        x_new_nm = x_old_nm + (ref_peaks[0] - mes_peaks[0])
        x_new = wl2rwn(x_new_nm, laser_wl)
    else:
        x_new_nm = f2.get_prediction(exog=sm.add_constant(x_old_nm))
        x_new = wl2rwn(x_new_nm.predicted_mean, laser_wl)
    print("Difference at the start -> Old: %3.3f; New: %3.3f" % (x_old[0], x_new[0]))
    print("Difference at the end -> Old: %3.3f; New: %3.3f" % (x_old[-1], x_new[-1]))
    return x_new
def calibrate_neon_wavenumber(
    neon_file, laser_wl, offset=0, threshold=0.1, min_dist=10, tolerance=10, plots=False
):
    """
    Given a neon file and a laser wavelength, find a new, calibrated x-axis
    to use for that window.

    Options
    -------
    neon_file: path to neon file
    laser_wl: laser wavelength (nm)
    offset: offset between expected and reference neon peaks (cm-1)
    threshold: threshold for peak searching, default: 0.1 (10%)
    min_dist: distance between peaks (in number of points)
    tolerance: tolerance (cm-1) when matching up peaks (default: 10cm-1)
    plots: draw matplotlib plots

    Returns
    -------
    x_new: calibrated relative-wavenumber axis, same length as the input data
    """
    # load data and convert to absolute wn
    neon = read_horiba(neon_file)
    x_old = neon["Relative_Wavenumber"]
    exp_wn = rwn2wn(x_old, laser_wl)
    # Load the neon reference lines from the package data directory.
    # Fix: this previously used a hard-coded absolute path
    # (/home/rohan/...) that only existed on one machine; resolve it
    # relative to this module, consistent with the other functions here.
    source_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "atomic_lines", "neon.txt"
    )
    ref_neon = np.genfromtxt(source_path)
    # reference lines are stored in wavelength (nm); convert to wavenumber
    ref_wn = wl2wn(ref_neon)
    # find peaks in neon data (positions in absolute wavenumber)
    peaks = [
        exp_wn[i]
        for i in pu.indexes(neon["Intensity"], thres=threshold, min_dist=min_dist)
    ]
    print("Found peaks: ", peaks)
    out = fit_peaks(exp_wn, neon["Intensity"], peaks)
    # fit peaks
    out_df = peak_table([out], pandas=True)
    fit_peaks_l = np.array([int(i) for i in out_df["center_v"].values])
    print("Fit peaks: ", fit_peaks_l)
    # Find matching peaks
    mes_peaks = []
    ref_peaks = []
    for p in out_df["center_v"]:
        # include offset
        nearest = find_nearest_tolerance(p - offset, ref_wn, tolerance=tolerance)
        if nearest is not None:
            print("Matched peak", p, nearest)
            mes_peaks.append(p)
            ref_peaks.append(nearest)
    if len(set(ref_peaks)) < len(ref_peaks):  # duplicates in data
        print("Duplicate matches, reduce tolerance or add more neon references")
    print(mes_peaks, ref_peaks)
    # fit expected vs ref and predict new x-vals
    f2 = poly_fit(np.array(mes_peaks), np.array(ref_peaks), order=1)
    # print(f2.summary())
    print("\n= Generating new x-values =")
    if len(ref_peaks) == 1:
        # Only one matched line: no fitting, just an offset
        x_new_cm = exp_wn + (ref_peaks[0] - mes_peaks[0])
        x_new = wl2wn(laser_wl) - x_new_cm
    else:
        x_new_cm_t = f2.get_prediction(exog=sm.add_constant(exp_wn))
        x_new_cm = x_new_cm_t.predicted_mean
        x_new = wl2wn(laser_wl) - x_new_cm
    print("Difference at the start -> Old: %3.3f; New: %3.3f" % (x_old[0], x_new[0]))
    print("Difference at the end -> Old: %3.3f; New: %3.3f" % (x_old[-1], x_new[-1]))
    print(x_new_cm.shape)
    offset_new = int(x_new[0] - x_old[0])
    if plots:
        plt.figure()
        fig, ax = plt.subplots(2, 1, figsize=(12, 6))
        ax[0].set_title("{} Offset: {}".format(neon_file, offset))
        # convert to absolute wavenumbers as plot
        ax[0].plot(exp_wn, neon["Intensity"])
        # mark every reference line on both panels
        for r in ref_wn:
            ax[0].axvline(r, c="k", lw=0.5)
            ax[1].axvline(r, c="k", lw=0.5)
        ax[0].set_xlim(exp_wn[0], exp_wn[-1])
        ax[1].plot(x_new_cm, neon["Intensity"])
        ax[1].set_xlim(x_new_cm[0], x_new_cm[-1])
        ax[1].legend(["Offset: {}".format(offset_new)])
        plt.show()
    return x_new
| gpl-3.0 |
jakobworldpeace/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 26 | 7800 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
# Random symmetric positive-semidefinite matrix.
# NOTE(review): V appears unused within this module's visible tests — confirm
# whether it is leftover from a metric (e.g. mahalanobis) that needed it.
V = np.random.random((3, 3))
V = np.dot(V, V.T)

# Dimensionality used by the query tests below.
DIMENSION = 3

# Metric name -> extra kwargs for DistanceMetric.get_metric.
METRICS = {'euclidean': {},
           'manhattan': {},
           'chebyshev': {},
           'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    """Reference k-NN: exhaustive pairwise distances, then a partial sort."""
    dist_matrix = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
    ind = np.argsort(dist_matrix, axis=1)[:, :k]
    rows = np.arange(Y.shape[0])[:, None]
    dist = dist_matrix[rows, ind]
    return dist, ind
def check_neighbors(dualtree, breadth_first, k, metric, X, Y, kwargs):
    # Compare KDTree query results against the brute-force reference.
    kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
    dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
                            breadth_first=breadth_first)
    dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)

    # don't check indices here: if there are any duplicate distances,
    # the indices may not match. Distances should not have this problem.
    assert_array_almost_equal(dist1, dist2)
def test_kd_tree_query():
    # Nose-style generator test: one check_neighbors case per combination of
    # (metric, k, dualtree, breadth_first).
    np.random.seed(0)
    X = np.random.random((40, DIMENSION))
    Y = np.random.random((10, DIMENSION))

    for (metric, kwargs) in METRICS.items():
        for k in (1, 3, 5):
            for dualtree in (True, False):
                for breadth_first in (True, False):
                    yield (check_neighbors,
                           dualtree, breadth_first,
                           k, metric, X, Y, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
    # query_radius from the origin must return exactly the points within r.
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)

    eps = 1E-15  # roundoff error can cause test to fail
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))

    for r in np.linspace(rad[0], rad[-1], 100):
        ind = kdt.query_radius([query_pt], r + eps)[0]
        i = np.where(rad <= r + eps)[0]
        # Compare as sorted index sets.
        ind.sort()
        i.sort()
        assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
    # Distances returned by query_radius must match direct computation.
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)

    eps = 1E-15  # roundoff error can cause test to fail
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))

    for r in np.linspace(rad[0], rad[-1], 100):
        ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
        ind = ind[0]
        dist = dist[0]
        d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
        assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
    """Reference KDE: direct O(n*m) kernel sums over every (Y, X) pair."""
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel)

    if kernel == 'gaussian':
        contrib = np.exp(-0.5 * (d * d) / (h * h))
    elif kernel == 'tophat':
        contrib = (d < h)
    elif kernel == 'epanechnikov':
        contrib = (1.0 - (d * d) / (h * h)) * (d < h)
    elif kernel == 'exponential':
        contrib = np.exp(-d / h)
    elif kernel == 'linear':
        contrib = (1 - d / h) * (d < h)
    elif kernel == 'cosine':
        contrib = np.cos(0.5 * np.pi * d / h) * (d < h)
    else:
        raise ValueError('kernel not recognized')
    return norm * contrib.sum(-1)
def check_results(kernel, h, atol, rtol, breadth_first, Y, kdt, dens_true):
    # Tree-based KDE must agree with the slow reference within tolerances.
    dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
                              kernel=kernel,
                              breadth_first=breadth_first)
    assert_allclose(dens, dens_true, atol=atol,
                    rtol=max(rtol, 1e-7))
def test_kd_tree_kde(n_samples=100, n_features=3):
    # Generator test over kernel x bandwidth x tolerance x traversal order.
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    kdt = KDTree(X, leaf_size=10)

    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for h in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, h)

            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        yield (check_results, kernel, h, atol, rtol,
                               breadth_first, Y, kdt, dens_true)
def test_gaussian_kde(n_samples=1000):
    # Compare gaussian KDE results to scipy.stats.gaussian_kde
    from scipy.stats import gaussian_kde
    np.random.seed(0)
    x_in = np.random.normal(0, 1, n_samples)
    x_out = np.linspace(-5, 5, 30)

    for h in [0.01, 0.1, 1]:
        kdt = KDTree(x_in[:, None])
        try:
            # gaussian_kde's bandwidth is relative to the data std.
            gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
        except TypeError:
            raise SkipTest("Old scipy, does not accept explicit bandwidth.")

        # Normalize the tree estimate by n to match the scipy density.
        dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
        dens_gkde = gkde.evaluate(x_out)
        assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
    # Two-point correlation counts must match a brute-force distance count.
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    r = np.linspace(0, 1, 10)
    kdt = KDTree(X, leaf_size=10)

    D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
    counts_true = [(D <= ri).sum() for ri in r]

    def check_two_point(r, dualtree):
        counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
        assert_array_almost_equal(counts, counts_true)

    for dualtree in (True, False):
        yield check_two_point, r, dualtree
def test_kd_tree_pickle():
    # A pickled/unpickled tree must give identical query results.
    import pickle
    np.random.seed(0)
    X = np.random.random((10, 3))
    kdt1 = KDTree(X, leaf_size=1)
    ind1, dist1 = kdt1.query(X)

    def check_pickle_protocol(protocol):
        s = pickle.dumps(kdt1, protocol=protocol)
        kdt2 = pickle.loads(s)
        ind2, dist2 = kdt2.query(X)
        assert_array_almost_equal(ind1, ind2)
        assert_array_almost_equal(dist1, dist2)

    for protocol in (0, 1, 2):
        yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
    # The heap must retain the n_nbrs smallest of 2*n_nbrs pushed distances,
    # in sorted order, per row.
    heap = NeighborsHeap(n_pts, n_nbrs)

    for row in range(n_pts):
        d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
        i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
        for d, i in zip(d_in, i_in):
            heap.push(row, d, i)

        ind = np.argsort(d_in)
        d_in = d_in[ind]
        i_in = i_in[ind]

        d_heap, i_heap = heap.get_arrays(sort=True)
        assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
        assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
    # nodeheap_sort must agree with numpy argsort on random values.
    vals = np.random.random(n_nodes).astype(DTYPE)

    i1 = np.argsort(vals)
    vals2, i2 = nodeheap_sort(vals)

    assert_array_almost_equal(i1, i2)
    assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
    # simultaneous_sort must match a row-wise numpy argsort applied to both
    # the distance and index arrays.
    dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
    ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)

    dist2 = dist.copy()
    ind2 = ind.copy()

    # simultaneous sort rows using function
    simultaneous_sort(dist, ind)

    # simultaneous sort rows using numpy
    i = np.argsort(dist2, axis=1)
    row_ind = np.arange(n_rows)[:, None]
    dist2 = dist2[row_ind, i]
    ind2 = ind2[row_ind, i]

    assert_array_almost_equal(dist, dist2)
    assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/numpy/lib/twodim_base.py | 26 | 26904 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
# Cached integer-type limits used by _min_int below.
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
    """Return the smallest signed int dtype covering [low, high]."""
    for info, dtype in ((i1, int8), (i2, int16), (i4, int32)):
        if info.min <= low and high <= info.max:
            return dtype
    return int64
def fliplr(m):
    """
    Flip array in the left/right direction.

    Reverses the order of the columns of `m`; rows keep their order.
    Equivalent to ``m[:, ::-1]`` and returns a view, so the operation
    is O(1).

    Parameters
    ----------
    m : array_like
        Input array, must be at least 2-D.

    Returns
    -------
    f : ndarray
        A view of `m` with the columns reversed.

    Raises
    ------
    ValueError
        If `m` has fewer than two dimensions.

    See Also
    --------
    flipud : Flip array in the up/down direction.
    rot90 : Rotate array counterclockwise.
    """
    arr = asanyarray(m)
    if arr.ndim < 2:
        raise ValueError("Input must be >= 2-d.")
    return arr[:, ::-1]
def flipud(m):
    """
    Flip array in the up/down direction.

    Reverses the order of the rows of `m`; columns keep their order.
    Equivalent to ``m[::-1, ...]`` and returns a view, so the operation
    is O(1). Works for 1-D input as well (reverses the elements).

    Parameters
    ----------
    m : array_like
        Input array, at least 1-D.

    Returns
    -------
    out : array_like
        A view of `m` with the rows reversed.

    Raises
    ------
    ValueError
        If `m` is 0-dimensional.

    See Also
    --------
    fliplr : Flip array in the left/right direction.
    rot90 : Rotate array counterclockwise.
    """
    arr = asanyarray(m)
    if arr.ndim < 1:
        raise ValueError("Input must be >= 1-d.")
    return arr[::-1, ...]
def rot90(m, k=1):
    """
    Rotate an array by 90 degrees in the counter-clockwise direction.

    The rotation acts on the first two dimensions, so the input must be
    at least 2-D. `k` is taken modulo 4.

    Parameters
    ----------
    m : array_like
        Array of two or more dimensions.
    k : integer
        Number of times the array is rotated by 90 degrees.

    Returns
    -------
    y : ndarray
        Rotated array.

    Raises
    ------
    ValueError
        If `m` has fewer than two dimensions.

    See Also
    --------
    fliplr : Flip an array horizontally.
    flipud : Flip an array vertically.
    """
    arr = asanyarray(m)
    if arr.ndim < 2:
        raise ValueError("Input must >= 2-d.")
    turns = k % 4
    if turns == 0:
        return arr
    if turns == 1:
        return fliplr(arr).swapaxes(0, 1)
    if turns == 2:
        return fliplr(flipud(arr))
    # turns == 3: one clockwise quarter-turn
    return fliplr(arr.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
    """
    Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output. If None, defaults to `N`.
    k : int, optional
        Index of the diagonal: 0 (the default) is the main diagonal, a
        positive value an upper diagonal, a negative value a lower one.
    dtype : data-type, optional
        Data-type of the returned array.

    Returns
    -------
    I : ndarray of shape (N,M)
        Zeros everywhere except the `k`-th diagonal, which holds ones.

    See Also
    --------
    identity : (almost) equivalent function
    diag : diagonal 2-D array from a 1-D array specified by the user.
    """
    if M is None:
        M = N
    out = zeros((N, M), dtype=dtype)
    if k >= M:
        # Diagonal lies entirely outside the array.
        return out
    # Flat starting index of the diagonal; stepping by M+1 walks it.
    start = k if k >= 0 else (-k) * M
    out[:M - k].flat[start::M + 1] = 1
    return out
def diag(v, k=0):
    """
    Extract a diagonal or construct a diagonal array.

    For 1-D input, returns a 2-D array with `v` on the `k`-th diagonal.
    For 2-D input, returns the `k`-th diagonal of `v` (see ``numpy.diagonal``
    for whether this is a copy or a view on your numpy version).

    Parameters
    ----------
    v : array_like
        1-D array to place on a diagonal, or 2-D array to extract from.
    k : int, optional
        Diagonal in question. Default 0; ``k>0`` selects diagonals above the
        main diagonal, ``k<0`` below.

    Returns
    -------
    out : ndarray
        The extracted diagonal or constructed diagonal array.

    Raises
    ------
    ValueError
        If `v` is not 1- or 2-dimensional.

    See Also
    --------
    diagonal : Return specified diagonals.
    diagflat : Create a 2-D array with the flattened input as a diagonal.
    trace : Sum along diagonals.
    """
    arr = asanyarray(v)
    ndim = len(arr.shape)
    if ndim == 1:
        n = arr.shape[0] + abs(k)
        out = zeros((n, n), arr.dtype)
        # Flat starting index of the target diagonal; step n+1 walks it.
        start = k if k >= 0 else (-k) * n
        out[:n - k].flat[start::n + 1] = arr
        return out
    if ndim == 2:
        return diagonal(arr, k)
    raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
    """
    Create a two-dimensional array with the flattened input as a diagonal.

    Parameters
    ----------
    v : array_like
        Input data, flattened and set as the `k`-th diagonal of the output.
    k : int, optional
        Diagonal to set; 0 (default) is the main diagonal, positive values
        select diagonals above it, negative values below.

    Returns
    -------
    out : ndarray
        The 2-D output array.

    See Also
    --------
    diag : MATLAB work-alike for 1-D and 2-D arrays.
    diagonal : Return specified diagonals.
    trace : Sum along diagonals.
    """
    # Remember the subclass wrapper (if any) before flattening to ndarray,
    # so subclasses such as matrix round-trip through this function.
    try:
        wrap = v.__array_wrap__
    except AttributeError:
        wrap = None
    flat = asarray(v).ravel()
    size = len(flat) + abs(k)
    out = zeros((size, size), flat.dtype)
    if k >= 0:
        rows = arange(size - k)
        targets = rows * size + rows + k
    else:
        rows = arange(size + k)
        targets = (rows - k) * size + rows
    out.flat[targets] = flat
    return wrap(out) if wrap else out
def tri(N, M=None, k=0, dtype=float):
    """
    An array with ones at and below the given diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the array.
    M : int, optional
        Number of columns; defaults to `N`.
    k : int, optional
        The sub-diagonal at and below which the array is filled. ``k = 0``
        is the main diagonal, ``k < 0`` below it, ``k > 0`` above it.
    dtype : dtype, optional
        Data type of the returned array. The default is float.

    Returns
    -------
    tri : ndarray of shape (N, M)
        Array whose lower triangle is filled with ones, i.e.
        ``T[i, j] == 1`` for ``i <= j + k``, 0 otherwise.
    """
    if M is None:
        M = N
    # Build the boolean mask as an outer >= comparison of row indices
    # against shifted column indices; _min_int picks the smallest integer
    # dtype able to hold the index ranges.
    row_idx = arange(N, dtype=_min_int(0, N))
    col_idx = arange(-k, M - k, dtype=_min_int(-k, M - k))
    m = greater_equal.outer(row_idx, col_idx)
    # astype with copy=False avoids a copy when dtype is already bool.
    return m.astype(dtype, copy=False)
def tril(m, k=0):
    """
    Lower triangle of an array.

    Return a copy of `m` with the elements above the `k`-th diagonal zeroed.

    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements; ``k = 0`` (default) is the
        main diagonal, ``k < 0`` below it, ``k > 0`` above.

    Returns
    -------
    tril : ndarray, shape (M, N)
        Lower triangle of `m`, of same shape and data-type as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle
    """
    arr = asanyarray(m)
    keep = tri(*arr.shape[-2:], k=k, dtype=bool)
    # where() broadcasts the 2-D mask over any leading dimensions.
    return where(keep, arr, zeros(1, arr.dtype))
def triu(m, k=0):
    """
    Upper triangle of an array.

    Return a copy of `m` with the elements below the `k`-th diagonal zeroed.
    Please refer to the documentation for `tril` for further details.

    See Also
    --------
    tril : lower triangle of an array
    """
    arr = asanyarray(m)
    # Everything strictly below the k-th diagonal gets zeroed, hence k - 1.
    below = tri(*arr.shape[-2:], k=k - 1, dtype=bool)
    return where(below, zeros(1, arr.dtype), arr)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
    """
    Generate a Vandermonde matrix.

    The columns of the output matrix are element-wise powers of the input
    vector. When `increasing` is False (default), the `i`-th column is
    ``x**(N - i - 1)``; when True, the powers run ``x**0 .. x**(N-1)``.

    Parameters
    ----------
    x : array_like
        1-D input array.
    N : int, optional
        Number of columns in the output; defaults to ``len(x)`` (square).
    increasing : bool, optional
        Order of the powers of the columns; reversed by default.

        .. versionadded:: 1.9.0

    Returns
    -------
    out : ndarray
        Vandermonde matrix of shape ``(len(x), N)``.

    See Also
    --------
    polynomial.polynomial.polyvander
    """
    x = asarray(x)
    if x.ndim != 1:
        raise ValueError("x must be a one-dimensional array or sequence.")
    ncols = len(x) if N is None else N
    out = empty((len(x), ncols), dtype=promote_types(x.dtype, int))
    # Work on a view that always has increasing powers left-to-right;
    # for the decreasing layout this view is the column-reversed output.
    powers = out if increasing else out[:, ::-1]
    if ncols > 0:
        powers[:, 0] = 1
    if ncols > 1:
        powers[:, 1:] = x[:, None]
        # Cumulative products along each row turn the repeated x column
        # into successive powers x, x**2, ..., in place.
        multiply.accumulate(powers[:, 1:], out=powers[:, 1:], axis=1)
    return out
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
    """
    Compute the bi-dimensional histogram of two data samples.

    Parameters
    ----------
    x, y : array_like, shape (N,)
        The x and y coordinates of the points to be histogrammed.
    bins : int or array_like or [int, int] or [array, array], optional
        The bin specification: a single count or single edge array shared
        by both dimensions, or a two-element sequence giving the count or
        edges per dimension (mixed [int, array] combinations are allowed).
    range : array_like, shape (2, 2), optional
        ``[[xmin, xmax], [ymin, ymax]]`` outermost bin edges for dimensions
        whose edges are not given explicitly in `bins`; samples outside are
        treated as outliers and not tallied.
    normed : bool, optional
        If True, return the bin density ``bin_count / sample_count /
        bin_area`` instead of raw counts.
    weights : array_like, shape (N,), optional
        Weight ``w_i`` for each sample ``(x_i, y_i)``.

    Returns
    -------
    H : ndarray, shape (nx, ny)
        The bi-dimensional histogram: `x` is histogrammed along the first
        axis and `y` along the second (note: not the Cartesian plotting
        convention; this matches `histogramdd`).
    xedges : ndarray, shape (nx+1,)
        The bin edges along the first dimension.
    yedges : ndarray, shape (ny+1,)
        The bin edges along the second dimension.

    See Also
    --------
    histogram : 1D histogram
    histogramdd : Multidimensional histogram
    """
    from numpy import histogramdd

    # Distinguish a scalar/pair specification from a single shared edge array.
    try:
        n_spec = len(bins)
    except TypeError:
        n_spec = 1  # scalar bin count
    if n_spec != 1 and n_spec != 2:
        # One edge array given: share it between both dimensions.
        shared_edges = asarray(bins, float)
        bins = [shared_edges, shared_edges]
    hist, edges = histogramdd([x, y], bins, range, normed, weights)
    return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
    """
    Return the indices to access (n, n) arrays, given a masking function.

    `mask_func` is called as ``mask_func(a, k)`` on an all-ones ``(n, n)``
    array and is expected to zero out certain locations (`triu` and `tril`
    behave this way); the indices of the remaining non-zero entries are
    returned.

    Parameters
    ----------
    n : int
        The returned indices will be valid for arrays of shape (n, n).
    mask_func : callable
        Callable with a `triu`/`tril`-like signature, ``mask_func(x, k)``.
    k : scalar
        Optional offset passed through to `mask_func`.

    Returns
    -------
    indices : tuple of arrays.
        The indices where ``mask_func(np.ones((n, n)), k)`` is non-zero.

    See Also
    --------
    triu, tril, triu_indices, tril_indices

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    base = ones((n, n), int)
    surviving = mask_func(base, k)
    return where(surviving != 0)
def tril_indices(n, k=0, m=None):
    """
    Return the indices for the lower-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        The row dimension of the arrays for which the returned indices
        will be valid.
    k : int, optional
        Diagonal offset (see `tril` for details).
    m : int, optional
        .. versionadded:: 1.9.0

        The column dimension; defaults to `n`.

    Returns
    -------
    inds : tuple of arrays
        The indices for the triangle, one array per dimension.

    See also
    --------
    triu_indices : similar function, for upper-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    tril, triu

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    lower_mask = tri(n, m, k=k, dtype=bool)
    return where(lower_mask)
def tril_indices_from(arr, k=0):
    """
    Return the indices for the lower-triangle of `arr`.

    See `tril_indices` for full details.

    Parameters
    ----------
    arr : array_like
        The indices will be valid for arrays with the same shape as `arr`.
    k : int, optional
        Diagonal offset (see `tril` for details).

    See Also
    --------
    tril_indices, tril

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    nrows, ncols = arr.shape
    return tril_indices(nrows, k=k, m=ncols)
def triu_indices(n, k=0, m=None):
    """
    Return the indices for the upper-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        The row dimension of the arrays for which the returned indices
        will be valid.
    k : int, optional
        Diagonal offset (see `triu` for details).
    m : int, optional
        .. versionadded:: 1.9.0

        The column dimension; defaults to `n`.

    Returns
    -------
    inds : tuple, shape(2) of ndarrays
        The indices for the triangle, one array per dimension. Can be used
        to slice an ndarray of shape (`n`, `n`).

    See also
    --------
    tril_indices : similar function, for lower-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    triu, tril

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    # The complement of the strictly-lower mask (offset k - 1) is exactly
    # the upper triangle at offset k.
    lower_mask = tri(n, m, k=k - 1, dtype=bool)
    return where(~lower_mask)
def triu_indices_from(arr, k=0):
    """
    Return the indices for the upper-triangle of `arr`.

    See `triu_indices` for full details.

    Parameters
    ----------
    arr : ndarray, shape(N, N)
        The indices will be valid for arrays with the same shape as `arr`.
    k : int, optional
        Diagonal offset (see `triu` for details).

    Returns
    -------
    triu_indices_from : tuple, shape(2) of ndarray
        Indices for the upper-triangle of `arr`.

    See Also
    --------
    triu_indices, triu

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    nrows, ncols = arr.shape
    return triu_indices(nrows, k=k, m=ncols)
| gpl-3.0 |
benschneider/sideprojects1 | hdf5_to_mtx/load_DCE_MAPS.py | 1 | 3292 | import numpy as np
from parsers import load_hdf5, dim
from parsers import savemtx, make_header
# import matplotlib.pyplot as plt
from changeaxis import interp_y
from scipy.constants import Boltzmann as Kb
from scipy.constants import h, e, pi
# filein = "S1_511_shot_100mV_4924_5217MHz"
# filein = "S1_514_S11_4924_5217MHz"
# filein = 'S1_478_DCE_MAP_large_coarse'
# filein = 'S1_514_DCE_MAP_high_pow'
# folder = "hdf5s//09//Data_0915//"
# filein = 'S1_515_DCE_MAP_low_pow'
# folder = "hdf5s//09//Data_0916//"
# filein = 'S1_515_DCE_MAP_700mV_pow'
# filein = 'S1_514_DCE_MAP_low_pow'
# Active dataset selection; earlier runs are kept above as commented-out
# alternatives.
folder = "hdf5s//09//Data_0912//"
filein = 'S1_482_shot_5090_5019MHz'
# Load the measurement into a data object (parsers.load_hdf5); its .data
# array is consumed by get_MP() below.
d = load_hdf5(folder+filein+'.hdf5')
def get_MP(d, chnum):
    # Build a complex response from two adjacent data channels: channel
    # `chnum` is taken as the real part and `chnum + 1` as the imaginary
    # part of each sample.
    compx = 1j*d.data[:, chnum+1, :]
    compx += d.data[:, chnum, :]
    # zip(*...) transposes so np.unwrap (default last axis) unwraps the
    # phase along axis 0 of the original array; the second zip transposes
    # back. NOTE(review): Python-2 idiom -- under Python 3, zip returns an
    # iterator and np.unwrap(zip(...)) would fail.
    phase = np.unwrap(zip(*np.angle(compx)))
    # Returns (magnitude ndarray, phase as a list of row tuples).
    return np.abs(compx), zip(*phase)
'''
MAT1 = np.zeros([8, d.shape[0], d.shape[1]])
MAT1[0] = d.data[:, 2, :]
MAT1[1] = d.data[:, 3, :]
MAT1[2], MAT1[3] = get_MP(d, 8)
MAT1[4], MAT1[5] = get_MP(d, 10)
MAT1[6] = d.data[:, 12, :]
MAT1[7] = d.data[:, 13, :]
'''
'''
# scale data to photon number
f1 = 4.924e9
f2 = 5.217e9
# B = 1.37e6
# B = 50e3
B = 5e6
G1 = 2.52057e+07
G2 = 2.16209e+07
MAT1[0] = MAT1[0]/(h*f1*B*G1)
MAT1[1] = MAT1[1]/(h*f2*B*G2)
# meas specific to change mag field to flux
# simply comment this paragraph out
n = 1
d.n3 = [dim(name=d.stepInst[n],
start=sPar[3],
stop=sPar[4],
pt=sPar[8],
scale=1)
for sPar in d.stepItems[n]]
d.n3 = d.n3[0]
xoff = 140.5e-3 # 139.3e-3
x1flux = 479.6e-3
d.n3.lin = (d.n3.lin-xoff)/x1flux + 0.5
d.n3.start = d.n3.lin[0]
d.n3.stop = d.n3.lin[-1]
d.n3.name = 'Flux/Flux0'
header1 = make_header(d.n3, d.n2, d.n1, meas_data=('Photons [#]'))
savemtx('mtx_out//' + filein + '.mtx', MAT1, header=header1)
factor = 10
y = (d.n2.lin*d.n2.lin/50.0) # position of the data
MAT2 = np.zeros([2, d.shape[0]*factor, d.shape[1]])
MAT2[0] =interp_y(y, MAT1[0], factor=factor)
MAT2[1] =interp_y(y, MAT1[1], factor=factor)
y2 = np.linspace(y[0], y[-1], len(y)*factor)
d.dim_y2 = d.n2
d.dim_y2.start = y2[0]
d.dim_y2.stop = y2[-1]
d.dim_y2.pt = len(y2)
d.dim_y2.lin = y2
d.dim_y2.name = 'Pump power (W)'
header2 = make_header(d.n1, d.dim_y2, d.n3, meas_data='Photons [#]')
savemtx('mtx_out//' + filein + 'W' + '.mtx', MAT2, header=header2)
# this is used if a forward and backward sweep was used...
n = 1
d.n3 = [dim(name=d.stepInst[n],
start=sPar[3],
stop=sPar[4],
pt=sPar[8],
scale=1)
for sPar in d.stepItems[n]]
d.n2[0].lin = (d.n2[0].lin-xoff)/x1flux + 0.5
d.n2[0].start = d.n2[0].lin[0]
d.n2[0].stop = d.n2[0].lin[-1]
d.n2[0].name = 'Flux/Flux0'
d.n2[1].lin = (d.n2[1].lin-xoff)/x1flux + 0.5
d.n2[1].start = d.n2[1].lin[0]
d.n2[1].stop = d.n2[1].lin[-1]
d.n2[1].name = 'Flux/Flux0'
M2 = np.zeros((MAT1.shape[0], d.n2[0].pt, d.n3.pt))
M3 = np.zeros((MAT1.shape[0], d.n2[1].pt, d.n3.pt))
M3 = MAT1[:, :d.n2[0].pt, :]
M2 = MAT1[:, d.n2[0].pt-1:, :]
header2 = make_header(d.n3, d.n2[0], d.n1, meas_data=('a.u.'))
header1 = make_header(d.n3, d.n2[1], d.n1, meas_data=('a.u.'))
savemtx('mtx_out//' + filein + '.mtx', M3, header=header1)
savemtx('mtx_out//' + filein + '2' + '.mtx', M2, header=header2)
'''
| gpl-2.0 |
squall1988/cuda-convnet2 | convdata.py | 174 | 14675 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
    """Worker thread that decodes one batch of JPEG-compressed images.

    The heavy lifting (JPEG decode plus crop/multiview handling) is done in
    native code via ``dp.convnet.libmodel.decodeJpeg``; the decoded batch is
    appended to ``list_out`` for the owning data provider to pick up.
    """
    def __init__(self, dp, batch_num, label_offset, list_out):
        Thread.__init__(self)
        self.list_out = list_out          # shared output list (producer/consumer handoff)
        self.label_offset = label_offset  # offset added to every raw label index
        self.dp = dp                      # owning data provider
        self.batch_num = batch_num        # which batch to load
    @staticmethod
    def load_jpeg_batch(rawdics, dp, label_offset):
        # Accept either a single raw dictionary or a list of them.
        if type(rawdics) != list:
            rawdics = [rawdics]
        nc_total = sum(len(r['data']) for r in rawdics)
        jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
        labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))
        img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
        lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
        # Native decoder fills img_mat in place (handles crops / multiview copies).
        dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
        # labvec: one randomly chosen label per case (-1 when unlabeled),
        # tiled across the data_mult augmented copies of each case.
        lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult,1))
        # labmat: multi-hot indicator matrix over all labels of each case.
        for c in xrange(nc_total):
            lab_mat[c, [z + label_offset for z in labels[c]]] = 1
        lab_mat = n.tile(lab_mat, (dp.data_mult, 1))
        return {'data': img_mat[:nc_total * dp.data_mult,:],
                'labvec': lab_vec[:nc_total * dp.data_mult,:],
                'labmat': lab_mat[:nc_total * dp.data_mult,:]}
    def run(self):
        # Fetch the raw (still JPEG-compressed) batch, decode it, and hand
        # the result back through the shared output list.
        rawdics = self.dp.get_batch(self.batch_num)
        p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
                                                  self.dp,
                                                  self.label_offset)
        self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
    """Background worker that pre-computes PCA-aligned color noise.

    Draws ``num_noise`` Gaussian samples, scales them by the per-component
    PCA standard deviations, projects them onto the PCA eigenvectors, and
    appends the resulting (num_noise, 3) array to ``list_out``.
    """
    def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
        Thread.__init__(self)
        self.pca_stdevs, self.pca_vecs = pca_stdevs, pca_vecs
        self.num_noise = num_noise
        self.list_out = list_out
    def run(self):
        # Gaussian draws in PCA space, scaled per component, then rotated
        # back into RGB space.
        gauss = nr.randn(self.num_noise, 3).astype(n.single)
        scaled = gauss * self.pca_stdevs.T
        self.list_out.append(n.dot(scaled, self.pca_vecs.T))
class ImageDataProvider(LabeledDataProvider):
    """Double-buffered JPEG data provider with asynchronous batch loading.

    A JPEGBatchLoaderThread decodes the next batch in the background while
    the current one is consumed, and a ColorNoiseMakerThread pre-computes
    PCA-aligned color noise (AlexNet-style color augmentation).
    """
    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
        LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
        self.data_mean = self.batch_meta['data_mean'].astype(n.single)
        self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
        self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
        self.color_noise_coeff = dp_params['color_noise']
        self.num_colors = 3
        self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
        self.mini = dp_params['minibatch_size']
        # Crop size; falls back to the full image when no inner_size given.
        self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
        self.inner_pixels = self.inner_size **2
        # NOTE(review): Python-2 integer division assumed here.
        self.border_size = (self.img_size - self.inner_size) / 2
        self.multiview = dp_params['multiview_test'] and test
        # Multiview testing uses 5 crops x 2 mirrorings per case.
        self.num_views = 5*2
        self.data_mult = self.num_views if self.multiview else 1
        self.batch_size = self.batch_meta['batch_size']
        self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
        self.scalar_mean = dp_params['scalar_mean']
        # Maintain pointers to previously-returned data matrices so they don't get garbage collected.
        self.data = [None, None] # These are pointers to previously-returned data matrices
        self.loader_thread, self.color_noise_thread = None, None
        self.convnet = dp_params['convnet']
        self.num_noise = self.batch_size
        self.batches_generated, self.loaders_started = 0, 0
        # Mean image cropped to the inner region, flattened to a row vector.
        self.data_mean_crop = self.data_mean.reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((1,3*self.inner_size**2))
        if self.scalar_mean >= 0:
            # A non-negative scalar_mean overrides the per-pixel mean image.
            self.data_mean_crop = self.scalar_mean
    def showimg(self, img):
        # Debug helper: display one flattened (3 * size * size,) image.
        from matplotlib import pylab as pl
        pixels = img.shape[0] / 3
        size = int(sqrt(pixels))
        img = img.reshape((3,size,size)).swapaxes(0,2).swapaxes(0,1)
        pl.imshow(img, interpolation='nearest')
        pl.show()
    def get_data_dims(self, idx=0):
        # idx 0: pixel data, idx 2: multi-hot label matrix, otherwise scalar label.
        if idx == 0:
            return self.inner_size**2 * 3
        if idx == 2:
            return self.get_num_classes()
        return 1
    def start_loader(self, batch_idx):
        # Kick off background decoding of batch `batch_idx` into self.load_data.
        self.load_data = []
        self.loader_thread = JPEGBatchLoaderThread(self,
                                                   self.batch_range[batch_idx],
                                                   self.label_offset,
                                                   self.load_data)
        self.loader_thread.start()
    def start_color_noise_maker(self):
        # Launch a background color-noise generator; returns the list the
        # thread will append its result to.
        color_noise_list = []
        self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
        self.color_noise_thread.start()
        return color_noise_list
    def set_labels(self, datadic):
        # No-op hook; labels are produced by the loader thread.
        pass
    def get_data_from_loader(self):
        # Wait for the in-flight loader (starting one on first call), take
        # its batch, and immediately begin loading the next batch.
        if self.loader_thread is None:
            self.start_loader(self.batch_idx)
            self.loader_thread.join()
            self.data[self.d_idx] = self.load_data[0]
            self.start_loader(self.get_next_batch_idx())
        else:
            # Set the argument to join to 0 to re-enable batch reuse
            self.loader_thread.join()
            if not self.loader_thread.is_alive():
                self.data[self.d_idx] = self.load_data[0]
                self.start_loader(self.get_next_batch_idx())
            #else:
            #    print "Re-using batch"
        self.advance_batch()
    def add_color_noise(self):
        # At this point the data already has 0 mean.
        # So I'm going to add noise to it, but I'm also going to scale down
        # the original data. This is so that the overall scale of the training
        # data doesn't become too different from the test data.
        s = self.data[self.d_idx]['data'].shape
        cropped_size = self.get_data_dims(0) / 3
        ncases = s[0]
        if self.color_noise_thread is None:
            self.color_noise_list = self.start_color_noise_maker()
            self.color_noise_thread.join()
            self.color_noise = self.color_noise_list[0]
            self.color_noise_list = self.start_color_noise_maker()
        else:
            self.color_noise_thread.join(0)
            if not self.color_noise_thread.is_alive():
                self.color_noise = self.color_noise_list[0]
                self.color_noise_list = self.start_color_noise_maker()
        self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases*3, cropped_size))
        self.color_noise = self.color_noise[:ncases,:].reshape((3*ncases, 1))
        self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
        self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3* cropped_size))
        self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff) # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.
    def get_next_batch(self):
        # Alternate between the two data buffers so the caller's previous
        # batch stays valid while the next one is filled.
        self.d_idx = self.batches_generated % 2
        epoch, batchnum = self.curr_epoch, self.curr_batchnum
        self.get_data_from_loader()
        # Subtract mean
        self.data[self.d_idx]['data'] -= self.data_mean_crop
        if self.color_noise_coeff > 0 and not self.test:
            self.add_color_noise()
        self.batches_generated += 1
        return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]
    # Takes as input an array returned by get_next_batch
    # Returns a (numCases, imgSize, imgSize, 3) array which can be
    # fed to pylab for plotting.
    # This is used by shownet.py to plot test case predictions.
    def get_plottable_data(self, data, add_mean=True):
        # NOTE(review): both branches of this conditional are identical;
        # the f_contiguous/scalar_mean distinction appears to be vestigial.
        mean = self.data_mean_crop.reshape((data.shape[0],1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0],1))
        return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
    """Data provider for CIFAR-style pickled batches of 32x32 RGB images.

    All batches are loaded into memory up front; cropping (random during
    training, center or 9-view during testing) happens per returned batch.
    """
    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
        LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
        self.img_size = 32
        self.num_colors = 3
        self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
        # NOTE(review): Python-2 integer division assumed here.
        self.border_size = (self.img_size - self.inner_size) / 2
        self.multiview = dp_params['multiview_test'] and test
        # 3x3 grid of crop positions used for multiview testing.
        self.num_views = 9
        self.scalar_mean = dp_params['scalar_mean']
        self.data_mult = self.num_views if self.multiview else 1
        self.data_dic = []
        # Load every batch in the range into memory, tiling labels across
        # the multiview copies and pre-subtracting the scalar mean.
        for i in batch_range:
            self.data_dic += [unpickle(self.get_data_file_name(i))]
            self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
            self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
            self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')
        # Two alternating output buffers so the caller's previous batch
        # stays valid while the next one is cropped.
        self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]
        self.batches_generated = 0
        # Mean image restricted to the inner crop, as a column vector.
        self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
    def get_next_batch(self):
        epoch, batchnum = self.curr_epoch, self.curr_batchnum
        self.advance_batch()
        bidx = batchnum - self.batch_range[0]
        cropped = self.cropped_data[self.batches_generated % 2]
        self.__trim_borders(self.data_dic[bidx]['data'], cropped)
        cropped -= self.data_mean
        self.batches_generated += 1
        return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]
    def get_data_dims(self, idx=0):
        # idx 0: pixel data dimensionality; anything else: scalar label.
        return self.inner_size**2 * self.num_colors if idx == 0 else 1
    # Takes as input an array returned by get_next_batch
    # Returns a (numCases, imgSize, imgSize, 3) array which can be
    # fed to pylab for plotting.
    # This is used by shownet.py to plot test case predictions.
    def get_plottable_data(self, data):
        return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1,3).swapaxes(1,2) / 255.0, dtype=n.single)
    def __trim_borders(self, x, target):
        # Crop each case from (img_size, img_size) down to the inner size:
        # random position + random mirror when training, fixed crops when
        # testing (center crop, or a 3x3 grid of crops for multiview).
        y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])
        if self.test: # don't need to loop over cases
            if self.multiview:
                start_positions = [(0,0), (0, self.border_size), (0, self.border_size*2),
                                   (self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size*2),
                                   (self.border_size*2, 0), (self.border_size*2, self.border_size), (self.border_size*2, self.border_size*2)]
                end_positions = [(sy+self.inner_size, sx+self.inner_size) for (sy,sx) in start_positions]
                for i in xrange(self.num_views):
                    target[:,i * x.shape[1]:(i+1)* x.shape[1]] = y[:,start_positions[i][0]:end_positions[i][0],start_positions[i][1]:end_positions[i][1],:].reshape((self.get_data_dims(),x.shape[1]))
            else:
                pic = y[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size, :] # just take the center for now
                target[:,:] = pic.reshape((self.get_data_dims(), x.shape[1]))
        else:
            for c in xrange(x.shape[1]): # loop over cases
                startY, startX = nr.randint(0,self.border_size*2 + 1), nr.randint(0,self.border_size*2 + 1)
                endY, endX = startY + self.inner_size, startX + self.inner_size
                pic = y[:,startY:endY,startX:endX, c]
                if nr.randint(2) == 0: # also flip the image with 50% probability
                    pic = pic[:,:,::-1]
                target[:,c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
def __init__(self, data_dim):
LabeledDummyDataProvider.__init__(self, data_dim)
self.img_size = int(sqrt(data_dim/3))
def get_next_batch(self):
epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
dic = {'data': dic[0], 'labels': dic[1]}
print dic['data'].shape, dic['labels'].shape
return epoch, batchnum, [dic['data'], dic['labels']]
# Returns the dimensionality of the two data matrices returned by get_next_batch
def get_data_dims(self, idx=0):
return self.batch_meta['num_vis'] if idx == 0 else 1
| apache-2.0 |
Marcello-Sega/pytim | pytim/observables/correlator.py | 2 | 15230 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
""" Module: Correlator
==================
"""
from __future__ import print_function
import numpy as np
from pytim import utilities
from MDAnalysis.core.groups import Atom, AtomGroup, Residue, ResidueGroup
class Correlator(object):
""" Computes the (self) correlation of an observable (scalar or vector)
:param Observable observable: compute the autocorrelation of this observable.
If the observable is None and the reference
is not, the survival probability is computed.
:param bool normalize: normalize the correlation to 1 at :math:`t=0`
:param AtomGroup reference: if the group passed to the sample() function
changes its composition along the trajectory
(such as a layer group), a reference group that
includes all atoms that could appear in the
variable group must be passed, in order to
provide a proper normalization. See the example
below.
Example:
>>> import pytim
>>> import MDAnalysis as mda
>>> import numpy as np
>>> from pytim.datafiles import WATERSMALL_GRO
>>> from pytim.utilities import lap
>>> # tmpdir here is specified only for travis
>>> WATERSMALL_TRR = pytim.datafiles.pytim_data.fetch('WATERSMALL_LONG_TRR',tmpdir='./') # doctest:+ELLIPSIS
checking presence of a cached copy...
>>> u = mda.Universe(WATERSMALL_GRO,WATERSMALL_TRR)
>>> g = u.select_atoms('name OW')
>>> velocity = pytim.observables.Velocity()
>>> corr = pytim.observables.Correlator(observable=velocity)
>>> for t in u.trajectory[1:]:
... corr.sample(g)
>>> vacf = corr.correlation()
This produces the following (steps of 1 fs):
.. plot::
import pytim
import MDAnalysis as mda
import numpy as np
from pytim.datafiles import WATERSMALL_GRO
from pytim.utilities import lap
WATERSMALL_TRR=pytim.datafiles.pytim_data.fetch('WATERSMALL_LONG_TRR')
u = mda.Universe(WATERSMALL_GRO,WATERSMALL_TRR)
g = u.select_atoms('name OW')
velocity = pytim.observables.Velocity()
corr = pytim.observables.Correlator(observable=velocity)
for t in u.trajectory[1:]:
corr.sample(g)
vacf = corr.correlation()
from matplotlib import pyplot as plt
plt.plot(vacf[:1000])
plt.plot([0]*1000)
plt.show()
In order to compute the correlation for variable groups, one should proceed
as follows:
>>> corr = pytim.observables.Correlator(observable=velocity,reference=g)
>>> # notice the molecular=False switch, in order for the
>>> # layer group to be made of oxygen atoms only and match
>>> # the reference group
>>> inter = pytim.ITIM(u,group=g,alpha=2.0,molecular=False)
>>> # example only: sample longer for smooth results
>>> for t in u.trajectory[1:10]:
... corr.sample(inter.atoms)
>>> layer_vacf = corr.correlation()
In order to compute the survival probability of some atoms in a layer, it
is possible to pass observable=None together with the reference group:
>>> corr = pytim.observables.Correlator(observable=None, reference = g)
>>> inter = pytim.ITIM(u,group=g,alpha=2.0, molecular=False)
>>> # example only: sample longer for smooth results
>>> for t in u.trajectory[1:10]:
... corr.sample(inter.atoms)
>>> survival = corr.correlation()
"""
def __init__(self, universe=None, observable=None, reference=None):
self.name = self.__class__.__name__
self.observable, self.reference = observable, reference
self.timeseries, self.maskseries = [], []
self.shape = None
self.masked = False
if self.reference is not None and self.observable is not None:
self.masked = True
if reference is not None:
self._init_intermittent()
elif observable is None:
raise RuntimeError(
self.name + ': specity at least an observable or the reference'
)
def _init_intermittent(self):
if self.observable is not None:
self.reference_obs = self.observable.compute(self.reference) * 0.0
else:
self.reference_obs = np.zeros(len(self.reference), dtype=np.double)
if len(self.reference_obs.shape) > 2:
raise RuntimeError(
self.name + ' works only with scalar and vectors')
def sample(self, group):
""" Sample the timeseries for the autocorrelation function
:parameter AtomGroup group: compute the observable using this group
"""
# can be intermittent or continuous:
if self.reference is not None:
sampled = self._sample_intermittent(group)
else:
if self.observable is None:
RuntimeError(
'Cannot compute survival probability without a reference')
sampled = self.observable.compute(group)
self.timeseries.append(list(sampled.flatten()))
if self.shape is None:
self.shape = sampled.shape
def _sample_intermittent(self, group):
# we need to collect also the residence
# function
# the residence function (1 if in the reference group, 0 otherwise)
mask = np.isin(self.reference, group)
# append the residence function to its timeseries
self.maskseries.append(list(mask))
if self.observable is not None:
# this copies a vector of zeros with the correct shape
sampled = self.reference_obs.copy()
obs = self.observable.compute(group)
sampled[np.where(mask)] = obs
self.timeseries.append(list(sampled.flatten()))
else:
self.timeseries = self.maskseries
if self.shape is None:
self.shape = (1, )
sampled = mask
return sampled
def correlation(self, normalized=True, continuous=True):
""" Calculate the autocorrelation from the sampled data
:parameter bool normalized: normalize the correlation function to:
its zero-time value for regular
correlations; to the average of the
characteristic function for the
survival probability.
:parameter bool continuous: applies only when a reference group has
been specified: if True (default) the
contribution of a particle at time lag
:math:`\\tau=t_1-t_0` is considered
only if the particle did not leave the
reference group between :math:`t_0` and
:math:`t_1`. If False, the intermittent
correlation is calculated, and the
above restriction is released.
Example:
>>> # We build a fake trajectory to test the various options:
>>> import MDAnalysis as mda
>>> import pytim
>>> import numpy as np
>>> from pytim.datafiles import WATER_GRO
>>> from pytim.observables import Correlator, Velocity
>>> np.set_printoptions(suppress=True,precision=3)
>>>
>>> u = mda.Universe(WATER_GRO)
>>> g = u.atoms[0:2]
>>> g.velocities*=0.0
>>> g.velocities+=1.0
>>>
>>> # velocity autocorrelation along x, variable group
>>> vv = Correlator(observable=Velocity('x'), reference=g)
>>> nn = Correlator(reference=g) # survival probability in group g
>>>
>>> for c in [vv,nn]:
... c.sample(g) # t=0
... c.sample(g) # t=1
... c.sample(g[:1]) # t=2, exclude the second particle
... g.velocities /= 2. # from now on v=0.5
... c.sample(g) # t=3
>>>
The timeseries sampled can be accessed using:
>>> print(vv.timeseries) # rows refer to time, columns to particle
[[1.0, 1.0], [1.0, 1.0], [1.0, 0.0], [0.5, 0.5]]
>>>
>>> print(nn.timeseries)
[[True, True], [True, True], [True, False], [True, True]]
>>>
Note that the average of the characteristic function
:math:`h(t)` is done over all trajectories, including those
that start with :math:`h=0`.
The correlation :math:`\\langle h(t)h(0) \\rangle` is divided
by the average :math:`\\langle h \\rangle` computed over all
trajectores that extend up to a time lag :math:`t`. The
`normalize` switch has no effect.
>>> # normalized, continuous
>>> corr = nn.correlation()
>>> print (np.allclose(corr, [ 7./7, 4./5, 2./4, 1./2]))
True
>>> # normalized, intermittent
>>> corr = nn.correlation(continuous=False)
>>> print (np.allclose(corr, [ 7./7, 4./5, 3./4, 2./2 ]))
True
The autocorrelation functions are calculated by taking
into account in the average only those trajectory that
start with :math:`h=1` (i.e., which start within the reference
group). The normalization is done by dividing the
correlation at time lag :math:`t` by its value at time lag 0
computed over all trajectories that extend up to time
lag :math:`t` and do not start with :math:`h=0`.
>>> # not normalizd, intermittent
>>> corr = vv.correlation(normalized=False,continuous=False)
>>> c0 = (1+1+1+0.25+1+1+0.25)/7
>>> c1 = (1+1+0.5+1)/5 ; c2 = (1+0.5+0.5)/4 ; c3 = (0.5+0.5)/2
>>> print (np.allclose(corr, [ c0, c1, c2, c3]))
True
>>> # check normalization
>>> np.all(vv.correlation(continuous=False) == corr/corr[0])
True
>>> # not normalizd, continuous
>>> corr = vv.correlation(normalized=False,continuous=True)
>>> c0 = (1+1+1+0.25+1+1+0.25)/7
>>> c1 = (1+1+0.5+1)/5 ; c2 = (1+0.5)/4 ; c3 = (0.5+0.)/2
>>> print (np.allclose(corr, [ c0, c1, c2, c3]))
True
>>> # check normalization
>>> np.all(vv.correlation(continuous=True) == corr/corr[0])
True
"""
intermittent = not continuous
self.dim = self._determine_dimension()
# the standard correlation
if self.reference is None:
ts = np.asarray(self.timeseries)
corr = utilities.correlate(ts)
corr = np.average(corr, axis=1)
if normalized is True:
corr /= corr[0]
return corr
# prepare the mask for the intermittent/continuous cases
if intermittent is True:
ms = np.asarray(self.maskseries, dtype=np.double)
else: # we add Falses at the begining and at the end to ease the
# splitting in sub-trajectories
falses = [[False] * len(self.maskseries[0])]
ms = np.asarray(falses + self.maskseries + falses)
# compute the survival probabily
if self.observable is None:
return self._survival_probability(ms, normalized, intermittent)
# compute the autocorrelation function
else:
ts = np.asarray(self.timeseries)
return self._autocorrelation(ts, ms, normalized, intermittent)
def _autocorrelation(self, ts, ms, normalized, intermittent):
if intermittent is True:
corr = self._autocorrelation_intermittent(ts, ms)
else:
corr = self._autocorrelation_continuous(ts, ms)
if normalized is True:
corr = corr / corr[0]
return corr
def _survival_probability(self, ms, normalized, intermittent):
if intermittent is True:
corr = self._survival_intermittent(ms)
else:
corr = self._survival_continuous(ms)
return corr
def _survival_intermittent(self, ms):
corr = np.sum(utilities.correlate(ms, _normalize=False), axis=1)
return corr / np.sum(np.cumsum(self.timeseries, axis=0), axis=1)[::-1]
@staticmethod
def _find_edges(mask):
return np.where(mask[:-1] != mask[1:])[0]
def _survival_continuous(self, ms):
n_part = len(ms[0])
corr = np.zeros((self.nseries, n_part))
counting = (1. + np.arange(len(self.timeseries)))
for part in range(n_part):
edges = self._find_edges(ms[::, part])
deltat = edges[1::2] - edges[0::2]
# for each of the disconnected segments:
for n, dt in enumerate(deltat):
# no need to compute the correlation, we know what it is
corr[0:dt, part] += counting[:dt][::-1]
corr = np.sum(corr, axis=1)
return corr / np.sum(np.cumsum(self.timeseries, axis=0), axis=1)[::-1]
def _autocorrelation_intermittent(self, ts, ms):
dim = self.dim
corr = ts.copy()
for xyz in range(dim):
corr[:, xyz::dim] = utilities.correlate(
ts[:, xyz::dim] * ms, _normalize=False)
corr = np.sum(
corr, axis=1) / np.sum(
np.cumsum(ms, axis=0), axis=1)[::-1]
return corr
def _autocorrelation_continuous(self, ts, ms):
dim = self.dim
n_part = len(ms[0])
corr = np.zeros((int(ts.shape[0]), int(ts.shape[1]) // dim))
for part in range(n_part):
edges = self._find_edges(ms[::, part])
deltat = edges[1::2] - edges[0::2]
for n, dt in enumerate(
deltat): # for each of the disconnected segments
t1, t2 = edges[2 * n], edges[2 * n + 1]
i1, i2 = dim * part, dim * (part + 1)
corr[0:dt, part] += np.sum(
utilities.correlate(ts[t1:t2, i1:i2], _normalize=False),
axis=1)
return np.sum(
corr, axis=1) / np.sum(
np.cumsum(self.maskseries, axis=0)[::-1], axis=1)
def _determine_dimension(self):
self.nseries = max(len(self.timeseries), len(self.maskseries))
if len(self.shape) == 1:
dim = 1
elif len(self.shape) == 2:
dim = self.shape[1]
else:
raise RuntimeError(
"Correlations of tensorial quantites not allowed in " +
self.name)
return dim
| gpl-3.0 |
saiwing-yeung/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/io/test_html.py | 1 | 39352 | from functools import partial
from importlib import reload
from io import BytesIO, StringIO
import os
import re
import threading
import numpy as np
from numpy.random import rand
import pytest
from pandas.compat import is_platform_windows
from pandas.errors import ParserError
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas.util.testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf, network
from pandas.io.common import URLError, file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
HERE = os.path.dirname(__file__)
@pytest.fixture(
params=[
"chinese_utf-16.html",
"chinese_utf-32.html",
"chinese_utf-8.html",
"letz_latin1.html",
]
)
def html_encoding_file(request, datapath):
"""Parametrized fixture for HTML encoding test filenames."""
return datapath("io", "data", "html_encoding", request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), (
"lists are not of equal size "
"len(list1) == {0}, "
"len(list2) == {1}".format(len(list1), len(list2))
)
msg = "not all list elements are DataFrames"
both_frames = all(
map(
lambda x, y: isinstance(x, DataFrame) and isinstance(y, DataFrame),
list1,
list2,
)
)
assert both_frames, msg
for frame_i, frame_j in zip(list1, list2):
tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
assert not frame_i.empty, "frames are both empty"
@td.skip_if_no("bs4")
def test_bs4_version_fails(monkeypatch, datapath):
import bs4
monkeypatch.setattr(bs4, "__version__", "4.2")
with pytest.raises(ImportError, match="Pandas requires version"):
read_html(datapath("io", "data", "spam.html"), flavor="bs4")
def test_invalid_flavor():
url = "google.com"
flavor = "invalid flavor"
msg = r"\{" + flavor + r"\} is not a valid set of flavors"
with pytest.raises(ValueError, match=msg):
read_html(url, "google", flavor=flavor)
@td.skip_if_no("bs4")
@td.skip_if_no("lxml")
def test_same_ordering(datapath):
filename = datapath("io", "data", "valid_markup.html")
dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"])
assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize(
"flavor",
[
pytest.param("bs4", marks=td.skip_if_no("lxml")),
pytest.param("lxml", marks=td.skip_if_no("lxml")),
],
scope="class",
)
class TestReadHtml:
@pytest.fixture(autouse=True)
def set_files(self, datapath):
self.spam_data = datapath("io", "data", "spam.html")
self.spam_data_kwargs = {}
self.spam_data_kwargs["encoding"] = "UTF-8"
self.banklist_data = datapath("io", "data", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor, request):
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
df = (
mkdf(
4,
3,
data_gen_f=lambda *args: rand(),
c_idx_names=False,
r_idx_names=False,
)
.applymap("{0:.3f}".format)
.astype(float)
)
out = df.to_html()
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@network
def test_banklist_url(self):
url = "http://www.fdic.gov/bank/individual/failed/banklist.html"
df1 = self.read_html(
url, "First Federal Bank of Florida", attrs={"id": "table"}
)
df2 = self.read_html(url, "Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
@network
def test_spam_url(self):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/spam.html"
)
df1 = self.read_html(url, ".*Water.*")
df2 = self.read_html(url, "Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(self.banklist_data, ".*Florida.*", attrs={"id": "table"})
df2 = self.read_html(self.banklist_data, "Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
def test_spam(self):
df1 = self.read_html(self.spam_data, ".*Water.*")
df2 = self.read_html(self.spam_data, "Unit")
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == "Proximates"
assert df1[0].columns[0] == "Nutrient"
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={"id": "table"})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, ".*Water.*", header=2)[0]
assert df.columns[0] == "Proximates"
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, "Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_xrange(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=range(2))[0]
df2 = self.read_html(self.spam_data, "Unit", skiprows=range(2))[0]
tm.assert_frame_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=[1, 2])
df2 = self.read_html(self.spam_data, "Unit", skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows={1, 2})
df2 = self.read_html(self.spam_data, "Unit", skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, "Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=slice(2))
df2 = self.read_html(self.spam_data, "Unit", skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, "Unit", skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, ".*Water.*", skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, "Unit", skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
with pytest.raises(TypeError, match=("is not a valid type for skipping rows")):
self.read_html(self.spam_data, ".*Water.*", skiprows="asdf")
def test_index(self):
df1 = self.read_html(self.spam_data, ".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, "Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, ".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, "Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
df1 = self.read_html(self.spam_data, ".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, "Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, ".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, "Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, ".*Water.*")
df2 = self.read_html(data2, "Unit")
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, ".*Water.*")
df2 = self.read_html(data, "Unit")
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, ".*Water.*")
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, "Unit")
assert_framelist_equal(df1, df2)
@network
def test_bad_url_protocol(self):
with pytest.raises(URLError):
self.read_html("git://github.com", match=".*Water.*")
@network
@pytest.mark.slow
def test_invalid_url(self):
try:
with pytest.raises(URLError):
self.read_html("http://www.a23950sdfa908sd.com", match=".*Water.*")
except ValueError as e:
assert "No tables found" in str(e)
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)), "First", attrs={"id": "table"}
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with pytest.raises(ValueError, match="No tables found"):
self.read_html(
url, "First Federal Bank of Florida", attrs={"id": "tasdfable"}
)
def _bank_data(self, *args, **kwargs):
return self.read_html(
self.banklist_data, "Metcalf", attrs={"id": "table"}, *args, **kwargs
)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile("Florida")),
attrs={"id": "table"},
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
msg = r"\(you passed a negative value\)"
with pytest.raises(ValueError, match=msg):
self.read_html(self.spam_data, "Water", skiprows=-1)
@network
def test_multiple_matches(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
assert len(dfs) > 1
@network
def test_python_docs_table(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(["Repo", "What"])
@pytest.mark.slow
def test_thousands_macau_stats(self, datapath):
all_non_nan_table_index = -2
macau_data = datapath("io", "data", "macau.html")
dfs = self.read_html(macau_data, index_col=0, attrs={"class": "style1"})
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.items())
@pytest.mark.slow
def test_thousands_macau_index_col(self, datapath):
all_non_nan_table_index = -2
macau_data = datapath("io", "data", "macau.html")
dfs = self.read_html(macau_data, index_col=0, header=0)
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.items())
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
result = self.read_html(
"""
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
"""
)
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html(
"""<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data={"Header": "first"}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(
data=[["Ukraine", "Odessa", 1944]],
columns=["Country", "Municipality", "Year"],
)
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot, containing td or th.
Ignores empty tfoot
"""
data_template = """<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>"""
expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"])
expected2 = DataFrame(
data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"]
)
data1 = data_template.format(footer="")
data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html(
"""
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame([["text", 1944]], columns=("S", "I"))
tm.assert_frame_equal(result, expected)
def test_nyse_wsj_commas_table(self, datapath):
data = datapath("io", "data", "nyse_wsj.html")
df = self.read_html(data, index_col=0, header=0, attrs={"class": "mdcTable"})[0]
expected = Index(
[
"Issue(Roll over for charts and headlines)",
"Volume",
"Price",
"Chg",
"% Chg",
]
)
nrows = 100
assert df.shape[0] == nrows
tm.assert_index_equal(df.columns, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
    """Parsed banklist HTML table must match the CSV ground truth after
    whitespace/annotation normalization and dtype conversion."""
    from pandas.io.html import _remove_whitespace

    def try_remove_ws(x):
        # Non-string cells (e.g. numbers, NaN) raise AttributeError inside
        # _remove_whitespace; pass those values through unchanged.
        try:
            return _remove_whitespace(x)
        except AttributeError:
            return x

    df = self.read_html(self.banklist_data, "Metcalf", attrs={"id": "table"})[0]
    ground_truth = read_csv(
        datapath("io", "data", "banklist.csv"),
        converters={"Updated Date": Timestamp, "Closing Date": Timestamp},
    )
    assert df.shape == ground_truth.shape
    # Bank names in the HTML carry inline annotations (e.g. "En Espanol")
    # that the CSV ground truth omits; map them to the plain CSV spellings
    # before comparing cell contents.
    old = [
        "First Vietnamese American BankIn Vietnamese",
        "Westernbank Puerto RicoEn Espanol",
        "R-G Premier Bank of Puerto RicoEn Espanol",
        "EurobankEn Espanol",
        "Sanderson State BankEn Espanol",
        "Washington Mutual Bank(Including its subsidiary Washington "
        "Mutual Bank FSB)",
        "Silver State BankEn Espanol",
        "AmTrade International BankEn Espanol",
        "Hamilton Bank, NAEn Espanol",
        "The Citizens Savings BankPioneer Community Bank, Inc.",
    ]
    new = [
        "First Vietnamese American Bank",
        "Westernbank Puerto Rico",
        "R-G Premier Bank of Puerto Rico",
        "Eurobank",
        "Sanderson State Bank",
        "Washington Mutual Bank",
        "Silver State Bank",
        "AmTrade International Bank",
        "Hamilton Bank, NA",
        "The Citizens Savings Bank",
    ]
    dfnew = df.applymap(try_remove_ws).replace(old, new)
    gtnew = ground_truth.applymap(try_remove_ws)
    converted = dfnew._convert(datetime=True, numeric=True)
    date_cols = ["Closing Date", "Updated Date"]
    # coerce=True turns unparseable date strings into NaT so the dtypes
    # line up with the Timestamp-converted CSV columns.
    converted[date_cols] = converted[date_cols]._convert(datetime=True, coerce=True)
    tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
    # Sanity-check the fixture first: the match string has to exist in
    # the raw HTML before read_html can be expected to locate its table.
    match = "Gold Canyon"
    with open(self.banklist_data, "r") as fobj:
        assert match in fobj.read()
    result = self.read_html(
        self.banklist_data, "Gold Canyon", attrs={"id": "table"}
    )[0]
    assert match in result.to_string()
def test_different_number_of_cols(self):
expected = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
result = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
"""
)[0]
expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html(
"""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"]
)
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html(
"""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"]
)
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html(
"""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["C", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({"date": date_range("1/1/2001", periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=["date"], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range("1/1/2001", periods=10))
df = DataFrame(
{
"date": raw_dates.map(lambda x: str(x.date())),
"time": raw_dates.map(lambda x: str(x.time())),
}
)
res = self.read_html(
df.to_html(), parse_dates={"datetime": [1, 2]}, index_col=1
)
newdf = DataFrame({"datetime": raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_computer_sales_page(self, datapath):
    """header=[0, 1] asks for more header rows than this table provides
    and must raise ParserError; header=[1, 2] parses successfully."""
    data = datapath("io", "data", "computer_sales_page.html")
    msg = (
        r"Passed header=\[0,1\] are too many "
        r"rows for this multi_index of columns"
    )
    with pytest.raises(ParserError, match=msg):
        self.read_html(data, header=[0, 1])
    # The path is a plain string, not a consumed stream, so the same
    # `data` can be reused directly — no need to recompute it.
    assert self.read_html(data, header=[1, 2])
def test_wikipedia_states_table(self, datapath):
data = datapath("io", "data", "wikipedia_states.html")
assert os.path.isfile(data), "{data!r} is not a file".format(data=data)
assert os.path.getsize(data), "{data!r} is an empty file".format(data=data)
result = self.read_html(data, "Arizona", header=1)[0]
assert result["sq mi"].dtype == np.dtype("float64")
def test_parser_error_on_empty_header_row(self):
msg = (
r"Passed header=\[0,1\] are too many "
r"rows for this multi_index of columns"
)
with pytest.raises(ParserError, match=msg):
self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""",
header=[0, 1],
)
def test_decimal_rows(self):
# GH 12907
result = self.read_html(
"""<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>""",
decimal="#",
)[0]
expected = DataFrame(data={"Header": 1100.101}, index=[0])
assert result["Header"].dtype == np.dtype("float64")
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
    # GH 6114: booleans are not valid values for ``header`` and must
    # raise TypeError rather than being coerced to 0/1.
    for header in (True, False):
        with pytest.raises(TypeError):
            self.read_html(self.spam_data, header=header)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={"a": str},
)[0]
expected = DataFrame({"a": ["0.763", "0.244"]})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244],
)[0]
expected = DataFrame({"a": [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({"a": ["N/A", "NA"]})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({"a": [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
"""
)[0]
expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(
data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")]
)
expected_df.columns = [
["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"],
]
html = expected_df.to_html(index=False)
html_df = self.read_html(html)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath("io", "data", "valid_markup.html")
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath("io", "data", "banklist.html")
self.read_html(banklist_data, ".*Water.*", flavor=["lxml", "html5lib"])
def test_to_html_timestamp(self):
rng = date_range("2000-01-01", periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert "2000-01-01" in result
@pytest.mark.parametrize(
"displayed_only,exp0,exp1",
[
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])),
],
)
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO(
"""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>"""
)
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
def test_encode(self, html_encoding_file):
    """Raw bytes, a BytesIO and a filename must all decode to the same
    frame when the correct ``encoding`` is given."""
    # Fixture filenames look like "<name>_<encoding>.html"; recover the
    # encoding from the stem after the underscore.
    _, encoding = os.path.splitext(os.path.basename(html_encoding_file))[0].split(
        "_"
    )
    try:
        with open(html_encoding_file, "rb") as fobj:
            from_string = self.read_html(
                fobj.read(), encoding=encoding, index_col=0
            ).pop()
        with open(html_encoding_file, "rb") as fobj:
            from_file_like = self.read_html(
                BytesIO(fobj.read()), encoding=encoding, index_col=0
            ).pop()
        from_filename = self.read_html(
            html_encoding_file, encoding=encoding, index_col=0
        ).pop()
        tm.assert_frame_equal(from_string, from_file_like)
        tm.assert_frame_equal(from_string, from_filename)
    except Exception:
        # seems utf-16/32 fail on windows — skip only that combination,
        # re-raise anything else so real regressions are not hidden.
        if is_platform_windows():
            if "16" in encoding or "32" in encoding:
                pytest.skip()
        raise
def test_parse_failure_unseekable(self):
    # Issue #17975: when the first parser fails on a stream that cannot
    # be rewound, the fallback retry must raise a clear ValueError.
    if self.read_html.keywords.get("flavor") == "lxml":
        pytest.skip("Not applicable for lxml")

    class UnseekableStringIO(StringIO):
        def seekable(self):
            return False

    stream = UnseekableStringIO(
        """
        <table><tr><td>spam<foobr />eggs</td></tr></table>"""
    )
    # First parse succeeds (stream is at position 0).
    assert self.read_html(stream)
    # Second parse cannot rewind the exhausted stream.
    with pytest.raises(ValueError, match="passed a non-rewindable file object"):
        self.read_html(stream)
def test_parse_failure_rewinds(self):
    # Issue #17975: a failed parse must rewind the file object so a
    # fallback parser can retry from the beginning.
    class MockFile:
        """Minimal one-shot readable that tracks whether it was rewound."""

        def __init__(self, data):
            self.data = data
            self.at_end = False

        def read(self, size=None):
            # Yield the whole payload once, then empty until seek().
            if self.at_end:
                return ""
            self.at_end = True
            return self.data

        def seek(self, offset):
            self.at_end = False

        def seekable(self):
            return True

    good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>")
    bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")
    assert self.read_html(good)
    # The malformed tag makes the first parser fail; the rewind lets the
    # fallback parser succeed.
    assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
    # see gh-16928: the lazy parser-import check in pandas.io.html must
    # be safe to run from several threads at once.
    class ErrorThread(threading.Thread):
        """Thread that records any exception raised by its target."""

        def run(self):
            try:
                super().run()
            except Exception as e:
                self.err = e
            else:
                self.err = None

    # force import check by reinitialising global vars in html.py
    reload(pandas.io.html)
    filename = datapath("io", "data", "valid_markup.html")
    helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
    helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
    helper_thread1.start()
    helper_thread2.start()
    # join() blocks until each thread finishes; the original busy-wait
    # (`while is_alive(): pass`) burned a full CPU core for no benefit.
    helper_thread1.join()
    helper_thread2.join()
    assert None is helper_thread1.err is helper_thread2.err
| apache-2.0 |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/mpl_toolkits/axisartist/floating_axes.py | 18 | 22796 | """
An experimental support for curvilinear grid.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
# TODO :
# *. see if tick_iterator method can be simplified by reusing the parent method.
from itertools import chain
from .grid_finder import GridFinder
from .axislines import AxisArtistHelper, GridHelperBase
from .axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from . import grid_helper_curvelinear
class FloatingAxisArtistHelper(grid_helper_curvelinear.FloatingAxisArtistHelper):
    # Thin alias: the curvilinear grid helper already implements everything
    # a floating axis needs; re-exported here under the floating_axes name.
    pass
class FixedAxisArtistHelper(grid_helper_curvelinear.FloatingAxisArtistHelper):
    # Helper for an axis artist pinned to one side ("left", "right",
    # "bottom" or "top") of the curvilinear boundary.  Despite the name it
    # reuses the floating-axis helper, fixed at that side's boundary value.

    def __init__(self, grid_helper, side, nth_coord_ticks=None):
        """
        nth_coord = along which coordinate value varies.
        nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
        """
        value, nth_coord = grid_helper.get_data_boundary(side)  # return v= 0 , nth=1, extremes of the other coordinate.
        super(FixedAxisArtistHelper, self).__init__(grid_helper,
                                                    nth_coord,
                                                    value,
                                                    axis_direction=side,
                                                    )
        #self.grid_helper = grid_helper
        if nth_coord_ticks is None:
            nth_coord_ticks = nth_coord
        self.nth_coord_ticks = nth_coord_ticks

        self.value = value
        self.grid_helper = grid_helper
        self._side = side

    def update_lim(self, axes):
        # Refresh the grid helper's cached grid info for the current view.
        self.grid_helper.update_lim(axes)
        self.grid_info = self.grid_helper.grid_info

    def get_axislabel_pos_angle(self, axes):
        # Place the axis label at the midpoint of this side; derive the
        # label angle from a small finite-difference step along the side.
        extremes = self.grid_info["extremes"]
        if self.nth_coord == 0:
            xx0 = self.value
            yy0 = (extremes[2]+extremes[3])/2.
            dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
        elif self.nth_coord == 1:
            xx0 = (extremes[0]+extremes[1])/2.
            yy0 = self.value
            dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
        grid_finder = self.grid_helper.grid_finder
        xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
        trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
        p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
        # Only return a position when the label falls inside the axes
        # rectangle (axes coordinates run 0..1).
        if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
            xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
            xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
            xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
            return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
        else:
            return None, None

    def get_tick_transform(self, axes):
        # Tick positions are produced directly in display coordinates.
        return IdentityTransform()  #axes.transData

    def get_tick_iterators(self, axes):
        """tick_loc, tick_angle, tick_label, (optionally) tick_label"""
        grid_finder = self.grid_helper.grid_finder

        lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
        lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
        lon_levs, lat_levs = np.asarray(lon_levs), np.asarray(lat_levs)
        # Undo the locator's scale factor (if any) to get data values;
        # dx/dy are the finite-difference steps used for tick angles.
        if lat_factor is not None:
            yy0 = lat_levs / lat_factor
            dy = 0.001 / lat_factor
        else:
            yy0 = lat_levs
            dy = 0.001
        if lon_factor is not None:
            xx0 = lon_levs / lon_factor
            dx = 0.001 / lon_factor
        else:
            xx0 = lon_levs
            dx = 0.001

        _extremes = self.grid_helper._extremes
        xmin, xmax = sorted(_extremes[:2])
        ymin, ymax = sorted(_extremes[2:])
        # Keep only tick values inside the fixed extremes.
        if self.nth_coord == 0:
            mask = (ymin <= yy0) & (yy0 <= ymax)
            yy0 = yy0[mask]
        elif self.nth_coord == 1:
            mask = (xmin <= xx0) & (xx0 <= xmax)
            xx0 = xx0[mask]

        def transform_xy(x, y):
            # Curvilinear data coordinates -> display coordinates.
            x1, y1 = grid_finder.transform_xy(x, y)
            x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
            x2, y2 = x2y2.transpose()
            return x2, y2

        # find angles
        if self.nth_coord == 0:
            xx0 = np.empty_like(yy0)
            xx0.fill(self.value)
            #yy0_ = yy0.copy()
            xx1, yy1 = transform_xy(xx0, yy0)
            # Step inward at the upper boundary so the finite-difference
            # probe points stay inside the extremes.
            xx00 = xx0.copy()
            xx00[xx0+dx>xmax] -= dx
            xx1a, yy1a = transform_xy(xx00, yy0)
            xx1b, yy1b = transform_xy(xx00+dx, yy0)
            yy00 = yy0.copy()
            yy00[yy0+dy>ymax] -= dy
            xx2a, yy2a = transform_xy(xx0, yy00)
            xx2b, yy2b = transform_xy(xx0, yy00+dy)
            labels = self.grid_info["lat_labels"]
            labels = [l for l, m in zip(labels, mask) if m]
        elif self.nth_coord == 1:
            yy0 = np.empty_like(xx0)
            yy0.fill(self.value)
            #xx0_ = xx0.copy()
            xx1, yy1 = transform_xy(xx0, yy0)
            yy00 = yy0.copy()
            yy00[yy0+dy>ymax] -= dy
            xx1a, yy1a = transform_xy(xx0, yy00)
            xx1b, yy1b = transform_xy(xx0, yy00+dy)
            xx00 = xx0.copy()
            xx00[xx0+dx>xmax] -= dx
            xx2a, yy2a = transform_xy(xx00, yy0)
            xx2b, yy2b = transform_xy(xx00+dx, yy0)
            labels = self.grid_info["lon_labels"]
            labels = [l for l, m in zip(labels, mask) if m]

        def f1():
            # Generator of (position, normal-angle, tangent-angle, label)
            # for each tick that lands inside the axes.
            dd = np.arctan2(yy1b-yy1a, xx1b-xx1a)  # angle normal
            dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a)  # angle tangent
            mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.)  # mask where dd1 is not defined
            dd[mm] = dd2[mm]+3.14159/2.
            #dd += 3.14159
            #dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
            trans_tick = self.get_tick_transform(axes)
            tr2ax = trans_tick + axes.transAxes.inverted()
            for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
                c2 = tr2ax.transform_point((x, y))
                delta=0.00001
                if (0. -delta<= c2[0] <= 1.+delta) and \
                   (0. -delta<= c2[1] <= 1.+delta):
                    # Radians -> degrees (3.14159 approximates pi, as in
                    # the original implementation).
                    d1 = d/3.14159*180.
                    d2 = d2/3.14159*180.
                    #_mod = (d2-d1+180)%360
                    #if _mod < 180:
                    #    d1 += 180
                    ##_div, _mod = divmod(d2-d1, 360)
                    yield [x, y], d1, d2, lab
                    #, d2/3.14159*180.+da)

        # Second iterator (minor ticks) is empty.
        return f1(), iter([])

    def get_line_transform(self, axes):
        return axes.transData

    def get_line(self, axes):
        # Return this side's boundary curve as a Path in data coordinates.
        self.update_lim(axes)
        from matplotlib.path import Path
        k, v = dict(left=("lon_lines0", 0),
                    right=("lon_lines0", 1),
                    bottom=("lat_lines0", 0),
                    top=("lat_lines0", 1))[self._side]
        xx, yy = self.grid_info[k][v]
        return Path(list(zip(xx, yy)))
from .grid_finder import ExtremeFinderSimple
class ExtremeFinderFixed(ExtremeFinderSimple):
    """Extreme finder that always reports a fixed set of data limits."""

    def __init__(self, extremes):
        # Fixed (lon_min, lon_max, lat_min, lat_max) reported for every
        # view, regardless of the visible rectangle.
        self._extremes = extremes

    def __call__(self, transform_xy, x1, y1, x2, y2):
        """Return the fixed extremes.

        The view rectangle (x1, y1)-(x2, y2), given in 0-based image
        coordinates, and the transform are ignored.
        """
        return self._extremes
class GridHelperCurveLinear(grid_helper_curvelinear.GridHelperCurveLinear):
    # Grid helper for floating axes: like the curvilinear grid helper but
    # clamped to a fixed set of extremes (via ExtremeFinderFixed), so the
    # grid never extends past the declared data boundary.

    def __init__(self, aux_trans, extremes,
                 grid_locator1=None,
                 grid_locator2=None,
                 tick_formatter1=None,
                 tick_formatter2=None):
        """
        aux_trans : a transform from the source (curved) coordinate to
        target (rectilinear) coordinate. An instance of MPL's Transform
        (inverse transform should be defined) or a tuple of two callable
        objects which defines the transform and its inverse. The callables
        need take two arguments of array of source coordinates and
        should return two target coordinates:
        e.g., x2, y2 = trans(x1, y1)

        extremes : (lon_min, lon_max, lat_min, lat_max) fixed data limits.
        """
        self._old_values = None
        self._extremes = extremes
        extreme_finder = ExtremeFinderFixed(extremes)
        super(GridHelperCurveLinear, self).__init__(aux_trans,
                                                    extreme_finder,
                                                    grid_locator1=grid_locator1,
                                                    grid_locator2=grid_locator2,
                                                    tick_formatter1=tick_formatter1,
                                                    tick_formatter2=tick_formatter2)

    # def update_grid_finder(self, aux_trans=None, **kw):
    #     if aux_trans is not None:
    #         self.grid_finder.update_transform(aux_trans)
    #     self.grid_finder.update(**kw)
    #     self.invalidate()

    # def _update(self, x1, x2, y1, y2):
    #     "bbox in 0-based image coordinates"
    #     # update wcsgrid
    #     if self.valid() and self._old_values == (x1, x2, y1, y2):
    #         return
    #     self._update_grid(x1, y1, x2, y2)
    #     self._old_values = (x1, x2, y1, y2)
    #     self._force_update = False

    def get_data_boundary(self, side):
        """
        return v= 0 , nth=1
        """
        # Map a side name to (boundary value, varying coordinate index).
        lon1, lon2, lat1, lat2 = self._extremes
        return dict(left=(lon1, 0),
                    right=(lon2, 0),
                    bottom=(lat1, 1),
                    top=(lat2, 1))[side]

    def new_fixed_axis(self, loc,
                       nth_coord=None,
                       axis_direction=None,
                       offset=None,
                       axes=None):
        # Create an AxisArtist fixed on boundary side `loc`, clipped to
        # the axes bounding box.  `offset` is accepted for interface
        # compatibility but unused here.
        if axes is None:
            axes = self.axes
        if axis_direction is None:
            axis_direction = loc
        _helper = FixedAxisArtistHelper(self, loc,
                                        nth_coord_ticks=nth_coord)
        axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
        axisline.line.set_clip_on(True)
        axisline.line.set_clip_box(axisline.axes.bbox)
        return axisline

    # new_floating_axis will inherit the grid_helper's extremes.

    # def new_floating_axis(self, nth_coord,
    #                       value,
    #                       axes=None,
    #                       axis_direction="bottom"
    #                       ):
    #     axis = super(GridHelperCurveLinear,
    #                  self).new_floating_axis(nth_coord,
    #                                          value, axes=axes,
    #                                          axis_direction=axis_direction)
    #     # set extreme values of the axis helper
    #     if nth_coord == 1:
    #         axis.get_helper().set_extremes(*self._extremes[:2])
    #     elif nth_coord == 0:
    #         axis.get_helper().set_extremes(*self._extremes[2:])
    #     return axis

    def _update_grid(self, x1, y1, x2, y2):
        # Recompute grid lines, tick levels and labels for the current
        # view rectangle, caching everything in self.grid_info.
        #self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
        if self.grid_info is None:
            self.grid_info = dict()
        grid_info = self.grid_info
        grid_finder = self.grid_finder
        extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
                                              x1, y1, x2, y2)
        lon_min, lon_max = sorted(extremes[:2])
        lat_min, lat_max = sorted(extremes[2:])
        lon_levs, lon_n, lon_factor = \
            grid_finder.grid_locator1(lon_min, lon_max)
        lat_levs, lat_n, lat_factor = \
            grid_finder.grid_locator2(lat_min, lat_max)
        grid_info["extremes"] = lon_min, lon_max, lat_min, lat_max  #extremes
        grid_info["lon_info"] = lon_levs, lon_n, lon_factor
        grid_info["lat_info"] = lat_levs, lat_n, lat_factor
        grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
                                                              lon_factor,
                                                              lon_levs)
        grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
                                                              lat_factor,
                                                              lat_levs)
        # Divide out the locator scale factor (if any) to obtain data
        # values for the tick levels.
        if lon_factor is None:
            lon_values = np.asarray(lon_levs[:lon_n])
        else:
            lon_values = np.asarray(lon_levs[:lon_n]/lon_factor)
        if lat_factor is None:
            lat_values = np.asarray(lat_levs[:lat_n])
        else:
            lat_values = np.asarray(lat_levs[:lat_n]/lat_factor)
        # Interior grid lines only (strictly inside the extremes).
        lon_values0 = lon_values[(lon_min<lon_values) & (lon_values<lon_max)]
        lat_values0 = lat_values[(lat_min<lat_values) & (lat_values<lat_max)]
        lon_lines, lat_lines = grid_finder._get_raw_grid_lines(lon_values0,
                                                               lat_values0,
                                                               lon_min, lon_max,
                                                               lat_min, lat_max)
        grid_info["lon_lines"] = lon_lines
        grid_info["lat_lines"] = lat_lines
        # Boundary lines at the extremes themselves; used by get_line().
        lon_lines, lat_lines = grid_finder._get_raw_grid_lines(extremes[:2],
                                                               extremes[2:],
                                                               *extremes)
        #lon_min, lon_max,
        # lat_min, lat_max)
        grid_info["lon_lines0"] = lon_lines
        grid_info["lat_lines0"] = lat_lines

    def get_gridlines(self, which="major", axis="both"):
        # Collect interior grid lines for the requested axis/axes.
        # Note: `which` is accepted for interface compatibility but the
        # same (major) lines are returned regardless of its value.
        grid_lines = []
        if axis in ["both", "x"]:
            for gl in self.grid_info["lon_lines"]:
                grid_lines.extend([gl])
        if axis in ["both", "y"]:
            for gl in self.grid_info["lat_lines"]:
                grid_lines.extend([gl])
        return grid_lines

    def get_boundary(self):
        """
        return Nx2 array of x,y coordinate of the boundary
        """
        # Walk the rectangle (x0,y0)-(x1,y1) in source coordinates,
        # sampling 100 points per side, then map it through aux_trans to
        # obtain the curved boundary polygon.
        x0, x1, y0, y1 = self._extremes
        tr = self._aux_trans
        xx = np.linspace(x0, x1, 100)
        yy0, yy1 = np.empty_like(xx), np.empty_like(xx)
        yy0.fill(y0)
        yy1.fill(y1)
        yy = np.linspace(y0, y1, 100)
        xx0, xx1 = np.empty_like(yy), np.empty_like(yy)
        xx0.fill(x0)
        xx1.fill(x1)
        # Concatenate the four sides into one closed outline.
        xxx = np.concatenate([xx[:-1], xx1[:-1], xx[-1:0:-1], xx0])
        yyy = np.concatenate([yy0[:-1], yy[:-1], yy1[:-1], yy[::-1]])
        t = tr.transform(np.array([xxx, yyy]).transpose())
        return t
class FloatingAxesBase(object):
    # Mixin giving an Axes class a non-rectangular, grid-helper-defined
    # boundary.  Combined with a concrete axes class by
    # floatingaxes_class_factory, which supplies _axes_class_floating.

    def __init__(self, *kl, **kwargs):
        grid_helper = kwargs.get("grid_helper", None)
        if grid_helper is None:
            raise ValueError("FloatingAxes requires grid_helper argument")
        if not hasattr(grid_helper, "get_boundary"):
            raise ValueError("grid_helper must implement get_boundary method")
        self._axes_class_floating.__init__(self, *kl, **kwargs)
        self.set_aspect(1.)
        self.adjust_axes_lim()

    def _gen_axes_patch(self):
        """
        Returns the patch used to draw the background of the axes. It
        is also used as the clipping path for any data elements on the
        axes.
        In the standard axes, this is a rectangle, but in other
        projections it may not be.
        .. note::
            Intended to be overridden by new projection types.
        """
        import matplotlib.patches as mpatches
        grid_helper = self.get_grid_helper()
        t = grid_helper.get_boundary()
        return mpatches.Polygon(t)

    def cla(self):
        self._axes_class_floating.cla(self)
        #HostAxes.cla(self)
        self.patch.set_transform(self.transData)
        # Clip the curved background patch and the gridlines with the
        # ordinary rectangular axes patch, kept invisible and in axes
        # coordinates.
        patch = self._axes_class_floating._gen_axes_patch(self)
        patch.set_figure(self.figure)
        patch.set_visible(False)
        patch.set_transform(self.transAxes)
        self.patch.set_clip_path(patch)
        self.gridlines.set_clip_path(patch)
        self._original_patch = patch

    def adjust_axes_lim(self):
        #t = self.get_boundary()
        # Fit the data limits to the boundary polygon with a 1% margin on
        # every side.
        grid_helper = self.get_grid_helper()
        t = grid_helper.get_boundary()
        x, y = t[:,0], t[:,1]
        xmin, xmax = min(x), max(x)
        ymin, ymax = min(y), max(y)
        dx = (xmax-xmin)/100.
        dy = (ymax-ymin)/100.
        self.set_xlim(xmin-dx, xmax+dx)
        self.set_ylim(ymin-dy, ymax+dy)
# Cache of dynamically generated floating-axes classes, keyed by the
# wrapped axes class so each combination is only built once.
_floatingaxes_classes = {}


def floatingaxes_class_factory(axes_class):
    """Return (and memoize) a subclass mixing FloatingAxesBase into *axes_class*."""
    try:
        return _floatingaxes_classes[axes_class]
    except KeyError:
        name = str("Floating %s" % (axes_class.__name__))
        new_class = type(name,
                         (FloatingAxesBase, axes_class),
                         {'_axes_class_floating': axes_class})
        _floatingaxes_classes[axes_class] = new_class
        return new_class
from .axislines import Axes
from mpl_toolkits.axes_grid1.parasite_axes import host_axes_class_factory
FloatingAxes = floatingaxes_class_factory(host_axes_class_factory(Axes))
import matplotlib.axes as maxes
FloatingSubplot = maxes.subplot_class_factory(FloatingAxes)
# def test(fig):
# from mpl_toolkits.axes_grid.axislines import Subplot
# ax = Subplot(fig, 111)
# fig.add_subplot(ax)
# plt.draw()
def curvelinear_test3(fig):
    """
    polar projection, but in a rectangular box.

    Demo: a polar wedge in a floating axes, with a second grid helper for
    a rescaled radial axis and a floating "z" axis inside the wedge.
    """
    global ax1, axis
    import numpy as np
    from . import angle_helper
    from matplotlib.projections import PolarAxes
    # PolarAxes.PolarTransform takes radian. However, we want our coordinate
    # system in degree
    tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
    # polar projection, which involves cycle, and also has limits in
    # its coordinates, needs a special method to find the extremes
    # (min, max of the coordinate within the view).
    grid_locator1 = angle_helper.LocatorDMS(15)
    # Find a grid values appropriate for the coordinate (degree,
    # minute, second).
    tick_formatter1 = angle_helper.FormatterDMS()
    # And also uses an appropriate formatter. Note that,the
    # acceptable Locator and Formatter class is a bit different than
    # that of mpl's, and you cannot directly use mpl's Locator and
    # Formatter here (but may be possible in the future).
    from .grid_finder import FixedLocator
    grid_locator2 = FixedLocator([2, 4, 6, 8, 10])
    grid_helper = GridHelperCurveLinear(tr,
                                        extremes=(0, 360, 10, 3),
                                        grid_locator1=grid_locator1,
                                        grid_locator2=grid_locator2,
                                        tick_formatter1=tick_formatter1,
                                        tick_formatter2=None,
                                        )
    ax1 = FloatingSubplot(fig, 111, grid_helper=grid_helper)
    #ax1.axis["top"].set_visible(False)
    #ax1.axis["bottom"].major_ticklabels.set_axis_direction("top")
    fig.add_subplot(ax1)
    #ax1.grid(True)
    # Second grid helper with the radius rescaled by r_scale, used to draw
    # an alternative tick scale on the right-hand axis.
    r_scale = 10.
    tr2 = Affine2D().scale(1., 1./r_scale) + tr
    grid_locator2 = FixedLocator([30, 60, 90])
    grid_helper2 = GridHelperCurveLinear(tr2,
                                         extremes=(0, 360,
                                                   10.*r_scale, 3.*r_scale),
                                         grid_locator2=grid_locator2,
                                         )
    ax1.axis["right"] = axis = grid_helper2.new_fixed_axis("right", axes=ax1)
    ax1.axis["left"].label.set_text("Test 1")
    ax1.axis["right"].label.set_text("Test 2")
    for an in ["left", "right"]:
        ax1.axis[an].set_visible(False)
    #grid_helper2 = ax1.get_grid_helper()
    # Floating axis at constant second coordinate (value 7) inside the wedge.
    ax1.axis["z"] = axis = grid_helper.new_floating_axis(1, 7,
                                                         axes=ax1,
                                                         axis_direction="bottom")
    axis.toggle(all=True, label=True)
    #axis.label.set_axis_direction("top")
    axis.label.set_text("z = ?")
    axis.label.set_visible(True)
    axis.line.set_color("0.5")
    #axis.label.set_visible(True)
    # Plot sample data through the parasite axes in curvilinear coords.
    ax2 = ax1.get_aux_axes(tr)
    xx, yy = [67, 90, 75, 30], [2, 5, 8, 4]
    ax2.scatter(xx, yy)
    l, = ax2.plot(xx, yy, "k-")
    l.set_clip_path(ax1.patch)
def curvelinear_test4(fig):
    """
    polar projection, but in a rectangular box.

    Demo: a polar wedge spanning 120..30 degrees with a floating "z" axis.
    """
    global ax1, axis
    import numpy as np
    from . import angle_helper
    from matplotlib.projections import PolarAxes
    # Scale degrees to radians before the polar transform.
    tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
    grid_locator1 = angle_helper.LocatorDMS(5)
    tick_formatter1 = angle_helper.FormatterDMS()
    from .grid_finder import FixedLocator
    grid_locator2 = FixedLocator([2, 4, 6, 8, 10])
    grid_helper = GridHelperCurveLinear(tr,
                                        extremes=(120, 30, 10, 0),
                                        grid_locator1=grid_locator1,
                                        grid_locator2=grid_locator2,
                                        tick_formatter1=tick_formatter1,
                                        tick_formatter2=None,
                                        )
    ax1 = FloatingSubplot(fig, 111, grid_helper=grid_helper)
    #ax1.axis["top"].set_visible(False)
    #ax1.axis["bottom"].major_ticklabels.set_axis_direction("top")
    fig.add_subplot(ax1)
    #ax1.grid(True)
    ax1.axis["left"].label.set_text("Test 1")
    ax1.axis["right"].label.set_text("Test 2")
    for an in ["top"]:
        ax1.axis[an].set_visible(False)
    #grid_helper2 = ax1.get_grid_helper()
    # Floating axis at constant first coordinate (70 degrees).
    ax1.axis["z"] = axis = grid_helper.new_floating_axis(1, 70,
                                                         axes=ax1,
                                                         axis_direction="bottom")
    axis.toggle(all=True, label=True)
    axis.label.set_axis_direction("top")
    axis.label.set_text("z = ?")
    axis.label.set_visible(True)
    axis.line.set_color("0.5")
    #axis.label.set_visible(True)
    # Plot sample data through the parasite axes in curvilinear coords.
    ax2 = ax1.get_aux_axes(tr)
    xx, yy = [67, 90, 75, 30], [2, 5, 8, 4]
    ax2.scatter(xx, yy)
    l, = ax2.plot(xx, yy, "k-")
    l.set_clip_path(ax1.patch)
if __name__ == "__main__":
    # Standalone demo: render the curvelinear_test4 example figure.
    import matplotlib.pyplot as plt
    fig = plt.figure(1, figsize=(5, 5))
    fig.clf()
    #test(fig)
    #curvelinear_test1(fig)
    curvelinear_test4(fig)
    #plt.draw()
    plt.show()
| bsd-2-clause |
JT5D/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 22 | 9838 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
    """End-to-end numeric checks for the PLS family on the Linnerud data.

    Covers: equivalence of the NIPALS and SVD solvers for canonical PLS,
    algebraic properties of the fitted decomposition (orthogonality,
    reconstruction, rotations), and hard-coded non-regression values
    checked against R packages (plspm, mixOmics, pls).
    """
    d = load_linnerud()
    X = d.data
    Y = d.target
    # 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
    # ===========================================================
    # Compare 2 algo.: nipals vs. svd
    # ------------------------------
    pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
    pls_bynipals.fit(X, Y)
    pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
    pls_bysvd.fit(X, Y)
    # check equalities of loading (up to the sign of the second column)
    assert_array_almost_equal(
        pls_bynipals.x_loadings_,
        np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
        err_msg="nipals and svd implementation lead to different x loadings")
    assert_array_almost_equal(
        pls_bynipals.y_loadings_,
        np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
        err_msg="nipals and svd implementation lead to different y loadings")
    # Check PLS properties (with n_components=X.shape[1])
    # ---------------------------------------------------
    plsca = pls_.PLSCanonical(n_components=X.shape[1])
    plsca.fit(X, Y)
    T = plsca.x_scores_
    P = plsca.x_loadings_
    Wx = plsca.x_weights_
    U = plsca.y_scores_
    Q = plsca.y_loadings_
    Wy = plsca.y_weights_

    def check_ortho(M, err_msg):
        # Columns of M are orthogonal iff M'M is diagonal.
        K = np.dot(M.T, M)
        assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)

    # Orthogonality of weights
    # ~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(Wx, "x weights are not orthogonal")
    check_ortho(Wy, "y weights are not orthogonal")
    # Orthogonality of latent scores
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(T, "x scores are not orthogonal")
    check_ortho(U, "y scores are not orthogonal")
    # Check X = TP' and Y = UQ' (with (p == q) components)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # center scale X, Y
    Xc, Yc, x_mean, y_mean, x_std, y_std =\
        pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
    assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
    assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
    # Check that rotations on training data lead to scores
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Xr = plsca.transform(X)
    assert_array_almost_equal(Xr, plsca.x_scores_,
                              err_msg="rotation on X failed")
    Xr, Yr = plsca.transform(X, Y)
    assert_array_almost_equal(Xr, plsca.x_scores_,
                              err_msg="rotation on X failed")
    assert_array_almost_equal(Yr, plsca.y_scores_,
                              err_msg="rotation on Y failed")
    # "Non regression test" on canonical PLS
    # --------------------------------------
    # The results were checked against the R-package plspm
    pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
    pls_ca.fit(X, Y)
    x_weights = np.array(
        [[-0.61330704, 0.25616119, -0.74715187],
         [-0.74697144, 0.11930791, 0.65406368],
         [-0.25668686, -0.95924297, -0.11817271]])
    assert_array_almost_equal(pls_ca.x_weights_, x_weights)
    x_rotations = np.array(
        [[-0.61330704, 0.41591889, -0.62297525],
         [-0.74697144, 0.31388326, 0.77368233],
         [-0.25668686, -0.89237972, -0.24121788]])
    assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
    y_weights = np.array(
        [[+0.58989127, 0.7890047, 0.1717553],
         [+0.77134053, -0.61351791, 0.16920272],
         [-0.23887670, -0.03267062, 0.97050016]])
    assert_array_almost_equal(pls_ca.y_weights_, y_weights)
    y_rotations = np.array(
        [[+0.58989127, 0.7168115, 0.30665872],
         [+0.77134053, -0.70791757, 0.19786539],
         [-0.23887670, -0.00343595, 0.94162826]])
    assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)

    # 2) Regression PLS (PLS2): "Non regression test"
    # ===============================================
    # The results were checked against the R-packages plspm, misOmics and pls
    pls_2 = pls_.PLSRegression(n_components=X.shape[1])
    pls_2.fit(X, Y)
    x_weights = np.array(
        [[-0.61330704, -0.00443647, 0.78983213],
         [-0.74697144, -0.32172099, -0.58183269],
         [-0.25668686, 0.94682413, -0.19399983]])
    assert_array_almost_equal(pls_2.x_weights_, x_weights)
    x_loadings = np.array(
        [[-0.61470416, -0.24574278, 0.78983213],
         [-0.65625755, -0.14396183, -0.58183269],
         [-0.51733059, 1.00609417, -0.19399983]])
    assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
    y_weights = np.array(
        [[+0.32456184, 0.29892183, 0.20316322],
         [+0.42439636, 0.61970543, 0.19320542],
         [-0.13143144, -0.26348971, -0.17092916]])
    assert_array_almost_equal(pls_2.y_weights_, y_weights)
    y_loadings = np.array(
        [[+0.32456184, 0.29892183, 0.20316322],
         [+0.42439636, 0.61970543, 0.19320542],
         [-0.13143144, -0.26348971, -0.17092916]])
    assert_array_almost_equal(pls_2.y_loadings_, y_loadings)

    # 3) Another non-regression test of Canonical PLS on random dataset
    # =================================================================
    # The results were checked against the R-package plspm
    n = 500
    p_noise = 10
    q_noise = 5
    # 2 latents vars:
    np.random.seed(11)
    l1 = np.random.normal(size=n)
    l2 = np.random.normal(size=n)
    latents = np.array([l1, l1, l2, l2]).T
    X = latents + np.random.normal(size=4 * n).reshape((n, 4))
    Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
    X = np.concatenate(
        (X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
    Y = np.concatenate(
        (Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
    # Re-randomize the global RNG so later tests are not affected by the
    # fixed seed used above.
    np.random.seed(None)
    pls_ca = pls_.PLSCanonical(n_components=3)
    pls_ca.fit(X, Y)
    x_weights = np.array(
        [[0.65803719, 0.19197924, 0.21769083],
         [0.7009113, 0.13303969, -0.15376699],
         [0.13528197, -0.68636408, 0.13856546],
         [0.16854574, -0.66788088, -0.12485304],
         [-0.03232333, -0.04189855, 0.40690153],
         [0.1148816, -0.09643158, 0.1613305],
         [0.04792138, -0.02384992, 0.17175319],
         [-0.06781, -0.01666137, -0.18556747],
         [-0.00266945, -0.00160224, 0.11893098],
         [-0.00849528, -0.07706095, 0.1570547],
         [-0.00949471, -0.02964127, 0.34657036],
         [-0.03572177, 0.0945091, 0.3414855],
         [0.05584937, -0.02028961, -0.57682568],
         [0.05744254, -0.01482333, -0.17431274]])
    assert_array_almost_equal(pls_ca.x_weights_, x_weights)
    x_loadings = np.array(
        [[0.65649254, 0.1847647, 0.15270699],
         [0.67554234, 0.15237508, -0.09182247],
         [0.19219925, -0.67750975, 0.08673128],
         [0.2133631, -0.67034809, -0.08835483],
         [-0.03178912, -0.06668336, 0.43395268],
         [0.15684588, -0.13350241, 0.20578984],
         [0.03337736, -0.03807306, 0.09871553],
         [-0.06199844, 0.01559854, -0.1881785],
         [0.00406146, -0.00587025, 0.16413253],
         [-0.00374239, -0.05848466, 0.19140336],
         [0.00139214, -0.01033161, 0.32239136],
         [-0.05292828, 0.0953533, 0.31916881],
         [0.04031924, -0.01961045, -0.65174036],
         [0.06172484, -0.06597366, -0.1244497]])
    assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
    y_weights = np.array(
        [[0.66101097, 0.18672553, 0.22826092],
         [0.69347861, 0.18463471, -0.23995597],
         [0.14462724, -0.66504085, 0.17082434],
         [0.22247955, -0.6932605, -0.09832993],
         [0.07035859, 0.00714283, 0.67810124],
         [0.07765351, -0.0105204, -0.44108074],
         [-0.00917056, 0.04322147, 0.10062478],
         [-0.01909512, 0.06182718, 0.28830475],
         [0.01756709, 0.04797666, 0.32225745]])
    assert_array_almost_equal(pls_ca.y_weights_, y_weights)
    y_loadings = np.array(
        [[0.68568625, 0.1674376, 0.0969508],
         [0.68782064, 0.20375837, -0.1164448],
         [0.11712173, -0.68046903, 0.12001505],
         [0.17860457, -0.6798319, -0.05089681],
         [0.06265739, -0.0277703, 0.74729584],
         [0.0914178, 0.00403751, -0.5135078],
         [-0.02196918, -0.01377169, 0.09564505],
         [-0.03288952, 0.09039729, 0.31858973],
         [0.04287624, 0.05254676, 0.27836841]])
    assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)

    # Orthogonality of weights
    # ~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
    check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")

    # Orthogonality of latent scores
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
    check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    """Each PLS estimator must keep only the requested number of components
    instead of returning all of them."""
    dataset = load_linnerud()
    data, target = dataset.data, dataset.target
    wanted = 2
    for estimator_cls in (pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical):
        fitted = estimator_cls(n_components=wanted)
        fitted.fit(data, target)
        assert_equal(wanted, fitted.y_scores_.shape[1])
def test_scale():
    """Fitting with scale=True must not blow up when one feature is constant
    (zero standard deviation)."""
    dataset = load_linnerud()
    features, targets = dataset.data, dataset.target
    # Make the last column constant so that features[:, -1].std() == 0.
    features[:, -1] = 1.0
    estimators = (pls_.PLSCanonical(), pls_.PLSRegression(), pls_.PLSSVD())
    for estimator in estimators:
        estimator.set_params(scale=True)
        estimator.fit(features, targets)
| bsd-3-clause |
zooniverse/aggregation | experimental/penguins/distanceAnalysis/above_or_below_average.py | 2 | 5078 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import sys
import cPickle as pickle
import math
import matplotlib.pyplot as plt
import pymongo
import urllib
import matplotlib.cbook as cbook
# Make the project's clustering helpers importable from either of the two
# machines this (Python 2) script is run on.
if os.path.exists("/home/ggdhines"):
    sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
    sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
#from divisiveDBSCAN import DivisiveDBSCAN
#from divisiveDBSCAN_multi import DivisiveDBSCAN
#from clusterCompare import metric,metric2
import logisticRegression
# Per-machine location of the pickled databases.
if os.path.exists("/home/ggdhines"):
    base_directory = "/home/ggdhines"
else:
    base_directory = "/home/greg"
# penguins[max_users] is a list of images; image[0] holds per-user
# annotations, image[1] the aggregated clusters.
# NOTE(review): structure inferred from the exploratory prints below —
# confirm against whatever produced the pickle.
penguins = pickle.load(open(base_directory+"/Databases/penguins_vote__.pickle","rb"))

#does this cluster have a corresponding cluster in the gold standard data?
#ie. does this cluster represent an actual penguin?
# #user penguins for first image - with 5 images
# print len(penguins[5][0])
# #user data
# print penguins[5][0][0]
# #gold standard data
# #print penguins[5][0][1]
#
# #users who annotated the first "penguin" in the first image
# print penguins[5][0][0][0][1]
# #and their corresponding points
# print penguins[5][0][0][0][0]
lowest_cluster = float("inf")
highest_cluster = -float('inf')

#print gold_standard
#RESET
max_users = 20
y_values = []
# First pass: collect the mean y coordinate of every cluster so the clusters
# can later be split into an upper and a lower band around mid_point.
# (zip(*cluster[0])[0] relies on Python 2, where zip returns a list.)
for image in penguins[max_users]:
    #first - create a list of ALL users - so we can figure out who has annotated a "penguin" or hasn't
    for cluster in image[1]:
        X = np.mean(zip(*cluster[0])[0])
        Y = np.mean(zip(*cluster[0])[1])
        y_values.append(Y)
mid_point = np.mean(y_values)
# Draw the horizontal band boundary (coordinates are scaled by 1/100 below).
plt.plot((0,10),(mid_point/100.,mid_point/100.))
low_dist = []
high_dist = []
overall_dist = []
overall_values = []
mid_cluster = []
# For each cluster, find its nearest same-band neighbours and classify the
# cluster as "dense" (red) or "sparse" (green) relative to the mean
# nearest-neighbour distance.
# NOTE(review): the original indentation was lost in extraction; this
# reconstruction nests the summary block inside the per-image loop — verify
# against the repository.  Also note zip(mid_cluster, overall_values) can
# misalign when a cluster has no same-band neighbour (nothing is appended
# to overall_values for it) — TODO confirm intended behaviour.
for image_id in range(1):#len(penguins[max_users])):
    image = penguins[max_users][image_id]
    above_below = []
    for i in range(len(image[1])):
        closest_neighbours = []
        c_1 = image[1][i][0]
        X_1 = np.mean(zip(*c_1)[0])
        Y_1 = np.mean(zip(*c_1)[1])
        # Feature vector (bias, x/100, y/100) reused later for the logistic fit.
        mid_cluster.append((1,X_1/100.,Y_1/100.))
        closest_dist = float("inf")
        closest = None
        #for j in range(i+1,len(low_clusters)):
        for j in range(len(image[1])):
            if i == j:
                continue
            c_2 = image[1][j][0]
            X_2 = np.mean(zip(*c_2)[0])
            Y_2 = np.mean(zip(*c_2)[1])
            # Only compare clusters on the same side of the mid line.
            if ((Y_1 < mid_point) and (Y_2 > mid_point)) or ((Y_1 > mid_point) and (Y_2 < mid_point)):
                continue
            dist = math.sqrt((X_1-X_2)**2+(Y_1-Y_2)**2)
            closest_neighbours.append(dist)
            #plt.plot((c_1[0],closest[0]),(c_1[1],closest[1]),color="blue")
        if closest_neighbours == []:
            #assert(len(low_clusters) == 1)
            pass
        else:
            # Average of the single closest neighbour distance.
            closest_neighbours.sort()
            vv = np.mean(closest_neighbours[0:1])
            overall_values.append(vv)
    high_values = []
    low_values = []
    overall_mean = np.mean(overall_values)
    for c_1,value in zip(mid_cluster,overall_values):
        if value >= overall_mean:
            plt.plot((c_1[1]),(c_1[2]),"o",color= "red")
            above_below.append(1)
            high_values.append(value)
        else:
            plt.plot((c_1[1]),(c_1[2]),"o",color= "green")
            above_below.append(0)
            low_values.append(value)
    print np.mean(high_values)
    print np.mean(low_values)
# from sklearn import linear_model
from sklearn.linear_model import SGDClassifier
# #clf = linear_model.LinearRegression()
#clf = SGDClassifier(loss="hinge", alpha=0.05, n_iter=1000, fit_intercept=True)
#clf.fit(np.array(mid_cluster), np.array(above_below))
#
#
# Grid on which the fitted decision function will be evaluated.
xx = np.linspace(0, 10, 50)
yy = np.linspace(0, 6, 50)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
min_p = float("inf")
max_p = -float("inf")
#print logisticRegression.cost_function((-20,-0.5,1),mid_cluster,above_below)
# Hand-rolled batch gradient descent on a 3-parameter logistic regression
# (bias, x, y) predicting the dense/sparse label from cluster position.
alpha = 0.2
theta = [-20,-0.5,1]
costs = []
for i in range(2000):
    #print i
    # Update all three parameters simultaneously from the current theta.
    t1_temp = theta[0] - alpha*logisticRegression.partial_cost_function(theta,mid_cluster,above_below,0)
    t2_temp = theta[1] - alpha*logisticRegression.partial_cost_function(theta,mid_cluster,above_below,1)
    t3_temp = theta[2] - alpha*logisticRegression.partial_cost_function(theta,mid_cluster,above_below,2)
    theta = [t1_temp,t2_temp,t3_temp]
    costs.append(logisticRegression.cost_function(theta,mid_cluster,above_below))
#plt.plot(range(len(costs)),costs)
#plt.show()
# Evaluate the fitted hypothesis on the grid and draw the 0.5 level set,
# i.e. the logistic-regression decision boundary.
for (i, j), val in np.ndenumerate(X1):
    x1 = val
    x2 = X2[i, j]
    #p = clf.predict([x1, x2])
    p = logisticRegression.hypothesis(theta,(1,x1,x2))
    #if x2/600. < 0.5:
    #    print x2/600.
    #    print p
    #min_p = min(min_p,p)
    #max_p= max(min_p,p)
    #if math.fabs(0.5-p) < 0.01:
    #    print p
    Z[i, j] = p
#print min_p,max_p
#print clf.coef_
plt.contour(X1, X2, Z, [0.5], colors="blue")
# # #plt.plot(range(500),clf.predict(range(500)),color="blue")
#plt.xlim(0,10)
#plt.ylim(0,10)
plt.show()
#print mid_cluster
#print above_below
idlead/scikit-learn | examples/manifold/plot_lle_digits.py | 138 | 8594 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection)
# Load only digits 0-5 (n_class=6) to keep all the embeddings below fast.
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
    """Scale a 2-D embedding to the unit square and draw every sample as its
    digit label (colored per class); where matplotlib supports it, overlay
    image thumbnails for well-separated points."""
    lo, hi = np.min(X, 0), np.max(X, 0)
    X = (X - lo) / (hi - lo)

    plt.figure()
    ax = plt.subplot(111)
    for i in range(X.shape[0]):
        plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
                 color=plt.cm.Set1(y[i] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})

    if hasattr(offsetbox, 'AnnotationBbox'):
        # Thumbnails need matplotlib > 1.0.  Track positions already shown
        # and skip any point closer than the threshold to one of them.
        shown_images = np.array([[1., 1.]])  # seeded far from everything
        for i in range(digits.data.shape[0]):
            dist = np.sum((X[i] - shown_images) ** 2, 1)
            if np.min(dist) < 4e-3:
                continue
            shown_images = np.r_[shown_images, [X[i]]]
            thumbnail = offsetbox.AnnotationBbox(
                offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
                X[i])
            ax.add_artist(thumbnail)

    plt.xticks([])
    plt.yticks([])
    if title is not None:
        plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
    # Each digit occupies a 10x10 cell: the 8x8 image plus a 1-pixel border.
    ix = 10 * i + 1
    for j in range(n_img_per_row):
        iy = 10 * j + 1
        img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))

plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")

#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
               "Principal Components projection of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01  # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
               "Linear Discriminant projection of the digits (time %.2fs)" %
               (time() - t0))
# Each section below times one manifold/embedding method on the digits data
# and renders the 2-D result with plot_embedding.
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
               "Isomap projection of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
               "Locally Linear Embedding of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
               "Modified Locally Linear Embedding of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
               "Hessian Locally Linear Embedding of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                      method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
               "Local Tangent Space Alignment of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
               "MDS embedding of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
                                       max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
               "Random forest embedding of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
                                      eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
               "Spectral embedding of the digits (time %.2fs)" %
               (time() - t0))

#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
               "t-SNE embedding of the digits (time %.2fs)" %
               (time() - t0))

plt.show()
| bsd-3-clause |
davidgbe/scikit-learn | sklearn/svm/tests/test_svm.py | 70 | 31674 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample: two linearly separable classes of three points each,
# plus three test points with known labels.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]

# also load the iris dataset, shuffled with a fixed seed so that
# labels are not ordered (several tests below depend on this).
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
    """Fit a linear SVC on the toy problem and pin every libsvm-derived
    attribute: dual coefficients, support set, intercept and predictions."""
    model = svm.SVC(kernel='linear').fit(X, Y)
    assert_array_equal(model.dual_coef_, [[-0.25, .25]])
    assert_array_equal(model.support_, [1, 3])
    assert_array_equal(model.support_vectors_, (X[1], X[3]))
    assert_array_equal(model.intercept_, [0.])
    assert_array_equal(model.predict(X), Y)
def test_libsvm_iris():
    """The high-level SVC API and the low-level svm.libsvm bindings should
    both reach high training accuracy on the shuffled iris data, and the
    libsvm cross-validation should be deterministic for a fixed seed."""
    # Check consistency on dataset iris.

    # shuffle the dataset so that labels are not ordered
    for k in ('linear', 'rbf'):
        clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
        assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
        assert_array_equal(clf.classes_, np.sort(clf.classes_))

    # check also the low-level API
    model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
    pred = svm.libsvm.predict(iris.data, *model)
    assert_greater(np.mean(pred == iris.target), .95)

    model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
                           kernel='linear')
    pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
    assert_greater(np.mean(pred == iris.target), .95)

    pred = svm.libsvm.cross_validation(iris.data,
                                       iris.target.astype(np.float64), 5,
                                       kernel='linear',
                                       random_seed=0)
    assert_greater(np.mean(pred == iris.target), .95)

    # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
    # thread calling this wrapper calling `srand` concurrently).
    pred2 = svm.libsvm.cross_validation(iris.data,
                                        iris.target.astype(np.float64), 5,
                                        kernel='linear',
                                        random_seed=0)
    assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
    """SVC and LinearSVC must accept a single sample given as a 1-d array."""
    one_sample = X[0]
    svc = svm.SVC().fit(X, Y)
    svc.predict(one_sample)
    linear_svc = svm.LinearSVC(random_state=0).fit(X, Y)
    linear_svc.predict(one_sample)
def test_precomputed():
    """SVC with a precomputed (linear) Gram matrix, and with an equivalent
    kernel callable, on the toy problem and on iris.

    Bug fixed: the final accuracy assertion previously reused a stale `pred`
    computed from the precomputed-kernel model, so the callable-kernel fit on
    iris was never actually checked.
    """
    clf = svm.SVC(kernel='precomputed')
    # Gram matrix for train data (square matrix)
    # (we use just a linear kernel)
    K = np.dot(X, np.array(X).T)
    clf.fit(K, Y)
    # Gram matrix for test data (rectangular matrix)
    KT = np.dot(T, np.array(X).T)
    pred = clf.predict(KT)
    # Predicting on a Gram matrix of the wrong shape must fail.
    assert_raises(ValueError, clf.predict, KT.T)

    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.support_, [1, 3])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [1, 3])
    assert_array_equal(pred, true_result)

    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    KT = np.zeros_like(KT)
    for i in range(len(T)):
        for j in clf.support_:
            KT[i, j] = np.dot(T[i], X[j])

    pred = clf.predict(KT)
    assert_array_equal(pred, true_result)

    # same as before, but using a callable function instead of the kernel
    # matrix. kernel is just a linear kernel
    kfunc = lambda x, y: np.dot(x, y.T)
    clf = svm.SVC(kernel=kfunc)
    clf.fit(X, Y)
    pred = clf.predict(T)

    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [1, 3])
    assert_array_equal(pred, true_result)

    # test a precomputed kernel with the iris dataset
    # and check parameters against a linear SVC
    clf = svm.SVC(kernel='precomputed')
    clf2 = svm.SVC(kernel='linear')
    K = np.dot(iris.data, iris.data.T)
    clf.fit(K, iris.target)
    clf2.fit(iris.data, iris.target)
    pred = clf.predict(K)
    assert_array_almost_equal(clf.support_, clf2.support_)
    assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
    assert_array_almost_equal(clf.intercept_, clf2.intercept_)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)

    # Gram matrix for test data but compute K[i,j]
    # for support vectors j only.
    K = np.zeros_like(K)
    for i in range(len(iris.data)):
        for j in clf.support_:
            K[i, j] = np.dot(iris.data[i], iris.data[j])

    pred = clf.predict(K)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)

    clf = svm.SVC(kernel=kfunc)
    clf.fit(iris.data, iris.target)
    # Recompute predictions with the callable-kernel model (previously the
    # stale `pred` from the precomputed-kernel model above was reused here).
    pred = clf.predict(iris.data)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
    """Every SVR variant must reach a minimal R^2 on diabetes, and the
    regressors must accept a constant target vector."""
    # Test Support Vector Regression

    diabetes = datasets.load_diabetes()
    for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
                svm.NuSVR(kernel='linear', nu=.4, C=10.),
                svm.SVR(kernel='linear', C=10.),
                svm.LinearSVR(C=10.),
                # NOTE(review): LinearSVR(C=10.) appears twice — looks like an
                # accidental duplicate; confirm whether a different
                # parameterization was intended.
                svm.LinearSVR(C=10.),
                ):
        clf.fit(diabetes.data, diabetes.target)
        assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)

    # non-regression test; previously, BaseLibSVM would check that
    # len(np.unique(y)) < 2, which must only be done for SVC
    svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
    svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
    """LinearSVR should closely match SVR(kernel='linear') on diabetes,
    both in coefficients and in score."""
    diabetes = datasets.load_diabetes()

    linear_svr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
    score_linear = linear_svr.score(diabetes.data, diabetes.target)

    kernel_svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data,
                                                     diabetes.target)
    score_kernel = kernel_svr.score(diabetes.data, diabetes.target)

    coef_gap = np.linalg.norm(linear_svr.coef_ - kernel_svr.coef_)
    assert coef_gap / np.linalg.norm(kernel_svr.coef_) < .1
    assert np.abs(score_linear - score_kernel) < 0.1
def test_svr_errors():
    """A kernel callable returning a wrongly-shaped Gram matrix must make
    predict raise ValueError."""
    samples = [[0.0], [1.0]]
    targets = [0.0, 0.5]

    # The callable always returns a 1x1 matrix — wrong shape at predict time.
    broken = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
    broken.fit(samples, targets)
    assert_raises(ValueError, broken.predict, samples)
def test_oneclass():
    """OneClassSVM on the toy data: pinned intercept, dual coefficients and
    all-outlier predictions on T; coef_ must raise for non-linear kernels."""
    # Test OneClassSVM
    clf = svm.OneClassSVM()
    clf.fit(X)
    pred = clf.predict(T)

    assert_array_almost_equal(pred, [-1, -1, -1])
    assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
    assert_array_almost_equal(clf.dual_coef_,
                              [[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
                              decimal=3)
    # coef_ only exists for linear kernels, so accessing it must raise.
    assert_raises(ValueError, lambda: clf.coef_)


def test_oneclass_decision_function():
    """decision_function of OneClassSVM must be positive exactly for the
    samples predicted as inliers (+1)."""
    # Test OneClassSVM decision function
    clf = svm.OneClassSVM()
    rnd = check_random_state(2)

    # Generate train data
    X = 0.3 * rnd.randn(100, 2)
    X_train = np.r_[X + 2, X - 2]

    # Generate some regular novel observations
    X = 0.3 * rnd.randn(20, 2)
    X_test = np.r_[X + 2, X - 2]
    # Generate some abnormal novel observations
    X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))

    # fit the model
    clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
    clf.fit(X_train)

    # predict things
    y_pred_test = clf.predict(X_test)
    assert_greater(np.mean(y_pred_test == 1), .9)
    y_pred_outliers = clf.predict(X_outliers)
    assert_greater(np.mean(y_pred_outliers == -1), .9)
    dec_func_test = clf.decision_function(X_test)
    assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
    dec_func_outliers = clf.decision_function(X_outliers)
    assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)


def test_tweak_params():
    """Overwriting dual_coef_ at run time must change predict() accordingly,
    proving the Python attributes map completely onto libsvm state."""
    # Make sure some tweaking of parameters works.
    # We change clf.dual_coef_ at run time and expect .predict() to change
    # accordingly. Notice that this is not trivial since it involves a lot
    # of C/Python copying in the libsvm bindings.
    # The success of this test ensures that the mapping between libsvm and
    # the python classifier is complete.
    clf = svm.SVC(kernel='linear', C=1.0)
    clf.fit(X, Y)
    assert_array_equal(clf.dual_coef_, [[-.25, .25]])
    assert_array_equal(clf.predict([[-.1, -.1]]), [1])
    # _dual_coef_ is the private, libsvm-facing buffer.
    clf._dual_coef_ = np.array([[.0, 1.]])
    assert_array_equal(clf.predict([[-.1, -.1]]), [2])


def test_probability():
    """predict_proba rows must sum to 1, agree with predict for most samples,
    and be consistent with predict_log_proba."""
    # Predict probabilities using SVC
    # This uses cross validation, so we use a slightly bigger testing set.

    for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
                svm.NuSVC(probability=True, random_state=0)):
        clf.fit(iris.data, iris.target)

        prob_predict = clf.predict_proba(iris.data)
        assert_array_almost_equal(
            np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
        assert_true(np.mean(np.argmax(prob_predict, 1)
                            == clf.predict(iris.data)) > 0.9)

        assert_almost_equal(clf.predict_proba(iris.data),
                            np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
    """decision_function computed in Python (w.x + b, or the dual expansion
    for rbf) must match the values returned by libsvm."""
    # Test decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm
    # multi class:
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(iris.data, iris.target)

    dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_

    assert_array_almost_equal(dec, clf.decision_function(iris.data))

    # binary:
    clf.fit(X, Y)
    dec = np.dot(X, clf.coef_.T) + clf.intercept_
    prediction = clf.predict(X)
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
    assert_array_almost_equal(
        prediction,
        clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
    expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
    assert_array_almost_equal(clf.decision_function(X), expected, 2)

    # kernel binary:
    clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
    clf.fit(X, Y)
    # For a kernel SVM the decision function is the dual-coefficient
    # expansion over the support vectors plus the intercept.
    rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
    dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))


def test_decision_function_shape():
    """decision_function_shape='ovr' yields (n_samples, n_classes) and is
    consistent with predict; 'ovo' yields n_classes*(n_classes-1)/2 columns;
    shape=None triggers a ChangedBehaviorWarning."""
    # check that decision_function_shape='ovr' gives
    # correct shape and is consistent with predict

    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(iris.data, iris.target)
    dec = clf.decision_function(iris.data)
    assert_equal(dec.shape, (len(iris.data), 3))
    assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))

    # with five classes:
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(X_train, y_train)
    dec = clf.decision_function(X_test)
    assert_equal(dec.shape, (len(X_test), 5))
    assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))

    # check shape of the decision function with decision_function_shape='ovo':
    # 10 = 5 * 4 / 2 pairwise columns
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(X_train, y_train)
    dec = clf.decision_function(X_train)
    assert_equal(dec.shape, (len(X_train), 10))

    # check deprecation warning
    clf.decision_function_shape = None
    msg = "change the shape of the decision function"
    dec = assert_warns_message(ChangedBehaviorWarning, msg,
                               clf.decision_function, X_train)
    assert_equal(dec.shape, (len(X_train), 10))


def test_svr_decision_function():
    """SVR decision_function computed in Python must match libsvm's, for
    both linear and rbf kernels."""
    # Test SVR's decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm

    X = iris.data
    y = iris.target

    # linear kernel
    reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)

    dec = np.dot(X, reg.coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())

    # rbf kernel
    reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)

    rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
    dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
    """Per-sample weights can flip a prediction; uniform rescaling acts like C."""
    # TODO: check on NuSVR, OneClass, etc.
    clf = svm.SVC()
    clf.fit(X, Y)
    assert_array_equal(clf.predict([X[2]]), [1.])

    sample_weight = [.1] * 3 + [10] * 3
    clf.fit(X, Y, sample_weight=sample_weight)
    assert_array_equal(clf.predict([X[2]]), [2.])

    # test that rescaling all samples is the same as changing C
    clf = svm.SVC()
    clf.fit(X, Y)
    dual_coef_no_weight = clf.dual_coef_
    clf.set_params(C=100)
    clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
    assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
    """class_weight='balanced' improves f1 on artificially imbalanced iris."""
    from sklearn.linear_model import LogisticRegression
    # We take as dataset the two-dimensional projection of iris so
    # that it is not separable and remove half of predictors from
    # class 1.
    # We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
    from sklearn.utils import compute_class_weight
    X, y = iris.data[:, :2], iris.target + 1
    unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])

    classes = np.unique(y[unbalanced])
    class_weights = compute_class_weight('balanced', classes, y[unbalanced])
    # the thinned-out class (label 3, index 2) must get the largest weight
    assert_true(np.argmax(class_weights) == 2)

    for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
                LogisticRegression()):
        # check that score is better when class='balanced' is set.
        y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
        clf.set_params(class_weight='balanced')
        y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
        assert_true(metrics.f1_score(y, y_pred, average='weighted')
                    <= metrics.f1_score(y, y_pred_balanced,
                                        average='weighted'))
def test_bad_input():
    """Deficient inputs (bad C/nu, shape mismatches, wrong layout) must raise."""
    # impossible value of C
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)

    # impossible value of nu
    clf = svm.NuSVC(nu=0.0)
    assert_raises(ValueError, clf.fit, X, Y)

    Y2 = Y[:-1]  # wrong dimensions for labels
    assert_raises(ValueError, clf.fit, X, Y2)

    # Test with arrays that are non-contiguous.
    for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
        Xf = np.asfortranarray(X)
        assert_false(Xf.flags['C_CONTIGUOUS'])
        yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
        yf = yf[:, -1]
        assert_false(yf.flags['F_CONTIGUOUS'])
        assert_false(yf.flags['C_CONTIGUOUS'])
        clf.fit(Xf, yf)
        assert_array_equal(clf.predict(T), true_result)

    # error for precomputed kernels
    clf = svm.SVC(kernel='precomputed')
    assert_raises(ValueError, clf.fit, X, Y)

    # sample_weight bad dimensions
    clf = svm.SVC()
    assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))

    # predict with sparse input when trained with dense
    clf = svm.SVC().fit(X, Y)
    assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))

    Xt = np.array(X).T
    clf.fit(np.dot(X, Xt), Y)
    assert_raises(ValueError, clf.predict, X)

    clf = svm.SVC()
    clf.fit(X, Y)
    assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
    """A sparse precomputed Gram matrix must raise a helpful TypeError."""
    clf = svm.SVC(kernel='precomputed')
    sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
    try:
        clf.fit(sparse_gram, [0, 1])
        assert not "reached"  # fit must have raised before this line
    except TypeError as e:
        assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
    """Exhaustively check valid and invalid loss/penalty/dual combinations."""
    # Generate list of possible parameter combinations
    losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
    penalties, duals = ['l1', 'l2', 'bar'], [True, False]

    X, y = make_classification(n_samples=5, n_features=5)

    for loss, penalty, dual in itertools.product(losses, penalties, duals):
        clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
        # combinations liblinear does not implement must raise ValueError
        if ((loss, penalty) == ('hinge', 'l1') or
                (loss, penalty, dual) == ('hinge', 'l2', False) or
                (penalty, dual) == ('l1', True) or
                loss == 'foo' or penalty == 'bar'):
            assert_raises_regexp(ValueError,
                                 "Unsupported set of arguments.*penalty='%s.*"
                                 "loss='%s.*dual=%s"
                                 % (penalty, loss, dual),
                                 clf.fit, X, y)
        else:
            clf.fit(X, y)

    # Incorrect loss value - test if explicit error message is raised
    assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
                         svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
    """Deprecated l1/l2 loss aliases must warn and map to their new names."""
    X, y = [[0.0], [1.0]], [0, 1]
    msg = ("loss='%s' has been deprecated in favor of "
           "loss='%s' as of 0.16. Backward compatibility"
           " for the %s will be removed in %s")

    # LinearSVC
    # loss l1/L1 --> hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "hinge", "loss='l1'", "1.0"),
                         svm.LinearSVC(loss="l1").fit, X, y)

    # loss l2/L2 --> squared_hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
                         svm.LinearSVC(loss="L2").fit, X, y)

    # LinearSVR
    # loss l1/L1 --> epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("L1", "epsilon_insensitive", "loss='L1'",
                                "1.0"),
                         svm.LinearSVR(loss="L1").fit, X, y)

    # loss l2/L2 --> squared_epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_epsilon_insensitive",
                                "loss='l2'", "1.0"),
                         svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
    """Uppercase loss/penalty spellings must warn and still be accepted."""
    # Check if Upper case notation is supported by _fit_liblinear
    # which is called by fit
    X, y = [[0.0], [1.0]], [0, 1]
    msg = ("loss='%s' has been deprecated in favor of "
           "loss='%s' as of 0.16. Backward compatibility"
           " for the uppercase notation will be removed in %s")

    # loss SQUARED_hinge --> squared_hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
                         svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)

    # penalty L2 --> l2
    assert_warns_message(DeprecationWarning,
                         msg.replace("loss", "penalty")
                         % ("L2", "l2", "0.18"),
                         svm.LinearSVC(penalty="L2").fit, X, y)

    # loss EPSILON_INSENSITIVE --> epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
                                "0.18"),
                         svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
    """Basic LinearSVC sanity checks across penalty/loss/dual settings."""
    clf = svm.LinearSVC(random_state=0).fit(X, Y)

    # by default should have intercept
    assert_true(clf.fit_intercept)

    assert_array_equal(clf.predict(T), true_result)
    assert_array_almost_equal(clf.intercept_, [0], decimal=3)

    # the same with l1 penalty
    clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # l2 penalty with dual formulation
    clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # l2 penalty, l1 loss
    clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
    clf.fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)

    # test also decision function
    dec = clf.decision_function(T)
    res = (dec > 0).astype(np.int) + 1
    assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
    """Crammer-Singer multi-class agrees closely with OvR but differs in coef_."""
    ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
    cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
    cs_clf.fit(iris.data, iris.target)

    # similar prediction for ovr and crammer-singer:
    assert_true((ovr_clf.predict(iris.data) ==
                 cs_clf.predict(iris.data)).mean() > .9)

    # classifiers shouldn't be the same
    assert_true((ovr_clf.coef_ != cs_clf.coef_).all())

    # test decision function
    assert_array_equal(cs_clf.predict(iris.data),
                       np.argmax(cs_clf.decision_function(iris.data), axis=1))
    dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
    assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
    """Crammer-Singer formulation must also work in the binary case."""
    X, y = make_classification(n_classes=2, random_state=0)

    for fit_intercept in (True, False):
        acc = svm.LinearSVC(fit_intercept=fit_intercept,
                            multi_class="crammer_singer",
                            random_state=0).fit(X, y).score(X, y)
        assert_greater(acc, 0.9)
def test_linearsvc_iris():
    """LinearSVC gives plausible iris predictions with symbolic class names."""
    # Also, test symbolic class names (classes_).
    target = iris.target_names[iris.target]
    clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
    assert_equal(set(clf.classes_), set(iris.target_names))
    assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)

    dec = clf.decision_function(iris.data)
    pred = iris.target_names[np.argmax(dec, 1)]
    assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
    """Dense liblinear must honour the intercept_scaling parameter."""
    X = [[2, 1],
         [3, 1],
         [1, 3],
         [2, 3]]
    y = [0, 0, 1, 1]
    clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
                     dual=False, C=4, tol=1e-7, random_state=0)
    assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
    assert_true(clf.fit_intercept)

    # when intercept_scaling is low the intercept value is highly "penalized"
    # by regularization
    clf.intercept_scaling = 1
    clf.fit(X, y)
    assert_almost_equal(clf.intercept_, 0, decimal=5)

    # when intercept_scaling is sufficiently high, the intercept value
    # is not affected by regularization
    clf.intercept_scaling = 100
    clf.fit(X, y)
    intercept1 = clf.intercept_
    assert_less(intercept1, -1)

    # when intercept_scaling is sufficiently high, the intercept value
    # doesn't depend on intercept_scaling value
    clf.intercept_scaling = 1000
    clf.fit(X, y)
    intercept2 = clf.intercept_
    assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
    """Re-assigning copies of coef_/intercept_ must not change decisions."""
    # multi-class case
    clf = svm.LinearSVC().fit(iris.data, iris.target)
    values = clf.decision_function(iris.data)
    clf.coef_ = clf.coef_.copy()
    clf.intercept_ = clf.intercept_.copy()

    values2 = clf.decision_function(iris.data)
    assert_array_almost_equal(values, values2)

    # binary-class case
    X = [[2, 1],
         [3, 1],
         [1, 3],
         [2, 3]]
    y = [0, 0, 1, 1]

    clf = svm.LinearSVC().fit(X, y)
    values = clf.decision_function(X)
    clf.coef_ = clf.coef_.copy()
    clf.intercept_ = clf.intercept_.copy()

    values2 = clf.decision_function(X)
    assert_array_equal(values, values2)
def test_immutable_coef_property():
    """Primal coef_ of libsvm models must reject direct modification."""
    # Check that primal coef modification are not silently ignored
    svms = [
        svm.SVC(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
        svm.SVR(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
        svm.OneClassSVM(kernel='linear').fit(iris.data),
    ]
    for clf in svms:
        assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
        assert_raises((RuntimeError, ValueError),
                      clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
    """Smoke-test LinearSVC(verbose=1) with stdout redirected to a pipe.

    liblinear writes its progress to the C-level stdout (fd 1), so plain
    sys.stdout capturing does not work; the file descriptor itself must be
    replaced for the duration of the fit.

    The original version leaked three descriptors (the dup of stdout and
    both pipe ends) and never restored stdout when fit() raised; everything
    is now released in a finally block.
    """
    import os
    saved_stdout = os.dup(1)          # save original stdout fd
    read_fd, write_fd = os.pipe()
    os.dup2(write_fd, 1)              # replace fd 1 with the pipe's write end

    try:
        # actual call
        clf = svm.LinearSVC(verbose=1)
        clf.fit(X, Y)
    finally:
        os.dup2(saved_stdout, 1)      # restore original stdout
        # close all temporary descriptors (the old code leaked them)
        os.close(saved_stdout)
        os.close(read_fd)
        os.close(write_fd)
def test_svc_clone_with_callable_kernel():
    """A cloned SVC with a callable linear kernel matches kernel='linear'."""
    # create SVM with callable linear kernel, check that results are the same
    # as with built-in linear kernel
    svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
                           probability=True, random_state=0,
                           decision_function_shape='ovr')
    # clone for checking clonability with lambda functions..
    svm_cloned = base.clone(svm_callable)
    svm_cloned.fit(iris.data, iris.target)

    svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
                          decision_function_shape='ovr')
    svm_builtin.fit(iris.data, iris.target)

    assert_array_almost_equal(svm_cloned.dual_coef_,
                              svm_builtin.dual_coef_)
    assert_array_almost_equal(svm_cloned.intercept_,
                              svm_builtin.intercept_)
    assert_array_equal(svm_cloned.predict(iris.data),
                       svm_builtin.predict(iris.data))

    assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
                              svm_builtin.predict_proba(iris.data),
                              decimal=4)
    assert_array_almost_equal(svm_cloned.decision_function(iris.data),
                              svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
    """A callable kernel returning the wrong shape must raise ValueError."""
    svc = svm.SVC(kernel=lambda x, y: x)
    assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
    """Hitting max_iter=1 must emit a ConvergenceWarning."""
    a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
                random_state=0, max_iter=1)
    assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
    """predict() on an unfitted model must raise a 'not fitted' error."""
    X = "foo!"  # input validation not required when SVM not fitted

    clf = svm.SVC()
    assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)

    clf = svm.NuSVR()
    assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)
# ignore convergence warnings from max_iter=1
# decorator silences the convergence warnings triggered by max_iter=1
@ignore_warnings
def test_consistent_proba():
    """Two fits with the same random_state yield identical predict_proba."""
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_1 = a.fit(X, Y).predict_proba(X)
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_2 = a.fit(X, Y).predict_proba(X)
    assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
    """A non-converged LinearSVC must warn and report n_iter_ == max_iter."""
    lsvc = svm.LinearSVC(max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
    assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
    """SVR(kernel='linear') exposes coef_ with the right sign.

    Non-regression test for issue #2933: predictions must equal
    X @ coef_ + intercept_.
    """
    X = np.random.RandomState(21).randn(10, 3)
    y = np.random.RandomState(12).randn(10)

    for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
                svm.LinearSVR()]:
        svr.fit(X, y)
        assert_array_almost_equal(svr.predict(X),
                                  np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
    """intercept_scaling <= 0 must raise with an explanatory message."""
    for i in [-1, 0]:
        lsvc = svm.LinearSVC(intercept_scaling=i)
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % lsvc.intercept_scaling)
        assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
    """With fit_intercept=False the intercept stays 0 regardless of scaling."""
    lsvc = svm.LinearSVC(fit_intercept=False)
    lsvc.fit(X, Y)
    assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
    """predict_proba availability is switched by the `probability` param."""
    # Method must be (un)available before or after fit, switched by
    # `probability` param
    G = svm.SVC(probability=True)
    assert_true(hasattr(G, 'predict_proba'))
    G.fit(iris.data, iris.target)
    assert_true(hasattr(G, 'predict_proba'))

    G = svm.SVC(probability=False)
    assert_false(hasattr(G, 'predict_proba'))
    G.fit(iris.data, iris.target)
    assert_false(hasattr(G, 'predict_proba'))

    # Switching to `probability=True` after fitting should make
    # predict_proba available, but calling it must not work:
    G.probability = True
    assert_true(hasattr(G, 'predict_proba'))
    msg = "predict_proba is not available when fitted with probability=False"
    assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
xaccrocheur/beatnitpicker | beatnitpicker.py | 1 | 23337 | #!/usr/bin/python
import os, sys, gobject, stat, time, re
import gtk
import gst, gst.pbutils
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
import scipy.io.wavfile as wavfile
import wave
import numpy as np
license = """
BeatNitPicker is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 2.
This program is distributed in the hope that it will be useful,
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA)
"""
menu = """
<ui>
<menubar name="MenuBar">
<menu action="File">
<menuitem action="Properties"/>
<menuitem action="Quit"/>
</menu>
<menu action="Edit">
<menuitem action="Preferences"/>
</menu>
<menu action="Help">
<menuitem action="About"/>
</menu>
</menubar>
</ui>
"""
# Optional command-line argument: a directory to open at startup.
# clipath is False (not None) when absent; GUI.__init__ tests its truthiness.
if len(sys.argv) > 1:
    clipath = str(sys.argv[1])
else:
    clipath = False
def bytestomegabytes(bytes):
    """Convert a size in bytes to megabytes.

    Uses true (float) division: the original int/int arithmetic silently
    truncated under Python 2, so e.g. 1.5 MB was reported as 1 MB.
    """
    return bytes / 1024.0 / 1024.0
def k_to_m(num):
    """Format a byte count as a human-readable string, e.g. '3.5MB'.

    Walks up the unit ladder by factors of 1024; anything beyond GB is
    reported in TB.
    """
    value = num
    for unit in ('bytes', 'KB', 'MB', 'GB'):
        if value < 1024.0:
            return "%3.1f%s" % (value, unit)
        value /= 1024.0
    return "%3.1f%s" % (value, 'TB')
def _wav2array(nchannels, sampwidth, data):
"""data must be the string containing the bytes from the wav file."""
num_samples, remainder = divmod(len(data), sampwidth * nchannels)
if remainder > 0:
raise ValueError('The length of data is not a multiple of '
'sampwidth * num_channels.')
if sampwidth > 4:
raise ValueError("sampwidth must not be greater than 4.")
if sampwidth == 3:
a = np.empty((num_samples, nchannels, 4), dtype=np.uint8)
raw_bytes = np.fromstring(data, dtype=np.uint8)
a[:, :, :sampwidth] = raw_bytes.reshape(-1, nchannels, sampwidth)
a[:, :, sampwidth:] = (a[:, :, sampwidth - 1:sampwidth] >> 7) * 255
result = a.view('<i4').reshape(a.shape[:-1])
else:
# 8 bit samples are stored as unsigned ints; others as signed ints.
dt_char = 'u' if sampwidth == 1 else 'i'
a = np.fromstring(data, dtype='<%s%d' % (dt_char, sampwidth))
result = a.reshape(-1, nchannels)
return result
def readwav(file):
    """
    Read a wav file.

    Returns the frame rate, sample width (in bytes) and a numpy array
    containing the data.

    This function does not read compressed wav files.
    """
    wav = wave.open(file)
    try:
        rate = wav.getframerate()
        nchannels = wav.getnchannels()
        sampwidth = wav.getsampwidth()
        nframes = wav.getnframes()
        data = wav.readframes(nframes)
    finally:
        # the original version leaked the handle when any read call raised
        wav.close()
    array = _wav2array(nchannels, sampwidth, data)
    return rate, sampwidth, array
class GUI(object):
column_names = ["Name", "Size", "Mode", "Last Changed"]
audioFormats = [ ".wav", ".mp3", ".ogg", ".flac", ".MP3", ".FLAC", ".OGG", ".WAV", "wma" ]
def __init__(self, dname = None):
if clipath:
dname = clipath
else:
dname = None
self.window = gtk.Window()
self.window.set_size_request(550, 600)
self.window.connect("delete_event", self.on_destroy)
self.window.set_icon(gtk.icon_theme_get_default().load_icon("gstreamer-properties", 128, 0))
# lister
cell_data_funcs = (None, self.file_size, self.file_mode,
self.file_last_changed)
self.list_store = self.make_list(dname)
self.treeview = gtk.TreeView()
self.treeview.set_enable_search(True)
self.treeview.set_search_column(0)
# self.treeview.set_activate_on_single_click(True)
self.tvcolumn = [None] * len(self.column_names)
cellpb = gtk.CellRendererPixbuf()
self.tvcolumn[0] = gtk.TreeViewColumn(self.column_names[0], cellpb)
self.tvcolumn[0].set_cell_data_func(cellpb, self.file_pixbuf)
cell = gtk.CellRendererText()
self.tvcolumn[0].pack_start(cell, False)
self.tvcolumn[0].set_cell_data_func(cell, self.file_name)
self.treeview.append_column(self.tvcolumn[0])
for n in range(1, len(self.column_names)):
cell = gtk.CellRendererText()
self.tvcolumn[n] = gtk.TreeViewColumn(self.column_names[n], cell)
# make it searchable (does NOT work, please help)
# self.treeview.set_search_column(True)
# Allow sorting on the column (does NOT work, please help)
self.tvcolumn[n].set_sort_column_id(n)
if n == 1:
cell.set_property('xalign', 1.0)
self.tvcolumn[n].set_cell_data_func(cell, cell_data_funcs[n])
self.treeview.append_column(self.tvcolumn[n])
self.treeview.set_model(self.list_store)
self.tree_selection = self.treeview.get_selection()
self.tree_selection.set_mode(gtk.SELECTION_SINGLE)
# self.tree_selection.connect("changed", self.open_file)
# player
self.label = gtk.Label()
self.label.set_alignment(0,0.5)
self.label.set_markup("<b> </b>\n \n ")
self.label.set_line_wrap(True)
self.slider = gtk.HScale()
self.toggle_button = gtk.ToggleButton(None)
self.next_button = gtk.Button(None)
self.toggle_button.set_property("image", gtk.image_new_from_stock(gtk.STOCK_MEDIA_PLAY, gtk.ICON_SIZE_BUTTON))
self.next_button.set_property("image", gtk.image_new_from_stock(gtk.STOCK_MEDIA_NEXT, gtk.ICON_SIZE_BUTTON))
self.buttons_hbox = gtk.HBox(False, 5)
self.slider_hbox = gtk.HBox()
self.slider.set_range(0, 100)
self.slider.set_increments(1, 10)
self.buttons_hbox.pack_start(self.toggle_button, False)
self.buttons_hbox.pack_start(self.next_button, False)
self.buttons_hbox.pack_start(self.label, False)
self.slider_hbox.pack_start(self.slider, True, True)
self.playbin = gst.element_factory_make('playbin2')
self.bus = self.playbin.get_bus()
self.bus.add_signal_watch()
self.bus.connect("message::eos", self.on_finish)
self.is_playing = False
# end player
# UI
scroll_list = gtk.ScrolledWindow()
scroll_list.add(self.treeview)
uimanager = gtk.UIManager()
accelgroup = uimanager.get_accel_group()
self.window.add_accel_group(accelgroup)
self.actiongroup = gtk.ActionGroup("uimanager")
self.actiongroup.add_actions([
("Properties", gtk.STOCK_PROPERTIES, "_Properties", None, "File info", self.file_properties_dialog),
("Quit", gtk.STOCK_QUIT, "_Quit", None, "Quit the Application", lambda w: gtk.main_quit()),
("File", None, "_File"),
("Preferences", gtk.STOCK_PREFERENCES, "_Preferences", None, "Edit the Preferences"),
("Edit", None, "_Edit"),
("About", gtk.STOCK_ABOUT, "_About", None, "yow", self.about_box),
("Help", None, "_Help")
])
uimanager.insert_action_group(self.actiongroup, 0)
uimanager.add_ui_from_string(menu)
menubar = uimanager.get_widget("/MenuBar")
# Connects
self.toggle_button.connect("toggled", self.toggle_play, None, "current", self.treeview, self.tree_selection)
self.next_button.connect("clicked", self.toggle_play, None, "next", self.treeview, self.tree_selection)
self.slider.connect('value-changed', self.on_slider_change)
self.treeview.connect('row-activated', self.open_file)
# self.treeview.connect("cursor-changed", self.toggle_play, None, "current")
# Packs
self.mainbox = gtk.VBox()
self.plot_inbox = gtk.HBox(True, 0)
self.plot_outbox = gtk.VBox(True, 0)
self.plot_outbox.pack_start(self.plot_inbox, True, True, 0)
self.plot_outbox.set_size_request(200, 60)
self.mainbox.pack_start(menubar, False)
self.mainbox.pack_start(self.plot_outbox, False, False, 1)
self.mainbox.pack_start(self.slider_hbox, False, False, 1)
self.mainbox.pack_start(self.buttons_hbox, False, False, 1)
self.mainbox.pack_start(scroll_list, True, True, 1)
self.window.add(self.mainbox)
self.window.show_all()
self.treeview.grab_focus()
return
def _wav2array(nchannels, sampwidth, data):
"""data must be the string containing the bytes from the wav file."""
num_samples, remainder = divmod(len(data), sampwidth * nchannels)
if remainder > 0:
raise ValueError('The length of data is not a multiple of '
'sampwidth * num_channels.')
if sampwidth > 4:
raise ValueError("sampwidth must not be greater than 4.")
if sampwidth == 3:
a = np.empty((num_samples, nchannels, 4), dtype=np.uint8)
raw_bytes = np.fromstring(data, dtype=np.uint8)
a[:, :, :sampwidth] = raw_bytes.reshape(-1, nchannels, sampwidth)
a[:, :, sampwidth:] = (a[:, :, sampwidth - 1:sampwidth] >> 7) * 255
result = a.view('<i4').reshape(a.shape[:-1])
else:
# 8 bit samples are stored as unsigned ints; others as signed ints.
dt_char = 'u' if sampwidth == 1 else 'i'
a = np.fromstring(data, dtype='<%s%d' % (dt_char, sampwidth))
result = a.reshape(-1, nchannels)
return result
def get_info(self, filename, element=None):
newitem = gst.pbutils.Discoverer(50000000000)
info = newitem.discover_uri("file://" + filename)
tags = info.get_tags()
tag_string = ""
if element:
for tag_name in list(tags.keys()):
if tag_name == element:
tag_string += " " + str(tags[tag_name]) + '\r\n'
return tag_string
else:
for tag_name in list(tags.keys()):
if tag_name != "image":
tag_string += tag_name + " : " + str(tags[tag_name]) + '\r\n'
return tag_string
def file_properties_dialog(self, widget):
filename = self.get_selected_tree_row(self)
def on_info(self, widget):
md = gtk.MessageDialog(None,
gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO,
gtk.BUTTONS_CLOSE, "Select something")
md.run()
md.destroy()
if not filename:
on_info(self, widget)
if filename.endswith(tuple(self.audioFormats)):
title = os.path.basename(filename)
text = self.get_info(filename)
else:
title = os.path.basename(filename)
text = "##", filename, "is not an audio file"
dialog = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_NONE, title)
dialog.set_title("BeatNitPicker audio file info")
dialog.format_secondary_text("Location :" + filename + '\r' + str(text))
dialog.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
dialog.connect('destroy', lambda w: dialog.destroy())
if filename.endswith(".wav") or filename.endswith(".WAV"):
pa = self.plotter(filename, "waveform", "full")
pa.set_size_request(350, 200)
dialog.vbox.pack_start(pa)
dialog.show_all()
dialog.run()
dialog.destroy()
def about_box(self, widget):
about = gtk.AboutDialog()
about.set_program_name("BeatNitPicker")
about.set_version("0.2")
about.set_copyright("(c) Philippe \"xaccrocheur\" Coatmeur")
about.set_comments("Simple sound sample auditor")
about.set_website("https://github.com/xaccrocheur")
about.set_logo(gtk.icon_theme_get_default().load_icon("gstreamer-properties", 128, 0))
about.set_license(license)
about.run()
about.destroy()
def open_file(self, treeview, path, button, *args):
model = treeview.get_model()
iter = model.get_iter(path)
filename = os.path.join(self.dirname, model.get_value(iter, 0))
filestat = os.stat(filename)
if stat.S_ISDIR(filestat.st_mode):
new_model = self.make_list(filename)
treeview.set_model(new_model)
elif filename.endswith(tuple(self.audioFormats)):
self.toggle_play(self, filename, "current", None, None)
else:
print("##", filename, "is not an audio file")
def get_selected_tree_row(self, *args):
treeview = self.treeview
selection = treeview.get_selection()
(model, pathlist) = selection.get_selected_rows()
slider_position = self.slider.get_value()
for path in pathlist :
iter = model.get_iter(path)
filename = os.path.join(self.dirname, model.get_value(iter, 0))
filestat = os.stat(filename)
if stat.S_ISDIR(filestat.st_mode):
print(filename, "is a directory")
elif filename.endswith(tuple(self.audioFormats)):
return filename
else:
print("##", filename, "is not an audio file")
def get_next_tree_row(self, *args):
treeview = self.treeview
selection = treeview.get_selection()
(model, pathlist) = selection.get_selected_rows()
# print "Model: ", model, "Pathlist: ", pathlist, "Longueur: ", pathlist.count()
slider_position = self.slider.get_value()
for path in pathlist :
iter = model.get_iter(path)
next_iter = model.iter_next(iter)
filename = os.path.join(self.dirname, model.get_value(iter, 0))
next_filename = os.path.join(self.dirname, model.get_value(next_iter, 0))
filestat = os.stat(next_filename)
current = filename
if stat.S_ISDIR(filestat.st_mode):
# print next_filename, "is a ddirectory"
# next_filename = self.get_next_tree_row(self)
# print "current", current
# print "next", next_filename
selection.select_iter(next_iter)
# pass
# if next_filename != current:
elif next_filename.endswith(tuple(self.audioFormats)):
return next_filename
selection.select_iter(next_iter)
else:
print("##", next_filename, "is not an audio file")
def toggle_play(self, button, filename, position, tv, selection):
if position == "current":
# print "current", self.get_next_tree_row(self)
pass
# if not self.get_selected_tree_row(self):
# return
if filename:
self.toggle_button.set_property("image", gtk.image_new_from_stock(gtk.STOCK_MEDIA_PAUSE, gtk.ICON_SIZE_BUTTON))
self.player(self, filename)
else:
filename = self.get_selected_tree_row(self)
slider_position = self.slider.get_value()
if self.is_playing:
self.toggle_button.set_property("image", gtk.image_new_from_stock(gtk.STOCK_MEDIA_PLAY, gtk.ICON_SIZE_BUTTON))
self.is_playing = False
self.playbin.set_state(gst.STATE_PAUSED)
else:
if slider_position > 0.0:
self.toggle_button.set_property("image", gtk.image_new_from_stock(gtk.STOCK_MEDIA_PAUSE, gtk.ICON_SIZE_BUTTON))
self.playbin.set_state(gst.STATE_PLAYING)
gobject.timeout_add(100, self.update_slider)
self.is_playing = True
else:
self.toggle_button.set_property("image", gtk.image_new_from_stock(gtk.STOCK_MEDIA_PAUSE, gtk.ICON_SIZE_BUTTON))
self.player(self, filename)
self.is_playing = True
re.search('(?<=abc)def', 'abcdef')
audio_codec_tag = self.get_info(filename, "audio-codec")
self.label.set_markup("<b> " + os.path.basename(filename) + "</b>\n" + audio_codec_tag)
else:
(model, pathlist) = selection.get_selected_rows()
for path in pathlist :
iter = model.get_iter(path)
next_iter = model.iter_next
filename = self.get_next_tree_row(self)
if filename:
myint = reduce(lambda rst, d: rst * 10 + d, path)
selection.unselect_path(myint)
selection.select_path(myint + 1)
print "Filename: ", filename
self.toggle_button.set_property("image", gtk.image_new_from_stock(gtk.STOCK_MEDIA_PAUSE, gtk.ICON_SIZE_BUTTON))
self.player(self, filename)
self.is_playing = True
else:
print "NO Filename"
pass
def player(self, button, filename):
self.plot_outbox.remove(self.plot_inbox)
self.playbin.set_state(gst.STATE_READY)
self.playbin.set_property('uri', 'file:///' + filename)
self.is_playing = True
self.playbin.set_state(gst.STATE_PLAYING)
gobject.timeout_add(100, self.update_slider)
self.vp = gtk.Viewport()
self.plot_inbox = gtk.HBox()
self.mylabel = gtk.Label("No Viz")
readable = False
try:
f = open(filename, 'r')
w = wavfile.read(open(filename, 'r'))
readable = True
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
readable = False
except ValueError:
print "Error opening file for plotting: Will not draw waveform."
readable = False
except:
print "Unexpected error:", sys.exc_info()[0]
readable = False
raise
if readable:
self.pa = self.plotter(filename, "waveform", "neat")
self.plot_inbox.pack_start(self.pa)
else:
self.plot_inbox.pack_start(self.mylabel)
self.plot_outbox.pack_start(self.plot_inbox, True, True, 0)
self.window.show_all()
def plotter(self, filename, plot_type, plot_style):
rate, data = wavfile.read(open(filename, 'r'), True)
# rate, data, array = readwav(filename)
# print("Rate: ", rate)
f = Figure(facecolor = 'w')
f.patch.set_alpha(1)
a = f.add_subplot(111, axisbg='w')
if plot_type == "waveform":
a.plot(list(range(len(data))),data, color="OrangeRed", linewidth=0.5, linestyle="-")
a.axhline(0, color='DimGray', lw=1)
a.set_xticklabels(["", ""])
a.set_yticklabels(["", ""])
if plot_style == "neat":
f.subplots_adjust(0, 0, 1, 1)
a.axis('off')
canvas = FigureCanvas(f) # a gtk.DrawingArea
return canvas
# Lister funcs
def lister_compare(self, model, row1, row2, user_data):
sort_column, _ = model.get_sort_column_id()
value1 = model.get_value(row1, sort_column)
value2 = model.get_value(row2, sort_column)
if value1 < value2:
return -1
elif value1 == value2:
return 0
else:
return 1
def make_list(self, dname=None):
if not dname:
self.dirname = os.path.expanduser('~')
else:
self.dirname = os.path.abspath(dname)
self.window.set_title(self.dirname + " - BNP")
files = [f for f in os.listdir(self.dirname) if f[0] != '.']
files.sort()
files = ['..'] + files
list_store = gtk.ListStore(object)
for f in files:
list_store.append([f])
return list_store
def file_pixbuf(self, column, cell, model, iter):
filename = os.path.join(self.dirname, model.get_value(iter, 0))
filestat = os.stat(filename)
if stat.S_ISDIR(filestat.st_mode):
pb = gtk.icon_theme_get_default().load_icon("folder", 24, 0)
elif filename.endswith(tuple(self.audioFormats)):
pb = gtk.icon_theme_get_default().load_icon("audio-volume-medium", 24, 0)
else:
pb = gtk.icon_theme_get_default().load_icon("edit-copy", 24, 0)
cell.set_property('pixbuf', pb)
return
def file_name(self, column, cell, model, iter):
cell.set_property('text', model.get_value(iter, 0))
return
def file_size(self, column, cell, model, iter):
filename = os.path.join(self.dirname, model.get_value(iter, 0))
filestat = os.stat(filename)
# print("Size: ", bytestomegabytes(filestat.st_size), "Mb")
size = str(k_to_m(filestat.st_size))
cell.set_property('text', size)
return
def file_mode(self, column, cell, model, iter):
filename = os.path.join(self.dirname, model.get_value(iter, 0))
filestat = os.stat(filename)
cell.set_property('text', oct(stat.S_IMODE(filestat.st_mode)))
return
def file_last_changed(self, column, cell, model, iter):
filename = os.path.join(self.dirname, model.get_value(iter, 0))
filestat = os.stat(filename)
cell.set_property('text', time.ctime(filestat.st_mtime))
return
# player funcs
def on_finish(self, bus, message):
    """End-of-stream handler: pause, rewind to the start, reset the UI."""
    self.playbin.set_state(gst.STATE_PAUSED)
    self.is_playing = False
    # Jump back to position 0 so the next play starts from the top.
    self.playbin.seek_simple(gst.FORMAT_TIME, gst.SEEK_FLAG_FLUSH, 0)
    self.slider.set_value(0)
    play_icon = gtk.image_new_from_stock(gtk.STOCK_MEDIA_PLAY, gtk.ICON_SIZE_BUTTON)
    self.toggle_button.set_property("image", play_icon)
def on_destroy(self, *args):
    """Window-close handler: stop the pipeline and leave the GTK main loop."""
    self.playbin.set_state(gst.STATE_NULL)
    self.is_playing = False
    gtk.main_quit()
def on_slider_change(self, slider):
    """Seek the pipeline to the slider's position (slider value is seconds)."""
    target_secs = self.slider.get_value()
    seek_flags = gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_KEY_UNIT
    self.playbin.seek_simple(gst.FORMAT_TIME, seek_flags, target_secs * gst.SECOND)
def update_slider(self):
    # Periodic timeout callback: mirror the pipeline position onto the
    # slider.  Returning False removes the timeout source; True keeps it.
    if not self.is_playing:
        return False # cancel timeout
    try:
        # query_position/query_duration return (value, format) pairs in
        # nanoseconds when queried with FORMAT_TIME.
        self.nanosecs, format = self.playbin.query_position(gst.FORMAT_TIME)
        self.duration_nanosecs, format = self.playbin.query_duration(gst.FORMAT_TIME)
        # block seek handler so we don't seek when we set_value()
        self.slider.handler_block_by_func(self.on_slider_change)
        self.slider.set_range(0, float(self.duration_nanosecs) / gst.SECOND)
        self.slider.set_value(float(self.nanosecs) / gst.SECOND)
        self.slider.handler_unblock_by_func(self.on_slider_change)
    except gst.QueryError:
        # pipeline must not be ready and does not know position
        pass
    return True # continue calling every 30 milliseconds
def main():
    """Enter the GTK main loop (blocks until gtk.main_quit() is called)."""
    gtk.main()
# Script entry point: build the GUI, then run the event loop.
if __name__ == "__main__":
    GUI()
    main()
| gpl-2.0 |
aewhatley/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 130 | 50966 | # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
# Integer codes understood by the Cython SGD routines (plain_sgd/average_sgd).
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
                       "pa1": 4, "pa2": 5}
# Penalty-name -> integer code mapping for the Cython layer.
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
    """Base class for SGD classification and regression.

    Holds the hyper-parameters shared by classifier and regressor
    subclasses and the helpers that validate them and allocate the
    coefficient/intercept buffers.
    """
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
             l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
             verbose=0, epsilon=0.1, random_state=None,
             learning_rate="optimal", eta0=0.0, power_t=0.5,
             warm_start=False, average=False):
    """Store hyper-parameters verbatim and validate them once."""
    self.loss = loss
    self.penalty = penalty
    self.learning_rate = learning_rate
    self.epsilon = epsilon
    self.alpha = alpha
    self.C = C
    self.l1_ratio = l1_ratio
    self.fit_intercept = fit_intercept
    self.n_iter = n_iter
    self.shuffle = shuffle
    self.random_state = random_state
    self.verbose = verbose
    self.eta0 = eta0
    self.power_t = power_t
    self.warm_start = warm_start
    self.average = average
    # Validation reads the attributes assigned above, so it must run last.
    self._validate_params()
    self.coef_ = None
    if self.average > 0:
        # Averaged SGD tracks the running ("standard") parameters
        # separately from the averaged ones.
        self.standard_coef_ = None
        self.average_coef_ = None
    # iteration count for learning rate schedule
    # must not be int (e.g. if ``learning_rate=='optimal'``)
    self.t_ = None
def set_params(self, *args, **kwargs):
    """Set estimator parameters, then re-run hyper-parameter validation."""
    super(BaseSGD, self).set_params(*args, **kwargs)
    self._validate_params()
    return self
@abstractmethod
def fit(self, X, y):
    """Fit model.  Concrete subclasses must implement this."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
    """Map a schedule name to its integer code for the Cython layer."""
    code = LEARNING_RATE_TYPES.get(learning_rate)
    if code is None:
        raise ValueError("learning rate %s "
                         "is not supported. " % learning_rate)
    return code
def _get_penalty_type(self, penalty):
    """Map a penalty name (case-insensitive) to its integer code."""
    key = str(penalty).lower()
    if key not in PENALTY_TYPES:
        raise ValueError("Penalty %s is not supported. " % key)
    return PENALTY_TYPES[key]
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
                            intercept_init=None):
    """Allocate mem for parameters; initialize if provided.

    For n_classes > 2, ``coef_`` is (n_classes, n_features) and
    ``intercept_`` is (n_classes,); otherwise ``coef_`` is a flat
    (n_features,) vector and ``intercept_`` has length 1.  User-provided
    initial values are validated against these shapes.
    """
    if n_classes > 2:
        # allocate coef_ for multi-class
        if coef_init is not None:
            coef_init = np.asarray(coef_init, order="C")
            if coef_init.shape != (n_classes, n_features):
                raise ValueError("Provided ``coef_`` does not match dataset. ")
            self.coef_ = coef_init
        else:
            self.coef_ = np.zeros((n_classes, n_features),
                                  dtype=np.float64, order="C")
        # allocate intercept_ for multi-class
        if intercept_init is not None:
            intercept_init = np.asarray(intercept_init, order="C")
            if intercept_init.shape != (n_classes, ):
                raise ValueError("Provided intercept_init "
                                 "does not match dataset.")
            self.intercept_ = intercept_init
        else:
            self.intercept_ = np.zeros(n_classes, dtype=np.float64,
                                       order="C")
    else:
        # allocate coef_ for binary problem
        if coef_init is not None:
            coef_init = np.asarray(coef_init, dtype=np.float64,
                                   order="C")
            coef_init = coef_init.ravel()
            if coef_init.shape != (n_features,):
                raise ValueError("Provided coef_init does not "
                                 "match dataset.")
            self.coef_ = coef_init
        else:
            self.coef_ = np.zeros(n_features,
                                  dtype=np.float64,
                                  order="C")
        # allocate intercept_ for binary problem
        if intercept_init is not None:
            intercept_init = np.asarray(intercept_init, dtype=np.float64)
            # Accept both a 1-element array and a scalar for the intercept.
            if intercept_init.shape != (1,) and intercept_init.shape != ():
                raise ValueError("Provided intercept_init "
                                 "does not match dataset.")
            self.intercept_ = intercept_init.reshape(1,)
        else:
            self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
    # initialize average parameters
    if self.average > 0:
        # Averaged SGD: keep the running parameters under standard_* and
        # accumulate the averages in separate zeroed buffers.
        self.standard_coef_ = self.coef_
        self.standard_intercept_ = self.intercept_
        self.average_coef_ = np.zeros(self.coef_.shape,
                                      dtype=np.float64,
                                      order="C")
        self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
                                           dtype=np.float64,
                                           order="C")
def _make_dataset(X, y_i, sample_weight):
    """Create ``Dataset`` abstraction for sparse and dense inputs.

    This also returns the ``intercept_decay`` which is different
    for sparse datasets.
    """
    if sp.issparse(X):
        dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
        # Damp intercept updates on sparse data to avoid oscillation.
        intercept_decay = SPARSE_INTERCEPT_DECAY
    else:
        dataset = ArrayDataset(X, y_i, sample_weight)
        intercept_decay = 1.0
    return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
               pos_weight, neg_weight, sample_weight):
    """Fit a single binary classifier.

    The i'th class is considered the "positive" class.  Returns the
    updated (coef, intercept) pair for that one-vs-all sub-problem.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    y_i, coef, intercept, average_coef, average_intercept = \
        _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
    dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)
    # XXX should have random_state_!
    random_state = check_random_state(est.random_state)
    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(0, np.iinfo(np.int32).max)
    if not est.average:
        # Plain SGD: the Cython routine returns (coef, intercept) directly.
        return plain_sgd(coef, intercept, est.loss_function,
                         penalty_type, alpha, C, est.l1_ratio,
                         dataset, n_iter, int(est.fit_intercept),
                         int(est.verbose), int(est.shuffle), seed,
                         pos_weight, neg_weight,
                         learning_rate_type, est.eta0,
                         est.power_t, est.t_, intercept_decay)
    else:
        # Averaged SGD: also writes the averaged intercept back onto the
        # estimator (the averaged coef is updated in-place).
        standard_coef, standard_intercept, average_coef, \
            average_intercept = average_sgd(coef, intercept, average_coef,
                                            average_intercept,
                                            est.loss_function, penalty_type,
                                            alpha, C, est.l1_ratio, dataset,
                                            n_iter, int(est.fit_intercept),
                                            int(est.verbose), int(est.shuffle),
                                            seed, pos_weight, neg_weight,
                                            learning_rate_type, est.eta0,
                                            est.power_t, est.t_,
                                            intercept_decay,
                                            est.average)
        if len(est.classes_) == 2:
            est.average_intercept_[0] = average_intercept
        else:
            est.average_intercept_[i] = average_intercept
        return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
                                           LinearClassifierMixin)):
    """Abstract base class for SGD-trained linear classifiers."""

    # Registry of supported losses: name -> (loss class, default ctor args).
    loss_functions = {
        "hinge": (Hinge, 1.0),
        "squared_hinge": (SquaredHinge, 1.0),
        "perceptron": (Hinge, 0.0),
        "log": (Log, ),
        "modified_huber": (ModifiedHuber, ),
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
             fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
             epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
             learning_rate="optimal", eta0=0.0, power_t=0.5,
             class_weight=None, warm_start=False, average=False):
    """Delegate shared hyper-parameters to BaseSGD and store the
    classifier-specific ones (class_weight, n_jobs)."""
    super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
                                            alpha=alpha, l1_ratio=l1_ratio,
                                            fit_intercept=fit_intercept,
                                            n_iter=n_iter, shuffle=shuffle,
                                            verbose=verbose,
                                            epsilon=epsilon,
                                            random_state=random_state,
                                            learning_rate=learning_rate,
                                            eta0=eta0, power_t=power_t,
                                            warm_start=warm_start,
                                            average=average)
    self.class_weight = class_weight
    self.classes_ = None
    self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
                 loss, learning_rate, n_iter,
                 classes, sample_weight,
                 coef_init, intercept_init):
    """Shared driver for ``fit``/``partial_fit``: validate inputs,
    (re)allocate parameter buffers if needed and run one training round."""
    X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
    n_samples, n_features = X.shape
    self._validate_params()
    _check_partial_fit_first_call(self, classes)
    n_classes = self.classes_.shape[0]
    # Allocate datastructures from input arguments
    self._expanded_class_weight = compute_class_weight(self.class_weight,
                                                       self.classes_, y)
    sample_weight = self._validate_sample_weight(sample_weight, n_samples)
    if self.coef_ is None or coef_init is not None:
        self._allocate_parameter_mem(n_classes, n_features,
                                     coef_init, intercept_init)
    elif n_features != self.coef_.shape[-1]:
        raise ValueError("Number of features %d does not match previous data %d."
                         % (n_features, self.coef_.shape[-1]))
    self.loss_function = self._get_loss_function(loss)
    if self.t_ is None:
        # First call: start the learning-rate clock at 1.
        self.t_ = 1.0
    # delegate to concrete training procedure
    if n_classes > 2:
        self._fit_multiclass(X, y, alpha=alpha, C=C,
                             learning_rate=learning_rate,
                             sample_weight=sample_weight, n_iter=n_iter)
    elif n_classes == 2:
        self._fit_binary(X, y, alpha=alpha, C=C,
                         learning_rate=learning_rate,
                         sample_weight=sample_weight, n_iter=n_iter)
    else:
        raise ValueError("The number of class labels must be "
                         "greater than one.")
    return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
         intercept_init=None, sample_weight=None):
    """Full (re)fit: reset state unless warm-starting, then train for
    ``self.n_iter`` epochs via ``_partial_fit``."""
    if hasattr(self, "classes_"):
        self.classes_ = None
    X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
    n_samples, n_features = X.shape
    # labels can be encoded as float, int, or string literals
    # np.unique sorts in asc order; largest class id is positive class
    classes = np.unique(y)
    if self.warm_start and self.coef_ is not None:
        # Reuse the previous solution as the starting point.
        if coef_init is None:
            coef_init = self.coef_
        if intercept_init is None:
            intercept_init = self.intercept_
    else:
        self.coef_ = None
        self.intercept_ = None
    if self.average > 0:
        self.standard_coef_ = self.coef_
        self.standard_intercept_ = self.intercept_
        self.average_coef_ = None
        self.average_intercept_ = None
    # Clear iteration count for multiple call to fit.
    self.t_ = None
    self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
                      classes, sample_weight, coef_init, intercept_init)
    return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
                learning_rate, n_iter):
    """Fit a binary classifier on X and y. """
    # Class index 1 is the positive class; index 0 the negative one.
    coef, intercept = fit_binary(self, 1, X, y, alpha, C,
                                 learning_rate, n_iter,
                                 self._expanded_class_weight[1],
                                 self._expanded_class_weight[0],
                                 sample_weight)
    self.t_ += n_iter * X.shape[0]
    # need to be 2d
    if self.average > 0:
        # Expose the averaged parameters once enough samples were seen.
        if self.average <= self.t_ - 1:
            self.coef_ = self.average_coef_.reshape(1, -1)
            self.intercept_ = self.average_intercept_
        else:
            self.coef_ = self.standard_coef_.reshape(1, -1)
            self.standard_intercept_ = np.atleast_1d(intercept)
            self.intercept_ = self.standard_intercept_
    else:
        self.coef_ = coef.reshape(1, -1)
        # intercept is a float, need to convert it to an array of length 1
        self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
                    sample_weight, n_iter):
    """Fit a multi-class classifier by combining binary classifiers

    Each binary classifier predicts one class versus all others. This
    strategy is called OVA: One Versus All.
    """
    # Use joblib to fit OvA in parallel.
    result = Parallel(n_jobs=self.n_jobs, backend="threading",
                      verbose=self.verbose)(
        delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
                            n_iter, self._expanded_class_weight[i], 1.,
                            sample_weight)
        for i in range(len(self.classes_)))
    # Collect the per-class intercepts into self.intercept_.
    for i, (_, intercept) in enumerate(result):
        self.intercept_[i] = intercept
    self.t_ += n_iter * X.shape[0]
    if self.average > 0:
        if self.average <= self.t_ - 1.0:
            # Averaging window reached: expose the averaged parameters.
            self.coef_ = self.average_coef_
            self.intercept_ = self.average_intercept_
        else:
            self.coef_ = self.standard_coef_
            # BUG FIX: the original used ``np.atleast_1d(intercept)``,
            # i.e. only the *last* class's intercept left over from the
            # loop above.  All per-class intercepts were already written
            # into ``self.intercept_``, so snapshot that vector instead
            # (matches the later upstream scikit-learn implementation).
            self.standard_intercept_ = np.atleast_1d(self.intercept_)
            self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
    """Fit linear model with Stochastic Gradient Descent.

    Performs a single epoch (n_iter=1) over the given data without
    re-initialising the model, so it can be called on successive chunks.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Subset of the training data

    y : numpy array, shape (n_samples,)
        Subset of the target values

    classes : array, shape (n_classes,)
        Classes across all calls to partial_fit.
        Can be obtained by via `np.unique(y_all)`, where y_all is the
        target vector of the entire dataset.
        This argument is required for the first call to partial_fit
        and can be omitted in the subsequent calls.
        Note that y doesn't need to contain all labels in `classes`.

    sample_weight : array-like, shape (n_samples,), optional
        Weights applied to individual samples.
        If not provided, uniform weights are assumed.

    Returns
    -------
    self : returns an instance of self.
    """
    # 'balanced'/'auto' would require the class frequencies of the *full*
    # dataset, which partial_fit cannot see; reject them explicitly.
    if self.class_weight in ['balanced', 'auto']:
        raise ValueError("class_weight '{0}' is not supported for "
                         "partial_fit. In order to use 'balanced' weights, "
                         "use compute_class_weight('{0}', classes, y). "
                         "In place of y you can us a large enough sample "
                         "of the full training set target to properly "
                         "estimate the class frequency distributions. "
                         "Pass the resulting weights as the class_weight "
                         "parameter.".format(self.class_weight))
    return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
                             learning_rate=self.learning_rate, n_iter=1,
                             classes=classes, sample_weight=sample_weight,
                             coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
    """Fit linear model with Stochastic Gradient Descent.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data

    y : numpy array, shape (n_samples,)
        Target values

    coef_init : array, shape (n_classes, n_features)
        The initial coefficients to warm-start the optimization.

    intercept_init : array, shape (n_classes,)
        The initial intercept to warm-start the optimization.

    sample_weight : array-like, shape (n_samples,), optional
        Weights applied to individual samples.
        If not provided, uniform weights are assumed. These weights will
        be multiplied with class_weight (passed through the
        constructor) if class_weight is specified

    Returns
    -------
    self : returns an instance of self.
    """
    return self._fit(X, y, alpha=self.alpha, C=1.0,
                     loss=self.loss, learning_rate=self.learning_rate,
                     coef_init=coef_init, intercept_init=intercept_init,
                     sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
    """Linear classifiers (SVM, logistic regression, a.o.) with SGD training.

    This estimator implements regularized linear models with stochastic
    gradient descent (SGD) learning: the gradient of the loss is estimated
    each sample at a time and the model is updated along the way with a
    decreasing strength schedule (aka learning rate). SGD allows minibatch
    (online/out-of-core) learning, see the partial_fit method.
    For best results using the default learning rate schedule, the data should
    have zero mean and unit variance.

    This implementation works with data represented as dense or sparse arrays
    of floating point values for the features. The model it fits can be
    controlled with the loss parameter; by default, it fits a linear support
    vector machine (SVM).

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.

    Read more in the :ref:`User Guide <sgd>`.

    Parameters
    ----------
    loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
            'perceptron', or a regression loss: 'squared_loss', 'huber',\
            'epsilon_insensitive', or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'hinge', which gives a
        linear SVM.
        The 'log' loss gives logistic regression, a probabilistic classifier.
        'modified_huber' is another smooth loss that brings tolerance to
        outliers as well as probability estimates.
        'squared_hinge' is like hinge but is quadratically penalized.
        'perceptron' is the linear loss used by the perceptron algorithm.
        The other losses are designed for regression but can be useful in
        classification as well; see SGDRegressor for a description.

    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.

    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001

    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    n_iter : int, optional
        The number of passes over the training data (aka epochs). The number
        of iterations is set to 1 if using partial_fit.
        Defaults to 5.

    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    verbose : integer, optional
        The verbosity level

    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.

    n_jobs : integer, optional
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation. -1 means 'all CPUs'. Defaults
        to 1.

    learning_rate : string, optional
        The learning rate schedule:
        constant: eta = eta0
        optimal: eta = 1.0 / (t + t0) [default]
        invscaling: eta = eta0 / pow(t, power_t)
        where t0 is chosen by a heuristic proposed by Leon Bottou.

    eta0 : double
        The initial learning rate for the 'constant' or 'invscaling'
        schedules. The default value is 0.0 as eta0 is not used by the
        default schedule 'optimal'.

    power_t : double
        The exponent for inverse scaling learning rate [default 0.5].

    class_weight : dict, {class_label: weight} or "balanced" or None, optional
        Preset for the class_weight fit parameter.

        Weights associated with classes. If not given, all classes
        are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So average=10 will begin averaging after seeing 10 samples.

    Attributes
    ----------
    coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
            n_features)
        Weights assigned to the features.

    intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> Y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.SGDClassifier()
    >>> clf.fit(X, Y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
           eta0=0.0, fit_intercept=True, l1_ratio=0.15,
           learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
           penalty='l2', power_t=0.5, random_state=None, shuffle=True,
           verbose=0, warm_start=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    LinearSVC, LogisticRegression, Perceptron

    """
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
             fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
             epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
             learning_rate="optimal", eta0=0.0, power_t=0.5,
             class_weight=None, warm_start=False, average=False):
    """Forward all hyper-parameters unchanged to BaseSGDClassifier."""
    super(SGDClassifier, self).__init__(
        loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
        fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
        verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
        random_state=random_state, learning_rate=learning_rate, eta0=eta0,
        power_t=power_t, class_weight=class_weight, warm_start=warm_start,
        average=average)
def _check_proba(self):
    """Ensure the model is fitted and the loss supports probabilities."""
    check_is_fitted(self, "t_")
    # Only the log and modified-Huber losses yield probability estimates.
    if self.loss not in ("log", "modified_huber"):
        raise AttributeError("probability estimates are not available for"
                             " loss=%r" % self.loss)
@property
def predict_proba(self):
    """Probability estimates.

    This method is only available for log loss and modified Huber loss.

    Multiclass probability estimates are derived from binary (one-vs.-rest)
    estimates by simple normalization, as recommended by Zadrozny and
    Elkan.

    Binary probability estimates for loss="modified_huber" are given by
    (clip(decision_function(X), -1, 1) + 1) / 2.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)

    Returns
    -------
    array, shape (n_samples, n_classes)
        Returns the probability of the sample for each class in the model,
        where classes are ordered as they are in `self.classes_`.

    References
    ----------
    Zadrozny and Elkan, "Transforming classifier scores into multiclass
    probability estimates", SIGKDD'02,
    http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf

    The justification for the formula in the loss="modified_huber"
    case is in the appendix B in:
    http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
    """
    # Implemented as a property returning the bound method so that
    # accessing the attribute raises AttributeError for unsupported
    # losses (making hasattr() checks work as expected).
    self._check_proba()
    return self._predict_proba
def _predict_proba(self, X):
    """Compute the probability estimates described in ``predict_proba``."""
    if self.loss == "log":
        # Logistic loss: delegate to the shared logistic-regression path.
        return self._predict_proba_lr(X)
    elif self.loss == "modified_huber":
        binary = (len(self.classes_) == 2)
        scores = self.decision_function(X)
        if binary:
            # ``prob`` aliases column 1 of ``prob2``; writing through it
            # fills the positive-class column in place.
            prob2 = np.ones((scores.shape[0], 2))
            prob = prob2[:, 1]
        else:
            prob = scores
        # Zhang's mapping: (clip(score, -1, 1) + 1) / 2, done in place.
        np.clip(scores, -1, 1, prob)
        prob += 1.
        prob /= 2.
        if binary:
            prob2[:, 0] -= prob
            prob = prob2
        else:
            # the above might assign zero to all classes, which doesn't
            # normalize neatly; work around this to produce uniform
            # probabilities
            prob_sum = prob.sum(axis=1)
            all_zero = (prob_sum == 0)
            if np.any(all_zero):
                prob[all_zero, :] = 1
                prob_sum[all_zero] = len(self.classes_)
            # normalize
            prob /= prob_sum.reshape((prob.shape[0], -1))
        return prob
    else:
        raise NotImplementedError("predict_(log_)proba only supported when"
                                  " loss='log' or loss='modified_huber' "
                                  "(%r given)" % self.loss)
@property
def predict_log_proba(self):
    """Log of probability estimates.

    This method is only available for log loss and modified Huber loss.

    When loss="modified_huber", probability estimates may be hard zeros
    and ones, so taking the logarithm is not possible.

    See ``predict_proba`` for details.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Returns
    -------
    T : array-like, shape (n_samples, n_classes)
        Returns the log-probability of the sample for each class in the
        model, where classes are ordered as they are in
        `self.classes_`.
    """
    # Property returning the bound method, mirroring ``predict_proba``.
    self._check_proba()
    return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
    """Abstract base class for SGD-trained linear regressors."""

    # Registry of supported regression losses: name -> (class, default args).
    loss_functions = {
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
             l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
             verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
             learning_rate="invscaling", eta0=0.01, power_t=0.25,
             warm_start=False, average=False):
    """Forward regression defaults (invscaling schedule) to BaseSGD."""
    super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                           alpha=alpha, l1_ratio=l1_ratio,
                                           fit_intercept=fit_intercept,
                                           n_iter=n_iter, shuffle=shuffle,
                                           verbose=verbose,
                                           epsilon=epsilon,
                                           random_state=random_state,
                                           learning_rate=learning_rate,
                                           eta0=eta0, power_t=power_t,
                                           warm_start=warm_start,
                                           average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
                 n_iter, sample_weight,
                 coef_init, intercept_init):
    """Shared driver for ``fit``/``partial_fit``: validate inputs,
    allocate buffers if needed, then run ``_fit_regressor``."""
    X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
    y = astype(y, np.float64, copy=False)
    n_samples, n_features = X.shape
    self._validate_params()
    # Allocate datastructures from input arguments
    sample_weight = self._validate_sample_weight(sample_weight, n_samples)
    if self.coef_ is None:
        # Regression is a single-output problem: n_classes == 1.
        self._allocate_parameter_mem(1, n_features,
                                     coef_init, intercept_init)
    elif n_features != self.coef_.shape[-1]:
        raise ValueError("Number of features %d does not match previous data %d."
                         % (n_features, self.coef_.shape[-1]))
    if self.average > 0 and self.average_coef_ is None:
        # Lazily allocate the averaged-SGD buffers on first use.
        self.average_coef_ = np.zeros(n_features,
                                      dtype=np.float64,
                                      order="C")
        self.average_intercept_ = np.zeros(1,
                                           dtype=np.float64,
                                           order="C")
    self._fit_regressor(X, y, alpha, C, loss, learning_rate,
                        sample_weight, n_iter)
    return self
def partial_fit(self, X, y, sample_weight=None):
    """Fit linear model with Stochastic Gradient Descent.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Subset of training data

    y : numpy array of shape (n_samples,)
        Subset of target values

    sample_weight : array-like, shape (n_samples,), optional
        Weights applied to individual samples.
        If not provided, uniform weights are assumed.

    Returns
    -------
    self : returns an instance of self.
    """
    # One epoch (n_iter=1) per call; model state is kept across calls.
    return self._partial_fit(X, y, self.alpha, C=1.0,
                             loss=self.loss,
                             learning_rate=self.learning_rate, n_iter=1,
                             sample_weight=sample_weight,
                             coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
         intercept_init=None, sample_weight=None):
    """Full (re)fit: reset state unless warm-starting, then train for
    ``self.n_iter`` epochs via ``_partial_fit``."""
    if self.warm_start and self.coef_ is not None:
        # Reuse the previous solution as the starting point.
        if coef_init is None:
            coef_init = self.coef_
        if intercept_init is None:
            intercept_init = self.intercept_
    else:
        self.coef_ = None
        self.intercept_ = None
    if self.average > 0:
        self.standard_intercept_ = self.intercept_
        self.standard_coef_ = self.coef_
        self.average_coef_ = None
        self.average_intercept_ = None
    # Clear iteration count for multiple call to fit.
    self.t_ = None
    return self._partial_fit(X, y, alpha, C, loss, learning_rate,
                             self.n_iter, sample_weight,
                             coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
        sample_weight=None):
    """Fit linear model with Stochastic Gradient Descent.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data

    y : numpy array, shape (n_samples,)
        Target values

    coef_init : array, shape (n_features,)
        The initial coefficients to warm-start the optimization.

    intercept_init : array, shape (1,)
        The initial intercept to warm-start the optimization.

    sample_weight : array-like, shape (n_samples,), optional
        Weights applied to individual samples (1. for unweighted).

    Returns
    -------
    self : returns an instance of self.
    """
    return self._fit(X, y, alpha=self.alpha, C=1.0,
                     loss=self.loss, learning_rate=self.learning_rate,
                     coef_init=coef_init,
                     intercept_init=intercept_init,
                     sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
    """Predict using the linear model

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)

    Returns
    -------
    array, shape (n_samples,)
        Predicted target values per element in X.
    """
    # Deprecated public alias; ``predict`` is the supported entry point.
    return self._decision_function(X)
def _decision_function(self, X):
    """Compute the linear model's raw predictions for X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)

    Returns
    -------
    array, shape (n_samples,)
        Predicted target values per element in X.
    """
    check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
    X = check_array(X, accept_sparse='csr')
    # X @ coef.T + intercept, with sparse-aware dot and a dense result.
    predictions = safe_sparse_dot(X, self.coef_.T, dense_output=True)
    predictions = predictions + self.intercept_
    return predictions.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
    def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
                       sample_weight, n_iter):
        """Run the SGD regression training loop for ``n_iter`` epochs.

        Dispatches to the ``average_sgd`` or ``plain_sgd`` driver depending
        on ``self.average`` and stores the resulting coefficients and
        intercept on ``self``.
        """
        dataset, intercept_decay = _make_dataset(X, y, sample_weight)

        loss_function = self._get_loss_function(loss)
        penalty_type = self._get_penalty_type(self.penalty)
        learning_rate_type = self._get_learning_rate_type(learning_rate)

        # t_ is the running sample/iteration counter; start at 1 on a
        # fresh fit (it was reset to None by fit()).
        if self.t_ is None:
            self.t_ = 1.0

        random_state = check_random_state(self.random_state)
        # numpy mtrand expects a C long which is a signed 32 bit integer under
        # Windows
        seed = random_state.randint(0, np.iinfo(np.int32).max)

        if self.average > 0:
            # Averaged SGD: the driver maintains both the standard and the
            # averaged solution in parallel.
            self.standard_coef_, self.standard_intercept_, \
                self.average_coef_, self.average_intercept_ =\
                average_sgd(self.standard_coef_,
                            self.standard_intercept_[0],
                            self.average_coef_,
                            self.average_intercept_[0],
                            loss_function,
                            penalty_type,
                            alpha, C,
                            self.l1_ratio,
                            dataset,
                            n_iter,
                            int(self.fit_intercept),
                            int(self.verbose),
                            int(self.shuffle),
                            seed,
                            1.0, 1.0,
                            learning_rate_type,
                            self.eta0, self.power_t, self.t_,
                            intercept_decay, self.average)

            self.average_intercept_ = np.atleast_1d(self.average_intercept_)
            self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
            self.t_ += n_iter * X.shape[0]

            # Expose the averaged weights once the counter has passed the
            # ``average`` threshold, otherwise the standard ones.
            if self.average <= self.t_ - 1.0:
                self.coef_ = self.average_coef_
                self.intercept_ = self.average_intercept_
            else:
                self.coef_ = self.standard_coef_
                self.intercept_ = self.standard_intercept_

        else:
            # Plain (non-averaged) SGD.
            self.coef_, self.intercept_ = \
                plain_sgd(self.coef_,
                          self.intercept_[0],
                          loss_function,
                          penalty_type,
                          alpha, C,
                          self.l1_ratio,
                          dataset,
                          n_iter,
                          int(self.fit_intercept),
                          int(self.verbose),
                          int(self.shuffle),
                          seed,
                          1.0, 1.0,
                          learning_rate_type,
                          self.eta0, self.power_t, self.t_,
                          intercept_decay)

            self.t_ += n_iter * X.shape[0]
            self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
    """Linear model fitted by minimizing a regularized empirical loss with SGD

    SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a decreasing strength schedule (aka learning rate).

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.

    This implementation works with data represented as dense numpy arrays of
    floating point values for the features.

    Read more in the :ref:`User Guide <sgd>`.

    Parameters
    ----------
    loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
                or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'squared_loss' which refers
        to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
        focus less on getting outliers correct by switching from squared to
        linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
        errors less than epsilon and is linear past that; this is the loss
        function used in SVR. 'squared_epsilon_insensitive' is the same but
        becomes squared loss past a tolerance of epsilon.

    penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.

    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001

    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    n_iter : int, optional
        The number of passes over the training data (aka epochs). The number
        of iterations is set to 1 if using partial_fit.
        Defaults to 5.

    shuffle : bool, optional
        Whether or not the training data should be shuffled after each epoch.
        Defaults to True.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    verbose : integer, optional
        The verbosity level.

    epsilon : float
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.

    learning_rate : string, optional
        The learning rate:
        constant: eta = eta0
        optimal: eta = 1.0/(alpha * t)
        invscaling: eta = eta0 / pow(t, power_t) [default]

    eta0 : double, optional
        The initial learning rate [default 0.01].

    power_t : double, optional
        The exponent for inverse scaling learning rate [default 0.25].

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    average : bool or int, optional
        When set to True, computes the averaged SGD weights and stores the
        result in the ``coef_`` attribute. If set to an int greater than 1,
        averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
        samples.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        Weights assigned to the features.

    intercept_ : array, shape (1,)
        The intercept term.

    average_coef_ : array, shape (n_features,)
        Averaged weights assigned to the features.

    average_intercept_ : array, shape (1,)
        The averaged intercept term.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = linear_model.SGDRegressor()
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
           fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
           loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
           random_state=None, shuffle=True, verbose=0, warm_start=False)

    See also
    --------
    Ridge, ElasticNet, Lasso, SVR

    """
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False, average=False):
        # All hyper-parameters are forwarded unchanged to BaseSGDRegressor;
        # this subclass only mixes in _LearntSelectorMixin.
        super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
                                           alpha=alpha, l1_ratio=l1_ratio,
                                           fit_intercept=fit_intercept,
                                           n_iter=n_iter, shuffle=shuffle,
                                           verbose=verbose,
                                           epsilon=epsilon,
                                           random_state=random_state,
                                           learning_rate=learning_rate,
                                           eta0=eta0, power_t=power_t,
                                           warm_start=warm_start,
                                           average=average)
| bsd-3-clause |
PatrickOReilly/scikit-learn | examples/ensemble/plot_forest_importances.py | 168 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier

# Synthetic task: 10 features, of which only 3 carry class information.
X, y = make_classification(n_samples=1000, n_features=10, n_informative=3,
                           n_redundant=0, n_repeated=0, n_classes=2,
                           random_state=0, shuffle=False)

# Fit a forest of randomized trees and read off the impurity-based
# feature importances.
forest = ExtraTreesClassifier(n_estimators=250, random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_

# Inter-tree spread of each importance, used as the error bars below.
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
             axis=0)
indices = np.argsort(importances)[::-1]

# Report features from most to least important.
print("Feature ranking:")
for rank, idx in enumerate(indices):
    print("%d. feature %d (%f)" % (rank + 1, idx, importances[idx]))

# Bar chart of the ranked importances with variability whiskers.
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
        color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
| bsd-3-clause |
newemailjdm/scipy | scipy/stats/_multivariate.py | 35 | 69253 | #
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln
from scipy._lib._util import check_random_state
__all__ = ['multivariate_normal', 'dirichlet', 'wishart', 'invwishart']

# Frequently used logarithmic constants, precomputed once at import time.
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
def _process_parameters(dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be a scalar.")
# Check input sizes and return full arrays for mean and cov if necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." % dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
class _PSD(object):
    """
    Compute coordinated functions of a symmetric positive semidefinite matrix.

    One eigendecomposition (instead of three separate routines) yields the
    pseudoinverse, the log of the pseudo-determinant, and the numerical rank,
    all computed with a single shared notion of which eigenvalues count as
    negligibly small.  The results are designed to coordinate with
    scipy.linalg.pinvh() but not necessarily with np.linalg.det() or with
    np.linalg.matrix_rank().

    Parameters
    ----------
    M : array_like
        Symmetric positive semidefinite matrix (2-D).
    cond, rcond : float, optional
        Cutoff for small eigenvalues.
        Singular values smaller than rcond * largest_eigenvalue are
        considered zero.
        If None or -1, suitable machine precision is used.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower
        or upper triangle of M. (Default: lower)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Disabling may give a performance gain, but may result
        in problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.
    allow_singular : bool, optional
        Whether to allow a singular matrix. (Default: True)

    Notes
    -----
    The arguments are similar to those of scipy.linalg.pinvh().
    """

    def __init__(self, M, cond=None, rcond=None, lower=True,
                 check_finite=True, allow_singular=True):
        # eigh handles array conversion, the finiteness check and the
        # square-matrix assertion in a single call.
        eigvals, eigvecs = scipy.linalg.eigh(M, lower=lower,
                                             check_finite=check_finite)

        eps = _eigvalsh_to_eps(eigvals, cond, rcond)
        if np.min(eigvals) < -eps:
            raise ValueError('the input matrix must be positive semidefinite')
        significant = eigvals[eigvals > eps]
        if len(significant) < len(eigvals) and not allow_singular:
            raise np.linalg.LinAlgError('singular matrix')

        # Whitening factor: U @ U.T reproduces the pseudoinverse.
        inverted = _pinv_1d(eigvals, eps)
        whitener = np.multiply(eigvecs, np.sqrt(inverted))

        # Eagerly precomputed attributes.
        self.rank = len(significant)
        self.U = whitener
        self.log_pdet = np.sum(np.log(significant))

        # The pseudoinverse itself is built lazily by the `pinv` property.
        self._pinv = None

    @property
    def pinv(self):
        if self._pinv is None:
            self._pinv = np.dot(self.U, self.U.T)
        return self._pinv
_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
'_doc_callparams_note': _doc_callparams_note,
'_doc_random_state': _doc_random_state
}
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
'_doc_callparams_note': _doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multi_rv_generic(object):
    """
    Class which encapsulates common functionality between all multivariate
    distributions.
    """

    def __init__(self, seed=None):
        super(multi_rv_generic, self).__init__()
        self._random_state = check_random_state(seed)

    @property
    def random_state(self):
        """ Get or set the RandomState object for generating random variates.

        This can be either None or an existing RandomState object.

        If None (or np.random), use the RandomState singleton used by np.random.
        If already a RandomState instance, use it.
        If an int, use a new RandomState instance seeded with seed.
        """
        return self._random_state

    @random_state.setter
    def random_state(self, seed):
        self._random_state = check_random_state(seed)

    def _get_random_state(self, random_state):
        # A per-call override takes precedence over the stored state.
        if random_state is None:
            return self._random_state
        return check_random_state(random_state)
class multi_rv_frozen(object):
    """
    Class which encapsulates common functionality between all frozen
    multivariate distributions.
    """
    @property
    def random_state(self):
        # The frozen object holds no RNG of its own; it proxies the state of
        # the private generator instance stored in self._dist.
        return self._dist._random_state

    @random_state.setter
    def random_state(self, seed):
        self._dist._random_state = check_random_state(seed)
class multivariate_normal_gen(multi_rv_generic):
    r"""
    A multivariate normal random variable.

    The `mean` keyword specifies the mean. The `cov` keyword specifies the
    covariance matrix.

    Methods
    -------
    ``pdf(x, mean=None, cov=1, allow_singular=False)``
        Probability density function.
    ``logpdf(x, mean=None, cov=1, allow_singular=False)``
        Log of the probability density function.
    ``rvs(mean=None, cov=1, size=1, random_state=None)``
        Draw random samples from a multivariate normal distribution.
    ``entropy()``
        Compute the differential entropy of the multivariate normal.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_doc_default_callparams)s
    %(_doc_random_state)s

    Alternatively, the object may be called (as a function) to fix the mean
    and covariance parameters, returning a "frozen" multivariate normal
    random variable:

    rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
        - Frozen object with the same methods but holding the given
          mean and covariance fixed.

    Notes
    -----
    %(_doc_callparams_note)s

    The covariance matrix `cov` must be a (symmetric) positive
    semi-definite matrix. The determinant and inverse of `cov` are computed
    as the pseudo-determinant and pseudo-inverse, respectively, so
    that `cov` does not need to have full rank.

    The probability density function for `multivariate_normal` is

    .. math::

        f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
               \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),

    where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
    and :math:`k` is the dimension of the space where :math:`x` takes values.

    .. versionadded:: 0.14.0

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import multivariate_normal

    >>> x = np.linspace(0, 5, 10, endpoint=False)
    >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
    array([ 0.00108914,  0.01033349,  0.05946514,  0.20755375,  0.43939129,
            0.56418958,  0.43939129,  0.20755375,  0.05946514,  0.01033349])
    >>> fig1 = plt.figure()
    >>> ax = fig1.add_subplot(111)
    >>> ax.plot(x, y)

    The input quantiles can be any shape of array, as long as the last
    axis labels the components.  This allows us for instance to
    display the frozen pdf for a non-isotropic random variable in 2D as
    follows:

    >>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
    >>> pos = np.dstack((x, y))
    >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
    >>> fig2 = plt.figure()
    >>> ax2 = fig2.add_subplot(111)
    >>> ax2.contourf(x, y, rv.pdf(pos))

    """

    def __init__(self, seed=None):
        super(multivariate_normal_gen, self).__init__(seed)
        # Substitute the shared parameter/RNG docstring fragments.
        self.__doc__ = doccer.docformat(self.__doc__, docdict_params)

    def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
        """
        Create a frozen multivariate normal distribution.

        See `multivariate_normal_frozen` for more information.

        """
        return multivariate_normal_frozen(mean, cov,
                                          allow_singular=allow_singular,
                                          seed=seed)

    def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function
        mean : ndarray
            Mean of the distribution
        prec_U : ndarray
            A decomposition such that np.dot(prec_U, prec_U.T)
            is the precision matrix, i.e. inverse of the covariance matrix.
        log_det_cov : float
            Logarithm of the determinant of the covariance matrix
        rank : int
            Rank of the covariance matrix.

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.

        """
        dev = x - mean
        # Squared Mahalanobis distance, computed through the whitening
        # factor prec_U (prec_U @ prec_U.T is the precision matrix).
        maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
        return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)

    def logpdf(self, x, mean, cov, allow_singular=False):
        """
        Log of the multivariate normal probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            Log of the probability density function evaluated at `x`

        Notes
        -----
        %(_doc_callparams_note)s

        """
        dim, mean, cov = _process_parameters(None, mean, cov)
        x = _process_quantiles(x, dim)
        # _PSD gives pseudo-inverse factor, pseudo-log-determinant and rank.
        psd = _PSD(cov, allow_singular=allow_singular)
        out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
        return _squeeze_output(out)

    def pdf(self, x, mean, cov, allow_singular=False):
        """
        Multivariate normal probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at `x`

        Notes
        -----
        %(_doc_callparams_note)s

        """
        dim, mean, cov = _process_parameters(None, mean, cov)
        x = _process_quantiles(x, dim)
        psd = _PSD(cov, allow_singular=allow_singular)
        # pdf = exp(logpdf); computed in log space for numerical stability.
        out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
        return _squeeze_output(out)

    def rvs(self, mean=None, cov=1, size=1, random_state=None):
        """
        Draw random samples from a multivariate normal distribution.

        Parameters
        ----------
        %(_doc_default_callparams)s
        size : integer, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `N`), where `N` is the
            dimension of the random variable.

        Notes
        -----
        %(_doc_callparams_note)s

        """
        dim, mean, cov = _process_parameters(None, mean, cov)
        # Per-call random_state overrides the stored one (see base class).
        random_state = self._get_random_state(random_state)
        out = random_state.multivariate_normal(mean, cov, size)
        return _squeeze_output(out)

    def entropy(self, mean=None, cov=1):
        """
        Compute the differential entropy of the multivariate normal.

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the multivariate normal distribution

        Notes
        -----
        %(_doc_callparams_note)s

        """
        dim, mean, cov = _process_parameters(None, mean, cov)
        # h = 0.5 * log det(2*pi*e*Sigma); slogdet avoids overflow in det.
        _, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
        return 0.5 * logdet
# Module-level singleton exposing the multivariate normal API.
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
    def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
        """
        Create a frozen multivariate normal distribution.

        Parameters
        ----------
        mean : array_like, optional
            Mean of the distribution (default zero)
        cov : array_like, optional
            Covariance matrix of the distribution (default one)
        allow_singular : bool, optional
            If this flag is True then tolerate a singular
            covariance matrix (default False).
        seed : None or int or np.random.RandomState instance, optional
            This parameter defines the RandomState object to use for drawing
            random variates.
            If None (or np.random), the global np.random state is used.
            If integer, it is used to seed the local RandomState instance
            Default is None.

        Examples
        --------
        When called with the default parameters, this will create a 1D random
        variable with mean 0 and covariance 1:

        >>> from scipy.stats import multivariate_normal
        >>> r = multivariate_normal()
        >>> r.mean
        array([ 0.])
        >>> r.cov
        array([[1.]])

        """
        # Validate parameters and factor the covariance once, up front.
        self.dim, self.mean, self.cov = _process_parameters(None, mean, cov)
        self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
        self._dist = multivariate_normal_gen(seed)

    def logpdf(self, x):
        quantiles = _process_quantiles(x, self.dim)
        info = self.cov_info
        out = self._dist._logpdf(quantiles, self.mean, info.U,
                                 info.log_pdet, info.rank)
        return _squeeze_output(out)

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.mean, self.cov, size, random_state)

    def entropy(self):
        """
        Computes the differential entropy of the multivariate normal.

        Returns
        -------
        h : scalar
            Entropy of the multivariate normal distribution

        """
        # 0.5 * (rank * (log(2*pi) + 1) + log pseudo-determinant), using the
        # factorization cached at construction time.
        return 0.5 * (self.cov_info.rank * (_LOG_2PI + 1) +
                      self.cov_info.log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings.
# The frozen variants drop the call-parameter sections (docdict_noparams),
# the generator methods get theirs filled in (docdict_params).
for name in ['logpdf', 'pdf', 'rvs']:
    method = multivariate_normal_gen.__dict__[name]
    method_frozen = multivariate_normal_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(method.__doc__, docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
    """Validate quantiles `x` against concentration vector `alpha`.

    Accepts `x` with either the same number of entries as `alpha` or one
    entry fewer; in the latter case the omitted component is reconstructed
    from the simplex constraint.  Returns the (possibly extended) array.
    """
    x = np.asarray(x)

    if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
        raise ValueError("Vector 'x' must have either the same number "
                         "of entries as, or one entry fewer than, "
                         "parameter vector 'a', but alpha.shape = %s "
                         "and x.shape = %s." % (alpha.shape, x.shape))

    if x.shape[0] != alpha.shape[0]:
        # One entry short: the last component is implied by sum(x) == 1.
        xk = np.array([1 - np.sum(x, 0)])
        if xk.ndim == 1:
            x = np.append(x, xk)
        elif xk.ndim == 2:
            x = np.vstack((x, xk))
        else:
            raise ValueError("The input must be one dimensional or a two "
                             "dimensional matrix containing the entries.")

    # Entries must lie strictly inside (0, 1].
    if np.min(x) <= 0:
        raise ValueError("Each entry in 'x' must be greater than zero.")

    if np.max(x) > 1:
        raise ValueError("Each entry in 'x' must be smaller or equal one.")

    # Columns must sum to 1 up to a small absolute tolerance.
    if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
        raise ValueError("The input vector 'x' must lie within the normal "
                         "simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))

    return x
def _lnB(alpha):
r"""
Internal helper function to compute the log of the useful quotient
.. math::
B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
    r"""
    A Dirichlet random variable.

    The `alpha` keyword specifies the concentration parameters of the
    distribution.

    .. versionadded:: 0.15.0

    Methods
    -------
    ``pdf(x, alpha)``
        Probability density function.
    ``logpdf(x, alpha)``
        Log of the probability density function.
    ``rvs(alpha, size=1, random_state=None)``
        Draw random samples from a Dirichlet distribution.
    ``mean(alpha)``
        The mean of the Dirichlet distribution
    ``var(alpha)``
        The variance of the Dirichlet distribution
    ``entropy(alpha)``
        Compute the differential entropy of the multivariate normal.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_dirichlet_doc_default_callparams)s
    %(_doc_random_state)s

    Alternatively, the object may be called (as a function) to fix
    concentration parameters, returning a "frozen" Dirichlet
    random variable:

    rv = dirichlet(alpha)
        - Frozen object with the same methods but holding the given
          concentration parameters fixed.

    Notes
    -----
    Each :math:`\alpha` entry must be positive. The distribution has only
    support on the simplex defined by

    .. math::

        \sum_{i=1}^{K} x_i \le 1

    The probability density function for `dirichlet` is

    .. math::

        f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}

    where

    .. math::

        \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
                                             {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}

    and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
    concentration parameters and :math:`K` is the dimension of the space
    where :math:`x` takes values.

    Note that the dirichlet interface is somewhat inconsistent.
    The array returned by the rvs function is transposed
    with respect to the format expected by the pdf and logpdf.

    """

    def __init__(self, seed=None):
        super(dirichlet_gen, self).__init__(seed)
        # Substitute the shared parameter/RNG docstring fragments.
        self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)

    def __call__(self, alpha, seed=None):
        # Calling the generator freezes the concentration parameters.
        return dirichlet_frozen(alpha, seed=seed)

    def _logpdf(self, x, alpha):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function
        %(_dirichlet_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.

        """
        # log pdf = -log B(alpha) + sum_i (alpha_i - 1) * log x_i
        lnB = _lnB(alpha)
        return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)

    def logpdf(self, x, alpha):
        """
        Log of the Dirichlet probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            Log of the probability density function evaluated at `x`.

        """
        alpha = _dirichlet_check_parameters(alpha)
        x = _dirichlet_check_input(alpha, x)

        out = self._logpdf(x, alpha)
        return _squeeze_output(out)

    def pdf(self, x, alpha):
        """
        The Dirichlet probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            The probability density function evaluated at `x`.

        """
        alpha = _dirichlet_check_parameters(alpha)
        x = _dirichlet_check_input(alpha, x)

        # Computed in log space for numerical stability.
        out = np.exp(self._logpdf(x, alpha))
        return _squeeze_output(out)

    def mean(self, alpha):
        """
        Compute the mean of the dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        mu : scalar
            Mean of the Dirichlet distribution

        """
        alpha = _dirichlet_check_parameters(alpha)

        # E[x_i] = alpha_i / alpha_0
        out = alpha / (np.sum(alpha))
        return _squeeze_output(out)

    def var(self, alpha):
        """
        Compute the variance of the dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        v : scalar
            Variance of the Dirichlet distribution

        """
        alpha = _dirichlet_check_parameters(alpha)

        # Var[x_i] = alpha_i * (alpha_0 - alpha_i) / (alpha_0^2 (alpha_0 + 1))
        alpha0 = np.sum(alpha)
        out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
        return out

    def entropy(self, alpha):
        """
        Compute the differential entropy of the dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the Dirichlet distribution

        """
        alpha = _dirichlet_check_parameters(alpha)

        # Standard closed form using the digamma function psi.
        alpha0 = np.sum(alpha)
        lnB = _lnB(alpha)
        K = alpha.shape[0]

        out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
            (alpha - 1) * scipy.special.psi(alpha))
        return _squeeze_output(out)

    def rvs(self, alpha, size=1, random_state=None):
        """
        Draw random samples from a Dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s
        size : int, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `N`), where `N` is the
            dimension of the random variable.

        """
        alpha = _dirichlet_check_parameters(alpha)
        # Per-call random_state overrides the stored one (see base class).
        random_state = self._get_random_state(random_state)
        return random_state.dirichlet(alpha, size=size)
# Module-level instance exposed as the public API: ``dirichlet.pdf(x, alpha)``
# etc.; calling ``dirichlet(alpha)`` returns a frozen distribution.
dirichlet = dirichlet_gen()
class dirichlet_frozen(multi_rv_frozen):
    """Frozen Dirichlet distribution: `alpha` fixed at construction time."""

    def __init__(self, alpha, seed=None):
        # Validate once; every method below reuses the checked parameters.
        self.alpha = _dirichlet_check_parameters(alpha)
        self._dist = dirichlet_gen(seed)

    def logpdf(self, x):
        return self._dist.logpdf(x, self.alpha)

    def pdf(self, x):
        return self._dist.pdf(x, self.alpha)

    def mean(self):
        return self._dist.mean(self.alpha)

    def var(self):
        return self._dist.var(self.alpha)

    def entropy(self):
        return self._dist.entropy(self.alpha)

    def rvs(self, size=1, random_state=None):
        return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# dirichlet_gen and fill in default strings in class docstrings.
# The frozen methods take no shape parameters, so they get the "noparams"
# substitution table; the generator methods get full parameter docs.
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
    method = dirichlet_gen.__dict__[name]
    method_frozen = dirichlet_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, dirichlet_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
    r"""
    A Wishart random variable.

    The `df` keyword specifies the degrees of freedom. The `scale` keyword
    specifies the scale matrix, which must be symmetric and positive definite.
    In this context, the scale matrix is often interpreted in terms of a
    multivariate normal precision matrix (the inverse of the covariance
    matrix).

    Methods
    -------
    ``pdf(x, df, scale)``
        Probability density function.
    ``logpdf(x, df, scale)``
        Log of the probability density function.
    ``rvs(df, scale, size=1, random_state=None)``
        Draw random samples from a Wishart distribution.
    ``entropy()``
        Compute the differential entropy of the Wishart distribution.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_doc_default_callparams)s
    %(_doc_random_state)s

    Alternatively, the object may be called (as a function) to fix the degrees
    of freedom and scale parameters, returning a "frozen" Wishart random
    variable:

    rv = wishart(df=1, scale=1)
        - Frozen object with the same methods but holding the given
          degrees of freedom and scale fixed.

    See Also
    --------
    invwishart, chi2

    Notes
    -----
    %(_doc_callparams_note)s

    The scale matrix `scale` must be a symmetric positive definite
    matrix. Singular matrices, including the symmetric positive semi-definite
    case, are not supported.

    The Wishart distribution is often denoted

    .. math::

        W_p(\nu, \Sigma)

    where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
    :math:`p \times p` scale matrix.

    The probability density function for `wishart` has support over positive
    definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
    its PDF is given by:

    .. math::

        f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
               |\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
               \exp\left( -tr(\Sigma^{-1} S) / 2 \right)

    If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
    :math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).

    If the scale matrix is 1-dimensional and equal to one, then the Wishart
    distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
    distribution.

    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
           Wiley, 1983.
    .. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
           Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import wishart, chi2
    >>> x = np.linspace(1e-5, 8, 100)
    >>> w = wishart.pdf(x, df=3, scale=1); w[:5]
    array([ 0.00126156,  0.10892176,  0.14793434,  0.17400548,  0.1929669 ])
    >>> c = chi2.pdf(x, 3); c[:5]
    array([ 0.00126156,  0.10892176,  0.14793434,  0.17400548,  0.1929669 ])
    >>> plt.plot(x, w)

    The input quantiles can be any shape of array, as long as the last
    axis labels the components.
    """

    def __init__(self, seed=None):
        super(wishart_gen, self).__init__(seed)
        self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)

    def __call__(self, df=None, scale=None, seed=None):
        """
        Create a frozen Wishart distribution.

        See `wishart_frozen` for more information.
        """
        return wishart_frozen(df, scale, seed)

    def _process_parameters(self, df, scale):
        """Validate df/scale; return (dim, df, scale) with scale 2-D square."""
        if scale is None:
            scale = 1.0
        scale = np.asarray(scale, dtype=float)

        if scale.ndim == 0:
            scale = scale[np.newaxis, np.newaxis]
        elif scale.ndim == 1:
            scale = np.diag(scale)
        elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
            # BUG FIX: the message previously printed "scale.scale = ...".
            raise ValueError("Array 'scale' must be square if it is two"
                             " dimensional, but scale.shape = %s."
                             % str(scale.shape))
        elif scale.ndim > 2:
            raise ValueError("Array 'scale' must be at most two-dimensional,"
                             " but scale.ndim = %d" % scale.ndim)

        dim = scale.shape[0]

        if df is None:
            df = dim
        elif not np.isscalar(df):
            raise ValueError("Degrees of freedom must be a scalar.")
        elif df < dim:
            raise ValueError("Degrees of freedom cannot be less than dimension"
                             " of scale matrix, but df = %d" % df)

        return dim, df, scale

    def _process_quantiles(self, x, dim):
        """
        Adjust quantiles array so that last axis labels the components of
        each data point.
        """
        x = np.asarray(x, dtype=float)

        if x.ndim == 0:
            # Scalar: interpret as a multiple of the identity matrix.
            x = x * np.eye(dim)[:, :, np.newaxis]
        if x.ndim == 1:
            if dim == 1:
                x = x[np.newaxis, np.newaxis, :]
            else:
                # Vector: interpret as the diagonal of a single matrix.
                x = np.diag(x)[:, :, np.newaxis]
        elif x.ndim == 2:
            if not x.shape[0] == x.shape[1]:
                raise ValueError("Quantiles must be square if they are two"
                                 " dimensional, but x.shape = %s."
                                 % str(x.shape))
            x = x[:, :, np.newaxis]
        elif x.ndim == 3:
            if not x.shape[0] == x.shape[1]:
                raise ValueError("Quantiles must be square in the first two"
                                 " dimensions if they are three dimensional"
                                 ", but x.shape = %s." % str(x.shape))
        elif x.ndim > 3:
            # BUG FIX: the two string pieces used to concatenate to
            # "multiplecomponents" (missing space).
            raise ValueError("Quantiles must be at most two-dimensional with"
                             " an additional dimension for multiple"
                             " components, but x.ndim = %d" % x.ndim)

        # Now we have 3-dim array; should have shape [dim, dim, *]
        if not x.shape[0:2] == (dim, dim):
            raise ValueError('Quantiles have incompatible dimensions: should'
                             ' be %s, got %s.' % ((dim, dim), x.shape[0:2]))

        return x

    def _process_size(self, size):
        """Normalize `size` to (total count n, output shape tuple)."""
        size = np.asarray(size)

        if size.ndim == 0:
            size = size[np.newaxis]
        elif size.ndim > 1:
            # BUG FIX: the message used to label the size *tuple* as
            # "size.ndim", which was misleading.
            raise ValueError('Size must be an integer or tuple of integers;'
                             ' thus must have dimension <= 1.'
                             ' Got size = %s' % str(tuple(size)))

        n = size.prod()
        shape = tuple(size)

        return n, shape

    def _logpdf(self, x, dim, df, scale, log_det_scale, C):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        scale : ndarray
            Scale matrix
        log_det_scale : float
            Logarithm of the determinant of the scale matrix
        C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        # log determinant of x
        # Note: x has components along the last axis, so that x.T has
        # components along the 0-th axis. Then since det(A) = det(A'), this
        # gives us a 1-dim vector of determinants
        # Retrieve tr(scale^{-1} x)
        log_det_x = np.zeros(x.shape[-1])
        scale_inv_x = np.zeros(x.shape)
        tr_scale_inv_x = np.zeros(x.shape[-1])
        for i in range(x.shape[-1]):
            _, log_det_x[i] = self._cholesky_logdet(x[:, :, i])
            # Solve scale @ y = x[:,:,i] using the cached Cholesky factor.
            scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i])
            tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace()

        # Log PDF
        out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
               (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
                multigammaln(0.5*df, dim)))

        return out

    def logpdf(self, x, df, scale):
        """
        Log of the Wishart probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            Log of the probability density function evaluated at `x`

        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, df, scale = self._process_parameters(df, scale)
        x = self._process_quantiles(x, dim)

        # Cholesky decomposition of scale, get log(det(scale))
        C, log_det_scale = self._cholesky_logdet(scale)

        out = self._logpdf(x, dim, df, scale, log_det_scale, C)
        return _squeeze_output(out)

    def pdf(self, x, df, scale):
        """
        Wishart probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at `x`

        Notes
        -----
        %(_doc_callparams_note)s
        """
        return np.exp(self.logpdf(x, df, scale))

    def _mean(self, dim, df, scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mean' instead.
        """
        # E[S] = df * scale for S ~ W(df, scale)
        return df * scale

    def mean(self, df, scale):
        """
        Mean of the Wishart distribution

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        mean : float
            The mean of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mean(dim, df, scale)
        return _squeeze_output(out)

    def _mode(self, dim, df, scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mode' instead.
        """
        # The mode exists only when df >= dim + 1.
        if df >= dim + 1:
            out = (df-dim-1) * scale
        else:
            out = None
        return out

    def mode(self, df, scale):
        """
        Mode of the Wishart distribution

        Only valid if the degrees of freedom are greater than the dimension of
        the scale matrix.

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        mode : float or None
            The Mode of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mode(dim, df, scale)
        return _squeeze_output(out) if out is not None else out

    def _var(self, dim, df, scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'var' instead.
        """
        # Var(S_ij) = df * (scale_ij^2 + scale_ii * scale_jj)
        var = scale**2
        diag = scale.diagonal()  # 1 x dim array
        var += np.outer(diag, diag)
        var *= df
        return var

    def var(self, df, scale):
        """
        Variance of the Wishart distribution

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        var : float
            The variance of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._var(dim, df, scale)
        return _squeeze_output(out)

    def _standard_rvs(self, n, shape, dim, df, random_state):
        """
        Parameters
        ----------
        n : integer
            Number of variates to generate
        shape : iterable
            Shape of the variates to generate
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        random_state : np.random.RandomState instance
            RandomState used for drawing the random variates.

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'rvs' instead.
        """
        # Random normal variates for off-diagonal elements
        n_tril = dim * (dim-1) // 2
        covariances = random_state.normal(
            size=n*n_tril).reshape(shape+(n_tril,))

        # Random chi-square variates for diagonal elements
        variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
                           for i in range(dim)]].reshape((dim,) +
                                                         shape[::-1]).T

        # Create the A matri(ces) - lower triangular
        A = np.zeros(shape + (dim, dim))

        # Input the covariances
        size_idx = tuple([slice(None, None, None)]*len(shape))
        tril_idx = np.tril_indices(dim, k=-1)
        A[size_idx + tril_idx] = covariances

        # Input the variances
        diag_idx = np.diag_indices(dim)
        A[size_idx + diag_idx] = variances

        return A

    def _rvs(self, n, shape, dim, df, C, random_state):
        """
        Parameters
        ----------
        n : integer
            Number of variates to generate
        shape : iterable
            Shape of the variates to generate
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
        %(_doc_random_state)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'rvs' instead.
        """
        random_state = self._get_random_state(random_state)
        # Calculate the matrices A, which are actually lower triangular
        # Cholesky factorizations of a matrix B such that B ~ W(df, I)
        A = self._standard_rvs(n, shape, dim, df, random_state)

        # Calculate SA = C A A' C', where SA ~ W(df, scale)
        # Note: this is the product of a (lower) (lower) (lower)' (lower)'
        #       or, denoting B = AA', it is C B C' where C is the lower
        #       triangular Cholesky factorization of the scale matrix.
        #       this appears to conflict with the instructions in [1]_, which
        #       suggest that it should be D' B D where D is the lower
        #       triangular factorization of the scale matrix. However, it is
        #       meant to refer to the Bartlett (1933) representation of a
        #       Wishart random variate as L A A' L' where L is lower triangular
        #       so it appears that understanding D' to be upper triangular
        #       is either a typo in or misreading of [1]_.
        for index in np.ndindex(shape):
            CA = np.dot(C, A[index])
            A[index] = np.dot(CA, CA.T)

        return A

    def rvs(self, df, scale, size=1, random_state=None):
        """
        Draw random samples from a Wishart distribution.

        Parameters
        ----------
        %(_doc_default_callparams)s
        size : integer or iterable of integers, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s

        Returns
        -------
        rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
            the dimension of the scale matrix.

        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, shape = self._process_size(size)
        dim, df, scale = self._process_parameters(df, scale)

        # Cholesky decomposition of scale
        C = scipy.linalg.cholesky(scale, lower=True)

        out = self._rvs(n, shape, dim, df, C, random_state)

        return _squeeze_output(out)

    def _entropy(self, dim, df, log_det_scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        log_det_scale : float
            Logarithm of the determinant of the scale matrix

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'entropy' instead.
        """
        return (
            0.5 * (dim+1) * log_det_scale +
            0.5 * dim * (dim+1) * _LOG_2 +
            multigammaln(0.5*df, dim) -
            0.5 * (df - dim - 1) * np.sum(
                [psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
            ) +
            0.5 * df * dim
        )

    def entropy(self, df, scale):
        """
        Compute the differential entropy of the Wishart.

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the Wishart distribution

        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, df, scale = self._process_parameters(df, scale)
        _, log_det_scale = self._cholesky_logdet(scale)
        return self._entropy(dim, df, log_det_scale)

    def _cholesky_logdet(self, scale):
        """
        Compute Cholesky decomposition and determine (log(det(scale)).

        Parameters
        ----------
        scale : ndarray
            Scale matrix.

        Returns
        -------
        c_decomp : ndarray
            The Cholesky decomposition of `scale`.
        logdet : scalar
            The log of the determinant of `scale`.

        Notes
        -----
        This computation of ``logdet`` is equivalent to
        ``np.linalg.slogdet(scale)``. It is ~2x faster though.
        """
        c_decomp = scipy.linalg.cholesky(scale, lower=True)
        # det(scale) = det(C)^2 = (prod diag(C))^2, so the log is twice the
        # sum of the log-diagonal of the Cholesky factor.
        logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
        return c_decomp, logdet
# Module-level instance exposed as the public API; calling ``wishart(df,
# scale)`` returns a frozen distribution.
wishart = wishart_gen()
class wishart_frozen(multi_rv_frozen):
    """A Wishart distribution with degrees of freedom and scale fixed.

    Parameters
    ----------
    df : array_like
        Degrees of freedom of the distribution
    scale : array_like
        Scale matrix of the distribution
    seed : None or int or np.random.RandomState instance, optional
        This parameter defines the RandomState object to use for drawing
        random variates.
        If None (or np.random), the global np.random state is used.
        If integer, it is used to seed the local RandomState instance
        Default is None.
    """

    def __init__(self, df, scale, seed=None):
        # Validate parameters once, then cache the Cholesky factor and the
        # log-determinant of the scale matrix for reuse by every method.
        dist = wishart_gen(seed)
        self._dist = dist
        self.dim, self.df, self.scale = dist._process_parameters(df, scale)
        self.C, self.log_det_scale = dist._cholesky_logdet(self.scale)

    def logpdf(self, x):
        quantiles = self._dist._process_quantiles(x, self.dim)
        result = self._dist._logpdf(quantiles, self.dim, self.df, self.scale,
                                    self.log_det_scale, self.C)
        return _squeeze_output(result)

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def mean(self):
        return _squeeze_output(
            self._dist._mean(self.dim, self.df, self.scale))

    def mode(self):
        result = self._dist._mode(self.dim, self.df, self.scale)
        return None if result is None else _squeeze_output(result)

    def var(self):
        return _squeeze_output(
            self._dist._var(self.dim, self.df, self.scale))

    def rvs(self, size=1, random_state=None):
        n, shape = self._dist._process_size(size)
        draws = self._dist._rvs(n, shape, self.dim, self.df,
                                self.C, random_state)
        return _squeeze_output(draws)

    def entropy(self):
        return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings.
# Frozen methods take no shape parameters, so they use the "noparams"
# substitution table; the generator methods get the full parameter docs.
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
    method = wishart_gen.__dict__[name]
    method_frozen = wishart_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, wishart_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
class invwishart_gen(wishart_gen):
    r"""
    An inverse Wishart random variable.

    The `df` keyword specifies the degrees of freedom. The `scale` keyword
    specifies the scale matrix, which must be symmetric and positive definite.
    In this context, the scale matrix is often interpreted in terms of a
    multivariate normal covariance matrix.

    Methods
    -------
    ``pdf(x, df, scale)``
        Probability density function.
    ``logpdf(x, df, scale)``
        Log of the probability density function.
    ``rvs(df, scale, size=1, random_state=None)``
        Draw random samples from an inverse Wishart distribution.

    Parameters
    ----------
    x : array_like
        Quantiles, with the last axis of `x` denoting the components.
    %(_doc_default_callparams)s
    %(_doc_random_state)s

    Alternatively, the object may be called (as a function) to fix the degrees
    of freedom and scale parameters, returning a "frozen" inverse Wishart
    random variable:

    rv = invwishart(df=1, scale=1)
        - Frozen object with the same methods but holding the given
          degrees of freedom and scale fixed.

    See Also
    --------
    wishart

    Notes
    -----
    %(_doc_callparams_note)s

    The scale matrix `scale` must be a symmetric positive definite
    matrix. Singular matrices, including the symmetric positive semi-definite
    case, are not supported.

    The inverse Wishart distribution is often denoted

    .. math::

        W_p^{-1}(\nu, \Psi)

    where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
    :math:`p \times p` scale matrix.

    The probability density function for `invwishart` has support over positive
    definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
    then its PDF is given by:

    .. math::

        f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
               |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
               \exp\left( -tr(\Sigma S^{-1}) / 2 \right)

    If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
    :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).

    If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1(\nu, 1)` collapses to the
    inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
    and scale = :math:`\frac{1}{2}`.

    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
           Wiley, 1983.
    .. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
           Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.stats import invwishart, invgamma
    >>> x = np.linspace(0.01, 1, 100)
    >>> iw = invwishart.pdf(x, df=6, scale=1)
    >>> iw[:3]
    array([  1.20546865e-15,   5.42497807e-06,   4.45813929e-03])
    >>> ig = invgamma.pdf(x, 6/2., scale=1./2)
    >>> ig[:3]
    array([  1.20546865e-15,   5.42497807e-06,   4.45813929e-03])
    >>> plt.plot(x, iw)

    The input quantiles can be any shape of array, as long as the last
    axis labels the components.
    """

    def __init__(self, seed=None):
        super(invwishart_gen, self).__init__(seed)
        # Substitute the shared docstring fragments into this class's doc.
        self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)

    def __call__(self, df=None, scale=None, seed=None):
        """
        Create a frozen inverse Wishart distribution.

        See `invwishart_frozen` for more information.
        """
        return invwishart_frozen(df, scale, seed)

    def _logpdf(self, x, dim, df, scale, log_det_scale):
        """
        Parameters
        ----------
        x : ndarray
            Points at which to evaluate the log of the probability
            density function.
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        scale : ndarray
            Scale matrix
        log_det_scale : float
            Logarithm of the determinant of the scale matrix

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.
        """
        log_det_x = np.zeros(x.shape[-1])
        # Transpose so the batch index is first: x_inv has shape (n, dim, dim)
        # when x has shape (dim, dim, n).
        x_inv = np.copy(x).T
        if dim > 1:
            _cho_inv_batch(x_inv)  # works in-place
        else:
            x_inv = 1./x_inv
        tr_scale_x_inv = np.zeros(x.shape[-1])

        for i in range(x.shape[-1]):
            # log(det(x_i)) via the Cholesky factor: det(x) = prod(diag(C))^2.
            C, lower = scipy.linalg.cho_factor(x[:, :, i], lower=True)

            log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))

            tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()

        # Log PDF
        out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
               (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
               multigammaln(0.5*df, dim))

        return out

    def logpdf(self, x, df, scale):
        """
        Log of the inverse Wishart probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            Log of the probability density function evaluated at `x`

        Notes
        -----
        %(_doc_callparams_note)s
        """
        dim, df, scale = self._process_parameters(df, scale)
        x = self._process_quantiles(x, dim)
        _, log_det_scale = self._cholesky_logdet(scale)
        out = self._logpdf(x, dim, df, scale, log_det_scale)
        return _squeeze_output(out)

    def pdf(self, x, df, scale):
        """
        Inverse Wishart probability density function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s

        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at `x`

        Notes
        -----
        %(_doc_callparams_note)s
        """
        return np.exp(self.logpdf(x, df, scale))

    def _mean(self, dim, df, scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mean' instead.
        """
        # Mean exists only for df > dim + 1.
        if df > dim + 1:
            out = scale / (df - dim - 1)
        else:
            out = None
        return out

    def mean(self, df, scale):
        """
        Mean of the inverse Wishart distribution

        Only valid if the degrees of freedom are greater than the dimension of
        the scale matrix plus one.

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        mean : float or None
            The mean of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mean(dim, df, scale)
        return _squeeze_output(out) if out is not None else out

    def _mode(self, dim, df, scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'mode' instead.
        """
        return scale / (df + dim + 1)

    def mode(self, df, scale):
        """
        Mode of the inverse Wishart distribution

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        mode : float
            The Mode of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._mode(dim, df, scale)
        return _squeeze_output(out)

    def _var(self, dim, df, scale):
        """
        Parameters
        ----------
        dim : int
            Dimension of the scale matrix
        %(_doc_default_callparams)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'var' instead.
        """
        # Variance exists only for df > dim + 3.
        if df > dim + 3:
            var = (df - dim + 1) * scale**2
            diag = scale.diagonal()  # 1 x dim array
            var += (df - dim - 1) * np.outer(diag, diag)
            var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
        else:
            var = None
        return var

    def var(self, df, scale):
        """
        Variance of the inverse Wishart distribution

        Only valid if the degrees of freedom are greater than the dimension of
        the scale matrix plus three.

        Parameters
        ----------
        %(_doc_default_callparams)s

        Returns
        -------
        var : float
            The variance of the distribution
        """
        dim, df, scale = self._process_parameters(df, scale)
        out = self._var(dim, df, scale)
        return _squeeze_output(out) if out is not None else out

    def _rvs(self, n, shape, dim, df, C, random_state):
        """
        Parameters
        ----------
        n : integer
            Number of variates to generate
        shape : iterable
            Shape of the variates to generate
        dim : int
            Dimension of the scale matrix
        df : int
            Degrees of freedom
        C : ndarray
            Cholesky factorization of the *inverse* scale matrix, lower
            triangular.
        %(_doc_random_state)s

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'rvs' instead.
        """
        random_state = self._get_random_state(random_state)
        # Get random draws A such that A ~ W(df, I)
        A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
                                                      df, random_state)

        # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
        eye = np.eye(dim)
        # NOTE(review): ('trtrs') is just the string 'trtrs', so
        # get_lapack_funcs returns a single function rather than a list;
        # confirm against scipy.linalg.lapack.get_lapack_funcs docs.
        trtrs = get_lapack_funcs(('trtrs'), (A,))

        for index in np.ndindex(A.shape[:-2]):
            # Calculate CA
            CA = np.dot(C, A[index])
            # Get (C A)^{-1} via triangular solver
            if dim > 1:
                CA, info = trtrs(CA, eye, lower=True)
                if info > 0:
                    raise LinAlgError("Singular matrix.")
                if info < 0:
                    raise ValueError('Illegal value in %d-th argument of'
                                     ' internal trtrs' % -info)
            else:
                CA = 1. / CA
            # Get SA
            A[index] = np.dot(CA.T, CA)

        return A

    def rvs(self, df, scale, size=1, random_state=None):
        """
        Draw random samples from an inverse Wishart distribution.

        Parameters
        ----------
        %(_doc_default_callparams)s
        size : integer or iterable of integers, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s

        Returns
        -------
        rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
            the dimension of the scale matrix.

        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, shape = self._process_size(size)
        dim, df, scale = self._process_parameters(df, scale)

        # Invert the scale
        eye = np.eye(dim)
        L, lower = scipy.linalg.cho_factor(scale, lower=True)
        inv_scale = scipy.linalg.cho_solve((L, lower), eye)
        # Cholesky decomposition of inverted scale
        C = scipy.linalg.cholesky(inv_scale, lower=True)

        out = self._rvs(n, shape, dim, df, C, random_state)

        return _squeeze_output(out)

    def entropy(self):
        # Need to find reference for inverse Wishart entropy; deliberately
        # unimplemented, so attribute access behaves as if absent.
        raise AttributeError
# Module-level instance exposed as the public API; calling ``invwishart(df,
# scale)`` returns a frozen distribution.
invwishart = invwishart_gen()
class invwishart_frozen(multi_rv_frozen):
    def __init__(self, df, scale, seed=None):
        """
        Create a frozen inverse Wishart distribution.

        Parameters
        ----------
        df : array_like
            Degrees of freedom of the distribution
        scale : array_like
            Scale matrix of the distribution
        seed : None or int or np.random.RandomState instance, optional
            This parameter defines the RandomState object to use for drawing
            random variates.
            If None (or np.random), the global np.random state is used.
            If integer, it is used to seed the local RandomState instance
            Default is None.
        """
        self._dist = invwishart_gen(seed)
        self.dim, self.df, self.scale = self._dist._process_parameters(
            df, scale
        )

        # Cache log(det(scale)) from a Cholesky factorization of the scale.
        factor, lower = scipy.linalg.cho_factor(self.scale, lower=True)
        self.log_det_scale = 2 * np.sum(np.log(factor.diagonal()))

        # Invert the scale using the same factorization, then store the
        # lower Cholesky factor of the *inverse* scale for sampling.
        identity = np.eye(self.dim)
        self.inv_scale = scipy.linalg.cho_solve((factor, lower), identity)
        self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)

    def logpdf(self, x):
        quantiles = self._dist._process_quantiles(x, self.dim)
        result = self._dist._logpdf(quantiles, self.dim, self.df, self.scale,
                                    self.log_det_scale)
        return _squeeze_output(result)

    def pdf(self, x):
        return np.exp(self.logpdf(x))

    def mean(self):
        result = self._dist._mean(self.dim, self.df, self.scale)
        return None if result is None else _squeeze_output(result)

    def mode(self):
        return _squeeze_output(self._dist._mode(self.dim, self.df, self.scale))

    def var(self):
        result = self._dist._var(self.dim, self.df, self.scale)
        return None if result is None else _squeeze_output(result)

    def rvs(self, size=1, random_state=None):
        n, shape = self._dist._process_size(size)
        draws = self._dist._rvs(n, shape, self.dim, self.df,
                                self.C, random_state)
        return _squeeze_output(draws)

    def entropy(self):
        # Need to find reference for inverse Wishart entropy
        raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
    method = invwishart_gen.__dict__[name]
    # Bug fix: the frozen-method docstrings belong on invwishart_frozen.
    # The original looked up wishart_frozen, which re-clobbered the Wishart
    # frozen docstrings with inverse-Wishart text and left
    # invwishart_frozen undocumented.
    method_frozen = invwishart_frozen.__dict__[name]
    method_frozen.__doc__ = doccer.docformat(
        method.__doc__, wishart_docdict_noparams)
    method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/pandas/core/computation/engines.py | 15 | 3799 | """
Engine classes for :func:`~pandas.eval`
"""
import abc
from pandas import compat
from pandas.compat import map
import pandas.io.formats.printing as printing
from pandas.core.computation.align import _align, _reconstruct_object
from pandas.core.computation.ops import (
UndefinedVariableError,
_mathops, _reductions)
_ne_builtins = frozenset(_mathops + _reductions)
class NumExprClobberingError(NameError):
    """Raised when a variable in an expression shadows a numexpr builtin."""
    pass
def _check_ne_builtin_clash(expr):
    """Attempt to prevent foot-shooting in a helpful way.

    Raise :class:`NumExprClobberingError` if any name referenced by *expr*
    shadows a numexpr builtin (a math op or reduction).

    Parameters
    ----------
    expr : Expr
        Parsed expression whose referenced names are checked.
    """
    clashing = expr.names & _ne_builtins
    if not clashing:
        return
    s = ', '.join(map(repr, clashing))
    raise NumExprClobberingError('Variables in expression "{expr}" '
                                 'overlap with builtins: ({s})'
                                 .format(expr=expr, s=s))
class AbstractEngine(object):
    """Object serving as a base class for all engines."""
    __metaclass__ = abc.ABCMeta
    # Whether the engine needs the negative-fraction workaround.
    # NOTE(review): semantics inferred from the name only -- numexpr sets
    # this True, the Python engine False; confirm against callers.
    has_neg_frac = False
    def __init__(self, expr):
        # Parsed expression; alignment results are cached on the instance
        # by evaluate() below.
        self.expr = expr
        self.aligned_axes = None
        self.result_type = None
    def convert(self):
        """Convert an expression for evaluation.
        Defaults to return the expression as a string.
        """
        return printing.pprint_thing(self.expr)
    def evaluate(self):
        """Run the engine on the expression
        This method performs alignment which is necessary no matter what engine
        is being used, thus its implementation is in the base class.
        Returns
        -------
        obj : object
            The result of the passed expression.
        """
        if not self._is_aligned:
            self.result_type, self.aligned_axes = _align(self.expr.terms)
        # make sure no names in resolvers and locals/globals clash
        res = self._evaluate()
        # Rebuild a pandas/numpy object of the aligned type from the raw
        # engine result.
        return _reconstruct_object(self.result_type, res, self.aligned_axes,
                                   self.expr.terms.return_type)
    @property
    def _is_aligned(self):
        # True once evaluate() has cached both alignment products.
        return self.aligned_axes is not None and self.result_type is not None
    @abc.abstractmethod
    def _evaluate(self):
        """Return an evaluated expression.
        Parameters
        ----------
        env : Scope
            The local and global environment in which to evaluate an
            expression.
        Notes
        -----
        Must be implemented by subclasses.
        """
        pass
class NumExprEngine(AbstractEngine):
    """NumExpr engine class"""
    has_neg_frac = True
    def __init__(self, expr):
        super(NumExprEngine, self).__init__(expr)
    def convert(self):
        # Coerce the pretty-printed expression to a plain str for numexpr.
        return str(super(NumExprEngine, self).convert())
    def _evaluate(self):
        import numexpr as ne
        # convert the expression to a valid numexpr expression
        s = self.convert()
        try:
            env = self.expr.env
            scope = env.full_scope
            truediv = scope['truediv']
            _check_ne_builtin_clash(self.expr)
            return ne.evaluate(s, local_dict=scope, truediv=truediv)
        except KeyError as e:
            # python 3 compat kludge
            try:
                msg = e.message
            except AttributeError:
                msg = compat.text_type(e)
            # A KeyError from the scope lookup means the expression used a
            # name that is not defined in the calling environment.
            raise UndefinedVariableError(msg)
class PythonEngine(AbstractEngine):
    """Evaluate an expression in Python space.
    Mostly for testing purposes.
    """
    has_neg_frac = False
    def __init__(self, expr):
        super(PythonEngine, self).__init__(expr)
    def evaluate(self):
        # Bypass the base-class alignment machinery entirely: calling the
        # expression object evaluates it directly in Python.
        return self.expr()
    def _evaluate(self):
        # Unused: evaluate() is overridden above; this no-op only satisfies
        # AbstractEngine's abstract-method contract.
        pass
_engines = {'numexpr': NumExprEngine, 'python': PythonEngine}
| mit |
manipopopo/tensorflow | tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 24 | 6776 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support (deprecated)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
# pylint: disable=old-style-class
class _ClassifierMixin():
  """Mixin class for all classifiers."""
  # Intentionally empty: only a marker base class used when sklearn is
  # unavailable (see the conditional aliasing at the bottom of this module).
  pass
class _RegressorMixin():
  """Mixin class for all regression estimators."""
  # Intentionally empty marker base class (sklearn-free fallback).
  pass
class _TransformerMixin():
  """Mixin class for all transformer estimators."""
  # Intentionally empty marker base class (sklearn-free fallback).
# Mirrors sklearn.exceptions.NotFittedError so callers can raise/catch it
# even when sklearn is not installed.
class NotFittedError(ValueError, AttributeError):
  """Exception class to raise if estimator is used before fitting.
  USE OF THIS EXCEPTION IS DEPRECATED.
  This class inherits from both ValueError and AttributeError to help with
  exception handling and backward compatibility.
  Examples:
  >>> from sklearn.svm import LinearSVC
  >>> from sklearn.exceptions import NotFittedError
  >>> try:
  ...     LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
  ... except NotFittedError as e:
  ...     print(repr(e))
  ...     # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
  NotFittedError('This LinearSVC instance is not fitted yet',)
  Copied from
  https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
  """
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| apache-2.0 |
m4rx9/rna-pdb-tools | rna_tools/tools/rna_calc_rmsd_trafl/rna_cal_rmsd_trafl_plot.py | 2 | 1331 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""rna_cal_rmsd_trafl_plot - generate a plot based of <rmsd.txt> of rna_calc_evo_rmsd.py."""
from __future__ import division
import pandas as pd
import matplotlib.pyplot as plt
import argparse
import numpy as np
import matplotlib.pyplot as plt
from pandas import Series, DataFrame
# Global plotting and table-printing defaults for this script.
plt.style.use('ggplot')
plt.rc('figure', figsize=(10, 6))
np.set_printoptions(precision=4, threshold=500)
pd.options.display.max_rows = 100
def get_parser():
    """Build the command-line parser for this script (one positional arg)."""
    p = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    p.add_argument('file', help="rmsd.txt")
    return p
if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    fn = args.file
    # Input is a tab-separated table; the first column is plotted on x and
    # the second on y (presumably frame index vs RMSD -- see module doc).
    df = pd.read_csv(fn, sep="\t")
    print(df.head())
    print(len(df)) # print len(df) #
    # plt.style.use('classic')
    ax = df.plot(x=df.columns[0], y=df.columns[1],
                 legend=False,
                 marker="o",
                 clip_on=False) # (x='fn', y='rmsd')
    ax.set_ylabel(df.columns[1])
    # Pad both axes by one unit past the data so markers are not clipped.
    plt.ylim(-0, df[df.columns[1]].max() + 1)
    plt.xlim(-0, df[df.columns[0]].max() + 1)
    # Derive the output name from the input: rmsd.txt -> rmsd.png.
    outfn = args.file.replace('.txt', '') + '.png'
    print('Save plot %s' % outfn)
    plt.savefig(outfn)
| mit |
rew4332/tensorflow | tensorflow/contrib/learn/python/learn/tests/estimators_test.py | 5 | 3169 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
# TODO(b/29580537): Remove when we deprecate feature column inference.
class InferredfeatureColumnTest(tf.test.TestCase):
  """Tests a custom optimizer with inferred (unspecified) feature columns."""
  def testIrisMomentum(self):
    # Fixed seeds keep the split and training reproducible.
    random.seed(42)
    iris = datasets.load_iris()
    x_train, x_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        test_size=0.2,
                                                        random_state=42)
    def custom_optimizer(learning_rate):
      # Momentum optimizer with a fixed momentum of 0.9.
      return tf.train.MomentumOptimizer(learning_rate, 0.9)
    # Note: no feature_columns argument -- exercises column inference.
    classifier = learn.TensorFlowDNNClassifier(
        hidden_units=[10, 20, 10],
        n_classes=3,
        steps=400,
        learning_rate=0.01,
        optimizer=custom_optimizer)
    classifier.fit(x_train, y_train)
    score = accuracy_score(y_test, classifier.predict(x_test))
    # Loose threshold: only checks that the model learned something.
    self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
class CustomOptimizer(tf.test.TestCase):
  """Custom optimizer tests."""
  def testIrisMomentum(self):
    # Fixed seeds keep the split and training reproducible.
    random.seed(42)
    iris = datasets.load_iris()
    x_train, x_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        test_size=0.2,
                                                        random_state=42)
    def custom_optimizer(learning_rate):
      # Momentum optimizer with a fixed momentum of 0.9.
      return tf.train.MomentumOptimizer(learning_rate, 0.9)
    # Same as the test above, but with explicit feature columns.
    classifier = learn.TensorFlowDNNClassifier(
        hidden_units=[10, 20, 10],
        feature_columns=learn.infer_real_valued_columns_from_input(x_train),
        n_classes=3,
        steps=400,
        learning_rate=0.01,
        optimizer=custom_optimizer)
    classifier.fit(x_train, y_train)
    score = accuracy_score(y_test, classifier.predict(x_test))
    # Loose threshold: only checks that the model learned something.
    self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
timole/solitadds-backend | main.py | 1 | 2485 | #!/usr/bin/python
import sys, re, pdb, os
import logging
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib, datetime
import utils, data_helper
import analyze
def parse_args():
    """
    Parse command line args into a dict keyed by option name.
    Example
    -------
    python main.py --input-file-operative ../data/small/some-applications-operative-pub-20161031.csv --input-file-usage ../data/small/some-lupapiste-usage-pub-20161031.csv --output-file-applications ../target/application-summary.csv --output-file-users ../target/user-summary.csv
    """
    parser = argparse.ArgumentParser(description='SOLITADDS analysis')
    parser.add_argument('-io', '--input-file-operative', help='Input CSV file for operative data', required = False, default = os.getcwd() + "/test-data/some-applications-operative-pub-20161031.csv")
    parser.add_argument('-iu', '--input-file-usage', help='Input CSV file for usage data', required = False, default = os.getcwd() + "/test-data/some-lupapiste-usage-pub-20161031.csv")
    # Bug fix: the output defaults were missing the "/" separator, so the
    # default files were written beside the CWD with its basename as a
    # prefix (e.g. "/home/userprojsummary-applications.csv").
    parser.add_argument('-oa', '--output-file-applications', help='Output CSV file for applications', required = False, default = os.getcwd() + "/summary-applications.csv")
    parser.add_argument('-ou', '--output-file-users', help='Output CSV file for users', required=False, default = os.getcwd() + "/summary-users.csv")
    # vars() turns the Namespace into a plain dict for easy indexing.
    args = vars(parser.parse_args())
    return args
if __name__ == "__main__":
pd.set_option('display.width', 240)
args = parse_args()
input_file_operative = args['input_file_operative']
input_file_usage = args['input_file_usage']
output_file_applications = args['output_file_applications']
output_file_users = args['output_file_users']
analysis_start_time = datetime.datetime.now()
odf = data_helper.import_operative_data(input_file_operative)
udf = data_helper.import_usage_data(input_file_usage)
print("Total number of apps: {}".format(len(odf)))
print("Total number of events: {} with time range from {} to {} ".format(len(udf), udf['datetime'].min(), udf['datetime'].max()))
application_summary = analyze.summarize_applications(odf, udf)
application_summary.to_csv(output_file_applications, sep=';', encoding='utf-8')
user_summary = analyze.summarize_users(odf, udf)
user_summary.to_csv(output_file_users, sep=';', encoding='utf-8')
print("Analysis took {} seconds".format(datetime.datetime.now() - analysis_start_time))
| mit |
fredrikw/scipy | scipy/interpolate/fitpack.py | 25 | 46138 | #!/usr/bin/env python
"""
fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
FITPACK is a collection of FORTRAN programs for curve and surface
fitting with splines and tensor product splines.
See
http://www.cs.kuleuven.ac.be/cwis/research/nalag/research/topics/fitpack.html
or
http://www.netlib.org/dierckx/index.html
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
TODO: Make interfaces to the following fitpack functions:
For univariate splines: cocosp, concon, fourco, insert
For bivariate splines: profil, regrid, parsur, surev
"""
from __future__ import division, print_function, absolute_import
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
from . import _fitpack
from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
empty, iinfo, intc, asarray)
# Try to replace _fitpack interface with
# f2py-generated version
from . import dfitpack
def _intc_overflow(x, msg=None):
"""Cast the value to an intc and raise an OverflowError if the value
cannot fit.
"""
if x > iinfo(intc).max:
if msg is None:
msg = '%r cannot fit into an intc' % x
raise OverflowError(msg)
return intc(x)
# Maps FITPACK `ier` return codes (univariate routines) to a
# [message, exception class] pair; the class is None for success/warning
# codes (<= 0), and 'unknown' is the fallback for unrecognized codes.
_iermess = {
    0: ["The spline has a residual sum of squares fp such that "
        "abs(fp-s)/s<=0.001", None],
    -1: ["The spline is an interpolating spline (fp=0)", None],
    -2: ["The spline is weighted least-squares polynomial of degree k.\n"
         "fp gives the upper bound fp0 for the smoothing factor s", None],
    1: ["The required storage space exceeds the available storage space.\n"
        "Probable causes: data (x,y) size is too small or smoothing parameter"
        "\ns is too small (fp>s).", ValueError],
    2: ["A theoretically impossible result when finding a smoothing spline\n"
        "with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
        ValueError],
    3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
        "spline with fp=s has been reached. Probable cause: s too small.\n"
        "(abs(fp-s)/s>0.001)", ValueError],
    10: ["Error on input data", ValueError],
    'unknown': ["An error occurred", TypeError]
}
# Same kind of mapping for the bivariate (kx/ky) routines, which have the
# extra codes -3, 4, 5 and 11.
_iermess2 = {
    0: ["The spline has a residual sum of squares fp such that "
        "abs(fp-s)/s<=0.001", None],
    -1: ["The spline is an interpolating spline (fp=0)", None],
    -2: ["The spline is weighted least-squares polynomial of degree kx and ky."
         "\nfp gives the upper bound fp0 for the smoothing factor s", None],
    -3: ["Warning. The coefficients of the spline have been computed as the\n"
         "minimal norm least-squares solution of a rank deficient system.",
         None],
    1: ["The required storage space exceeds the available storage space.\n"
        "Probable causes: nxest or nyest too small or s is too small. (fp>s)",
        ValueError],
    2: ["A theoretically impossible result when finding a smoothing spline\n"
        "with fp = s. Probable causes: s too small or badly chosen eps.\n"
        "(abs(fp-s)/s>0.001)", ValueError],
    3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
        "spline with fp=s has been reached. Probable cause: s too small.\n"
        "(abs(fp-s)/s>0.001)", ValueError],
    4: ["No more knots can be added because the number of B-spline\n"
        "coefficients already exceeds the number of data points m.\n"
        "Probable causes: either s or m too small. (fp>s)", ValueError],
    5: ["No more knots can be added because the additional knot would\n"
        "coincide with an old one. Probable cause: s too small or too large\n"
        "a weight to an inaccurate data point. (fp>s)", ValueError],
    10: ["Error on input data", ValueError],
    11: ["rwrk2 too small, i.e. there is not enough workspace for computing\n"
         "the minimal least-squares solution of a rank deficient system of\n"
         "linear equations.", ValueError],
    'unknown': ["An error occurred", TypeError]
}
# Module-level cache intended for reuse across splprep calls with task==1
# (see the NOTE inside splprep about how it is actually bound).
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
                 'iwrk': array([], intc), 'u': array([], float),
                 'ub': 0, 'ue': 1}
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
            full_output=0, nest=None, per=0, quiet=1):
    """
    Find the B-spline representation of an N-dimensional curve.
    Given a list of N rank-1 arrays, `x`, which represent a curve in
    N-dimensional space parametrized by `u`, find a smooth approximating
    spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
    Parameters
    ----------
    x : array_like
        A list of sample vector arrays representing the curve.
    w : array_like, optional
        Strictly positive rank-1 array of weights the same length as `x[0]`.
        The weights are used in computing the weighted least-squares spline
        fit. If the errors in the `x` values have standard-deviation given by
        the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
    u : array_like, optional
        An array of parameter values. If not given, these values are
        calculated automatically as ``M = len(x[0])``, where
        v[0] = 0
        v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
        u[i] = v[i] / v[M-1]
    ub, ue : int, optional
        The end-points of the parameters interval. Defaults to
        u[0] and u[-1].
    k : int, optional
        Degree of the spline. Cubic splines are recommended.
        Even values of `k` should be avoided especially with a small s-value.
        ``1 <= k <= 5``, default is 3.
    task : int, optional
        If task==0 (default), find t and c for a given smoothing factor, s.
        If task==1, find t and c for another value of the smoothing factor, s.
        There must have been a previous call with task=0 or task=1
        for the same set of data.
        If task=-1 find the weighted least square spline for a given set of
        knots, t.
    s : float, optional
        A smoothing condition. The amount of smoothness is determined by
        satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
        where g(x) is the smoothed interpolation of (x,y). The user can
        use `s` to control the trade-off between closeness and smoothness
        of fit. Larger `s` means more smoothing while smaller values of `s`
        indicate less smoothing. Recommended values of `s` depend on the
        weights, w. If the weights represent the inverse of the
        standard-deviation of y, then a good `s` value should be found in
        the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
        data points in x, y, and w.
    t : int, optional
        The knots needed for task=-1.
    full_output : int, optional
        If non-zero, then return optional outputs.
    nest : int, optional
        An over-estimate of the total number of knots of the spline to
        help in determining the storage space. By default nest=m/2.
        Always large enough is nest=m+k+1.
    per : int, optional
        If non-zero, data points are considered periodic with period
        ``x[m-1] - x[0]`` and a smooth periodic spline approximation is
        returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
    quiet : int, optional
        Non-zero to suppress messages.
        This parameter is deprecated; use standard Python warning filters
        instead.
    Returns
    -------
    tck : tuple
        A tuple (t,c,k) containing the vector of knots, the B-spline
        coefficients, and the degree of the spline.
    u : array
        An array of the values of the parameter.
    fp : float
        The weighted sum of squared residuals of the spline approximation.
    ier : int
        An integer flag about splrep success. Success is indicated
        if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
        Otherwise an error is raised.
    msg : str
        A message corresponding to the integer flag, ier.
    See Also
    --------
    splrep, splev, sproot, spalde, splint,
    bisplrep, bisplev
    UnivariateSpline, BivariateSpline
    Notes
    -----
    See `splev` for evaluation of the spline and its derivatives.
    The number of dimensions N must be smaller than 11.
    References
    ----------
    .. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
        parametric splines, Computer Graphics and Image Processing",
        20 (1982) 171-184.
    .. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
        parametric splines", report tw55, Dept. Computer Science,
        K.U.Leuven, 1981.
    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
        Numerical Analysis, Oxford University Press, 1993.
    """
    if task <= 0:
        # NOTE(review): this assignment creates a *local* _parcur_cache that
        # shadows the module-level cache; with task == 1 (cache reuse) none
        # of these bindings happen and the reads below would hit an unbound
        # local. Presumably a `global _parcur_cache` was intended -- confirm
        # before changing, since task=1 reuse may simply be unused/broken.
        _parcur_cache = {'t': array([], float), 'wrk': array([], float),
                         'iwrk': array([], intc), 'u': array([], float),
                         'ub': 0, 'ue': 1}
    x = atleast_1d(x)
    idim, m = x.shape
    if per:
        # Periodic fit: force each dimension's endpoints to coincide.
        for i in range(idim):
            if x[i][0] != x[i][-1]:
                if quiet < 2:
                    warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
                                                 (i, m, i)))
                x[i][-1] = x[i][0]
    if not 0 < idim < 11:
        raise TypeError('0 < idim < 11 must hold')
    if w is None:
        w = ones(m, float)
    else:
        w = atleast_1d(w)
    # ipar tells FITPACK whether the parameter values u were user-supplied.
    ipar = (u is not None)
    if ipar:
        _parcur_cache['u'] = u
        if ub is None:
            _parcur_cache['ub'] = u[0]
        else:
            _parcur_cache['ub'] = ub
        if ue is None:
            _parcur_cache['ue'] = u[-1]
        else:
            _parcur_cache['ue'] = ue
    else:
        _parcur_cache['u'] = zeros(m, float)
    if not (1 <= k <= 5):
        raise TypeError('1 <= k= %d <=5 must hold' % k)
    if not (-1 <= task <= 1):
        raise TypeError('task must be -1, 0 or 1')
    if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
        raise TypeError('Mismatch of input dimensions')
    if s is None:
        # FITPACK-recommended default smoothing for unit weights.
        s = m - sqrt(2*m)
    if t is None and task == -1:
        raise TypeError('Knots must be given for task=-1')
    if t is not None:
        _parcur_cache['t'] = atleast_1d(t)
    n = len(_parcur_cache['t'])
    if task == -1 and n < 2*k + 2:
        raise TypeError('There must be at least 2*k+2 knots for task=-1')
    if m <= k:
        raise TypeError('m > k must hold')
    if nest is None:
        nest = m + 2*k
    if (task >= 0 and s == 0) or (nest < 0):
        # Interpolation (s=0) needs the maximal knot storage.
        if per:
            nest = m + 2*k
        else:
            nest = m + k + 1
    nest = max(nest, 2*k + 3)
    # Pull the cache contents into locals for the FITPACK call.
    u = _parcur_cache['u']
    ub = _parcur_cache['ub']
    ue = _parcur_cache['ue']
    t = _parcur_cache['t']
    wrk = _parcur_cache['wrk']
    iwrk = _parcur_cache['iwrk']
    # Call the FITPACK parcur routine; x is flattened point-major.
    t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
                               task, ipar, s, t, nest, wrk, iwrk, per)
    # Store results back for potential task=1 reuse.
    _parcur_cache['u'] = o['u']
    _parcur_cache['ub'] = o['ub']
    _parcur_cache['ue'] = o['ue']
    _parcur_cache['t'] = t
    _parcur_cache['wrk'] = o['wrk']
    _parcur_cache['iwrk'] = o['iwrk']
    ier = o['ier']
    fp = o['fp']
    n = len(t)
    u = o['u']
    c.shape = idim, n - k - 1
    tcku = [t, list(c), k], u
    if ier <= 0 and not quiet:
        warnings.warn(RuntimeWarning(_iermess[ier][0] +
                                     "\tk=%d n=%d m=%d fp=%f s=%f" %
                                     (k, len(t), m, fp, s)))
    if ier > 0 and not full_output:
        # Codes 1-3 are recoverable (warn); anything else raises the
        # exception class recorded in _iermess.
        if ier in [1, 2, 3]:
            warnings.warn(RuntimeWarning(_iermess[ier][0]))
        else:
            try:
                raise _iermess[ier][1](_iermess[ier][0])
            except KeyError:
                raise _iermess['unknown'][1](_iermess['unknown'][0])
    if full_output:
        try:
            return tcku, fp, ier, _iermess[ier][0]
        except KeyError:
            return tcku, fp, ier, _iermess['unknown'][0]
    else:
        return tcku
# Module-level cache intended for reuse across splrep calls with task==1
# (see the NOTE inside splrep about how it is actually bound).
_curfit_cache = {'t': array([], float), 'wrk': array([], float),
                 'iwrk': array([], intc)}
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
           full_output=0, per=0, quiet=1):
    """
    Find the B-spline representation of 1-D curve.
    Given the set of data points ``(x[i], y[i])`` determine a smooth spline
    approximation of degree k on the interval ``xb <= x <= xe``.
    Parameters
    ----------
    x, y : array_like
        The data points defining a curve y = f(x).
    w : array_like, optional
        Strictly positive rank-1 array of weights the same length as x and y.
        The weights are used in computing the weighted least-squares spline
        fit. If the errors in the y values have standard-deviation given by the
        vector d, then w should be 1/d. Default is ones(len(x)).
    xb, xe : float, optional
        The interval to fit. If None, these default to x[0] and x[-1]
        respectively.
    k : int, optional
        The order of the spline fit. It is recommended to use cubic splines.
        Even order splines should be avoided especially with small s values.
        1 <= k <= 5
    task : {1, 0, -1}, optional
        If task==0 find t and c for a given smoothing factor, s.
        If task==1 find t and c for another value of the smoothing factor, s.
        There must have been a previous call with task=0 or task=1 for the same
        set of data (t will be stored an used internally)
        If task=-1 find the weighted least square spline for a given set of
        knots, t. These should be interior knots as knots on the ends will be
        added automatically.
    s : float, optional
        A smoothing condition. The amount of smoothness is determined by
        satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
        is the smoothed interpolation of (x,y). The user can use s to control
        the tradeoff between closeness and smoothness of fit. Larger s means
        more smoothing while smaller values of s indicate less smoothing.
        Recommended values of s depend on the weights, w. If the weights
        represent the inverse of the standard-deviation of y, then a good s
        value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
        the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
        weights are supplied. s = 0.0 (interpolating) if no weights are
        supplied.
    t : array_like, optional
        The knots needed for task=-1. If given then task is automatically set
        to -1.
    full_output : bool, optional
        If non-zero, then return optional outputs.
    per : bool, optional
        If non-zero, data points are considered periodic with period x[m-1] -
        x[0] and a smooth periodic spline approximation is returned. Values of
        y[m-1] and w[m-1] are not used.
    quiet : bool, optional
        Non-zero to suppress messages.
        This parameter is deprecated; use standard Python warning filters
        instead.
    Returns
    -------
    tck : tuple
        (t,c,k) a tuple containing the vector of knots, the B-spline
        coefficients, and the degree of the spline.
    fp : array, optional
        The weighted sum of squared residuals of the spline approximation.
    ier : int, optional
        An integer flag about splrep success. Success is indicated if ier<=0.
        If ier in [1,2,3] an error occurred but was not raised. Otherwise an
        error is raised.
    msg : str, optional
        A message corresponding to the integer flag, ier.
    Notes
    -----
    See splev for evaluation of the spline and its derivatives.
    The user is responsible for assuring that the values of *x* are unique.
    Otherwise, *splrep* will not return sensible results.
    See Also
    --------
    UnivariateSpline, BivariateSpline
    splprep, splev, sproot, spalde, splint
    bisplrep, bisplev
    Notes
    -----
    See splev for evaluation of the spline and its derivatives. Uses the
    FORTRAN routine curfit from FITPACK.
    If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
    i.e., there must be a subset of data points ``x[j]`` such that
    ``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
    References
    ----------
    Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
    .. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
       integration of experimental data using spline functions",
       J.Comp.Appl.Maths 1 (1975) 165-184.
    .. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
       grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
       1286-1304.
    .. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
       functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
    .. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
       Numerical Analysis, Oxford University Press, 1993.
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> from scipy.interpolate import splev, splrep
    >>> x = np.linspace(0, 10, 10)
    >>> y = np.sin(x)
    >>> tck = splrep(x, y)
    >>> x2 = np.linspace(0, 10, 200)
    >>> y2 = splev(x2, tck)
    >>> plt.plot(x, y, 'o', x2, y2)
    >>> plt.show()
    """
    if task <= 0:
        # NOTE(review): as in splprep, this binds a *local* _curfit_cache
        # that shadows the module-level cache; with task == 1 the try/except
        # below would hit an UnboundLocalError (not the KeyError it guards
        # for). Presumably `global _curfit_cache` was intended -- confirm.
        _curfit_cache = {}
    x, y = map(atleast_1d, [x, y])
    m = len(x)
    if w is None:
        w = ones(m, float)
        if s is None:
            # Unweighted default: exact interpolation.
            s = 0.0
    else:
        w = atleast_1d(w)
        if s is None:
            # Weighted default per the FITPACK recommendation.
            s = m - sqrt(2*m)
    if not len(w) == m:
        raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
    if (m != len(y)) or (m != len(w)):
        raise TypeError('Lengths of the first three arguments (x,y,w) must '
                        'be equal')
    if not (1 <= k <= 5):
        raise TypeError('Given degree of the spline (k=%d) is not supported. '
                        '(1<=k<=5)' % k)
    if m <= k:
        raise TypeError('m > k must hold')
    if xb is None:
        xb = x[0]
    if xe is None:
        xe = x[-1]
    if not (-1 <= task <= 1):
        raise TypeError('task must be -1, 0 or 1')
    if t is not None:
        # Supplying knots implies a least-squares fit on those knots.
        task = -1
    if task == -1:
        if t is None:
            raise TypeError('Knots must be given for task=-1')
        numknots = len(t)
        # Interior knots go in the middle; 2*(k+1) boundary slots are
        # filled by FITPACK.
        _curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
        _curfit_cache['t'][k+1:-k-1] = t
        nest = len(_curfit_cache['t'])
    elif task == 0:
        if per:
            nest = max(m + 2*k, 2*k + 3)
        else:
            nest = max(m + k + 1, 2*k + 3)
        t = empty((nest,), float)
        _curfit_cache['t'] = t
    if task <= 0:
        # Workspace sizes are those documented by curfit/percur.
        if per:
            _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
        else:
            _curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
        _curfit_cache['iwrk'] = empty((nest,), intc)
    try:
        t = _curfit_cache['t']
        wrk = _curfit_cache['wrk']
        iwrk = _curfit_cache['iwrk']
    except KeyError:
        raise TypeError("must call with task=1 only after"
                        " call with task=0,-1")
    if not per:
        n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
                                        xb, xe, k, s)
    else:
        n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
    tck = (t[:n], c[:n], k)
    if ier <= 0 and not quiet:
        _mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
                 (k, len(t), m, fp, s))
        warnings.warn(RuntimeWarning(_mess))
    if ier > 0 and not full_output:
        # Codes 1-3 are recoverable (warn); anything else raises the
        # exception class recorded in _iermess.
        if ier in [1, 2, 3]:
            warnings.warn(RuntimeWarning(_iermess[ier][0]))
        else:
            try:
                raise _iermess[ier][1](_iermess[ier][0])
            except KeyError:
                raise _iermess['unknown'][1](_iermess['unknown'][0])
    if full_output:
        try:
            return tck, fp, ier, _iermess[ier][0]
        except KeyError:
            return tck, fp, ier, _iermess['unknown'][0]
    else:
        return tck
def splev(x, tck, der=0, ext=0):
    """
    Evaluate a B-spline or its derivatives.

    Given the knots and coefficients of a B-spline representation, evaluate
    the value of the smoothing polynomial and its derivatives. This is a
    wrapper around the FORTRAN routines splev and splder of FITPACK.

    Parameters
    ----------
    x : array_like
        An array of points at which to return the value of the smoothed
        spline or its derivatives. If `tck` was returned from `splprep`,
        then the parameter values, u should be given.
    tck : tuple
        A sequence of length 3 returned by `splrep` or `splprep` containing
        the knots, coefficients, and degree of the spline.
    der : int, optional
        The order of derivative of the spline to compute (must be less than
        or equal to k).
    ext : int, optional
        Controls the value returned for elements of ``x`` not in the
        interval defined by the knot sequence.

        * if ext=0, return the extrapolated value.
        * if ext=1, return 0
        * if ext=2, raise a ValueError
        * if ext=3, return the boundary value.

        The default value is 0.

    Returns
    -------
    y : ndarray or list of ndarrays
        An array of values representing the spline function evaluated at
        the points in ``x``.  If `tck` was returned from `splprep`, then this
        is a list of arrays representing the curve in N-dimensional space.

    See Also
    --------
    splprep, splrep, sproot, spalde, splint
    bisplrep, bisplev

    References
    ----------
    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
        Theory, 6, p.50-62, 1972.
    .. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
        Applics, 10, p.134-149, 1972.
    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
        on Numerical Analysis, Oxford University Press, 1993.
    """
    t, c, k = tck
    # A parametric tck (from splprep) has a list of coefficient arrays,
    # so c[0][0] succeeds; a plain tck (from splrep) has a flat array and
    # indexing raises.  FIX: catch Exception instead of a bare except so
    # KeyboardInterrupt/SystemExit are not swallowed here.
    try:
        c[0][0]
        parametric = True
    except Exception:
        parametric = False
    if parametric:
        # Evaluate each coordinate's 1-D spline independently.
        return list(map(lambda c, x=x, t=t, k=k, der=der:
                        splev(x, [t, c, k], der, ext), c))
    else:
        if not (0 <= der <= k):
            raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
        if ext not in (0, 1, 2, 3):
            raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)

        x = asarray(x)
        shape = x.shape          # remember input shape; FITPACK wants 1-D
        x = atleast_1d(x).ravel()
        y, ier = _fitpack._spl_(x, der, t, c, k, ext)

        if ier == 10:
            raise ValueError("Invalid input data")
        if ier == 1:
            raise ValueError("Found x value not in the domain")
        if ier:
            raise TypeError("An error occurred")

        return y.reshape(shape)
def splint(a, b, tck, full_output=0):
    """
    Evaluate the definite integral of a B-spline.

    Given the knots and coefficients of a B-spline, evaluate the definite
    integral of the smoothing polynomial between two given points.

    Parameters
    ----------
    a, b : float
        The end-points of the integration interval.
    tck : tuple
        A tuple (t,c,k) containing the vector of knots, the B-spline
        coefficients, and the degree of the spline (see `splev`).
    full_output : int, optional
        Non-zero to return optional output.

    Returns
    -------
    integral : float
        The resulting integral.
    wrk : ndarray
        An array containing the integrals of the normalized B-splines
        defined on the set of knots.

    Notes
    -----
    splint silently assumes that the spline function is zero outside the data
    interval (a, b).

    See Also
    --------
    splprep, splrep, sproot, spalde, splev
    bisplrep, bisplev
    UnivariateSpline, BivariateSpline

    References
    ----------
    .. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines",
        J. Inst. Maths Applics, 17, p.37-41, 1976.
    .. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
        on Numerical Analysis, Oxford University Press, 1993.
    """
    t, c, k = tck
    # Parametric tck detection (see splev).  FIX: except Exception instead
    # of a bare except clause.
    try:
        c[0][0]
        parametric = True
    except Exception:
        parametric = False
    if parametric:
        # Integrate each coordinate spline separately.
        return list(map(lambda c, a=a, b=b, t=t, k=k:
                        splint(a, b, [t, c, k]), c))
    else:
        aint, wrk = _fitpack._splint(t, c, k, a, b)
        if full_output:
            return aint, wrk
        else:
            return aint
def sproot(tck, mest=10):
    """
    Find the roots of a cubic B-spline.

    Given the knots (>=8) and coefficients of a cubic B-spline return the
    roots of the spline.

    Parameters
    ----------
    tck : tuple
        A tuple (t,c,k) containing the vector of knots,
        the B-spline coefficients, and the degree of the spline.
        The number of knots must be >= 8, and the degree must be 3.
        The knots must be a montonically increasing sequence.
    mest : int, optional
        An estimate of the number of zeros (Default is 10).

    Returns
    -------
    zeros : ndarray
        An array giving the roots of the spline.

    See also
    --------
    splprep, splrep, splint, spalde, splev
    bisplrep, bisplev
    UnivariateSpline, BivariateSpline

    References
    ----------
    .. [1] C. de Boor, "On calculating with b-splines", J. Approximation
        Theory, 6, p.50-62, 1972.
    .. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
        Applics, 10, p.134-149, 1972.
    .. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
        on Numerical Analysis, Oxford University Press, 1993.
    """
    t, c, k = tck
    if k != 3:
        raise ValueError("sproot works only for cubic (k=3) splines")
    # Parametric tck detection (see splev).  FIX: except Exception instead
    # of a bare except clause.
    try:
        c[0][0]
        parametric = True
    except Exception:
        parametric = False
    if parametric:
        return list(map(lambda c, t=t, k=k, mest=mest:
                        sproot([t, c, k], mest), c))
    else:
        if len(t) < 8:
            raise TypeError("The number of knots %d>=8" % len(t))
        z, ier = _fitpack._sproot(t, c, k, mest)
        if ier == 10:
            raise TypeError("Invalid input data. "
                            "t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
        if ier == 0:
            return z
        if ier == 1:
            # Returned array may be truncated; warn but still return it.
            warnings.warn(RuntimeWarning("The number of zeros exceeds mest"))
            return z
        raise TypeError("Unknown error")
def spalde(x, tck):
    """
    Evaluate all derivatives of a B-spline.

    Given the knots and coefficients of a cubic B-spline compute all
    derivatives up to order k at a point (or set of points).

    Parameters
    ----------
    x : array_like
        A point or a set of points at which to evaluate the derivatives.
        Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
    tck : tuple
        A tuple (t,c,k) containing the vector of knots,
        the B-spline coefficients, and the degree of the spline.

    Returns
    -------
    results : {ndarray, list of ndarrays}
        An array (or a list of arrays) containing all derivatives
        up to order k inclusive for each point `x`.

    See Also
    --------
    splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
    UnivariateSpline, BivariateSpline

    References
    ----------
    .. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
       6 (1972) 50-62.
    .. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
       applics 10 (1972) 134-149.
    .. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
       Numerical Analysis, Oxford University Press, 1993.
    """
    t, c, k = tck
    # Parametric tck detection (see splev).  FIX: except Exception instead
    # of a bare except clause.
    try:
        c[0][0]
        parametric = True
    except Exception:
        parametric = False
    if parametric:
        return list(map(lambda c, x=x, t=t, k=k:
                        spalde(x, [t, c, k]), c))
    else:
        x = atleast_1d(x)
        if len(x) > 1:
            # FITPACK's spalde handles one point at a time.
            return list(map(lambda x, tck=tck: spalde(x, tck), x))
        d, ier = _fitpack._spalde(t, c, k, x[0])
        if ier == 0:
            return d
        if ier == 10:
            raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
        raise TypeError("Unknown error")
# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
#             full_output=0,nest=None,per=0,quiet=1):

# Module-level cache holding the knots and work arrays of the most recent
# bisplrep call, so that task=1 continuation calls can reuse them.
_surfit_cache = {'tx': array([], float), 'ty': array([], float),
                 'wrk': array([], float), 'iwrk': array([], intc)}
def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
             kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
             full_output=0, nxest=None, nyest=None, quiet=1):
    """
    Find a bivariate B-spline representation of a surface.

    Given a set of data points (x[i], y[i], z[i]) representing a surface
    z=f(x,y), compute a B-spline representation of the surface. Based on
    the routine SURFIT from FITPACK.

    Parameters
    ----------
    x, y, z : ndarray
        Rank-1 arrays of data points.
    w : ndarray, optional
        Rank-1 array of weights. By default ``w=np.ones(len(x))``.
    xb, xe : float, optional
        End points of approximation interval in `x`.
        By default ``xb = x.min(), xe=x.max()``.
    yb, ye : float, optional
        End points of approximation interval in `y`.
        By default ``yb=y.min(), ye = y.max()``.
    kx, ky : int, optional
        The degrees of the spline (1 <= kx, ky <= 5).
        Third order (kx=ky=3) is recommended.
    task : int, optional
        If task=0, find knots in x and y and coefficients for a given
        smoothing factor, s.
        If task=1, find knots and coefficients for another value of the
        smoothing factor, s.  bisplrep must have been previously called
        with task=0 or task=1.
        If task=-1, find coefficients for a given set of knots tx, ty.
    s : float, optional
        A non-negative smoothing factor. If weights correspond
        to the inverse of the standard-deviation of the errors in z,
        then a good s-value should be found in the range
        ``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
    eps : float, optional
        A threshold for determining the effective rank of an
        over-determined linear system of equations (0 < eps < 1).
        `eps` is not likely to need changing.
    tx, ty : ndarray, optional
        Rank-1 arrays of the knots of the spline for task=-1
    full_output : int, optional
        Non-zero to return optional outputs.
    nxest, nyest : int, optional
        Over-estimates of the total number of knots. If None then
        ``nxest = max(kx+sqrt(m/2),2*kx+3)``,
        ``nyest = max(ky+sqrt(m/2),2*ky+3)``.
    quiet : int, optional
        Non-zero to suppress printing of messages.
        This parameter is deprecated; use standard Python warning filters
        instead.

    Returns
    -------
    tck : array_like
        A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
        coefficients (c) of the bivariate B-spline representation of the
        surface along with the degree of the spline.
    fp : ndarray
        The weighted sum of squared residuals of the spline approximation.
    ier : int
        An integer flag about splrep success. Success is indicated if
        ier<=0. If ier in [1,2,3] an error occurred but was not raised.
        Otherwise an error is raised.
    msg : str
        A message corresponding to the integer flag, ier.

    See Also
    --------
    splprep, splrep, splint, sproot, splev
    UnivariateSpline, BivariateSpline

    Notes
    -----
    See `bisplev` to evaluate the value of the B-spline given its tck
    representation.

    References
    ----------
    .. [1] Dierckx P.:An algorithm for surface fitting with spline functions
       Ima J. Numer. Anal. 1 (1981) 267-283.
    .. [2] Dierckx P.:An algorithm for surface fitting with spline functions
       report tw50, Dept. Computer Science,K.U.Leuven, 1980.
    .. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
       Numerical Analysis, Oxford University Press, 1993.
    """
    x, y, z = map(ravel, [x, y, z])  # ensure 1-d arrays.
    m = len(x)
    if not (m == len(y) == len(z)):
        raise TypeError('len(x)==len(y)==len(z) must hold.')
    if w is None:
        w = ones(m, float)
    else:
        w = atleast_1d(w)
    if not len(w) == m:
        raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
    if xb is None:
        xb = x.min()
    if xe is None:
        xe = x.max()
    if yb is None:
        yb = y.min()
    if ye is None:
        ye = y.max()
    if not (-1 <= task <= 1):
        raise TypeError('task must be -1, 0 or 1')
    if s is None:
        s = m - sqrt(2*m)
    if tx is None and task == -1:
        raise TypeError('Knots_x must be given for task=-1')
    if tx is not None:
        _surfit_cache['tx'] = atleast_1d(tx)
    nx = len(_surfit_cache['tx'])
    # FIX: error message previously read 'K nots_y'.
    if ty is None and task == -1:
        raise TypeError('Knots_y must be given for task=-1')
    if ty is not None:
        _surfit_cache['ty'] = atleast_1d(ty)
    ny = len(_surfit_cache['ty'])
    if task == -1 and nx < 2*kx+2:
        raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
    # FIX: this message previously said 'knots_x' for the y-knot check.
    if task == -1 and ny < 2*ky+2:
        raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
    if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
        raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
                        'supported. (1<=k<=5)' % (kx, ky))
    if m < (kx + 1)*(ky + 1):
        raise TypeError('m >= (kx+1)(ky+1) must hold')
    if nxest is None:
        nxest = int(kx + sqrt(m/2))
    if nyest is None:
        nyest = int(ky + sqrt(m/2))
    nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
    if task >= 0 and s == 0:
        # Interpolation (s=0) may need many more knots.
        nxest = int(kx + sqrt(3*m))
        nyest = int(ky + sqrt(3*m))
    if task == -1:
        _surfit_cache['tx'] = atleast_1d(tx)
        _surfit_cache['ty'] = atleast_1d(ty)
    tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
    wrk = _surfit_cache['wrk']
    u = nxest - kx - 1
    v = nyest - ky - 1
    km = max(kx, ky) + 1
    ne = max(nxest, nyest)
    bx, by = kx*v + ky + 1, ky*u + kx + 1
    b1, b2 = bx, bx + v - ky
    if bx > by:
        b1, b2 = by, by + u - kx
    msg = "Too many data points to interpolate"
    # Workspace sizes per the SURFIT documentation; guarded against
    # 32-bit integer overflow.
    lwrk1 = _intc_overflow(u*v*(2 + b1 + b2) +
                           2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
                           msg=msg)
    lwrk2 = _intc_overflow(u*v*(b2 + 1) + b2, msg=msg)
    tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
                                    task, s, eps, tx, ty, nxest, nyest,
                                    wrk, lwrk1, lwrk2)
    # FIX: results were previously stored into _curfit_cache (the 1-D
    # spline cache), so a subsequent task=1 call read stale values from
    # _surfit_cache.  Store into _surfit_cache, which is what task=1 reads.
    _surfit_cache['tx'] = tx
    _surfit_cache['ty'] = ty
    _surfit_cache['wrk'] = o['wrk']
    ier, fp = o['ier'], o['fp']
    tck = [tx, ty, c, kx, ky]

    ierm = min(11, max(-3, ier))
    if ierm <= 0 and not quiet:
        _mess = (_iermess2[ierm][0] +
                 "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
                 (kx, ky, len(tx), len(ty), m, fp, s))
        warnings.warn(RuntimeWarning(_mess))
    if ierm > 0 and not full_output:
        if ier in [1, 2, 3, 4, 5]:
            _mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
                     (kx, ky, len(tx), len(ty), m, fp, s))
            warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))
        else:
            try:
                raise _iermess2[ierm][1](_iermess2[ierm][0])
            except KeyError:
                raise _iermess2['unknown'][1](_iermess2['unknown'][0])
    if full_output:
        try:
            return tck, fp, ier, _iermess2[ierm][0]
        except KeyError:
            return tck, fp, ier, _iermess2['unknown'][0]
    else:
        return tck
def bisplev(x, y, tck, dx=0, dy=0):
    """
    Evaluate a bivariate B-spline and its derivatives.

    Return a rank-2 array of spline function values (or spline derivative
    values) at points given by the cross-product of the rank-1 arrays `x`
    and `y`.  In special cases, return an array or just a float if either
    `x` or `y` or both are floats.  Based on BISPEV from FITPACK.

    Parameters
    ----------
    x, y : ndarray
        Rank-1 arrays specifying the domain over which to evaluate the
        spline or its derivative.
    tck : tuple
        A sequence [tx, ty, c, kx, ky] as returned by `bisplrep`.
    dx, dy : int, optional
        The orders of the partial derivatives in `x` and `y` respectively.

    Returns
    -------
    vals : ndarray
        The B-spline or its derivative evaluated over the set formed by
        the cross-product of `x` and `y`.

    See Also
    --------
    splprep, splrep, splint, sproot, splev
    UnivariateSpline, BivariateSpline

    References
    ----------
    .. [1] Dierckx P. : An algorithm for surface fitting
       with spline functions
       Ima J. Numer. Anal. 1 (1981) 267-283.
    .. [2] Dierckx P. : An algorithm for surface fitting
       with spline functions
       report tw50, Dept. Computer Science,K.U.Leuven, 1980.
    .. [3] Dierckx P. : Curve and surface fitting with splines,
       Monographs on Numerical Analysis, Oxford University Press, 1993.
    """
    tx, ty, c, kx, ky = tck
    # Derivative orders must be strictly below the spline degrees.
    if not (0 <= dx < kx):
        raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
    if not (0 <= dy < ky):
        raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
    x, y = map(atleast_1d, [x, y])
    if (len(x.shape) != 1) or (len(y.shape) != 1):
        raise ValueError("First two entries should be rank-1 arrays.")

    z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy)
    if ier == 10:
        raise ValueError("Invalid input data")
    if ier:
        raise TypeError("An error occurred")

    # Reshape to the (len(x), len(y)) grid, then collapse singleton
    # dimensions so scalar inputs yield a scalar output.
    z.shape = len(x), len(y)
    if z.shape[0] > 1:
        return z
    row = z[0]
    if len(row) > 1:
        return row
    return row[0]
def dblint(xa, xb, ya, yb, tck):
    """Evaluate the integral of a spline over area [xa,xb] x [ya,yb].

    Parameters
    ----------
    xa, xb : float
        The end-points of the x integration interval.
    ya, yb : float
        The end-points of the y integration interval.
    tck : list [tx, ty, c, kx, ky]
        A sequence of length 5 returned by bisplrep containing the knot
        locations tx, ty, the coefficients c, and the degrees kx, ky
        of the spline.

    Returns
    -------
    integ : float
        The value of the resulting integral.
    """
    # Unpack the tck representation and delegate to FITPACK's DBLINT.
    knots_x, knots_y, coeffs, deg_x, deg_y = tck
    return dfitpack.dblint(knots_x, knots_y, coeffs, deg_x, deg_y,
                           xa, xb, ya, yb)
def insert(x, tck, m=1, per=0):
    """
    Insert knots into a B-spline.

    Given the knots and coefficients of a B-spline representation, create a
    new B-spline with a knot inserted `m` times at point `x`.
    This is a wrapper around the FORTRAN routine insert of FITPACK.

    Parameters
    ----------
    x (u) : array_like
        A 1-D point at which to insert a new knot(s).  If `tck` was returned
        from ``splprep``, then the parameter values, u should be given.
    tck : tuple
        A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing
        the vector of knots, the B-spline coefficients,
        and the degree of the spline.
    m : int, optional
        The number of times to insert the given knot (its multiplicity).
        Default is 1.
    per : int, optional
        If non-zero, the input spline is considered periodic.

    Returns
    -------
    tck : tuple
        A tuple (t,c,k) containing the vector of knots, the B-spline
        coefficients, and the degree of the new spline.
        ``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
        In case of a periodic spline (``per != 0``) there must be
        either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
        or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.

    Notes
    -----
    Based on algorithms from [1]_ and [2]_.

    References
    ----------
    .. [1] W. Boehm, "Inserting new knots into b-spline curves.",
        Computer Aided Design, 12, p.199-201, 1980.
    .. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
        Numerical Analysis", Oxford University Press, 1993.
    """
    t, c, k = tck
    # Parametric tck detection (see splev).  FIX: except Exception instead
    # of a bare except clause.
    try:
        c[0][0]
        parametric = True
    except Exception:
        parametric = False
    if parametric:
        cc = []
        for c_vals in c:
            # FIX: propagate the periodicity flag; it was previously
            # dropped, silently treating parametric periodic splines as
            # non-periodic.
            tt, cc_val, kk = insert(x, [t, c_vals, k], m, per)
            cc.append(cc_val)
        return (tt, cc, kk)
    else:
        tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
        if ier == 10:
            raise ValueError("Invalid input data")
        if ier:
            raise TypeError("An error occurred")
        return (tt, cc, k)
def splder(tck, n=1):
    r"""
    Compute the spline representation of the derivative of a given spline

    Parameters
    ----------
    tck : tuple of (t, c, k)
        Spline whose derivative to compute
    n : int, optional
        Order of derivative to evaluate. Default: 1

    Returns
    -------
    tck_der : tuple of (t2, c2, k2)
        Spline of order k2=k-n representing the derivative
        of the input spline.

    Notes
    -----

    .. versionadded:: 0.13.0

    See Also
    --------
    splantider, splev, spalde

    Examples
    --------
    This can be used for finding maxima of a curve:

    >>> from scipy.interpolate import splrep, splder, sproot
    >>> x = np.linspace(0, 10, 70)
    >>> y = np.sin(x)
    >>> spl = splrep(x, y, k=4)

    Now, differentiate the spline and find the zeros of the
    derivative. (NB: `sproot` only works for order 3 splines, so we
    fit an order 4 spline):

    >>> dspl = splder(spl)
    >>> sproot(dspl) / np.pi
    array([ 0.50000001,  1.5       ,  2.49999998])

    This agrees well with roots :math:`\pi/2 + n\pi` of
    :math:`\cos(x) = \sin'(x)`.
    """
    # NOTE: the docstring is now a raw string; it contains ``\pi`` and
    # ``\sin``, which are invalid escape sequences in a plain string and
    # trigger DeprecationWarnings on modern Python.
    if n < 0:
        # Negative derivative order means antidifferentiation.
        return splantider(tck, -n)

    t, c, k = tck

    if n > k:
        raise ValueError(("Order of derivative (n = %r) must be <= "
                          "order of spline (k = %r)") % (n, tck[2]))

    # Repeated knots make dt == 0 below; trap the resulting division
    # error and report it as non-differentiability.
    with np.errstate(invalid='raise', divide='raise'):
        try:
            for j in range(n):
                # See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5

                # Compute the denominator in the differentiation formula.
                dt = t[k+1:-1] - t[1:-k-1]
                # Compute the new coefficients
                c = (c[1:-1-k] - c[:-2-k]) * k / dt
                # Pad coefficient array to same size as knots (FITPACK
                # convention)
                c = np.r_[c, [0]*k]
                # Adjust knots
                t = t[1:-1]
                k -= 1
        except FloatingPointError:
            raise ValueError(("The spline has internal repeated knots "
                              "and is not differentiable %d times") % n)

    return t, c, k
def splantider(tck, n=1):
    """
    Compute the spline for the antiderivative (integral) of a given spline.

    Parameters
    ----------
    tck : tuple of (t, c, k)
        Spline whose antiderivative to compute
    n : int, optional
        Order of antiderivative to evaluate. Default: 1

    Returns
    -------
    tck_ader : tuple of (t2, c2, k2)
        Spline of order k2=k+n representing the antiderivative of the
        input spline.

    See Also
    --------
    splder, splev, spalde

    Notes
    -----
    `splder` is the inverse operation of this function:
    ``splder(splantider(tck))`` reproduces `tck` up to rounding error.

    .. versionadded:: 0.13.0

    Examples
    --------
    >>> from scipy.interpolate import splrep, splantider, splev
    >>> x = np.linspace(0, np.pi/2, 70)
    >>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
    >>> spl = splrep(x, y)
    >>> ispl = splantider(spl)
    >>> splev(np.pi/2, ispl) - splev(0, ispl)
    2.2572053588768486

    This approximates the complete elliptic integral
    :math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx` with m=0.8:

    >>> from scipy.special import ellipk
    >>> ellipk(0.8)
    2.2572053268208538
    """
    # A negative antiderivative order is a derivative.
    if n < 0:
        return splder(tck, -n)

    knots, coeffs, order = tck
    for _ in range(n):
        # Inverse of the operations in splder.
        # Multiplier in the antiderivative formula.
        dt = knots[order+1:] - knots[:-order-1]
        # New coefficients: running integral of the scaled coefficients,
        # anchored at 0 and padded per the FITPACK convention.
        integ = np.cumsum(coeffs[:-order-1] * dt) / (order + 1)
        coeffs = np.r_[0, integ, [integ[-1]] * (order + 2)]
        # Extend the knot vector by one knot at each end.
        knots = np.r_[knots[0], knots, knots[-1]]
        order += 1

    return knots, coeffs, order
| bsd-3-clause |
SRI-CSL/libpoly | examples/cad/plot.py | 1 | 1180 | #!/usr/bin/env python
import polypy
import cad
import matplotlib.pyplot as plt
# 2D plotting of polynomials
class PolyPlot2D(cad.CylinderNotify):
    """Collects polynomial sign conditions and decomposes the plane for
    them via cylindrical algebraic decomposition (CAD)."""

    # Initialize
    def __init__(self, x, y):
        # Variables of the two plot axes.
        self.x = x
        self.y = y
        self.cad = cad.CAD([x, y])
        # Queued (polynomial, sign_condition) pairs.
        self.polynomials = []
        # Cylinders reported by the CAD run(s).
        self.cylinders = []

    # Add a polynomial
    def add_polynomial(self, f, sign_condition):
        """Queue polynomial `f` with the given sign condition."""
        self.polynomials.append((f, sign_condition))

    # Show the plot
    def show(self):
        """Run the CAD over the queued polynomials, collecting cylinders."""
        # FIX: previously built cad.CAD([x, y]) from module globals, which
        # only existed when the file was run as a script; use the
        # instance's own variables instead.
        mycad = cad.CAD([self.x, self.y])
        mycad.cylinder_notify = self
        for (f, sign_condition) in self.polynomials:
            mycad.add_polynomial(f, sign_condition)
        mycad.run()

    # Notifications on cylinders
    def cylinder_notify(self, cylinder, assignment):
        """Callback invoked by the CAD for each cylinder it produces."""
        self.cylinders.append(cylinder)
        print("Cylinder:\n", cylinder)
if __name__ == "__main__":
    # Some variables: the two polynomial variables used as plot axes.
    x = polypy.Variable("x");
    y = polypy.Variable("y");
    # Setup: plot the zero set of x^2 + y^2 - 1 (the unit circle).
    plot = PolyPlot2D(x, y)
    plot.add_polynomial(x**2 + y**2 - 1, polypy.SGN_EQ_0)
    # Show
    plot.show()
| lgpl-3.0 |
probml/pyprobml | scripts/kernelRegressionDemo.py | 1 | 1894 | import numpy as np
from scipy.spatial.distance import cdist
import math
import matplotlib.pyplot as plt
from cycler import cycler
import pyprobml_utils as pml
CB_color = ['#377eb8', '#ff7f00', '#4daf4a']
cb_cycler = (cycler(linestyle=['-', '--', '-.']) * cycler(color=CB_color))
plt.rc('axes', prop_cycle=cb_cycler)
np.random.seed(0)  # reproducible noise
N = 100
x = 10 * (np.linspace(-1, 1, 100).reshape(-1, 1))
# Noise-free target: sinc-like curve sin(|x|)/|x|.  This linspace never
# hits 0 exactly (step 2/99), so there is no division by zero here.
ytrue = np.array([math.sin(abs(el)) / (abs(el)) for el in x]).reshape(-1, 1)
noise = 0.1
y = ytrue + noise * np.random.randn(N, 1)
x = (x - x.mean()) / x.std()  # normalizing.
plt.plot(x, ytrue)
plt.plot(x, y, 'kx')
def rbf_features(X, centers, sigma):
    """Gaussian RBF design matrix: entry (i, j) is
    exp(-||X[i] - centers[j]||^2 / (2 * sigma^2))."""
    pairwise = cdist(X, centers, 'minkowski', p=2.)
    scale = -0.5 / (sigma ** 2)
    return np.exp(scale * (pairwise ** 2))
# Nadaraya-Watson kernel regressor using an RBF kernel whose bandwidth is
# auto-selected from a candidate range by leave-one-out cross-validation.
class NdwkernelReg:
    def __init__(self, gammas=None):
        # Candidate bandwidths; the winner is chosen during fit().
        self.gammas = gammas
        self.gamma = None

    def fit(self, X, y):
        """Memorize the training data and select the bandwidth by LOO-CV."""
        self.X = X
        self.y = y
        self.gamma = self.select_gamma(self.gammas)

    def predict(self, X):
        """Kernel-weighted average of training targets at each query point."""
        weights = rbf_features(self.X, X, self.gamma)
        return (weights * self.y).sum(axis=0) / weights.sum(axis=0)

    def select_gamma(self, gammas):
        """Leave-one-out cross-validation over the candidate bandwidths."""
        mse_per_gamma = []
        for gamma in gammas:
            K = rbf_features(self.X, self.X, gamma)
            # Zero the diagonal so each point is predicted from the
            # *other* points only (leave-one-out).
            K = K - np.diag(np.diag(K))
            y_pred = (K * self.y).sum(axis=0) / K.sum(axis=0)
            mse_per_gamma.append(((y_pred[:, np.newaxis] - self.y) ** 2).mean())
        return gammas[np.argmin(mse_per_gamma)]
# Fit the regressor, letting it pick a bandwidth from [0.1, 1].
nws = NdwkernelReg(gammas=np.linspace(0.1, 1, 10))
nws.fit(x, y)
y_estimate = nws.predict(x)
plt.plot(x, y_estimate)
plt.legend(['true', 'data', 'estimate'])
plt.title('Gaussian kernel regression')
pml.savefig("kernelRegressionDemo.pdf")
plt.show()
| mit |
badlogicmanpreet/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_template.py | 70 | 8806 | """
This is a fully functional do nothing backend to provide a template to
backend writers. It is fully functional in that you can select it as
a backend with
import matplotlib
matplotlib.use('Template')
and your matplotlib scripts will (should!) run without error, though
no output is produced. This provides a nice starting point for
backend writers because you can selectively implement methods
(draw_rectangle, draw_lines, etc...) and slowly see your figure come
to life w/o having to have a full blown implementation before getting
any results.
Copy this to backend_xxx.py and replace all instances of 'template'
with 'xxx'. Then implement the class methods and functions below, and
add 'xxx' to the switchyard in matplotlib/backends/__init__.py and
'xxx' to the backends list in the validate_backend method in
matplotlib/__init__.py and you're off. You can use your backend with::
import matplotlib
matplotlib.use('xxx')
from pylab import *
plot([1,2,3])
show()
matplotlib also supports external backends, so you can use any module
in your PYTHONPATH with the syntax::
import matplotlib
matplotlib.use('module://my_backend')
where my_backend.py is your module name. This syntax is also
recognized in the rc file and in the -d argument in pylab, e.g.::
python simple_plot.py -dmodule://my_backend
The files that are most relevant to backend_writers are
matplotlib/backends/backend_your_backend.py
matplotlib/backend_bases.py
matplotlib/backends/__init__.py
matplotlib/__init__.py
matplotlib/_pylab_helpers.py
Naming Conventions
* classes Upper or MixedUpperCase
* varables lower or lowerUpper
* functions lower or underscore_separated
"""
from __future__ import division
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
class RendererTemplate(RendererBase):
    """
    The renderer handles drawing/rendering operations.

    This is a minimal do-nothing class that can be used to get started when
    writing a new backend. Refer to backend_bases.RendererBase for
    documentation of the classes methods.
    """
    def __init__(self, dpi):
        # Dots-per-inch of the target surface; used by points_to_pixels.
        self.dpi = dpi

    def draw_path(self, gc, path, transform, rgbFace=None):
        # Stub: a real backend renders `path` here with graphics context
        # `gc`, optionally filled with rgbFace.
        pass

    # draw_markers is optional, and we get more correct relative
    # timings by leaving it out.  backend implementers concerned with
    # performance will probably want to implement it
#     def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
#         pass

    # draw_path_collection is optional, and we get more correct
    # relative timings by leaving it out. backend implementers concerned with
    # performance will probably want to implement it
#     def draw_path_collection(self, master_transform, cliprect, clippath,
#                              clippath_trans, paths, all_transforms, offsets,
#                              offsetTrans, facecolors, edgecolors, linewidths,
#                              linestyles, antialiaseds):
#         pass

    # draw_quad_mesh is optional, and we get more correct
    # relative timings by leaving it out.  backend implementers concerned with
    # performance will probably want to implement it
#     def draw_quad_mesh(self, master_transform, cliprect, clippath,
#                        clippath_trans, meshWidth, meshHeight, coordinates,
#                        offsets, offsetTrans, facecolors, antialiased,
#                        showedges):
#         pass

    def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
        # Stub: render the image `im` at (x, y), clipped to bbox/clippath.
        pass

    def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
        # Stub: draw string `s` at (x, y) with font `prop` and rotation.
        pass

    def flipy(self):
        # True means y increases downward for this renderer; see
        # RendererBase.flipy for the precise contract.
        return True

    def get_canvas_width_height(self):
        # Placeholder canvas size in display units.
        return 100, 100

    def get_text_width_height_descent(self, s, prop, ismath):
        # Placeholder text metrics (width, height, descent).
        return 1, 1, 1

    def new_gc(self):
        # Hand out this backend's graphics-context type.
        return GraphicsContextTemplate()

    def points_to_pixels(self, points):
        # if backend doesn't have dpi, eg, postscript or svg
        return points
        # elif backend assumes a value for pixels_per_inch
        #return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
        # else
        #return points/72.0 * self.dpi.get()
class GraphicsContextTemplate(GraphicsContextBase):
    """
    The graphics context provides the color, line styles, etc...  See the gtk
    and postscript backends for examples of mapping the graphics context
    attributes (cap styles, join styles, line widths, colors) to a particular
    backend.  In GTK this is done by wrapping a gtk.gdk.GC object and
    forwarding the appropriate calls to it using a dictionary mapping styles
    to gdk constants.  In Postscript, all the work is done by the renderer,
    mapping line styles to postscript calls.

    If it's more appropriate to do the mapping at the renderer level (as in
    the postscript backend), you don't need to override any of the GC methods.
    If it's more appropriate to wrap an instance (as in the GTK backend) and
    do the mapping here, you'll need to override several of the setter
    methods.

    The base GraphicsContext stores colors as a RGB tuple on the unit
    interval, eg, (0.5, 0.0, 1.0). You may need to map this to colors
    appropriate for your backend.
    """
    # Intentionally empty: the base class behavior is sufficient for a
    # do-nothing backend.
    pass
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
    """
    For image backends - is not required
    For GUI backends - this should be overriden if drawing should be done in
    interactive python mode
    """
    # No-op for this template backend.
    pass
def show():
    """
    For image backends - is not required
    For GUI backends - show() is usually the last line of a pylab script and
    tells the backend that it is time to draw.  In interactive mode, this may
    be a do nothing func.  See the GTK backend for an example of how to handle
    interactive versus batch mode
    """
    # Iterate over every live figure manager registered with Gcf.
    for manager in Gcf.get_all_fig_managers():
        # do something to display the GUI
        pass
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance for figure number `num`.
    Extra positional/keyword arguments are forwarded to the Figure
    constructor (or to the class given via the 'FigureClass' kwarg).
    """
    # if a main-level app must be created, this is the usual place to
    # do it -- see backend_wx, backend_wxagg and backend_tkagg for
    # examples.  Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
    FigureClass = kwargs.pop('FigureClass', Figure)
    thisFig = FigureClass(*args, **kwargs)
    canvas = FigureCanvasTemplate(thisFig)
    manager = FigureManagerTemplate(canvas, num)
    return manager
class FigureCanvasTemplate(FigureCanvasBase):
    """
    The canvas the figure renders into.  Calls the draw and print fig
    methods, creates the renderers, etc...

    Public attribute

      figure - A Figure instance

    Note GUI templates will want to connect events for button presses,
    mouse movements and key presses to functions that call the base
    class methods button_press_event, button_release_event,
    motion_notify_event, key_press_event, and key_release_event.  See,
    eg backend_gtk.py, backend_wx.py and backend_tkagg.py
    """

    def draw(self):
        """
        Draw the figure using the renderer
        """
        # A fresh renderer is created per draw; real backends may cache it.
        renderer = RendererTemplate(self.figure.dpi)
        self.figure.draw(renderer)

    # You should provide a print_xxx function for every file format
    # you can write.

    # If the file type is not in the base set of filetypes,
    # you should add it to the class-scope filetypes dictionary as follows:
    filetypes = FigureCanvasBase.filetypes.copy()
    filetypes['foo'] = 'My magic Foo format'

    def print_foo(self, filename, *args, **kwargs):
        """
        Write out format foo.  The dpi, facecolor and edgecolor are restored
        to their original values after this call, so you don't need to
        save and restore them.
        """
        # Stub: a real backend writes the rendered figure to `filename`.
        pass

    def get_default_filetype(self):
        # Default extension used when none is given at save time.
        return 'foo'
class FigureManagerTemplate(FigureManagerBase):
    """
    Wrap everything up into a window for the pylab interface

    For non interactive backends, the base class does all the work
    """
    # The base class is sufficient for a non-interactive backend.
    pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################

# Alias required by matplotlib's backend-loading machinery.
FigureManager = FigureManagerTemplate
| agpl-3.0 |
krez13/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
    # ``np.int`` was a deprecated alias for the builtin ``int`` and was
    # removed in NumPy 1.24 -- use ``int`` directly.
    list_n_samples = np.linspace(100, 10000, 5).astype(int)
    list_n_features = [10, 100, 1000]
    n_test = 1000
    noise = 0.1
    alpha = 0.01
    # Result tensors: (n_samples grid, n_features grid, [MSE, wall time]).
    sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    for i, n_train in enumerate(list_n_samples):
        for j, n_features in enumerate(list_n_features):
            X, y, coef = make_regression(
                n_samples=n_train + n_test, n_features=n_features,
                noise=noise, coef=True)
            X_train = X[:n_train]
            y_train = y[:n_train]
            X_test = X[n_train:]
            y_test = y[n_train:]
            print("=======================")
            print("Round %d %d" % (i, j))
            print("n_features:", n_features)
            print("n_samples:", n_train)
            # Shuffle data (fixed seed for reproducibility across rounds)
            idx = np.arange(n_train)
            np.random.seed(13)
            np.random.shuffle(idx)
            X_train = X_train[idx]
            y_train = y_train[idx]
            # Standardize features and targets with train-set statistics.
            std = X_train.std(axis=0)
            mean = X_train.mean(axis=0)
            X_train = (X_train - mean) / std
            X_test = (X_test - mean) / std
            std = y_train.std(axis=0)
            mean = y_train.mean(axis=0)
            y_train = (y_train - mean) / std
            y_test = (y_test - mean) / std
            # Collect garbage before each timing so GC pauses don't skew it.
            gc.collect()
            print("- benchmarking ElasticNet")
            clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
            tstart = time()
            clf.fit(X_train, y_train)
            elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                        y_test)
            elnet_results[i, j, 1] = time() - tstart
            gc.collect()
            print("- benchmarking SGD")
            # Keep the total number of weight updates roughly constant
            # (~10**4) regardless of the training-set size.  np.ceil
            # returns a float, but an iteration count must be an int.
            n_iter = int(np.ceil(10 ** 4.0 / n_train))
            clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
                               n_iter=n_iter, learning_rate="invscaling",
                               eta0=.01, power_t=0.25)
            tstart = time()
            clf.fit(X_train, y_train)
            sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                      y_test)
            sgd_results[i, j, 1] = time() - tstart
            gc.collect()
            print("n_iter", n_iter)
            print("- benchmarking A-SGD")
            n_iter = int(np.ceil(10 ** 4.0 / n_train))
            # Averaging kicks in halfway through the updates.
            clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
                               n_iter=n_iter, learning_rate="invscaling",
                               eta0=.002, power_t=0.05,
                               average=(n_iter * n_train // 2))
            tstart = time()
            clf.fit(X_train, y_train)
            asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                       y_test)
            asgd_results[i, j, 1] = time() - tstart
            gc.collect()
            print("- benchmarking RidgeRegression")
            clf = Ridge(alpha=alpha, fit_intercept=False)
            tstart = time()
            clf.fit(X_train, y_train)
            ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                        y_test)
            ridge_results[i, j, 1] = time() - tstart
    # Plot results: one row of (RMSE, training-time) subplots per
    # n_features setting.
    i = 0
    m = len(list_n_features)
    pl.figure('scikit-learn SGD regression benchmark results',
              figsize=(5 * 2, 4 * m))
    for j in range(m):
        pl.subplot(m, 2, i + 1)
        pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
                label="ElasticNet")
        pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
                label="SGDRegressor")
        pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
                label="A-SGDRegressor")
        pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
                label="Ridge")
        pl.legend(prop={"size": 10})
        pl.xlabel("n_train")
        pl.ylabel("RMSE")
        pl.title("Test error - %d features" % list_n_features[j])
        i += 1
        pl.subplot(m, 2, i + 1)
        pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
                label="ElasticNet")
        pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
                label="SGDRegressor")
        pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
                label="A-SGDRegressor")
        pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
                label="Ridge")
        pl.legend(prop={"size": 10})
        pl.xlabel("n_train")
        pl.ylabel("Time [sec]")
        pl.title("Training time - %d features" % list_n_features[j])
        i += 1
    pl.subplots_adjust(hspace=.30)
    pl.show()
| bsd-3-clause |
dataculture/pysemantic | pysemantic/validator.py | 2 | 44008 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 jaidev <jaidev@newton>
#
# Distributed under terms of the BSD 3-clause license.
"""Traited Data validator for `pandas.DataFrame` objects."""
import copy
import cPickle
import json
import logging
import textwrap
import warnings
import os.path as op
import yaml
import numpy as np
import pandas as pd
from pandas.io.parsers import ParserWarning
from pandas.parser import CParserError
from traits.api import (HasTraits, File, Property, Str, Dict, List, Type,
Bool, Either, push_exception_handler, cached_property,
Instance, Float, Any, TraitError)
from pysemantic.utils import TypeEncoder, get_md5_checksum, colnames
from pysemantic.custom_traits import AbsFile, ValidTraitList
try:
from yaml import CDumper as Dumper
from yaml import CLoader as Loader
except ImportError:
from yaml import Dumper, Loader
push_exception_handler(lambda *args: None, reraise_exceptions=True)
logger = logging.getLogger(__name__)
class ParseErrorHandler(object):
    """Context manager that loads a dataset through pandas, iteratively
    relaxing the parser arguments whenever pandas raises an error that
    can be traced back to a bad schema specification (NAs in integer
    columns, unsafe casts, regex delimiters, malformed lines, ...).
    """

    def __init__(self, parser_args, project, maxiter=None):
        """
        :param parser_args: Dictionary of keyword arguments for the pandas
            parser (``read_csv``/``read_table``/``read_excel``).
        :param project: The project object that owns the dataset; its
            ``user_specified_parser`` flag decides whether the parser
            callable may be swapped automatically.
        :param maxiter: Maximum number of load attempts.  Defaults to the
            number of columns in the file, since every failed attempt
            relaxes the arguments for at least one column.
        """
        self.parser_args = parser_args
        self.project = project
        fpath = self.parser_args.get('filepath_or_buffer',
                                     self.parser_args.get('io'))
        sep = self.parser_args.get('sep', False)
        if sep:
            self.colnames = colnames(fpath, sep=sep)
        else:
            self.colnames = colnames(fpath)
        self.parser_args['sep'] = sep
        if maxiter is not None:
            self.maxiter = maxiter
        else:
            self.maxiter = len(self.colnames)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Nothing to clean up; the context-manager form exists only for
        # symmetry at the call site.
        pass

    def _update_parser(self, argdict):
        """Update the pandas parser based on the delimiter.

        :param argdict: Dictionary containing parser arguments.
        :return None:
        """
        fpath = argdict.get('filepath_or_buffer', argdict.get('io'))
        xls = fpath.endswith(".xlsx") or fpath.endswith("xls")
        if not self.project.user_specified_parser:
            if not xls:
                sep = argdict.get('sep', ",")
                if sep == ",":
                    self.parser = pd.read_csv
                else:
                    self.parser = pd.read_table
                    if sep == r'\t':
                        # read_table already defaults to tab-delimited.
                        argdict.pop('sep', None)
            else:
                self.parser = self._load_excel_sheet

    def _load_excel_sheet(self, **parser_args):
        """Thin wrapper over ``pd.read_excel`` matching the parser-callable
        signature used elsewhere in this class."""
        sheetname = parser_args.pop("sheetname")
        io = parser_args.pop('io')
        return pd.read_excel(io, sheetname=sheetname, **parser_args)

    def _update_dtypes(self, dtypes, typelist):
        """Update the dtypes parameter of the parser arguments in place.

        :param dtypes: The original column types
        :param typelist: List of tuples [(column_name, new_dtype), ...]
        """
        for colname, coltype in typelist:
            dtypes[colname] = coltype

    def _detect_row_with_na(self):
        """Return the list of columns in the dataframe, for which the data
        type has been marked as integer, but which contain NAs.
        """
        dtypes = self.parser_args.get("dtype")
        usecols = self.parser_args.get("usecols")
        if usecols is None:
            usecols = colnames(self.parser_args['filepath_or_buffer'])
        int_cols = [col for col in usecols if dtypes.get(col) is int]
        fpath = self.parser_args['filepath_or_buffer']
        sep = self.parser_args.get('sep', ',')
        nrows = self.parser_args.get('nrows')
        # Restrict na_values / converters to the integer columns so the
        # probe read below stays cheap.
        na_reps = {}
        if self.parser_args.get('na_values', False):
            for colname, na_vals in self.parser_args.get(
                    'na_values').iteritems():
                if colname in int_cols:
                    na_reps[colname] = na_vals
        converters = {}
        if self.parser_args.get('converters', False):
            for cname, cnv in self.parser_args.get('converters').iteritems():
                if cname in int_cols:
                    converters[cname] = cnv
        df = self.parser(fpath, sep=sep, usecols=int_cols, nrows=nrows,
                         na_values=na_reps, converters=converters)
        bad_rows = []
        for col in df:
            if np.any(pd.isnull(df[col])):
                bad_rows.append(col)
        return bad_rows

    def _detect_mismatched_dtype_row(self, specified_dtype):
        """Check the dataframe for columns that have a badly specified
        dtype.

        :param specified_dtype: The datatype specified in the schema
        """
        to_read = []
        dtypes = self.parser_args.get("dtype")
        for key, value in dtypes.iteritems():
            if value is specified_dtype:
                to_read.append(key)
        fpath = self.parser_args['filepath_or_buffer']
        sep = self.parser_args.get('sep', ',')
        nrows = self.parser_args.get('nrows')
        df = self.parser(fpath, sep=sep, usecols=to_read, nrows=nrows,
                         error_bad_lines=False)
        bad_cols = []
        for col in df:
            try:
                df[col] = df[col].astype(specified_dtype)
            except ValueError:
                bad_cols.append(col)
                msg = textwrap.dedent("""\
                    The specified dtype for the column '{0}' ({1}) seems to be
                    incorrect. This has been ignored for now.
                    Consider fixing this by editing the schema.""".format(
                    col, specified_dtype))
                warnings.warn(msg, UserWarning)
        return bad_cols

    def _remove_unsafe_integer_columns(self, loc):
        """Drop the dtype entry for the column at positional index ``loc``."""
        bad_col = self.colnames[loc]
        del self.parser_args['dtype'][bad_col]

    def _detect_column_with_invalid_literals(self):
        """Return the columns whose values cannot be cast to the dtype
        declared in the schema (probed by reading without dtypes)."""
        dtypes = self.parser_args.pop('dtype')
        df = self.parser(**self.parser_args)
        bad_cols = []
        for colname, dtype in dtypes.iteritems():
            try:
                df[colname].astype(dtype)
            except (ValueError, TypeError):
                bad_cols.append(colname)
            except KeyError:
                # The column may have been consumed as the index.
                if df.index.name == colname:
                    bad_cols.append(colname)
        self.parser_args['dtype'] = dtypes
        return bad_cols

    def load(self):
        """The main recursion loop.  Retries ``_load`` until it yields a
        dataframe or ``maxiter`` attempts have been made."""
        self.c_iter = 0
        df = None
        while True:
            df = self._load()
            self.c_iter += 1
            if (self.c_iter > self.maxiter) or (df is not None):
                break
        return df

    def _load(self):
        """The actual loader function that does the heavy lifting.

        Tries one parse; on a recognized error it relaxes the offending
        parser arguments and either retries immediately (by returning the
        re-parse) or returns None so that ``load`` retries.
        """
        self._update_parser(self.parser_args)
        try:
            return self.parser(**self.parser_args)
        except ValueError as e:
            if "Integer column has NA values" in e.message:
                # Integer columns with NAs must be read as floats.
                bad_rows = self._detect_row_with_na()
                new_types = [(col, float) for col in bad_rows]
                self._update_dtypes(self.parser_args['dtype'], new_types)
                logger.info("Dtypes for following columns changed:")
                logger.info(json.dumps(new_types, cls=TypeEncoder))
                return self.parser(**self.parser_args)
            elif e.message.startswith("invalid literal"):
                bad_cols = self._detect_column_with_invalid_literals()
                msg = textwrap.dedent("""\
                    Columns {} designated as type integer could not
                    be safely cast as integers. Attempting to load as
                    string data. Consider changing the type in the schema.
                    """.format(bad_cols))
                logger.warn(msg)
                warnings.warn(msg, ParserWarning)
                for col in bad_cols:
                    del self.parser_args['dtype'][col]
                return self.parser(**self.parser_args)
            elif e.message.startswith("Falling back to the 'python' engine"):
                # The python engine does not support dtype specs.
                del self.parser_args['dtype']
                msg = textwrap.dedent("""\
                    Dtypes are not supported regex delimiters. Ignoring the
                    dtypes in the schema. Consider fixing this by editing
                    the schema for better performance.
                    """)
                logger.warn(msg)
                logger.info("Removing the dtype argument")
                warnings.warn(msg, ParserWarning)
                if "error_bad_lines" in self.parser_args:
                    del self.parser_args['error_bad_lines']
                return self.parser(**self.parser_args)
            elif e.message.startswith("cannot safely convert"):
                loc = int(e.message.split()[-1])
                bad_colname = self.colnames[loc]
                specified_dtype = self.parser_args['dtype'][bad_colname]
                self._remove_unsafe_integer_columns(loc)
                msg = textwrap.dedent("""\
                    The specified dtype for the column '{0}' ({1}) seems to be
                    incorrect. This has been ignored for now.
                    Consider fixing this by editing the
                    schema.""".format(bad_colname, specified_dtype))
                logger.warn(msg)
                logger.info("dtype for column {} removed.".format(bad_colname))
                warnings.warn(msg, UserWarning)
                # No return here: fall through to None so ``load`` retries
                # with the relaxed arguments.
            elif e.message.startswith('could not convert string to float'):
                bad_cols = self._detect_mismatched_dtype_row(float)
                for col in bad_cols:
                    del self.parser_args['dtype'][col]
                msg = textwrap.dedent("""\
                    The specified dtype for the column '{0}' ({1}) seems to be
                    incorrect. This has been ignored for now.
                    Consider fixing this by editing the schema.""".format(
                    bad_cols, float))
                logger.warn(msg)
                # BUGFIX: the format string previously had no placeholder,
                # so the column list was silently dropped from the log.
                logger.info("dtype removed for columns: {0}".format(bad_cols))
                return self.parser(**self.parser_args)
        except AttributeError as e:
            if e.message == "'NoneType' object has no attribute 'dtype'":
                bad_rows = self._detect_mismatched_dtype_row(int)
                for col in bad_rows:
                    del self.parser_args['dtype'][col]
                # BUGFIX: ``msg`` was previously referenced here without
                # ever being assigned, raising a NameError whenever this
                # branch executed.
                msg = textwrap.dedent("""\
                    The specified dtype for the column(s) {0} seems to be
                    incorrect. This has been ignored for now.
                    Consider fixing this by editing the schema.""".format(
                    bad_rows))
                logger.warn(msg)
                logger.info("dtype removed for columns: {0}".format(bad_rows))
                return self.parser(**self.parser_args)
        except CParserError as e:
            # Malformed lines: skip them instead of failing the whole load.
            self.parser_args['error_bad_lines'] = False
            msg = 'Adding the "error_bad_lines=False" argument to the ' + \
                  'list of parser arguments.'
            logger.info(msg)
            return self.parser(**self.parser_args)
        except Exception as e:
            # Catch-all for parser backends that wrap the NA-in-int-column
            # failure in a different exception type.
            if "Integer column has NA values" in e.message:
                bad_rows = self._detect_row_with_na()
                new_types = [(col, float) for col in bad_rows]
                self._update_dtypes(self.parser_args['dtype'], new_types)
                logger.info("Dtypes for following columns changed:")
                logger.info(json.dumps(new_types, cls=TypeEncoder))
                return self.parser(**self.parser_args)
class DataFrameValidator(HasTraits):
    """A validator class for `pandas.DataFrame` objects.

    Enforces dataframe-level rules (row selection, duplicate/NA dropping,
    index column, shuffling, column renaming) and delegates per-column
    rules to ``SeriesValidator``.  Traits named ``_get_<trait>`` are
    Property getters resolved by the traits machinery -- their names must
    not change.
    """
    # The dataframe in question
    data = Instance(pd.DataFrame)
    # the column rules to be enforced
    column_rules = Dict
    # rules related to the dataset itself
    rules = Dict
    # Column to set as index
    index_col = Property(Any, depends_on=['rules'])
    # whether to drop duplicates
    is_drop_duplicates = Property(Bool, depends_on=['rules'])
    # whether to drop NAs
    is_drop_na = Property(Bool, depends_on=['rules'])
    # Names of columns to be rewritten
    column_names = Property(Any, depends_on=['rules'])
    # Specifications relating to the selection of rows.
    nrows = Property(Any, depends_on=['rules'])
    # Whether to shuffle the rows of the dataframe before returning
    shuffle = Property(Bool, depends_on=['rules'])
    # Unique values to maintain per column
    unique_values = Property(Dict, depends_on=['column_rules'])

    def _rules_default(self):
        """Traits default initializer: no rules unless provided."""
        return {}

    @cached_property
    def _get_shuffle(self):
        """Whether to shuffle rows; off by default."""
        return self.rules.get("shuffle", False)

    @cached_property
    def _get_index_col(self):
        """Column to promote to the index, or False for none."""
        return self.rules.get('index_col', False)

    @cached_property
    def _get_nrows(self):
        """Row-selection spec; an empty dict means keep all rows."""
        return self.rules.get('nrows', {})

    @cached_property
    def _get_is_drop_na(self):
        """NA rows are dropped unless the rules opt out."""
        return self.rules.get("drop_na", True)

    @cached_property
    def _get_is_drop_duplicates(self):
        """Duplicate rows are dropped unless the rules opt out."""
        return self.rules.get("drop_duplicates", True)

    @cached_property
    def _get_column_names(self):
        """Renaming spec: dict, callable, list, or None."""
        return self.rules.get("column_names")

    @cached_property
    def _get_unique_values(self):
        """Map each column to its whitelist of allowed unique values
        (empty list means no restriction)."""
        uvals = {}
        if self.column_rules is not None:
            for colname, rules in self.column_rules.iteritems():
                uvals[colname] = rules.get('unique_values', [])
        return uvals

    def apply_uniques(self):
        """Drop every row whose value in a restricted column is not in
        that column's ``unique_values`` whitelist."""
        for colname, uniques in self.unique_values.iteritems():
            if colname in self.data:
                org_vals = self.data[colname].unique()
                for val in org_vals:
                    if len(uniques) > 0:
                        if val not in uniques:
                            drop_ix = self.data.index[
                                self.data[colname] == val]
                            self.data.drop(drop_ix, axis=0, inplace=True)

    def rename_columns(self):
        """Rename columns in dataframe as per the schema.

        The spec may be a dict (old -> new), a callable applied to each
        name, or a list assigned wholesale.
        """
        if self.column_names is not None:
            logger.info("Renaming columns as follows:")
            logger.info(json.dumps(self.column_names, cls=TypeEncoder))
            if isinstance(self.column_names, dict):
                for old_name, new_name in self.column_names.iteritems():
                    if old_name in self.data:
                        # pop-and-reassign moves the column to the end but
                        # keeps its data.
                        self.data[new_name] = self.data.pop(old_name)
            elif callable(self.column_names):
                columns = self.data.columns.copy()
                for old_name in columns:
                    new_name = self.column_names(old_name)
                    self.data[new_name] = self.data.pop(old_name)
            elif isinstance(self.column_names, list):
                self.data.columns = self.column_names
    def clean(self):
        """Return the converted dataframe after enforcing all rules.

        Order: unique-value filter, row selection, duplicate drop,
        per-column cleaning, renaming, NA drop, index assignment, shuffle.
        """
        self.apply_uniques()
        if isinstance(self.nrows, dict):
            if len(self.nrows) > 0:
                if self.nrows.get('random', False):
                    ix = self.data.index.values.copy()
                    np.random.shuffle(ix)
                    self.data = self.data.ix[ix]
                count = self.nrows.get('count', self.data.shape[0])
                self.data = self.data.ix[self.data.index[:count]]
        elif callable(self.nrows):
            # A callable spec selects positions from the index itself.
            ix = self.nrows(self.data.index)
            self.data = self.data.ix[self.data.index[ix]]
        if self.is_drop_duplicates:
            x = self.data.shape[0]
            try:
                self.data.drop_duplicates(inplace=True)
            except TypeError:
                # Unhashable cell values make drop_duplicates impossible.
                print "Cannot drop duplicate rows."
            y = self.data.shape[0]
            logger.info("{0} duplicate rows were dropped.".format(x - y))
        for col in self.data:
            logger.info("Commence cleaning of column {}".format(col))
            series = self.data[col]
            rules = self.column_rules.get(col, {})
            validator = SeriesValidator(data=series, rules=rules)
            self.data[col] = validator.clean()
            if len(validator.exclude_values) > 0:
                # Excluded values remove the entire row, not just the cell.
                for exval in validator.exclude_values:
                    self.data.drop(self.data.index[self.data[col] == exval],
                                   inplace=True)
                logger.info("Excluding following values from col {0}".format(
                    col))
                logger.info(json.dumps(validator.exclude_values))
        # self.data.dropna(inplace=True)
        self.rename_columns()
        if self.is_drop_na:
            x = self.data.shape[0]
            try:
                self.data.dropna(inplace=True)
            except TypeError:
                print "Cannot drop na."
            y = self.data.shape[0]
            logger.info("{0} rows containing NAs were dropped.".format(x - y))
        if self.index_col:
            self.data.set_index(self.index_col, drop=True, inplace=True)
            # Remove rows whose new index value is NA.
            un_ix = self.data.index.unique()
            na_ix = pd.isnull(un_ix)
            self.data.drop(un_ix[na_ix], axis=0, inplace=True)
        if self.shuffle:
            # sample(n) with n == len(df) is a full random permutation.
            self.data = self.data.sample(self.data.shape[0])
        return self.data
class SeriesValidator(HasTraits):
    """A validator class for `pandas.Series` objects.

    Applies per-column rules: duplicate/NA dropping, postprocessor
    callables, min/max bounds, and a regex filter for string columns.
    ``_get_<trait>`` methods are traits Property getters; the names are
    part of the traits protocol.
    """
    # the series in question
    data = Instance(pd.Series)
    # Rules of validation
    rules = Dict
    # Whether to drop NAs from the series.
    is_drop_na = Property(Bool, depends_on=['rules'])
    # Whether to drop duplicates from the series.
    is_drop_duplicates = Property(Bool, depends_on=['rules'])
    # List of values to exclude
    exclude_values = Property(List, depends_on=['rules'])
    # Minimum value permitted in the series
    minimum = Property(Float, depends_on=['rules'])
    # Maximum value permitted in the series
    maximum = Property(Float, depends_on=['rules'])
    # Regular expression match for series containing strings
    regex = Property(Str, depends_on=['rules'])
    # List of postprocessors that work in the series
    postprocessors = Property(List, depends_on=['rules'])

    def do_postprocessing(self):
        """Apply each postprocessor callable to the series in order,
        warning if one changes the series length (misaligns columns)."""
        for postprocessor in self.postprocessors:
            org_len = self.data.shape[0]
            logger.info("Applying postprocessor on column:")
            logger.info(json.dumps(postprocessor, cls=TypeEncoder))
            self.data = postprocessor(self.data)
            final_len = self.data.shape[0]
            if org_len != final_len:
                msg = ("Size of column changed after applying postprocessor."
                       "This could disturb the alignment of your data.")
                logger.warn(msg)
                warnings.warn(msg, UserWarning)

    def do_drop_duplicates(self):
        """Drop duplicates from the series if required."""
        if self.is_drop_duplicates:
            duplicates = self.data.index[self.data.duplicated()].tolist()
            logger.info("Following duplicated rows were dropped:")
            logger.info(json.dumps(duplicates))
            self.data.drop_duplicates(inplace=True)

    def do_drop_na(self):
        """Drop NAs from the series if required."""
        if self.is_drop_na:
            na_bool = pd.isnull(self.data)
            na_rows = self.data.index[na_bool].tolist()
            logger.info("Following rows containing NAs were dropped:")
            logger.info(json.dumps(na_rows))
            self.data.dropna(inplace=True)

    def drop_excluded(self):
        """Remove all values specified in `exclude_values`."""
        if len(self.exclude_values) > 0:
            logger.info("Removing the following exclude values:")
            logger.info(json.dumps(self.exclude_values))
            for value in self.exclude_values:
                self.data.drop(self.data.index[self.data == value],
                               inplace=True)

    def apply_minmax_rules(self):
        """Restrict the series to the minimum and maximum from the schema.

        Only applied to numeric and datetime series; the +/- inf defaults
        mean "no bound specified".
        """
        if self.data.dtype in (int, float, np.dtype('datetime64[ns]')):
            if self.minimum != -np.inf:
                logger.info("Setting minimum at {0}".format(self.minimum))
                self.data = self.data[self.data >= self.minimum]
            if self.maximum != np.inf:
                logger.info("Setting maximum at {0}".format(self.maximum))
                self.data = self.data[self.data <= self.maximum]

    def apply_regex(self):
        """Apply a regex filter on strings in the series."""
        if self.regex:
            if self.data.dtype is np.dtype('O'):
                # filter by regex
                logger.info("Applying regex filter with the following regex:")
                logger.info(self.regex)
                self.data = self.data[self.data.str.contains(self.regex)]

    def clean(self):
        """Return the converted series after enforcing all rules.

        NOTE(review): ``drop_excluded`` is not called here -- exclusion is
        handled by DataFrameValidator via ``exclude_values``; confirm this
        is intentional.
        """
        self.do_drop_duplicates()
        self.do_drop_na()
        self.do_postprocessing()
        self.apply_minmax_rules()
        self.apply_regex()
        return self.data

    @cached_property
    def _get_postprocessors(self):
        """Postprocessor callables from the rules (default: none)."""
        return self.rules.get("postprocessors", [])

    @cached_property
    def _get_exclude_values(self):
        """Values to exclude from the column (default: none)."""
        return self.rules.get("exclude", [])

    @cached_property
    def _get_is_drop_na(self):
        """Per-series NA dropping is off by default (the dataframe
        validator drops NAs at the row level)."""
        return self.rules.get("drop_na", False)

    @cached_property
    def _get_is_drop_duplicates(self):
        """Per-series duplicate dropping is off by default."""
        return self.rules.get("drop_duplicates", False)

    @cached_property
    def _get_minimum(self):
        """Lower bound; -inf means unbounded."""
        return self.rules.get("min", -np.inf)

    @cached_property
    def _get_maximum(self):
        """Upper bound; +inf means unbounded."""
        return self.rules.get("max", np.inf)

    @cached_property
    def _get_regex(self):
        """Regex filter for string columns; empty string disables it."""
        return self.rules.get("regex", "")
MYSQL_URL = "mysql+mysqldb://{username}:{password}@{hostname}/{db_name}"
class MySQLTableValidator(HasTraits):
    """A validator used when the data source is a mysql table.

    Derives an SQLAlchemy engine and pandas ``read_sql``-style parser
    arguments from the schema ``specs`` dict.  ``_get_<trait>`` methods
    are traits Property getters.
    """
    # Specifications to use when making parser arguments
    specs = Dict
    # Name of the MySQL table to read
    table_name = Property(Str, depends_on=['specs'])
    # A dictionary containing the configuration
    config = Property(Dict, depends_on=['specs'])
    # Username used to connect to the DB
    username = Property(Str, depends_on=['config'])
    # Password used to connect to the DB
    password = Property(Str, depends_on=['config'])
    # hostname of the DB
    hostname = Property(Str, depends_on=['config'])
    # name of the database
    db_name = Property(Str, depends_on=['config'])
    # Chunksize
    chunksize = Property(Any, depends_on=['specs'])
    # Query
    query = Property(Str, depends_on=['specs'])
    # SQlAlchemy connection object to be used by the parser
    connection = Property(Any, depends_on=['username', 'password', 'hostname',
                                           'db_name'])
    # Parser args to be used by the pandas parser
    parser_args = Property(Dict, depends_on=['connection', 'specs'])

    @cached_property
    def _get_chunksize(self):
        return self.specs.get("chunksize")

    @cached_property
    def _get_config(self):
        return self.specs.get("config")

    @cached_property
    def _get_username(self):
        return self.config.get('username')

    @cached_property
    def _get_password(self):
        return self.config.get('password')

    @cached_property
    def _get_hostname(self):
        return self.config.get('hostname')

    @cached_property
    def _get_db_name(self):
        return self.config.get('db_name')

    @cached_property
    def _get_table_name(self):
        # NOTE(review): reads "table_name" from the config dict, whereas
        # PostGRESTableValidator reads it from specs -- confirm which is
        # intended.
        return self.config.get("table_name")

    @cached_property
    def _get_query(self):
        return self.specs.get("query")

    @cached_property
    def _get_connection(self):
        # Imported lazily so sqlalchemy is only required for DB sources.
        from sqlalchemy import create_engine
        url = MYSQL_URL.format(username=self.username, password=self.password,
                               hostname=self.hostname, db_name=self.db_name)
        return create_engine(url)

    @cached_property
    def _get_parser_args(self):
        """Assemble keyword arguments for the pandas SQL reader."""
        return dict(table_name=self.table_name,
                    con=self.connection,
                    coerce_float=self.specs.get("coerce_float", True),
                    index_col=self.specs.get("index_col"),
                    parse_dates=self.specs.get("parse_dates"),
                    columns=self.specs.get("use_columns"),
                    chunksize=self.specs.get("chunksize"),
                    query=self.specs.get("query"))
# SQLAlchemy connection-URL template for PostgreSQL (psycopg2 driver).
POSTGRE_URL = "postgresql+psycopg2://{username}:{password}@{hostname}/{" \
              "db_name}"
class PostGRESTableValidator(HasTraits):
    """A validator used when the data source is a postgres table.

    Mirrors ``MySQLTableValidator`` but builds a psycopg2-backed engine.
    ``_get_<trait>`` methods are traits Property getters.
    """
    # Specifications to use when making parser arguments
    specs = Dict
    # Name of the PostgreSQL table to read
    table_name = Property(Str, depends_on=['specs'])
    # A dictionary containing the configuration
    config = Property(Dict, depends_on=['specs'])
    # Username used to connect to the DB
    username = Property(Str, depends_on=['config'])
    # Password used to connect to the DB
    password = Property(Str, depends_on=['config'])
    # hostname of the DB
    hostname = Property(Str, depends_on=['config'])
    # name of the database
    db_name = Property(Str, depends_on=['config'])
    # Query
    query = Property(Str, depends_on=['specs'])
    # Chunksize
    chunksize = Property(Any, depends_on=['specs'])
    # SQlAlchemy connection object to be used by the parser
    connection = Property(Any, depends_on=['username', 'password', 'hostname',
                                           'db_name'])
    # Parser args to be used by the pandas parser
    parser_args = Property(Dict, depends_on=['connection', 'specs'])

    @cached_property
    def _get_chunksize(self):
        return self.specs.get("chunksize")

    @cached_property
    def _get_config(self):
        return self.specs.get("config")

    @cached_property
    def _get_username(self):
        return self.config.get('username')

    @cached_property
    def _get_password(self):
        return self.config.get('password')

    @cached_property
    def _get_hostname(self):
        return self.config.get('hostname')

    @cached_property
    def _get_db_name(self):
        return self.config.get('db_name')

    @cached_property
    def _get_table_name(self):
        # Note: unlike the MySQL validator, the table name comes from
        # specs, not config.
        return self.specs.get("table_name")

    @cached_property
    def _get_connection(self):
        # Imported lazily so sqlalchemy is only required for DB sources.
        from sqlalchemy import create_engine
        url = POSTGRE_URL.format(username=self.username,
                                 password=self.password,
                                 hostname=self.hostname,
                                 db_name=self.db_name)
        return create_engine(url)

    @cached_property
    def _get_parser_args(self):
        """Assemble keyword arguments for the pandas SQL reader."""
        return dict(table_name=self.table_name,
                    con=self.connection,
                    query=self.specs.get("query"),
                    coerce_float=self.specs.get("coerce_float", True),
                    index_col=self.specs.get("index_col"),
                    parse_dates=self.specs.get("parse_dates"),
                    columns=self.specs.get("use_columns"),
                    chunksize=self.specs.get("chunksize"))
# Maps SchemaValidator trait names to the corresponding keyword-argument
# names expected by the pandas parsers (read_csv / read_table / read_excel).
TRAIT_NAME_MAP = {
    "filepath": "filepath_or_buffer",
    "nrows": "nrows",
    "index_col": "index_col",
    "delimiter": "sep",
    "dtypes": "dtype",
    "colnames": "usecols",
    "na_values": "na_values",
    "converters": "converters",
    "header": "header",
    "error_bad_lines": "error_bad_lines",
    "parse_dates": "parse_dates"
}
class SchemaValidator(HasTraits):
"""A validator class for schema in the data dictionary."""
@classmethod
def from_dict(cls, specification):
"""Get a validator from a schema dictionary.
:param specification: Dictionary containing schema specifications.
"""
return cls(specification=specification)
@classmethod
def from_specfile(cls, specfile, name, **kwargs):
"""Get a validator from a schema file.
:param specfile: Path to the schema file.
:param name: Name of the project to create the validator for.
"""
return cls(specfile=specfile, name=name, **kwargs)
def __init__(self, **kwargs):
"""Overwritten to ensure that the `required_args` trait is validated
when the object is created, not when the trait is accessed.
"""
super(SchemaValidator, self).__init__(**kwargs)
if not kwargs.get('is_pickled', False):
self.required_args = ['filepath', 'delimiter']
# Public traits
# Path to the data dictionary
specfile = File(exists=True)
# Name of the dataset described in the data dictionary
name = Str
# whether the data is a mysql table
is_mysql = Property(Bool, depends_on=['specification'])
# whether the data is a mysql table
is_postgresql = Property(Bool, depends_on=['specification'])
# Dict trait that holds the properties of the dataset
specification = Dict
# Path to the file containing the data
filepath = Property(Either(AbsFile, List(AbsFile), Str),
depends_on=['specification', 'specfile'])
# Whether the dataset spans multiple files
is_multifile = Property(Bool, depends_on=['filepath'])
# Whether the dataset is contained in a spreadsheet
is_spreadsheet = Property(Bool, depends_on=['filepath'])
# Default arguments for spreadsheets
non_spreadsheet_args = List
# Name of the sheet containing the dataframe. Only relevant when
# is_spreadsheet is True
sheetname = Property(Str, depends_on=['is_spreadsheet', 'specification'])
# Delimiter
delimiter = Property(Str, depends_on=['specification'])
# number of rows in the dataset
nrows = Property(Any, depends_on=['specification'])
# Index column for the dataset
index_col = Property(Any, depends_on=['specification'])
# A dictionary whose keys are the names of the columns in the dataset, and
# the keys are the datatypes of the corresponding columns
dtypes = Dict(key_trait=Str, value_trait=Type)
# Names of the columns in the dataset. This is just a convenience trait,
# it's value is just a list of the keys of `dtypes`
colnames = Property(List, depends_on=['specification', 'exclude_columns',
'filepath', 'is_multifile'])
# md5 checksum of the dataset file
md5 = Property(Str, depends_on=['filepath'])
# List of values that represent NAs
na_values = Property(Any, depends_on=['specification'])
# Default value of the `parse_dates` argument
parse_dates = Property(Any, depends_on=['specification'])
# List of converters to be applied to the columns. All converters are
# assumed to be callables, which take the series as input and return a
# series.
converters = Property(Dict, depends_on=['specification'])
# Header of the file
header = Property(Any, depends_on=['specification'])
# Names to use for columns in the dataframe
column_names = Property(Any, depends_on=['specification'])
# Rules for the dataframe that can only be enforeced after loading the
# dataset, therefore must be exported to DataFrameValidator.
df_rules = Dict
# List of columns to exclude from the data
exclude_columns = Property(List, depends_on=['specification'])
# Whether pickled arguments exist in the schema
is_pickled = Bool
# Whether to raise errors on malformed lines
error_bad_lines = Property(Bool, depends_on=['specification'])
# Path to pickle file containing parser arguments
pickle_file = Property(AbsFile, depends_on=['specification'])
# Dictionary of arguments loaded from the pickle file.
pickled_args = Property(Dict, depends_on=['pickle_file'])
# List of required traits
# FIXME: Arguments required by the schema should't have to be programmed
# into the validator class. There must be a way to enforce requirements
# right in the schema itself.
required_args = ValidTraitList
# Parser args for pandas
parser_args = Property(Dict, depends_on=['filepath', 'delimiter', 'nrows',
'dtypes', 'colnames'])
# Protected traits
_dtypes = Property(Dict(key_trait=Str, value_trait=Type),
depends_on=['specification'])
# Public interface
def get_parser_args(self):
"""Return parser args as required by pandas parsers."""
return self.parser_args
to_dict = get_parser_args
def set_parser_args(self, specs, write_to_file=False):
"""Magic method required by Property traits."""
self.parser_args = specs
if write_to_file:
logger.info("Following specs for dataset {0}".format(self.name) +
" were written to specfile {0}".format(self.specfile))
with open(self.specfile, "r") as f:
allspecs = yaml.load(f, Loader=Loader)
allspecs[self.name] = specs
with open(self.specfile, "w") as f:
yaml.dump(allspecs, f, Dumper=Dumper,
default_flow_style=False)
else:
logger.info("Following parser args were set for dataset {}".format(
self.name))
logger.info(json.dumps(specs, cls=TypeEncoder))
return True
def _check_md5(self):
import sys
if sys.platform == 'win32':
msg = "Verifying md5 checksums is not yet supported for your OS."
logger.warn(msg)
warnings.warn(msg, UserWarning)
return
if self.md5:
if self.md5 != get_md5_checksum(self.filepath):
msg = \
"""The MD5 checksum of the file {} does not match the one
specified in the schema. This may not be the file you are
looking for."""
logger.warn(msg.format(self.filepath))
warnings.warn(msg.format(self.filepath), UserWarning)
# Property getters and setters
@cached_property
def _get_is_mysql(self):
if "source" in self.specification:
return self.specification.get("source") == "mysql"
return False
@cached_property
def _get_is_postgresql(self):
if "source" in self.specification:
return self.specification.get("source") == "postgresql"
return False
@cached_property
def _get_parse_dates(self):
parse_dates = self.specification.get("parse_dates", False)
if parse_dates:
if isinstance(parse_dates, str):
parse_dates = [parse_dates]
return parse_dates
@cached_property
def _get_filepath(self):
if not self.is_pickled:
fpath = self.specification.get('path', "")
else:
if not (self.is_mysql or self.is_postgresql):
fpath = self.pickled_args['filepath_or_buffer']
else:
return ""
if isinstance(fpath, list):
for path in fpath:
if not (op.exists(path) and op.isabs(path)):
raise TraitError("filepaths must be absolute.")
elif isinstance(fpath, str):
if not op.isabs(fpath):
fpath = op.join(op.dirname(self.specfile), fpath)
if not (self.is_mysql or self.is_postgresql):
if not (op.exists(fpath) and op.isabs(fpath)):
raise TraitError("filepaths must be absolute.")
return fpath
@cached_property
def _get_is_multifile(self):
if not self.is_pickled:
if isinstance(self.filepath, list):
if len(self.filepath) > 1:
return True
return False
@cached_property
def _get_is_spreadsheet(self):
if (not self.is_multifile) and (not self.is_pickled):
return self.filepath.endswith('.xls') or self.filepath.endswith(
'xlsx')
return False
@cached_property
def _get_index_col(self):
ix_col = self.specification.get('index_col', None)
if not isinstance(ix_col, list):
if ix_col is not None:
col_rules = self.specification.get("column_rules")
if col_rules is not None:
if ix_col in col_rules:
self.df_rules["index_col"] = ix_col
return
return ix_col
@cached_property
def _get_sheetname(self):
if self.is_spreadsheet:
return self.specification.get('sheetname', self.name)
def _set_colnames(self, colnames):
self.colnames = colnames
@cached_property
def _get_parser_args(self):
if not (self.is_mysql or self.is_postgresql):
self._check_md5()
args = {}
for traitname, argname in TRAIT_NAME_MAP.iteritems():
args[argname] = getattr(self, traitname)
# Date/Time arguments
# FIXME: Allow for a mix of datetime column groupings and
# individual
# columns
# All column renaming delegated to df_validtor
if self.column_names is not None:
self.df_rules['column_names'] = self.column_names
if self.header not in (0, 'infer'):
del args['usecols']
if self.is_multifile:
arglist = []
for i in range(len(self.filepath)):
argset = copy.deepcopy(args)
argset.update({'filepath_or_buffer': self.filepath[i]})
argset.update({'nrows': self.nrows[i]})
arglist.append(argset)
return arglist
else:
if self.filepath:
args.update({'filepath_or_buffer': self.filepath})
if "nrows" in self.specification:
if isinstance(self.nrows, int):
args.update({'nrows': self.nrows})
elif isinstance(self.nrows, dict):
if self.nrows.get('random', False):
self.df_rules.update({'nrows': self.nrows})
del args['nrows']
if "range" in self.nrows:
start, stop = self.nrows['range']
args['skiprows'] = start
args['names'] = args.pop('usecols')
args['nrows'] = stop - start
if self.nrows.get("count", False) and \
self.nrows.get("shuffle", False):
args['nrows'] = self.nrows.get('count')
self.df_rules['shuffle'] = True
elif callable(self.nrows):
self.df_rules.update({'nrows': self.nrows})
del args['nrows']
self.pickled_args.update(args)
if self.is_spreadsheet:
self.pickled_args['sheetname'] = self.sheetname
self.pickled_args['io'] = self.pickled_args.pop(
'filepath_or_buffer')
for argname in self.non_spreadsheet_args:
self.pickled_args.pop(argname, None)
return self.pickled_args
else:
if self.is_mysql:
self.sql_validator = MySQLTableValidator(
specs=self.specification)
elif self.is_postgresql:
self.sql_validator = PostGRESTableValidator(
specs=self.specification)
return self.sql_validator.parser_args
def _non_spreadsheet_args_default(self):
return ['sep', 'parse_dates', 'nrows', 'names', 'usecols',
'error_bad_lines', 'dtype', 'header']
def _set_parser_args(self, specs):
self.parser_args.update(specs)
@cached_property
def _get_error_bad_lines(self):
return self.specification.get('error_bad_lines', False)
@cached_property
def _get_pickle_file(self):
return self.specification.get('pickle')
@cached_property
def _get_pickled_args(self):
    """Parser kwargs recovered from the schema's pickle file, if any."""
    if self.pickle_file is not None:
        # pickles are binary; "rb" is required on Python 3 and is also
        # correct on Python 2 (text mode corrupts binary protocols)
        with open(self.pickle_file, "rb") as fid:
            args = cPickle.load(fid)
        return args
    return {}
@cached_property
def _get_exclude_columns(self):
return self.specification.get("exclude_columns", [])
@cached_property
def _get_header(self):
return self.specification.get("header", 'infer')
@cached_property
def _get_column_names(self):
return self.specification.get("column_names")
@cached_property
def _get_converters(self):
return self.specification.get("converters", None)
@cached_property
def _get_md5(self):
return self.specification.get("md5", "")
@cached_property
def _get_na_values(self):
    """NA markers: the schema-wide value or a per-column dict.

    Falls back to collecting per-column ``na_values`` entries from the
    column_rules section; returns None when nothing is specified.
    """
    na_values = self.specification.get("na_values", None)
    if na_values is None:
        na_values = {}
        col_rules = self.specification.get("column_rules", {})
        # .items() works on both Python 2 and 3; .iteritems() is 2-only
        for colname, rules in col_rules.items():
            if "na_values" in rules:
                na_values[colname] = rules['na_values']
        if len(na_values) == 0:
            na_values = None
    return na_values
@cached_property
def _get_colnames(self):
    """Columns to load: use_columns minus exclude_columns, plus index_col.

    Returns None for SQL sources without an explicit column list; reads
    the file header when no use_columns is specified.
    """
    usecols = self.specification.get('use_columns')
    if (usecols is None) and (self.is_mysql or self.is_postgresql):
        return None
    if len(self.exclude_columns) > 0:
        if not usecols:
            usecols = colnames(self.filepath, sep=self.delimiter)
        for colname in self.exclude_columns:
            # tolerate excluded names that are absent from the column
            # list (list.remove used to raise ValueError for them)
            if colname in usecols:
                usecols.remove(colname)
    else:
        if usecols is None:
            if self.filepath and not self.is_multifile:
                return colnames(self.filepath, sep=self.delimiter)
    if self.index_col is not None:
        if usecols is not None:
            if self.index_col not in usecols:
                usecols.append(self.index_col)
    return usecols
@cached_property
def _get_nrows(self):
return self.specification.get('nrows', None)
@cached_property
def _get__dtypes(self):
return self.specification.get('dtypes', {})
@cached_property
def _get_delimiter(self):
return self.specification.get('delimiter', ',')
# Trait change handlers
def _specfile_changed(self):
if self.specification == {}:
with open(self.specfile, "r") as f:
self.specification = yaml.load(f,
Loader=Loader).get(
self.name, {})
def _filepath_default(self):
return self.specification.get("path")
def __dtypes_items_changed(self):
self.dtypes = self._dtypes
# Trait initializers
def _specification_default(self):
if op.isfile(self.specfile):
with open(self.specfile, 'r') as f:
data = yaml.load(f, Loader=Loader).get(self.name, {})
return data
return {}
def _dtypes_default(self):
return self._dtypes
def _df_rules_default(self):
return {}
| bsd-3-clause |
zooniverse/aggregation | analysis/old_weather.py | 1 | 1806 | __author__ = 'ggdhines'
import matplotlib
matplotlib.use('WXAgg')
import aggregation_api
from matplotlib import pyplot as plt
import matplotlib.cbook as cbook
project = aggregation_api.AggregationAPI(project_id = 195, environment="staging")
project.__setup__()
cursor = project.postgres_session.cursor()
# stmt = "select name from projects "
# cursor.execute(stmt)
# for r in cursor.fetchall():
# if r[1] == None:
# continue
# if "old" in r[1]:
# print r
# stmt = "select * from users where email = 'greg@zooniverse.org'"
# cursor.execute(stmt)
# for r in cursor.fetchall():
# print r
# assert False
#
# stmt = "select * from workflows where project_id = 195"
# cursor.execute(stmt)
#
# for r in cursor.fetchall():
# print r
# assert False
# print project.workflows
# print project.__sort_annotations__()
stmt = "select subject_ids,annotations from classifications where workflow_id = 611"# and user_id = 42"
cursor.execute(stmt)
for subject_ids,annotations in cursor.fetchall():
subject_id = subject_ids[0]
# subject_id = r[13][0]
# print subject_id
# if subject_id == 2369:
# print r
# continue
print subject_id
fname = project.__image_setup__(subject_id)
image_file = cbook.get_sample_data(fname)
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
print annotations
for box in annotations:
#print box["type"]
scale = 1.5
x1 = box["x"]/scale
y1 = box["y"]/scale
x2 = x1 +box["width"]/scale
y2 = y1 + box["height"]/scale
# x1 = 146/scale
# y1 = 154/scale
# x2 = x1 + 1009/scale
# y2 = y1 + 1633/scale
plt.plot([x1,x2,x2,x1,x1],[y1,y1,y2,y2,y1],"-",color="blue")
plt.show()
| apache-2.0 |
shahankhatch/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)

# Work on a 330-sample shuffled subset, starting with only 10 labels.
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]

n_total_samples = len(y)
n_labeled_points = 10

unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()

for i in range(5):
    y_train = np.copy(y)
    y_train[unlabeled_indices] = -1  # -1 marks "unlabeled" for LabelSpreading

    lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
    lp_model.fit(X, y_train)

    predicted_labels = lp_model.transduction_[unlabeled_indices]
    true_labels = y[unlabeled_indices]

    cm = confusion_matrix(true_labels, predicted_labels,
                          labels=lp_model.classes_)

    print('Iteration %i %s' % (i, 70 * '_'))
    print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
          % (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))

    print(classification_report(true_labels, predicted_labels))

    print("Confusion matrix")
    print(cm)

    # compute the entropies of transduced label distributions
    pred_entropies = stats.distributions.entropy(
        lp_model.label_distributions_.T)

    # select five digit examples that the classifier is most uncertain about
    # (was a duplicated assignment: `uncertainty_index = uncertainty_index = ...`)
    uncertainty_index = np.argsort(pred_entropies)[-5:]

    # keep track of indices that we get labels for; must stay an integer
    # array because it is passed to np.delete as an index array below
    # (was a float array from np.array([]))
    delete_indices = np.array([], dtype=int)

    f.text(.05, (1 - (i + 1) * .183),
           "model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
    for index, image_index in enumerate(uncertainty_index):
        image = images[image_index]

        sub = f.add_subplot(5, 5, index + 1 + (5 * i))
        sub.imshow(image, cmap=plt.cm.gray_r)
        sub.set_title('predict: %i\ntrue: %i' % (
            lp_model.transduction_[image_index], y[image_index]), size=10)
        sub.axis('off')

        # labeling 5 points, remote from labeled set
        delete_index, = np.where(unlabeled_indices == image_index)
        delete_indices = np.concatenate((delete_indices, delete_index))

    unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
    n_labeled_points += 5

f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
           "uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
moonboots/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 3 | 8770 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
    """Download a file if not present, and make sure it's the right size."""
    if not os.path.exists(filename):
        filename, _ = urllib.request.urlretrieve(url + filename, filename)
    size_on_disk = os.stat(filename).st_size
    if size_on_disk == expected_bytes:
        print('Found and verified', filename)
    else:
        print(size_on_disk)
        raise Exception(
            'Failed to verify ' + filename + '. Can you get to it with a browser?')
    return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a string.
def read_data(filename):
    """Return the whitespace-separated tokens of the first file in a zip.

    Returns None for an empty archive. The archive handle is always
    closed: the original leaked it on the early return, and its
    ``f.close()`` was unreachable except for empty archives.
    """
    with zipfile.ZipFile(filename) as f:
        for name in f.namelist():
            return f.read(name).split()
    return None
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, vocabulary_size=50000):
    """Map words to integer ids, replacing rare words with UNK (id 0).

    The vocabulary is capped at ``vocabulary_size`` most frequent words
    (generalized from the hard-coded module-level constant; the default
    preserves the old behavior).

    Returns:
        data: list of ids, one per input word.
        count: [word, frequency] pairs, with the true UNK count at index 0.
        dictionary: word -> id.
        reverse_dictionary: id -> word.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    data = list()
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    # backfill the true UNK frequency now that it is known
    count[0][1] = unk_count
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
    """Produce one skip-gram training batch from the global `data` stream.

    Advances the module-level cursor `data_index`. Each center word is
    paired with `num_skips` distinct random context words drawn from a
    window of `skip_window` words on either side.
    """
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    centers = np.ndarray(shape=(batch_size), dtype=np.int32)
    contexts = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    window = collections.deque(maxlen=span)
    # Prime the sliding window with `span` consecutive words.
    for _ in range(span):
        window.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    for i in range(batch_size // num_skips):
        target = skip_window  # center position inside the window
        used = [skip_window]
        for j in range(num_skips):
            # rejection-sample an unused context position
            while target in used:
                target = random.randint(0, span - 1)
            used.append(target)
            pos = i * num_skips + j
            centers[pos] = window[skip_window]
            contexts[pos, 0] = window[target]
        # slide the window forward by one word
        window.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    return centers, contexts
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], '->', labels[i, 0])
print(reverse_dictionary[batch[i]], '->', reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
num_sampled, vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
tf.initialize_all_variables().run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
    """Scatter the 2-D embeddings and annotate each point with its word."""
    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
    plt.figure(figsize=(18, 18))  # in inches
    for i, label in enumerate(labels):
        x, y = low_dim_embs[i, :]
        plt.scatter(x, y)
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn and matplotlib to visualize embeddings.")
| apache-2.0 |
josesho/bootstrap_contrast | bootstrap_contrast/old__/bootstrap_tools.py | 2 | 8156 | from __future__ import division
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import norm
from numpy.random import randint
from scipy.stats import ttest_ind, ttest_1samp, ttest_rel, mannwhitneyu, wilcoxon, norm
import warnings
# Keep python 2/3 compatibility, without using six. At some point,
# we may need to add six as a requirement, but right now we can avoid it.
try:
xrange
except NameError:
xrange = range
class bootstrap:
    '''Bootstrap summary statistic with confidence intervals.

    Given one (or two) numerical one-dimensional arrays, computes the
    summary statistic together with percentile and bias-corrected and
    accelerated (BCa) bootstrap confidence intervals.

    Keyword arguments:
        x1, x2: array-like
            The data in a one-dimensional array form. Only x1 is required.
            If x2 is given, the bootstrapped summary difference between
            the two groups (x2-x1) is computed.
        paired: boolean, default False
            Whether or not x1 and x2 are paired samples.
        statfunction: callable, default np.mean
            The summary statistic called on data.
        smoothboot: boolean, default False
            Taken from seaborn.algorithms.bootstrap.
            If True, performs a smoothed bootstrap (draws samples from a
            kernel density estimate).
        alpha_level: float, default 0.05
            Likelihood that the confidence interval produced does NOT
            include the true summary statistic. When alpha_level=0.05, a
            95% confidence interval is produced.
            (The docstring previously documented this parameter under the
            wrong name ``alpha``.)
        reps: int, default 5000
            Number of bootstrap iterations to perform.

    Returns:
        A `bootstrap` object reporting the summary statistics, percentile
        CIs, BCa CIs, and the settings used.
    '''
    def __init__(self, x1, x2=None,
                 paired=False,
                 statfunction=None,
                 smoothboot=False,
                 alpha_level=0.05,
                 reps=5000):
        # Taken from scikits.bootstrap code.
        # Turn to pandas series.
        x1 = pd.Series(x1)
        diff = False

        # Initialise statfunction; `is None` is the correct identity test
        # (was `== None`).
        if statfunction is None:
            statfunction = np.mean

        # Compute two-sided alphas.
        if alpha_level > 1. or alpha_level < 0.:
            raise ValueError("alpha_level must be between 0 and 1.")
        alphas = np.array([alpha_level / 2., 1 - alpha_level / 2.])

        if paired:
            # paired comparison requires a second, equally long sample
            if x2 is None:
                raise ValueError('Please specify x2.')
            else:
                x2 = pd.Series(x2)
                if len(x1) != len(x2):
                    raise ValueError('x1 and x2 are not the same length.')

        if (x2 is None) or (paired is True):
            # one-sample case, or paired difference collapsed to one sample
            if x2 is None:
                tx = x1
                paired = False
            else:
                tx = x2 - x1
                diff = True
            tdata = (tx,)

            # The statistic applied to the actual data.
            summ_stat = statfunction(*tdata)
            statarray = sns.algorithms.bootstrap(tx, func=statfunction,
                                                 n_boot=reps,
                                                 smooth=smoothboot)
            statarray.sort()

            # Percentile indices into the sorted bootstrap distribution.
            pct_low_high = np.round((reps - 1) * alphas)
            pct_low_high = np.nan_to_num(pct_low_high).astype('int')

            # Statistical tests.
            if paired is True:
                ttest_single = 'NIL'
                ttest_paired = ttest_rel(x1, x2)
                wilcoxonresult = wilcoxon(x1, x2)[1]
            else:
                ttest_single = ttest_1samp(x1, 0)[1]
                ttest_paired = 'NIL'
                wilcoxonresult = wilcoxon(x1)[1]
            mannwhitneyresult = 'NIL'

        elif x2 is not None and paired is False:
            # independent two-sample case: bootstrap each group, take the
            # difference of the bootstrap distributions
            diff = True
            ref_statarray = sns.algorithms.bootstrap(x1, func=statfunction,
                                                     n_boot=reps,
                                                     smooth=smoothboot)
            exp_statarray = sns.algorithms.bootstrap(x2, func=statfunction,
                                                     n_boot=reps,
                                                     smooth=smoothboot)
            tdata = exp_statarray - ref_statarray
            statarray = tdata.copy()
            statarray.sort()
            tdata = (tdata,)  # Note tuple form.

            # The difference as one would calculate it.
            summ_stat = statfunction(x2) - statfunction(x1)

            # Percentile indices into the sorted bootstrap distribution.
            pct_low_high = np.round((reps - 1) * alphas)
            pct_low_high = np.nan_to_num(pct_low_high).astype('int')

            # Statistical tests.
            ttest_single = 'NIL'
            ttest_paired = ttest_ind(x1, x2)
            mannwhitneyresult = mannwhitneyu(x1, x2)
            wilcoxonresult = 'NIL'

        # Bias-corrected accelerated indices; convenience function invoked.
        bca_low_high = bca(tdata, alphas, statarray, statfunction,
                           summ_stat, reps)

        # Warnings for unstable or extreme indices.
        for ind in [pct_low_high, bca_low_high]:
            if np.any(ind == 0) or np.any(ind == reps - 1):
                warnings.warn("Some values used extremal samples results are probably unstable.")
            elif np.any(ind < 10) or np.any(ind >= reps - 10):
                warnings.warn("Some values used top 10 low/high samples results may be unstable.")

        self.summary = summ_stat
        self.is_paired = paired
        self.is_difference = diff
        self.statistic = str(statfunction)
        self.n_reps = reps

        self.ci = (1 - alpha_level) * 100
        self.pct_ci_low = statarray[pct_low_high[0]]
        self.pct_ci_high = statarray[pct_low_high[1]]
        self.bca_ci_low = statarray[bca_low_high[0]]
        self.bca_ci_high = statarray[bca_low_high[1]]

        self.stat_array = np.array(statarray)
        self.pct_low_high_indices = pct_low_high
        self.bca_low_high_indices = bca_low_high

        self.pvalue_1samp_ttest = ttest_single
        self.pvalue_2samp_ttest = ttest_paired
        self.pvalue_wilcoxon = wilcoxonresult
        self.pvalue_mannWhitney = mannwhitneyresult

        self.results = {'stat_summary': self.summary,
                        'is_difference': diff,
                        'is_paired': paired,
                        'bca_ci_low': self.bca_ci_low,
                        'bca_ci_high': self.bca_ci_high,
                        'ci': self.ci}
def jackknife_indexes(data):
    # Taken without modification from scikits.bootstrap package
    """Yield index arrays for each leave-one-out jackknife sample.

    For a data set Y, the jackknife sample J[i] is Y with the i-th point
    deleted; this returns a generator of the corresponding index arrays.
    """
    full = np.arange(0, len(data))
    return (np.delete(full, omit) for omit in full)
def bca(data, alphas, statarray, statfunction, ostat, reps):
    '''Subroutine called to calculate the BCa statistics.'''
    # Bias correction: where the observed statistic sits within the
    # bootstrap distribution.
    z0 = norm.ppf((1.0 * np.sum(statarray < ostat, axis=0)) / reps)

    # Jackknife distribution of the statistic.
    jstat = [statfunction(*(x[idx] for x in data))
             for idx in jackknife_indexes(data[0])]
    jmean = np.mean(jstat, axis=0)

    # Acceleration term from the skew of the jackknife distribution.
    devs = jmean - jstat
    a = np.sum(devs ** 3, axis=0) / (6.0 * np.sum(devs ** 2, axis=0) ** 1.5)
    if np.any(np.isnan(a)):
        nanind = np.nonzero(np.isnan(a))
        warnings.warn("Some acceleration values were undefined. \
        This is almost certainly because all values \
        for the statistic were equal. Affected \
        confidence intervals will have zero width and \
        may be inaccurate (indexes: {}). \
        Other warnings are likely related.".format(nanind))

    # Adjusted quantile positions, then indices into the sorted array.
    zs = z0 + norm.ppf(alphas).reshape(alphas.shape + (1,) * z0.ndim)
    avals = norm.cdf(z0 + zs / (1 - a * zs))
    nvals = np.round((reps - 1) * avals)
    return np.nan_to_num(nvals).astype('int')
kensugino/jGEM | jgem/plottracks.py | 1 | 12515 | """Basic parts for plotting bigwig (coverage etc.), genes, ideograms.
"""
import os
import re
try:
from itertools import izip
except:
izip = zip
from itertools import chain
import numpy as N
import pandas as PD
import matplotlib.pyplot as PP
from matplotlib.collections import BrokenBarHCollection
import matplotlib.patches as MP
#from ngslib import wWigIO
#import bx
#from bx.bbi.bigwig_file import BigWigFile
from jgem.bxbbi.bigwig_file import BigWigFile
import jgem.plotutils as PU
import jgem.plotgenes as PG
FONTSIZE=6
####################### util for drawing Ideograms
# modified based on https://www.biostars.org/p/147364/#147637
IDEOCOLS = ['chrom', 'start', 'end', 'name', 'gieStain']
class Ideograms(object):
    """Draw chromosome ideograms from a UCSC-style cytoband table."""

    # Giemsa stain name -> RGB fill color.
    color_lookup = {
        'gneg': (1., 1., 1.),
        'gpos25': (.6, .6, .6),
        'gpos33': (.5, .5, .5),
        'gpos50': (.4, .4, .4),
        'gpos66': (.3, .3, .3),
        'gpos75': (.2, .2, .2),
        'gpos100': (0., 0., 0.),
        'acen': (.8, .4, .4),
        'gvar': (.8, .8, .8),
        'stalk': (.9, .9, .9),
    }
    # Default chromosomes: 1-19 plus X and Y (mouse-style numbering).
    chrom_list = ['chr%s' % i for i in (list(range(1, 20)) + ['X', 'Y'])]

    def __init__(self, fpath, chrom_list=None):
        """Read the cytoband table at *fpath*, keeping only chrom_list rows."""
        if chrom_list is not None:
            self.chrom_list = chrom_list
        self.ideo = PD.read_table(fpath, skiprows=1, names=IDEOCOLS)
        self.ideo = self.ideo[[x in self.chrom_list for x in self.ideo.chrom]]
        self.ideo['width'] = self.ideo.end - self.ideo.start
        self.ideo['colors'] = [self.color_lookup[x] for x in self.ideo.gieStain]

    def make_collection(self, df, chrom, ypos=0.25, height=0.5, **kwargs):
        """Build a BrokenBarHCollection of one chromosome's bands.

        Returns (collection, xmin, xmax).
        """
        if 'width' not in df.columns:
            df['width'] = df.end - df.start
        df0 = df[df['chrom'] == chrom]
        xranges = df0[['start', 'width']].values
        colors = df0['colors']
        yrange = (ypos, height)
        bbhc = BrokenBarHCollection(xranges, yrange, facecolors=colors, **kwargs)
        xmin = df0.start.min()
        xmax = df0.end.max()
        return bbhc, xmin, xmax

    def draw_one(self, chrom, ax, regions=(), drawname=False, **kwargs):
        """Draw one chromosome on *ax*, highlighting (start, end) regions.

        ``regions`` now defaults to an empty tuple: the previous mutable
        ``[]`` default is a shared-state hazard. Returns
        (ax, xmin, xmax, ytop).
        """
        h = 0.4
        y = 0.2
        bbhc, xmin, xmax = self.make_collection(self.ideo, chrom, y, h, **kwargs)
        ax.add_collection(bbhc)
        ax.set_xlim(xmin - 1e7, xmax + 1e7)
        y1 = 0.05
        h1 = y + h + (y - y1)
        for region in regions:
            x = region[0]
            # keep very small regions visible (at least 2% of the span)
            w = max(region[1] - x, xmax * 0.02)
            ax.add_patch(MP.Rectangle((x, y1), w, h1, facecolor='red',
                                      linewidth=0, alpha=0.8))
        if drawname:
            xmid = (xmin + xmax) / 2.
            ypos = y1 + h1
            ax.text(xmid, ypos, chrom, ha='center', va='bottom')
        PP.setp(ax, xticks=[], yticks=[], frame_on=False)
        return ax, xmin, xmax, y1 + h1
class Panel(object):
    """Vertical stack of Track objects drawn into one figure/axes."""

    def __init__(self, tracks=None, figsize=(3, 4)):
        # Avoid the shared mutable-default-list pitfall: each Panel gets
        # its own track list (was `tracks=[]`).
        self.tracks = [] if tracks is None else tracks
        self.figsize = figsize

    def draw(self, ax=None, frameon=False):
        """Draw all tracks, creating a figure when *ax* is not given."""
        if ax is None:
            fig, ax = PP.subplots(1, 1, figsize=self.figsize)
            PP.setp(ax, xticks=[], yticks=[], frame_on=False)
        # make axes for each track and dispatch draw command
        subaxes = self.make_subaxes(ax, frameon)
        for sax, track in zip(subaxes, self.tracks):
            PP.setp(sax, xticks=[], yticks=[])
            track.draw(ax=sax)
        self.ax = ax
        return ax

    def make_subaxes(self, ax, frameon=True):
        """Split *ax* vertically, one sub-axes per track, sized by track.h."""
        tracks = self.tracks
        # Normalize heights so they sum to 1; stack y positions from the top.
        tot_h = float(N.sum([x.h for x in tracks]))
        self.hs = hs = [x.h / tot_h for x in tracks]
        self.ys = ys = list(N.cumsum(hs[::-1])[::-1])[1:] + [0.]
        rects = [[0., y, 1., 0.9 * h] for h, y in zip(hs, ys)]
        args = dict(axisbg='w', frameon=frameon, xticks=[], yticks=[])
        return [PU.add_subplot_axes(ax, r, args, noticks=True) for r in rects]
class Track(object):
    """Base class for drawable tracks; subclasses override draw()."""

    def __init__(self, name):
        self.name = name
        self.h = 1.  # relative height used by Panel when stacking tracks

    def draw(self, ax):
        # Placeholder rendering: the track name centered in the axes.
        ax.text(0.5, 0.5, self.name, ha='center', va='center')
def locus2pos(locus):
    """Parse a 'chrom:start-end' locus string into (chrom, start, end)."""
    chrom, span = locus.split(':')
    start, end = [int(v) for v in span.split('-')]
    return chrom, start, end
def pos2locus(pos):
    """Format a (chrom, start, end) tuple as 'chrom:start-end'."""
    chrom, start, end = pos
    return '%s:%d-%d' % (chrom, start, end)
class Ideogram(Track):
    """Track showing one chromosome ideogram with the region highlighted."""

    def __init__(self, pos, ideofile, h=0.7, fontsize=FONTSIZE):
        """
        Args:
            pos: (chrom, st, ed)
            ideofile: path to ideogram file
            fontsize: font size
        """
        chrom, start, end = pos
        self.h = h
        self.chrom = chrom
        self.pos = (start, end)
        self.name = pos2locus(pos)
        self.ideo = Ideograms(fpath=ideofile)
        self.fontsize = fontsize

    def draw(self, ax):
        """Draw the ideogram and label it with the locus string."""
        ax, left, right, top = self.ideo.draw_one(self.chrom, ax, [self.pos])
        ax.text((left + right) / 2., top, self.name,
                ha='center', va='bottom', fontsize=self.fontsize)
class Bed12Gene(Track):
    """Track that draws one gene model from a BED12 record."""

    def __init__(self, pos, bedline=None, attr=None, color='k', **kwargs):
        if attr is not None:
            self.attr = attr
        else:
            assert(bedline is not None)
            self.attr = dict(zip(BEDCOLS, bedline.split('\t')))
            # exon sizes/starts are comma-separated with a trailing comma
            for c in ['esizes', 'estarts']:
                self.attr[c] = map(int, self.attr[c].strip()[:-1].split(','))
            for c in ['st', 'ed', 'tst', 'ted', 'nexons']:
                self.attr[c] = int(self.attr[c])
        self.name = self.attr['name']
        self.chrom, self.st, self.ed = pos
        assert (self.st < self.ed)
        self.color = color
        self.kwargs = kwargs

    def draw(self, ax):
        a = self.attr
        st, ed = a['st'], a['ed']
        xmin = self.st
        xmax = self.ed
        width = xmax - xmin
        xmid = (xmin + xmax) / 2.
        ymax = 0.8
        h = 0.4
        ymin = ymax - h
        ymid = ymax - h / 2.
        # baseline through the whole displayed locus
        ax.plot([xmin, xmax], [ymid, ymid], 'grey')
        # strand-direction arrows inside introns
        estsi = zip(a['estarts'], a['esizes'])
        ists = [st + est + esi for est, esi in estsi[:-1]]
        ieds = [st + est for est, esi in estsi[1:]]
        if a['strand'] == '+':
            dx, shape, hw = 1, 'right', 0.25
        else:
            dx, shape, hw = -1, 'right', 0.4
        hl = 0.05 * width
        arrowargs = dict(y=ymid, dx=dx, dy=0, shape=shape,
                         fc='grey', linewidth=0,
                         head_width=hw, head_length=hl)
        for ist, ied in zip(ists, ieds):
            iwidth = ied - ist
            imid = (ist + ied) / 2.
            if iwidth < 0.15 * width:
                continue  # intron too narrow for an arrow
            ax.arrow(imid - dx * (hl + 1) / 2., **arrowargs)
        # put arrows near the view edges when the gene starts inside the view
        if st > xmin:
            if dx < 0:
                axmin = xmin + hl - dx
                axmax = xmax - dx
            else:
                axmin = xmin - dx
                axmax = xmax - hl - dx
            ax.arrow(axmin, **arrowargs)
            ax.arrow(axmax, **arrowargs)
        # split exons into 5'UTR / coding / 3'UTR pieces at tst/ted
        tss = a['tst'] - st  # exon coords are st based
        tse = a['ted'] - st
        if tss == tse:
            # non-coding transcript: everything is drawn full height
            estsi1 = estsi3 = []
            estsi2 = estsi
        else:
            estsi1 = [(x, y) for x, y in estsi if x < tss]
            estsi2 = [(x, y) for x, y in estsi if ((x + y) >= tss) & (x <= tse)]
            estsi3 = [(x, y) for x, y in estsi if (x + y) > tse]
            if estsi1:
                x0, y0 = estsi1[-1]  # last size needs fixing
                if (x0 + y0) > tss:
                    estsi1[-1] = (x0, tss - x0)
            if estsi2:
                x0, y0 = estsi2[0]
                if x0 < tss:  # first start and size need fixing
                    estsi2[0] = (tss, y0 - (tss - x0))
                x0, y0 = estsi2[-1]
                if (x0 + y0) > tse:
                    estsi2[-1] = (x0, tse - x0)
            if estsi3:
                x0, y0 = estsi3[0]
                if x0 < tse:
                    estsi3[0] = (tse, y0 - (tse - x0))
        c = self.color
        cargs = dict(facecolor=c, edgecolor=c)
        cargs.update(self.kwargs)
        # UTR exons drawn as half-height boxes
        if (len(estsi1) + len(estsi3)) > 0:
            yrange = (ymid - h / 4., h / 2.)
            xranges = [(st + x, y) for x, y in estsi1] + [(st + x, y) for x, y in estsi3]
            bbhc = BrokenBarHCollection(xranges, yrange, **cargs)
            ax.add_collection(bbhc)
        # coding exons drawn as full-height boxes
        yrange = (ymid - h / 2., h)
        xranges = [(st + x, y) for x, y in estsi2]
        bbhc = BrokenBarHCollection(xranges, yrange, **cargs)
        ax.add_collection(bbhc)
        # gene name plus length label under the model
        txt = '%s (%.1fkb)' % (a['name'], width / 1000)
        ax.text(xmid, 0, txt, ha='center', va='bottom', fontsize=FONTSIZE)
        PP.setp(ax, xticks=[], yticks=[], frame_on=False,
                xlim=(xmin, xmax), ylim=(0, 1))
def compress(wigs, resolution, th=1000):
    """Merge adjacent wig intervals into chunks at least `resolution` wide,
    then convert to step-plot coordinates with wig2xy.

    wigs: sequence of (start, end, height) tuples
        (assumed contiguous, i.e. each start equals the previous end —
        TODO confirm against callers).
    th: when there are fewer than `th` intervals, no compression is done.

    NOTE(review): a trailing chunk narrower than `resolution` is never
    yielded and is therefore silently dropped.
    """
    if len(wigs)<th:
        w = wigs
    else:
        def _gen():
            # `v` accumulates the coverage integral of the current chunk
            # [st, ed); once the chunk is wider than `resolution` its
            # average height is emitted.
            st,ed,h = wigs[0]
            v = h*(ed-st)
            for x1,x2,h in wigs:
                # first iteration adds h*(ed-ed) == 0, so the seed value
                # of `v` above is not double counted
                v = (v+h*(x2-ed))
                if (x2-st)>resolution:
                    h = v/(x2-st)
                    yield (st,x2,h)
                    st,ed,v = x2,x2,0
                else:
                    ed = x2
        w = [x for x in _gen()]
    return wig2xy(w)
def wig2xy(wigs):
    """Convert wig intervals to step-plot coordinates.

    wigs: sequence of (start, end, height) tuples.
    Returns (x, y) lists suitable for ax.fill_between / ax.plot: each
    interval contributes the four corner points
    (start, 0), (start, h), (end, h), (end, 0).
    """
    x1, x2, h = zip(*wigs)
    z = N.zeros(len(h))
    # `izip` (Python 2 only) replaced by `zip`: inside chain.from_iterable
    # the result is identical and the helper now runs on Python 3 as well.
    x = list(chain.from_iterable(zip(x1, x1, x2, x2)))
    y = list(chain.from_iterable(zip(z, h, h, z)))
    return x, y
def compress2(wigs, window, minbins):
    """Rasterize wig intervals onto a per-base array and average it into
    fixed-width bins.

    wigs: sequence of (start, end, height) tuples (coordinates are ints).
    window: bin width in bases; widened never below 2, and shrunk so that
        at least `minbins` bins cover the region.
    Returns (x, y): bin start coordinates and mean heights. When the grid
    would be denser than the data (>= 4 bins per interval) the exact
    step-plot coordinates from wig2xy are returned instead.
    """
    st, x2, h = wigs[0]
    x1, ed, h = wigs[-1]
    nbins = (ed-st)/window
    if nbins < minbins:
        window = max(2, int(float(ed-st)/minbins))
        nbins = (ed-st)/window
    if nbins >= 4*len(wigs):
        # binning would not reduce the point count: keep exact coordinates
        return wig2xy(wigs)
    # paint heights onto a per-base array, origin at `st`
    y0 = N.zeros(ed-st)
    for x1, x2, h in wigs:
        y0[x1-st:x2-st] = h
    # (removed unused `win = N.ones(window)` — leftover from an earlier
    # convolution-based implementation)
    y = subsample(y0, window)
    x = N.arange(st, ed+window, window)[:len(y)]
    return x, y
def subsample(arr, n):
    """Average consecutive groups of `n` elements of `arr`.

    Any trailing elements that do not fill a complete group are dropped.
    """
    usable = (len(arr) // n) * n
    return N.mean(arr[:usable].reshape(-1, n), 1)
class BigWig(Track):
    """Track showing coverage from a bigWig file as a filled step curve."""

    def __init__(self, fname, pos, name=None, h=0.6,
                 ymax=5,drawymax=False,drawname=True,
                 color='b',fontsize=FONTSIZE,resolution=100,minbins=100):
        # fname: path to the bigWig file
        # pos: (chrom, start, end) genomic window to display
        # h: relative height of this track in the figure
        # ymax: fixed upper limit of the y axis (signal is clipped visually)
        # resolution/minbins: binning parameters forwarded to compress2
        self.h = h
        self.fname = fname
        self.pos = pos
        if name is None:
            # derive a display name from the file name
            name = os.path.basename(fname).replace('_star','').replace('.bw','')
        self.name = name
        self.chrom,self.st, self.ed = pos
        assert (self.st<self.ed)
        #self.margin = margin
        width = self.ed - self.st
        self.xmin = self.st# - margin*width
        self.xmax = self.ed# + margin*width
        self.ymax = ymax
        self.color = color
        self.drawymax=drawymax
        self.drawname=drawname
        self.fontsize = fontsize
        self.resolution=resolution
        self.minbins=minbins
        #print 'st,ed,width,margin,xmin,max', self.st,self.ed,width,margin, self.xmin, self.xmax
        #self.bw = BigWigFile(fname)

    def draw(self, ax):
        """Read the window's intervals from the bigWig file and draw them."""
        #bw = self.fname
        #wWigIO.open(bw)
        #wigs = wWigIO.getIntervals(bw, self.chrom,self.st,self.ed)
        #wWigIO.close(bw)
        with open(self.fname) as fobj:
            bw = BigWigFile(fobj)
            wigs = bw.get(self.chrom, self.st, self.ed)
        xmin,xmax = self.xmin,self.xmax
        ymax = self.ymax
        fs = self.fontsize
        # draw track name
        if self.drawname:
            ax.text((xmin+xmax)/2., ymax, self.name, ha='center',va='top',fontsize=fs)
        # draw ymax indicator
        if self.drawymax:
            ax.text(xmax,ymax,'%g'% ymax, ha='right',va='top',fontsize=fs)
        if len(wigs)>0:
            #x,y = compress(wigs, self.resolution)
            # downsample to a manageable number of points before plotting
            x,y = compress2(wigs,self.resolution,self.minbins)
            #print "%s #wigs %d #x/4 %d" % (self.name, len(wigs), len(x)/4)
            ax.fill_between(x,0,y,linewidth=0,color=self.color)
        # plot baseline
        ymin = -(ymax/30.)
        ax.plot([xmin,xmax],[ymin,ymin],'grey')
        PP.setp(ax, xlim=(xmin,xmax), ylim=(2*ymin,ymax), frame_on=False)
class Gene(Track):
    """Track for plotting a gene using :class:SpliceFig class in :module:plotgene
    """

    def __init__(self, pos, ex, sj, cmap='R', collapsey=False, h=1,
                 compress=False, fontsize=FONTSIZE, **kwargs):
        # pos: (chrom, start, end) genomic window
        # ex, sj: exon and splice-junction data handed to PG.SpliceFig
        # collapsey: draw all exons on one line; needs less vertical space
        if collapsey:
            self.h = h
        else:
            self.h = h*1.5
        self.sf = PG.SpliceFig(ex,sj,compress=compress,fontsize=fontsize,**kwargs)
        self.compress = compress
        self.chrom,self.st, self.ed = pos
        self.xlim = (self.st,self.ed)
        self.cmap = cmap
        self.collapsey = collapsey

    def draw(self, ax):
        sf = self.sf
        if self.collapsey:
            # presumably 'ey' is the per-exon vertical offset; forcing it
            # to 0 flattens the layout — TODO confirm in plotgene
            sf.ex['ey'] = 0
        if self.compress:
            # compressed mode: let SpliceFig choose its own x range
            sf.draw(ax, xlim=None, cm=self.cmap)
        else:
            sf.draw(ax, xlim=self.xlim, cm=self.cmap)
class Line(Track):
    """Track for plotting line"""

    def __init__(self, y, ylim=None, h=1.,fontsize=FONTSIZE, **kw):
        # y: sequence of values handed directly to ax.plot
        # ylim: optional (ymin, ymax) for the axis
        # kw: extra keyword arguments forwarded to ax.plot
        self.h = h
        self.y = y
        self.ylim = ylim
        self.kw = kw
        self.fs = fontsize

    def draw(self, ax):
        # Plot the curve, then annotate the y-axis maximum in the
        # top-right corner of the track.
        ax.plot(self.y, **self.kw)
        if self.ylim:
            ax.set_ylim(self.ylim)
        xmin,xmax = ax.get_xlim()
        ymin,ymax = ax.get_ylim()
        ax.text(xmax,ymax,'{0:.1g}'.format(ymax), ha='right',va='top',fontsize=self.fs)
class Image(Track):
    """Track for plotting image"""

    def __init__(self, z, h=0.2, zlim=None, cm='jet', **kw):
        # z: 2D array displayed with imshow
        # zlim: optional (vmin, vmax) color limits
        # kw: currently unused extra keyword arguments
        self.h = h
        self.z = z
        self.zlim = zlim
        self.cm = cm
        self.kw = kw

    def draw(self, ax):
        if self.zlim is not None:
            vmin,vmax=self.zlim
            # NOTE(review): the unpacked `vmin` is ignored and 0 is passed
            # instead — looks unintentional; confirm whether vmin should be
            # forwarded to imshow.
            ax.imshow(self.z, aspect='auto', interpolation='nearest', cmap=self.cm, vmin=0, vmax=vmax)
        else:
            ax.imshow(self.z, aspect='auto', interpolation='nearest', cmap=self.cm)
class TrackWrapper(Track):
    """Adapter track that delegates drawing to an arbitrary callable.

    The wrapped function is invoked as ``func(ax=ax, **args)``.
    """
    def __init__(self, func, **args):
        self.func = func
        self.args = args

    def draw(self, ax):
        draw_kwargs = self.args
        # note: the stored kwargs intentionally keep the last axes used
        draw_kwargs['ax'] = ax
        self.func(**draw_kwargs)
| mit |
joernhees/scikit-learn | sklearn/externals/joblib/parallel.py | 24 | 33170 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
from contextlib import contextmanager
import warnings
try:
import cPickle as pickle
except ImportError:
import pickle
from ._multiprocessing_helpers import mp
from .format_stack import format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_bytes
from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
ThreadingBackend, SequentialBackend)
from ._compat import _basestring
# Make sure that those two classes are part of the public joblib.parallel API
# so that 3rd party backend implementers can import them from here.
from ._parallel_backends import AutoBatchingMixin # noqa
from ._parallel_backends import ParallelBackendBase # noqa
# Registry of named backend factories selectable via Parallel(backend=...)
# or extended through register_parallel_backend().
BACKENDS = {
    'multiprocessing': MultiprocessingBackend,
    'threading': ThreadingBackend,
    'sequential': SequentialBackend,
}

# name of the backend used by default by Parallel outside of any context
# managed by ``parallel_backend``.
DEFAULT_BACKEND = 'multiprocessing'
DEFAULT_N_JOBS = 1

# Thread local value that can be overriden by the ``parallel_backend`` context
# manager
_backend = threading.local()
def get_active_backend():
    """Return the active default (backend, n_jobs) pair."""
    current = getattr(_backend, 'backend_and_jobs', None)
    if current is not None:
        return current
    # Outside the scope of any parallel_backend context manager:
    # instantiate the module-level default backend on the fly.
    return BACKENDS[DEFAULT_BACKEND](), DEFAULT_N_JOBS
@contextmanager
def parallel_backend(backend, n_jobs=-1, **backend_params):
    """Change the default backend used by Parallel inside a with block.

    If ``backend`` is a string it must match a previously registered
    implementation using the ``register_parallel_backend`` function.
    Alternatively backend can be passed directly as an instance.

    By default all available workers will be used (``n_jobs=-1``) unless the
    caller passes an explicit value for the ``n_jobs`` parameter.

    This is an alternative to passing a ``backend='backend_name'`` argument to
    the ``Parallel`` class constructor. It is particularly useful when calling
    into library code that uses joblib internally but does not expose the
    backend argument in its own API.

    >>> from operator import neg
    >>> with parallel_backend('threading'):
    ...     print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
    ...
    [-1, -2, -3, -4, -5]

    Warning: this function is experimental and subject to change in a future
    version of joblib.

    .. versionadded:: 0.10
    """
    if isinstance(backend, _basestring):
        backend = BACKENDS[backend](**backend_params)
    # remember the previous thread-local value so it can be restored on exit
    old_backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
    try:
        _backend.backend_and_jobs = (backend, n_jobs)
        # return the backend instance to make it easier to write tests
        yield backend, n_jobs
    finally:
        if old_backend_and_jobs is None:
            # there was no override before entering the block: remove ours
            if getattr(_backend, 'backend_and_jobs', None) is not None:
                del _backend.backend_and_jobs
        else:
            _backend.backend_and_jobs = old_backend_and_jobs
# Under Linux or OS X the default start method of multiprocessing
# can cause third party libraries to crash. Under Python 3.4+ it is possible
# to set an environment variable to switch the default start method from
# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
# of causing semantic changes and some additional pool instantiation overhead.
if hasattr(mp, 'get_context'):
    # An unset/empty JOBLIB_START_METHOD falls back to the platform default.
    method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
    DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
    DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
    """Wrap a sequence of (func, args, kwargs) tuples as a single callable.

    Calling the instance runs every stored call in order and returns the
    list of their results.
    """
    def __init__(self, iterator_slice):
        self.items = list(iterator_slice)
        self._size = len(self.items)

    def __call__(self):
        results = []
        for func, args, kwargs in self.items:
            results.append(func(*args, **kwargs))
        return results

    def __len__(self):
        return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
    """Return the number of CPUs, or 1 when multiprocessing is disabled."""
    return 1 if mp is None else mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
def delayed(function, check_pickle=True):
    """Decorator used to capture the arguments of a function.

    Pass `check_pickle=False` when:

    - performing a possibly repeated check is too costly and has been done
      already once outside of the call to delayed.

    - when used in conjunction `Parallel(backend='threading')`.
    """
    if check_pickle:
        # Fail early with a picklability error here rather than deep
        # inside the multiprocessing dispatch machinery.
        pickle.dumps(function)

    def delayed_function(*args, **kwargs):
        return function, args, kwargs

    try:
        return functools.wraps(function)(delayed_function)
    except AttributeError:
        # functools.wraps fails on some callable objects
        return delayed_function
###############################################################################
class BatchCompletionCallBack(object):
    """Callback executed in the parent process when a worker finishes a batch.

    It updates the completed-task counter, feeds the measured batch
    duration back to the backend (used for auto batch sizing), reports
    progress and, while the task iterator is still being consumed lazily,
    schedules the next batch.
    """
    def __init__(self, dispatch_timestamp, batch_size, parallel):
        self.dispatch_timestamp = dispatch_timestamp
        self.batch_size = batch_size
        self.parallel = parallel

    def __call__(self, out):
        parallel = self.parallel
        parallel.n_completed_tasks += self.batch_size
        elapsed = time.time() - self.dispatch_timestamp
        parallel._backend.batch_completed(self.batch_size, elapsed)
        parallel.print_progress()
        if parallel._original_iterator is not None:
            parallel.dispatch_next()
###############################################################################
def register_parallel_backend(name, factory, make_default=False):
    """Register a new Parallel backend factory.

    The new backend can then be selected by passing its name as the backend
    argument to the Parallel class. Moreover, the default backend can be
    overwritten globally by setting make_default=True.

    The factory can be any callable that takes no argument and return an
    instance of ``ParallelBackendBase``.

    Warning: this function is experimental and subject to change in a future
    version of joblib.

    .. versionadded:: 0.10
    """
    global DEFAULT_BACKEND
    BACKENDS[name] = factory
    if make_default:
        DEFAULT_BACKEND = name
def effective_n_jobs(n_jobs=-1):
    """Determine the number of jobs that can actually run in parallel

    n_jobs is the is the number of workers requested by the callers.
    Passing n_jobs=-1 means requesting all available workers for instance
    matching the number of CPU cores on the worker host(s).

    This method should return a guesstimate of the number of workers that can
    actually perform work concurrently with the currently enabled default
    backend. The primary use case is to make it possible for the caller to know
    in how many chunks to slice the work.

    In general working on larger data chunks is more efficient (less
    scheduling overhead and better use of CPU cache prefetching heuristics)
    as long as all the workers have enough work to do.

    Warning: this function is experimental and subject to change in a future
    version of joblib.

    .. versionadded:: 0.10
    """
    active_backend, _ = get_active_backend()
    return active_backend.effective_n_jobs(n_jobs=n_jobs)
###############################################################################
class Parallel(Logger):
    ''' Helper class for readable parallel mapping.

    Parameters
    -----------
    n_jobs: int, default: 1
        The maximum number of concurrently running jobs, such as the number
        of Python worker processes when backend="multiprocessing"
        or the size of the thread-pool when backend="threading".
        If -1 all CPUs are used. If 1 is given, no parallel computing code
        is used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
        CPUs but one are used.
    backend: str, ParallelBackendBase instance or None, \
            default: 'multiprocessing'
        Specify the parallelization backend implementation.
        Supported backends are:

        - "multiprocessing" used by default, can induce some
          communication and memory overhead when exchanging input and
          output data with the worker Python processes.
        - "threading" is a very low-overhead backend but it suffers
          from the Python Global Interpreter Lock if the called function
          relies a lot on Python objects. "threading" is mostly useful
          when the execution bottleneck is a compiled extension that
          explicitly releases the GIL (for instance a Cython loop wrapped
          in a "with nogil" block or an expensive call to a library such
          as NumPy).
        - finally, you can register backends by calling
          register_parallel_backend. This will allow you to implement
          a backend of your liking.
    verbose: int, optional
        The verbosity level: if non zero, progress messages are
        printed. Above 50, the output is sent to stdout.
        The frequency of the messages increases with the verbosity level.
        If it more than 10, all iterations are reported.
    timeout: float, optional
        Timeout limit for each task to complete.  If any task takes longer
        a TimeOutError will be raised. Only applied when n_jobs != 1
    pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
        The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is reasonable
        default and the multiprocessing workers should never starve.
    batch_size: int or 'auto', default: 'auto'
        The number of atomic tasks to dispatch at once to each
        worker. When individual evaluations are very fast, multiprocessing
        can be slower than sequential computation because of the overhead.
        Batching fast computations together can mitigate this.
        The ``'auto'`` strategy keeps track of the time it takes for a batch
        to complete, and dynamically adjusts the batch size to keep the time
        on the order of half a second, using a heuristic. The initial batch
        size is 1.
        ``batch_size="auto"`` with ``backend="threading"`` will dispatch
        batches of a single task at a time as the threading backend has
        very little overhead and using larger batch size has not proved to
        bring any gain in that case.
    temp_folder: str, optional
        Folder to be used by the pool for memmaping large arrays
        for sharing memory with worker processes. If None, this will try in
        order:

        - a folder pointed by the JOBLIB_TEMP_FOLDER environment
          variable,
        - /dev/shm if the folder exists and is writable: this is a
          RAMdisk filesystem available by default on modern Linux
          distributions,
        - the default system temporary folder that can be
          overridden with TMP, TMPDIR or TEMP environment
          variables, typically /tmp under Unix operating systems.

        Only active when backend="multiprocessing".
    max_nbytes int, str, or None, optional, 1M by default
        Threshold on the size of arrays passed to the workers that
        triggers automated memory mapping in temp_folder. Can be an int
        in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
        Use None to disable memmaping of large arrays.
        Only active when backend="multiprocessing".
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
        Memmapping mode for numpy arrays passed to workers.
        See 'max_nbytes' parameter documentation for more details.

    Notes
    -----

    This object uses the multiprocessing module to compute in
    parallel the application of a function to many different
    arguments. The main functionality it brings in addition to
    using the raw multiprocessing API are (see examples for details):

    * More readable code, in particular since it avoids
      constructing list of arguments.

    * Easier debugging:
        - informative tracebacks even when the error happens on
          the client side
        - using 'n_jobs=1' enables to turn off parallel computing
          for debugging without changing the codepath
        - early capture of pickling errors

    * An optional progress meter.

    * Interruption of multiprocesses jobs with 'Ctrl-C'

    * Flexible pickling control for the communication to and from
      the worker processes.

    * Ability to use shared memory efficiently with worker
      processes for large numpy-based datastructures.

    Examples
    --------

    A simple example:

    >>> from math import sqrt
    >>> from sklearn.externals.joblib import Parallel, delayed
    >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
    [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]

    Reshaping the output when the function has several return
    values:

    >>> from math import modf
    >>> from sklearn.externals.joblib import Parallel, delayed
    >>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
    >>> res, i = zip(*r)
    >>> res
    (0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
    >>> i
    (0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)

    The progress meter: the higher the value of `verbose`, the more
    messages:

    >>> from time import sleep
    >>> from sklearn.externals.joblib import Parallel, delayed
    >>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
    [Parallel(n_jobs=2)]: Done   1 out of  10 | elapsed:    0.1s remaining:    0.9s
    [Parallel(n_jobs=2)]: Done   3 out of  10 | elapsed:    0.2s remaining:    0.5s
    [Parallel(n_jobs=2)]: Done   6 out of  10 | elapsed:    0.3s remaining:    0.2s
    [Parallel(n_jobs=2)]: Done   9 out of  10 | elapsed:    0.5s remaining:    0.1s
    [Parallel(n_jobs=2)]: Done  10 out of  10 | elapsed:    0.5s finished

    Traceback example, note how the line of the error is indicated
    as well as the values of the parameter passed to the function that
    triggered the exception, even though the traceback happens in the
    child process:

    >>> from heapq import nlargest
    >>> from sklearn.externals.joblib import Parallel, delayed
    >>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
    #...
    ---------------------------------------------------------------------------
    Sub-process traceback:
    ---------------------------------------------------------------------------
    TypeError                                          Mon Nov 12 11:37:46 2012
    PID: 12934                                    Python 2.7.3: /usr/bin/python
    ...........................................................................
    /usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
        419         if n >= size:
        420             return sorted(iterable, key=key, reverse=True)[:n]
        421
        422         # When key is none, use simpler decoration
        423         if key is None:
    --> 424             it = izip(iterable, count(0,-1))                    # decorate
        425             result = _nlargest(n, it)
        426             return map(itemgetter(0), result)                   # undecorate
        427
        428     # General case, slowest method
     TypeError: izip argument #1 must support iteration
    ___________________________________________________________________________

    Using pre_dispatch in a producer/consumer situation, where the
    data is generated on the fly. Note how the producer is first
    called 3 times before the parallel loop is initiated, and then
    called to generate new data on the fly. In this case the total
    number of iterations cannot be reported in the progress messages:

    >>> from math import sqrt
    >>> from sklearn.externals.joblib import Parallel, delayed
    >>> def producer():
    ...     for i in range(6):
    ...         print('Produced %s' % i)
    ...         yield i
    >>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
    ...     delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
    Produced 0
    Produced 1
    Produced 2
    [Parallel(n_jobs=2)]: Done 1 jobs     | elapsed:  0.0s
    Produced 3
    [Parallel(n_jobs=2)]: Done 2 jobs     | elapsed:  0.0s
    Produced 4
    [Parallel(n_jobs=2)]: Done 3 jobs     | elapsed:  0.0s
    Produced 5
    [Parallel(n_jobs=2)]: Done 4 jobs     | elapsed:  0.0s
    [Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed:  0.0s remaining: 0.0s
    [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed:  0.0s finished
    '''
    def __init__(self, n_jobs=1, backend=None, verbose=0, timeout=None,
                 pre_dispatch='2 * n_jobs', batch_size='auto',
                 temp_folder=None, max_nbytes='1M', mmap_mode='r'):
        """Store the configuration and resolve the backend instance.

        See the class docstring for the meaning of each parameter.
        """
        active_backend, default_n_jobs = get_active_backend()
        if backend is None and n_jobs == 1:
            # If we are under a parallel_backend context manager, look up
            # the default number of jobs and use that instead:
            n_jobs = default_n_jobs
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.timeout = timeout
        self.pre_dispatch = pre_dispatch

        if isinstance(max_nbytes, _basestring):
            # accept human readable sizes such as '1M'
            max_nbytes = memstr_to_bytes(max_nbytes)

        self._backend_args = dict(
            max_nbytes=max_nbytes,
            mmap_mode=mmap_mode,
            temp_folder=temp_folder,
            verbose=max(0, self.verbose - 50),
        )
        if DEFAULT_MP_CONTEXT is not None:
            self._backend_args['context'] = DEFAULT_MP_CONTEXT

        if backend is None:
            backend = active_backend
        elif isinstance(backend, ParallelBackendBase):
            # Use provided backend as is
            pass
        elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
            # Make it possible to pass a custom multiprocessing context as
            # backend to change the start method to forkserver or spawn or
            # preload modules on the forkserver helper process.
            self._backend_args['context'] = backend
            backend = MultiprocessingBackend()
        else:
            try:
                backend_factory = BACKENDS[backend]
            except KeyError:
                raise ValueError("Invalid backend: %s, expected one of %r"
                                 % (backend, sorted(BACKENDS.keys())))
            backend = backend_factory()

        if (batch_size == 'auto' or isinstance(batch_size, Integral) and
                batch_size > 0):
            self.batch_size = batch_size
        else:
            raise ValueError(
                "batch_size must be 'auto' or a positive integer, got: %r"
                % batch_size)

        self._backend = backend
        self._output = None
        self._jobs = list()
        self._managed_backend = False

        # This lock is used coordinate the main thread of this process with
        # the async callback thread of our the pool.
        self._lock = threading.Lock()

    def __enter__(self):
        # Entering the context keeps the worker pool alive across calls.
        self._managed_backend = True
        self._initialize_backend()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._terminate_backend()
        self._managed_backend = False

    def _initialize_backend(self):
        """Build a process or thread pool and return the number of workers"""
        try:
            n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
                                             **self._backend_args)
            if self.timeout is not None and not self._backend.supports_timeout:
                warnings.warn(
                    'The backend class {!r} does not support timeout. '
                    "You have set 'timeout={}' in Parallel but "
                    "the 'timeout' parameter will not be used.".format(
                        self._backend.__class__.__name__,
                        self.timeout))
        except FallbackToBackend as e:
            # Recursively initialize the backend in case of requested fallback.
            self._backend = e.backend
            n_jobs = self._initialize_backend()
        return n_jobs

    def _effective_n_jobs(self):
        # Number of workers the current backend can actually provide.
        if self._backend:
            return self._backend.effective_n_jobs(self.n_jobs)
        return 1

    def _terminate_backend(self):
        if self._backend is not None:
            self._backend.terminate()

    def _dispatch(self, batch):
        """Queue the batch for computing, with or without multiprocessing

        WARNING: this method is not thread-safe: it should be only called
        indirectly via dispatch_one_batch.

        """
        # If job.get() catches an exception, it closes the queue:
        if self._aborting:
            return

        self.n_dispatched_tasks += len(batch)
        self.n_dispatched_batches += 1

        dispatch_timestamp = time.time()
        cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
        job = self._backend.apply_async(batch, callback=cb)
        self._jobs.append(job)

    def dispatch_next(self):
        """Dispatch more data for parallel processing

        This method is meant to be called concurrently by the multiprocessing
        callback. We rely on the thread-safety of dispatch_one_batch to protect
        against concurrent consumption of the unprotected iterator.

        """
        if not self.dispatch_one_batch(self._original_iterator):
            self._iterating = False
            self._original_iterator = None

    def dispatch_one_batch(self, iterator):
        """Prefetch the tasks for the next batch and dispatch them.

        The effective size of the batch is computed here.
        If there are no more jobs to dispatch, return False, else return True.

        The iterator consumption and dispatching is protected by the same
        lock so calling this function should be thread safe.

        """
        if self.batch_size == 'auto':
            batch_size = self._backend.compute_batch_size()
        else:
            # Fixed batch size strategy
            batch_size = self.batch_size

        with self._lock:
            tasks = BatchedCalls(itertools.islice(iterator, batch_size))
            if len(tasks) == 0:
                # No more tasks available in the iterator: tell caller to stop.
                return False
            else:
                self._dispatch(tasks)
                return True

    def _print(self, msg, msg_args):
        """Display the message on stout or stderr depending on verbosity"""
        # XXX: Not using the logger framework: need to
        # learn to use logger better.
        if not self.verbose:
            return
        if self.verbose < 50:
            writer = sys.stderr.write
        else:
            writer = sys.stdout.write
        msg = msg % msg_args
        writer('[%s]: %s\n' % (self, msg))

    def print_progress(self):
        """Display the process of the parallel execution only a fraction
        of time, controlled by self.verbose.
        """
        if not self.verbose:
            return
        elapsed_time = time.time() - self._start_time

        # Original job iterator becomes None once it has been fully
        # consumed : at this point we know the total number of jobs and we are
        # able to display an estimation of the remaining time based on already
        # completed jobs. Otherwise, we simply display the number of completed
        # tasks.
        if self._original_iterator is not None:
            if _verbosity_filter(self.n_dispatched_batches, self.verbose):
                return
            self._print('Done %3i tasks      | elapsed: %s',
                        (self.n_completed_tasks,
                         short_format_time(elapsed_time), ))
        else:
            index = self.n_completed_tasks
            # We are finished dispatching
            total_tasks = self.n_dispatched_tasks
            # We always display the first loop
            if not index == 0:
                # Display depending on the number of remaining items
                # A message as soon as we finish dispatching, cursor is 0
                cursor = (total_tasks - index + 1 -
                          self._pre_dispatch_amount)
                frequency = (total_tasks // self.verbose) + 1
                is_last_item = (index + 1 == total_tasks)
                if (is_last_item or cursor % frequency):
                    return
            remaining_time = (elapsed_time / index) * \
                (self.n_dispatched_tasks - index * 1.0)
            # only display status if remaining time is greater or equal to 0
            self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
                        (index,
                         total_tasks,
                         short_format_time(elapsed_time),
                         short_format_time(remaining_time),
                         ))

    def retrieve(self):
        """Collect the results of dispatched jobs into self._output,
        converting worker-side exceptions back into local ones."""
        self._output = list()
        while self._iterating or len(self._jobs) > 0:
            if len(self._jobs) == 0:
                # Wait for an async callback to dispatch new jobs
                time.sleep(0.01)
                continue
            # We need to be careful: the job list can be filling up as
            # we empty it and Python list are not thread-safe by default hence
            # the use of the lock
            with self._lock:
                job = self._jobs.pop(0)

            try:
                if getattr(self._backend, 'supports_timeout', False):
                    self._output.extend(job.get(timeout=self.timeout))
                else:
                    self._output.extend(job.get())

            except BaseException as exception:
                # Note: we catch any BaseException instead of just Exception
                # instances to also include KeyboardInterrupt.

                # Stop dispatching any new job in the async callback thread
                self._aborting = True

                # If the backend allows it, cancel or kill remaining running
                # tasks without waiting for the results as we will raise
                # the exception we got back to the caller instead of returning
                # any result.
                backend = self._backend
                if (backend is not None and
                        hasattr(backend, 'abort_everything')):
                    # If the backend is managed externally we need to make sure
                    # to leave it in a working state to allow for future jobs
                    # scheduling.
                    ensure_ready = self._managed_backend
                    backend.abort_everything(ensure_ready=ensure_ready)

                if not isinstance(exception, TransportableException):
                    raise
                else:
                    # Capture exception to add information on the local
                    # stack in addition to the distant stack
                    this_report = format_outer_frames(context=10,
                                                      stack_start=1)
                    report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
                    # Convert this to a JoblibException
                    exception_type = _mk_exception(exception.etype)[0]
                    exception = exception_type(report)
                    raise exception

    def __call__(self, iterable):
        """Dispatch the delayed calls in `iterable` and return the list
        of their results in order."""
        if self._jobs:
            raise ValueError('This Parallel instance is already running')
        # A flag used to abort the dispatching of jobs in case an
        # exception is found
        self._aborting = False
        if not self._managed_backend:
            n_jobs = self._initialize_backend()
        else:
            n_jobs = self._effective_n_jobs()
        iterator = iter(iterable)
        pre_dispatch = self.pre_dispatch

        if pre_dispatch == 'all' or n_jobs == 1:
            # prevent further dispatch via multiprocessing callback thread
            self._original_iterator = None
            self._pre_dispatch_amount = 0
        else:
            self._original_iterator = iterator
            if hasattr(pre_dispatch, 'endswith'):
                # pre_dispatch is an expression such as '2 * n_jobs';
                # n_jobs is in scope here for the eval below
                pre_dispatch = eval(pre_dispatch)
            self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)

            # The main thread will consume the first pre_dispatch items and
            # the remaining items will later be lazily dispatched by async
            # callbacks upon task completions.
            iterator = itertools.islice(iterator, pre_dispatch)

        self._start_time = time.time()
        self.n_dispatched_batches = 0
        self.n_dispatched_tasks = 0
        self.n_completed_tasks = 0
        try:
            # Only set self._iterating to True if at least a batch
            # was dispatched. In particular this covers the edge
            # case of Parallel used with an exhausted iterator.
            while self.dispatch_one_batch(iterator):
                self._iterating = True
            else:
                self._iterating = False

            if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above for loop.
                # No need to wait for async callbacks to trigger to
                # consumption.
                self._iterating = False
            self.retrieve()
            # Make sure that we get a last message telling us we are done
            elapsed_time = time.time() - self._start_time
            self._print('Done %3i out of %3i | elapsed: %s finished',
                        (len(self._output), len(self._output),
                         short_format_time(elapsed_time)))
        finally:
            if not self._managed_backend:
                self._terminate_backend()
            self._jobs = list()
        output = self._output
        self._output = None
        return output

    def __repr__(self):
        return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
public-ink/public-ink | server/appengine/lib/numpy/linalg/linalg.py | 11 | 77339 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones
)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
# For Python2/3 compatibility: these option flags are passed down to the
# LAPACK wrappers as byte strings (under Python 3 a plain 'N' would be str,
# not bytes).
_N = b'N'
_V = b'V'
_A = b'A'
_S = b'S'
_L = b'L'
# Integer type used for LAPACK integer arguments (C 'int').
fortran_int = intc
# Error object
class LinAlgError(Exception):
    """
    Generic Python-exception-derived object raised by linalg functions.

    General purpose exception class, derived from Python's built-in
    Exception class, raised programmatically by linalg functions whenever
    a linear-algebra-related condition (for example a singular matrix, or
    a non-converging iteration) prevents correct further execution.

    Parameters
    ----------
    None

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> LA.inv(np.zeros((2,2)))
    Traceback (most recent call last):
      ...
    numpy.linalg.LinAlgError: Singular matrix

    """
    pass
# Dealing with errors in _umath_linalg
# Template "extobj" handed to the _umath_linalg gufuncs:
# [buffer size, error mask, callback].  Filled in once at import time by
# _determine_error_states() below.
_linalg_error_extobj = None
def _determine_error_states():
    """Capture the ufunc error-state object used for _umath_linalg calls.

    Records the current ufunc buffer size and the error mask that
    corresponds to ``invalid='call'`` (so invalid operations trigger a
    callback, used to raise LinAlgError) with over/under/divide ignored.
    The result ``[bufsize, errmask, None]`` is stored in the module
    global ``_linalg_error_extobj``.
    """
    global _linalg_error_extobj
    errobj = geterrobj()
    bufsize = errobj[0]
    # Temporarily install the desired error handling, then read back the
    # integer mask that encodes it.
    with errstate(invalid='call', over='ignore',
                  divide='ignore', under='ignore'):
        invalid_call_errmask = geterrobj()[1]
    _linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
    """Ufunc error callback: the input matrix was singular."""
    raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
    """Ufunc error callback: the input matrix was not positive definite."""
    raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
    """Ufunc error callback: the eigenvalue iteration did not converge."""
    raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
    """Ufunc error callback: the SVD iteration did not converge."""
    raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
    """Return a fresh error-state object with *callback* installed as the
    handler invoked on 'invalid' floating-point errors."""
    bufsize, errmask, _ = _linalg_error_extobj
    return [bufsize, errmask, callback]
def _makearray(a):
    """Convert *a* to an ndarray and return ``(array, wrap)``, where
    ``wrap`` restores the caller's array subclass on the way out."""
    arr = asarray(a)
    # Prefer the input's own __array_prepare__ hook; fall back to plain
    # ndarray wrapping when the input has none (e.g. lists).
    wrap = getattr(a, "__array_prepare__", arr.__array_wrap__)
    return arr, wrap
def isComplexType(t):
    """Return True if the scalar type *t* is complex floating point."""
    return issubclass(t, complexfloating)
# Scalar-type promotion tables: map each supported floating type to its
# real (resp. complex) counterpart of the same precision.
_real_types_map = {single: single,
                   double: double,
                   csingle: single,
                   cdouble: double}
_complex_types_map = {single: csingle,
                      double: cdouble,
                      csingle: csingle,
                      cdouble: cdouble}
def _realType(t, default=double):
    """Return the real scalar type matching *t*'s precision, or *default*."""
    try:
        return _real_types_map[t]
    except KeyError:
        return default
def _complexType(t, default=cdouble):
    """Return the complex scalar type matching *t*'s precision, or *default*."""
    try:
        return _complex_types_map[t]
    except KeyError:
        return default
def _linalgRealType(t):
    """Cast the type t to either double or cdouble."""
    # Lite version: computations always run in double precision, so the
    # argument is accepted only for interface compatibility and ignored.
    return double
# NOTE(review): exact duplicate of the ``_complex_types_map`` defined a few
# lines above; the rebinding is harmless but redundant.
_complex_types_map = {single : csingle,
                      double : cdouble,
                      csingle : csingle,
                      cdouble : cdouble}
def _commonType(*arrays):
    """Choose the computation and presentation dtypes for *arrays*.

    Returns ``(t, result_type)`` where ``t`` is the type the LAPACK
    computation runs in (always double or cdouble in this lite version)
    and ``result_type`` is the precision the result should be presented
    in (single or double, mapped to its complex counterpart when any
    input is complex).  Raises TypeError for unsupported inexact types
    (e.g. longdouble, since it has no entry in _real_types_map).
    """
    # in lite version, use higher precision (always double or cdouble)
    result_type = single
    is_complex = False
    for a in arrays:
        if issubclass(a.dtype.type, inexact):
            if isComplexType(a.dtype.type):
                is_complex = True
            rt = _realType(a.dtype.type, default=None)
            if rt is None:
                # unsupported inexact scalar
                raise TypeError("array type %s is unsupported in linalg" %
                        (a.dtype.name,))
        else:
            # integers/booleans are treated as double precision
            rt = double
        if rt is double:
            # a single double-precision input promotes the whole result
            result_type = double
    if is_complex:
        t = cdouble
        result_type = _complex_types_map[result_type]
    else:
        t = double
    return t, result_type
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
# Short local alias for numpy.core.fastCopyAndTranspose.
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
    """Return the arrays converted to native byte order.

    Arrays already in native ('=') or irrelevant ('|') byte order are
    passed through unchanged.  A single input is returned unwrapped; a
    list is returned for multiple inputs.
    """
    converted = []
    for arr in arrays:
        if arr.dtype.byteorder in ('=', '|'):
            converted.append(arr)
        else:
            converted.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
    return converted[0] if len(converted) == 1 else converted
def _fastCopyAndTranspose(type, *arrays):
    """Copy-and-transpose each 2-D array, casting to *type* when needed.

    A single input is returned unwrapped; a tuple is returned for
    multiple inputs.
    """
    copied = []
    for a in arrays:
        # Avoid an extra cast when the array already has the target type.
        src = a if a.dtype.type is type else a.astype(type)
        copied.append(_fastCT(src))
    if len(copied) == 1:
        return copied[0]
    return tuple(copied)
def _assertRank2(*arrays):
    """Raise LinAlgError unless every argument is exactly two-dimensional."""
    for a in arrays:
        if a.ndim == 2:
            continue
        raise LinAlgError('%d-dimensional array given. Array must be '
                          'two-dimensional' % a.ndim)
def _assertRankAtLeast2(*arrays):
    """Raise LinAlgError unless every argument has at least two dimensions."""
    for a in arrays:
        if a.ndim >= 2:
            continue
        raise LinAlgError('%d-dimensional array given. Array must be '
                          'at least two-dimensional' % a.ndim)
def _assertSquareness(*arrays):
    """Raise LinAlgError unless every argument has all dimensions equal."""
    for a in arrays:
        if min(a.shape) != max(a.shape):
            raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
    """Raise LinAlgError unless the last two dimensions of each argument
    are equal, i.e. each (stacked) matrix is square."""
    for a in arrays:
        if min(a.shape[-2:]) != max(a.shape[-2:]):
            raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
    """Raise LinAlgError if any argument contains NaNs or infinities."""
    for a in arrays:
        if isfinite(a).all():
            continue
        raise LinAlgError("Array must not contain infs or NaNs")
def _isEmpty2d(arr):
    """Return True when *arr* is empty because of a zero-sized trailing
    (matrix) dimension."""
    # check size first for efficiency
    if arr.size != 0:
        return False
    return product(arr.shape[-2:]) == 0
def _assertNoEmpty2d(*arrays):
    """Raise LinAlgError if any argument is an empty (stacked) matrix."""
    for a in arrays:
        if _isEmpty2d(a):
            raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
    """
    Solve the tensor equation ``a x = b`` for x.

    All indices of `x` are assumed to be summed over in the product,
    together with the rightmost indices of `a`, as in
    ``tensordot(a, x, axes=b.ndim)``.

    Parameters
    ----------
    a : array_like
        Coefficient tensor, of shape ``b.shape + Q``.  `Q`, a tuple,
        must satisfy ``prod(Q) == prod(b.shape)`` (`a` is 'square' in
        that sense).
    b : array_like
        Right-hand tensor, which can be of any shape.
    axes : tuple of ints, optional
        Axes in `a` to reorder to the right before inversion.
        If None (default), no reordering is done.

    Returns
    -------
    x : ndarray, shape Q

    Raises
    ------
    LinAlgError
        If `a` is singular or not 'square' (in the above sense).

    See Also
    --------
    numpy.tensordot, tensorinv, numpy.einsum

    Examples
    --------
    >>> a = np.eye(2*3*4)
    >>> a.shape = (2*3, 4, 2, 3, 4)
    >>> b = np.random.randn(2*3, 4)
    >>> x = np.linalg.tensorsolve(a, b)
    >>> x.shape
    (2, 3, 4)
    >>> np.allclose(np.tensordot(a, x, axes=3), b)
    True

    """
    a, wrap = _makearray(a)
    b = asarray(b)
    an = a.ndim
    if axes is not None:
        # Rotate each requested axis to the end, preserving the order in
        # which they were given.
        order = list(range(0, an))
        for ax in axes:
            order.remove(ax)
            order.insert(an, ax)
        a = a.transpose(order)
    # The trailing dimensions of `a` give the shape of the unknown x.
    x_shape = a.shape[-(an - b.ndim):]
    flat = 1
    for dim in x_shape:
        flat *= dim
    # Collapse everything to an ordinary 2-D linear system and solve it.
    sol = wrap(solve(a.reshape(-1, flat), b.ravel()))
    sol.shape = x_shape
    return sol
def solve(a, b):
    """
    Solve a linear matrix equation, or system of linear scalar equations.

    Computes the "exact" solution, `x`, of the well-determined (full
    rank) linear matrix equation ``a x = b``.

    Parameters
    ----------
    a : (..., M, M) array_like
        Coefficient matrix.
    b : {(..., M,), (..., M, K)}, array_like
        Ordinate or "dependent variable" values.

    Returns
    -------
    x : {(..., M,), (..., M, K)} ndarray
        Solution to the system ``a x = b``; shape identical to `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular or not square.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  The solutions are computed with the LAPACK routine
    ``_gesv``.  For over- or under-determined systems use `lstsq`.

    Examples
    --------
    >>> a = np.array([[3,1], [1,2]])
    >>> b = np.array([9,8])
    >>> x = np.linalg.solve(a, b)
    >>> x
    array([ 2.,  3.])
    >>> np.allclose(np.dot(a, x), b)
    True

    """
    a, _ = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    b, wrap = _makearray(b)
    t, result_t = _commonType(a, b)
    # Use the single-vector kernel only when b has exactly one dimension
    # fewer than a; otherwise b is treated as a stack of matrices.
    if b.ndim == a.ndim - 1:
        gufunc = _umath_linalg.solve1
    else:
        gufunc = _umath_linalg.solve
    extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
    signature = 'DD->D' if isComplexType(t) else 'dd->d'
    result = gufunc(a, b, signature=signature, extobj=extobj)
    return wrap(result.astype(result_t, copy=False))
def tensorinv(a, ind=2):
    """
    Compute the 'inverse' of an N-dimensional array.

    The result is an inverse for `a` relative to the tensordot operation
    ``tensordot(a, b, ind)``: up to floating-point accuracy,
    ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for
    that operation.

    Parameters
    ----------
    a : array_like
        Tensor to 'invert'.  Its shape must be 'square', i.e.
        ``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
    ind : int, optional
        Number of first indices that are involved in the inverse sum.
        Must be a positive integer, default is 2.

    Returns
    -------
    b : ndarray
        `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.

    Raises
    ------
    LinAlgError
        If `a` is singular or not 'square' (in the above sense).

    See Also
    --------
    numpy.tensordot, tensorsolve

    Examples
    --------
    >>> a = np.eye(4*6)
    >>> a.shape = (4, 6, 8, 3)
    >>> ainv = np.linalg.tensorinv(a, ind=2)
    >>> ainv.shape
    (8, 3, 4, 6)

    """
    a = asarray(a)
    shape = a.shape
    if ind <= 0:
        raise ValueError("Invalid ind argument.")
    inv_shape = shape[ind:] + shape[:ind]
    flat = 1
    for dim in shape[ind:]:
        flat *= dim
    # Flatten into a square 2-D matrix, invert, and restore tensor shape.
    return inv(a.reshape(flat, -1)).reshape(*inv_shape)
# Matrix inversion
def inv(a):
    """
    Compute the (multiplicative) inverse of a matrix.

    Given a square matrix `a`, return the matrix `ainv` satisfying
    ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.

    Parameters
    ----------
    a : (..., M, M) array_like
        Matrix (or stack of matrices) to be inverted.

    Returns
    -------
    ainv : (..., M, M) ndarray or matrix
        (Multiplicative) inverse of the matrix `a`.

    Raises
    ------
    LinAlgError
        If `a` is not square or inversion fails.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  If `a` is a matrix object, a matrix is returned.

    Examples
    --------
    >>> from numpy.linalg import inv
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> ainv = inv(a)
    >>> np.allclose(np.dot(a, ainv), np.eye(2))
    True
    >>> np.allclose(np.dot(ainv, a), np.eye(2))
    True

    """
    a, wrap = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
    signature = 'D->D' if isComplexType(t) else 'd->d'
    ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
    return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
    """
    Cholesky decomposition.

    Return the lower-triangular Cholesky factor `L` of the square matrix
    `a`, so that ``a = L L.H``, where ``.H`` is the conjugate transpose
    (the ordinary transpose if `a` is real-valued).  `a` must be
    Hermitian (symmetric if real-valued) and positive-definite.  Only
    `L` is returned.

    Parameters
    ----------
    a : (..., M, M) array_like
        Hermitian (symmetric if all elements are real),
        positive-definite input matrix.

    Returns
    -------
    L : (..., M, M) array_like
        Lower-triangular Cholesky factor of `a`.  Returns a matrix
        object if `a` is a matrix object.

    Raises
    ------
    LinAlgError
        If the decomposition fails, for example when `a` is not
        positive-definite.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  The decomposition is often used to solve
    ``A x = b`` quickly when `A` is Hermitian positive-definite.

    Examples
    --------
    >>> A = np.array([[1,-2j],[2j,5]])
    >>> L = np.linalg.cholesky(A)
    >>> np.allclose(np.dot(L, L.T.conj()), A)
    True

    """
    a, wrap = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    signature = 'D->D' if isComplexType(t) else 'd->d'
    extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
    L = _umath_linalg.cholesky_lo(a, signature=signature, extobj=extobj)
    return wrap(L.astype(result_t, copy=False))
# QR decomposition
def qr(a, mode='reduced'):
    """
    Compute the qr factorization of a matrix.

    Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
    upper-triangular.

    Parameters
    ----------
    a : array_like, shape (M, N)
        Matrix to be factored.
    mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
        With K = min(M, N):

        * 'reduced'  : returns q, r with dimensions (M, K), (K, N) (default)
        * 'complete' : returns q, r with dimensions (M, M), (M, N)
        * 'r'        : returns r only, with dimensions (K, N)
        * 'raw'      : returns h, tau with dimensions (N, M), (K,); h
          holds the Householder reflectors (transposed for Fortran) and
          tau their scaling factors
        * 'full'     : alias of 'reduced', deprecated
        * 'economic' : returns h only from 'raw', deprecated

        'full' and 'economic' may be abbreviated to their first letter
        for backward compatibility; all other modes must be spelled out.

    Returns
    -------
    q : ndarray of float or complex, optional
        A matrix with orthonormal columns.  When mode = 'complete' the
        result is an orthogonal/unitary matrix.
    r : ndarray of float or complex, optional
        The upper-triangular matrix.
    (h, tau) : ndarrays of np.double or np.cdouble, optional
        Householder reflectors and their scaling factors ('raw' mode).

    Raises
    ------
    LinAlgError
        If factoring fails.

    Notes
    -----
    This is an interface to the LAPACK routines dgeqrf, zgeqrf, dorgqr,
    and zungqr.  Subclasses of `ndarray` are preserved except for the
    'raw' mode, so if `a` is of type `matrix`, all the return values
    will be matrices too.

    Examples
    --------
    >>> a = np.random.randn(9, 6)
    >>> q, r = np.linalg.qr(a)
    >>> np.allclose(a, np.dot(q, r))
    True

    """
    if mode not in ('reduced', 'complete', 'r', 'raw'):
        if mode in ('f', 'full'):
            # 2013-04-01, 1.8
            msg = "".join((
                    "The 'full' option is deprecated in favor of 'reduced'.\n",
                    "For backward compatibility let mode default."))
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            mode = 'reduced'
        elif mode in ('e', 'economic'):
            # 2013-04-01, 1.8
            msg = "The 'economic' option is deprecated."
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            mode = 'economic'
        else:
            raise ValueError("Unrecognized mode '%s'" % mode)
    a, wrap = _makearray(a)
    _assertRank2(a)
    _assertNoEmpty2d(a)
    m, n = a.shape
    t, result_t = _commonType(a)
    # LAPACK expects Fortran-ordered data of the working precision.
    a = _fastCopyAndTranspose(t, a)
    a = _to_native_byte_order(a)
    mn = min(m, n)
    tau = zeros((mn,), t)
    if isComplexType(t):
        lapack_routine = lapack_lite.zgeqrf
        routine_name = 'zgeqrf'
    else:
        lapack_routine = lapack_lite.dgeqrf
        routine_name = 'dgeqrf'
    # Workspace-size query: lwork=-1 makes LAPACK report the optimal
    # work-array length in work[0].
    lwork = 1
    work = zeros((lwork,), t)
    results = lapack_routine(m, n, a, m, tau, work, -1, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
    # Perform the actual QR factorization with the optimal workspace.
    lwork = int(abs(work[0]))
    work = zeros((lwork,), t)
    results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
    # Modes that do not require assembling q.
    if mode == 'r':
        r = _fastCopyAndTranspose(result_t, a[:, :mn])
        return wrap(triu(r))
    if mode == 'raw':
        return a, tau
    if mode == 'economic':
        if t != result_t:
            a = a.astype(result_t, copy=False)
        return wrap(a.T)
    # Assemble q from the Householder reflectors stored in a/tau.
    if mode == 'complete' and m > n:
        mc = m
        q = empty((m, m), t)
    else:
        mc = mn
        q = empty((n, m), t)
    q[:n] = a
    if isComplexType(t):
        lapack_routine = lapack_lite.zungqr
        routine_name = 'zungqr'
    else:
        lapack_routine = lapack_lite.dorgqr
        routine_name = 'dorgqr'
    # Workspace query for the q-assembly routine, then the real call.
    lwork = 1
    work = zeros((lwork,), t)
    results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
    lwork = int(abs(work[0]))
    work = zeros((lwork,), t)
    results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
    q = _fastCopyAndTranspose(result_t, q[:mc])
    r = _fastCopyAndTranspose(result_t, a[:, :mc])
    return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
    """
    Compute the eigenvalues of a general matrix.

    Main difference between `eigvals` and `eig`: the eigenvectors aren't
    returned.

    Parameters
    ----------
    a : (..., M, M) array_like
        A complex- or real-valued matrix whose eigenvalues will be
        computed.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues, each repeated according to its multiplicity.
        They are not necessarily ordered, nor are they necessarily real
        for real matrices.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays
    eigvalsh : eigenvalues of symmetric or Hermitian arrays.
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  Implemented with the LAPACK ``_geev`` routines.

    Examples
    --------
    >>> D = np.diag((-1,1))
    >>> np.linalg.eigvals(D)
    array([-1.,  1.])

    """
    a, wrap = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    _assertFinite(a)
    t, result_t = _commonType(a)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    signature = 'D->D' if isComplexType(t) else 'd->D'
    w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
    if not isComplexType(t):
        if all(w.imag == 0):
            # Real input with a purely real spectrum: return real values.
            w = w.real
            result_t = _realType(result_t)
        else:
            result_t = _complexType(result_t)
    return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
    """
    Compute the eigenvalues of a Hermitian or real symmetric matrix.

    Main difference from eigh: the eigenvectors are not computed.

    Parameters
    ----------
    a : (..., M, M) array_like
        A complex- or real-valued matrix whose eigenvalues are to be
        computed.
    UPLO : {'L', 'U'}, optional
        Use the lower triangular part of `a` ('L', default) or the upper
        triangular part ('U').  Irrespective of this value only the real
        parts of the diagonal are considered, i.e. the imaginary part of
        the diagonal is always treated as zero.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
    eigvals : eigenvalues of general real or complex arrays.
    eig : eigenvalues and right eigenvectors of general arrays.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  Computed with the LAPACK ``_syevd`` / ``_heevd`` routines.

    Examples
    --------
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> np.linalg.eigvalsh(a)
    array([ 0.17157288,  5.82842712])

    """
    UPLO = UPLO.upper()
    if UPLO not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")
    gufunc = (_umath_linalg.eigvalsh_lo if UPLO == 'L'
              else _umath_linalg.eigvalsh_up)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    a, wrap = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    signature = 'D->d' if isComplexType(t) else 'd->d'
    w = gufunc(a, signature=signature, extobj=extobj)
    # Eigenvalues of a Hermitian matrix are always real.
    return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
    """Return ``(a, t, result_t)``: *a* cast to the common computation
    type and copy-transposed (Fortran layout), plus the chosen dtypes."""
    t, result_t = _commonType(a)
    a = _fastCT(a.astype(t))
    return a, t, result_t
# Eigenvectors
def eig(a):
    """
    Compute the eigenvalues and right eigenvectors of a square array.

    Parameters
    ----------
    a : (..., M, M) array
        Matrices for which the eigenvalues and right eigenvectors will
        be computed.

    Returns
    -------
    w : (..., M) array
        The eigenvalues, each repeated according to its multiplicity,
        not necessarily ordered.  The result is complex unless the
        imaginary part is zero everywhere, in which case it is cast to a
        real type.  For real `a` the eigenvalues are real or occur in
        conjugate pairs.
    v : (..., M, M) array
        The normalized (unit "length") eigenvectors, such that the
        column ``v[:,i]`` is the eigenvector corresponding to the
        eigenvalue ``w[i]``.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvals : eigenvalues of a non-symmetric array.
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
    eigvalsh : eigenvalues of symmetric/Hermitian arrays.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  Implemented with the LAPACK ``_geev`` routines.  `v`
    holds *right* eigenvectors: ``dot(a, v[:, i]) == w[i] * v[:, i]``.
    The eigenvectors may be linearly dependent when eigenvalues repeat.

    Examples
    --------
    >>> w, v = np.linalg.eig(np.diag((1, 2, 3)))
    >>> w
    array([ 1.,  2.,  3.])

    """
    a, wrap = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    _assertFinite(a)
    t, result_t = _commonType(a)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    signature = 'D->DD' if isComplexType(t) else 'd->DD'
    w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
    if isComplexType(t) or not all(w.imag == 0.0):
        result_t = _complexType(result_t)
    else:
        # Real input with a purely real spectrum: drop the zero
        # imaginary parts of both values and vectors.
        w = w.real
        vt = vt.real
        result_t = _realType(result_t)
    vt = vt.astype(result_t, copy=False)
    return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
    """
    Return the eigenvalues and eigenvectors of a Hermitian or symmetric
    matrix.

    Returns two objects, a 1-D array containing the eigenvalues of `a`,
    and a 2-D square array or matrix (depending on the input type) of
    the corresponding eigenvectors (in columns).

    Parameters
    ----------
    a : (..., M, M) array
        Hermitian/symmetric matrices whose eigenvalues and eigenvectors
        are to be computed.
    UPLO : {'L', 'U'}, optional
        Use the lower triangular part of `a` ('L', default) or the upper
        triangular part ('U').  Irrespective of this value only the real
        parts of the diagonal are considered, i.e. the imaginary part of
        the diagonal is always treated as zero.

    Returns
    -------
    w : (..., M) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.
    v : {(..., M, M) ndarray, (..., M, M) matrix}
        The column ``v[:, i]`` is the normalized eigenvector
        corresponding to the eigenvalue ``w[i]``.  A matrix object is
        returned when `a` is a matrix object.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvalsh : eigenvalues of symmetric or Hermitian arrays.
    eig : eigenvalues and right eigenvectors for non-symmetric arrays.
    eigvals : eigenvalues of non-symmetric arrays.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  Computed with the LAPACK ``_syevd`` / ``_heevd`` routines.
    The eigenvalues of real symmetric or complex Hermitian matrices are
    always real; `v` is unitary and satisfies
    ``dot(a, v[:, i]) = w[i] * v[:, i]``.

    Examples
    --------
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> w, v = np.linalg.eigh(a)
    >>> w
    array([ 0.17157288,  5.82842712])

    """
    UPLO = UPLO.upper()
    if UPLO not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")
    a, wrap = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    gufunc = _umath_linalg.eigh_lo if UPLO == 'L' else _umath_linalg.eigh_up
    signature = 'D->dD' if isComplexType(t) else 'd->dd'
    w, vt = gufunc(a, signature=signature, extobj=extobj)
    # Eigenvalues of a Hermitian matrix are real; vectors keep result_t.
    w = w.astype(_realType(result_t), copy=False)
    return w, wrap(vt.astype(result_t, copy=False))
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
    """
    Singular Value Decomposition.

    Factor `a` as ``u * np.diag(s) * v`` where `u` and `v` are unitary
    and `s` holds `a`'s singular values in descending order.

    Parameters
    ----------
    a : (..., M, N) array_like
        A real or complex matrix (or stack of matrices).
    full_matrices : bool, optional
        If True (default), `u` and `v` have shapes (M, M) and (N, N);
        otherwise (M, K) and (K, N) with ``K = min(M, N)``.
    compute_uv : bool, optional
        Whether to return `u` and `v` in addition to `s`.  True by default.

    Returns
    -------
    u : { (..., M, M), (..., M, K) } array
        Unitary matrices.  Only returned when `compute_uv` is True.
    s : (..., K) array
        The singular values of every matrix, sorted in descending order.
    v : { (..., N, N), (..., K, N) } array
        The factor ``V.H``.  Only returned when `compute_uv` is True.

    Raises
    ------
    LinAlgError
        If the SVD computation does not converge.

    Notes
    -----
    Broadcasting rules apply; the decomposition is performed by the LAPACK
    routine ``_gesdd``.  If `a` is a `matrix` object (as opposed to an
    `ndarray`), then so are all the return values.
    """
    a, wrap = _makearray(a)
    _assertNoEmpty2d(a)
    _assertRankAtLeast2(a)
    t, result_t = _commonType(a)

    extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)

    m, n = a.shape[-2:]
    if not compute_uv:
        # Singular values only: pick the gufunc for the wide/tall case.
        gufunc = _umath_linalg.svd_m if m < n else _umath_linalg.svd_n
        signature = 'D->d' if isComplexType(t) else 'd->d'
        s = gufunc(a, signature=signature, extobj=extobj)
        return s.astype(_realType(result_t), copy=False)

    # Full decomposition: four gufunc variants keyed on wide-vs-tall and
    # full-vs-reduced matrices.
    if m < n:
        gufunc = _umath_linalg.svd_m_f if full_matrices else _umath_linalg.svd_m_s
    else:
        gufunc = _umath_linalg.svd_n_f if full_matrices else _umath_linalg.svd_n_s
    signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
    u, s, vt = gufunc(a, signature=signature, extobj=extobj)
    u = u.astype(result_t, copy=False)
    s = s.astype(_realType(result_t), copy=False)
    vt = vt.astype(result_t, copy=False)
    return wrap(u), s, wrap(vt)
def cond(x, p=None):
    """
    Compute the condition number of a matrix.

    The condition number of `x` is the norm of `x` times the norm of the
    inverse of `x`; which norm is used depends on `p`.

    Parameters
    ----------
    x : (..., M, N) array_like
        The matrix whose condition number is sought.
    p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
        Order of the norm.  ``None`` (the default) selects the 2-norm,
        computed directly from the singular values; the other values are
        forwarded to `numpy.linalg.norm` (``'fro'`` is the Frobenius norm,
        ``inf`` means the numpy.inf object, and so on).

    Returns
    -------
    c : {float, inf}
        The condition number of the matrix.  May be infinite.

    See Also
    --------
    numpy.linalg.norm

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
           Academic Press, Inc., 1980, pg. 285.
    """
    x = asarray(x)  # strip any matrix subclass down to a plain ndarray
    if p is not None:
        # General orders: ||x|| * ||x^-1|| under the requested norm.
        return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
    # Default 2-norm: ratio of the largest to the smallest singular value.
    sigma = svd(x, compute_uv=False)
    return sigma[..., 0] / sigma[..., -1]
def matrix_rank(M, tol=None):
    """
    Return matrix rank of array using SVD method.

    The rank is the number of singular values of `M` that exceed `tol`.

    Parameters
    ----------
    M : {(M,), (..., M, N)} array_like
        Input vector or stack of matrices.
    tol : {None, float}, optional
        Threshold below which singular values are considered zero.  When
        None, ``tol = S.max() * max(M.shape) * eps`` where ``S`` holds the
        singular values of `M` and ``eps`` is the epsilon of their dtype.
        This is the heuristic used by MATLAB's ``rank`` and discussed in
        *Numerical Recipes*; it accounts for the roundoff incurred while
        computing the SVD itself.  Supply an explicit `tol` when your data
        carry larger uncertainties than floating-point epsilon.

    Examples
    --------
    >>> from numpy.linalg import matrix_rank
    >>> matrix_rank(np.eye(4)) # Full rank matrix
    4
    >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
    >>> matrix_rank(I)
    3
    >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
    1
    >>> matrix_rank(np.zeros((4,)))
    0
    """
    M = asarray(M)
    if M.ndim < 2:
        # A vector has rank 1 unless it is identically zero.
        return int(not (M == 0).all())
    s = svd(M, compute_uv=False)
    if tol is None:
        # Default tolerance: largest singular value scaled by the matrix
        # size and the dtype's machine epsilon (MATLAB's heuristic).
        tol = s.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(s.dtype).eps
    return (s > tol).sum(axis=-1)
# Generalized inverse
def pinv(a, rcond=1e-15):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    The generalized inverse is computed from the SVD of `a`, keeping all
    *large* singular values and zeroing the rest.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    rcond : float
        Cutoff for small singular values: values smaller (in modulus) than
        ``rcond * largest_singular_value`` are treated as zero.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of `a`.  If `a` is a `matrix` instance, then so
        is `B`.

    Raises
    ------
    LinAlgError
        If the SVD computation does not converge.

    Notes
    -----
    With the SVD ``a = Q1 S Q2.H``, the pseudo-inverse is
    ``a+ = Q2 S+ Q1.H`` where ``S+`` inverts the non-negligible singular
    values [1]_.  It satisfies ``a @ a+ @ a == a`` and ``a+ @ a @ a+ == a+``.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pp. 139-142.
    """
    a, wrap = _makearray(a)
    if _isEmpty2d(a):
        # Pseudo-inverse of an empty matrix: empty with swapped last dims.
        res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
        return wrap(res)
    a = a.conjugate()
    u, s, vt = svd(a, 0)
    m, n = u.shape[0], vt.shape[1]
    # Invert singular values above the cutoff; discard (zero) the rest.
    cutoff = rcond * maximum.reduce(s)
    for i in range(min(n, m)):
        s[i] = 1. / s[i] if s[i] > cutoff else 0.
    res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
    return wrap(res)
# Determinant
def slogdet(a):
    """
    Compute the sign and (natural) logarithm of the determinant of an array.

    For matrices whose determinant would overflow or underflow a direct
    `det` call, this routine is more robust: it returns the log of the
    magnitude together with the sign.

    Parameters
    ----------
    a : (..., M, M) array_like
        Input array, has to be a square 2-D array.

    Returns
    -------
    sign : (...) array_like
        Sign of the determinant: 1, 0 or -1 for real input; a complex
        number on the unit circle (or 0) for complex input.
    logdet : (...) array_like
        Natural log of the absolute value of the determinant; -Inf (with
        `sign` 0) when the determinant is zero.  Always
        ``det == sign * np.exp(logdet)``.

    See Also
    --------
    det

    Notes
    -----
    Broadcasting rules apply.  The determinant is computed via LU
    factorization using the LAPACK routine z/dgetrf.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> (sign, logdet) = np.linalg.slogdet(a)
    >>> sign * np.exp(logdet)
    -2.0
    >>> np.linalg.slogdet(np.eye(500) * 0.1)   # where det() underflows
    (1, -1151.2925464970228)
    """
    a = asarray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    real_t = _realType(result_t)
    signature = 'D->Dd' if isComplexType(t) else 'd->dd'
    sign, logdet = _umath_linalg.slogdet(a, signature=signature)
    # Both outputs come from the same gufunc call, so they are scalars (or
    # arrays) together; numpy scalars do not accept astype's `copy` kwarg.
    if isscalar(sign):
        sign = sign.astype(result_t)
        logdet = logdet.astype(real_t)
    else:
        sign = sign.astype(result_t, copy=False)
        logdet = logdet.astype(real_t, copy=False)
    return sign, logdet
def det(a):
    """
    Compute the determinant of an array.

    Parameters
    ----------
    a : (..., M, M) array_like
        Input array to compute determinants for.

    Returns
    -------
    det : (...) array_like
        Determinant of `a`.

    See Also
    --------
    slogdet : Another way of representing the determinant, more suitable
        for large matrices where underflow/overflow may occur.

    Notes
    -----
    Broadcasting rules apply.  The determinant is computed via LU
    factorization using the LAPACK routine z/dgetrf.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.linalg.det(a)
    -2.0
    >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
    >>> np.linalg.det(a)
    array([-2., -3., -8.])
    """
    a = asarray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    signature = 'D->D' if isComplexType(t) else 'd->d'
    result = _umath_linalg.det(a, signature=signature)
    # numpy scalars do not take astype's `copy` keyword; arrays do.
    if isscalar(result):
        return result.astype(result_t)
    return result.astype(result_t, copy=False)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
    """
    Return the least-squares solution to a linear matrix equation.
    Solves the equation `a x = b` by computing a vector `x` that
    minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
    be under-, well-, or over- determined (i.e., the number of
    linearly independent rows of `a` can be less than, equal to, or
    greater than its number of linearly independent columns). If `a`
    is square and of full rank, then `x` (but for round-off error) is
    the "exact" solution of the equation.
    Parameters
    ----------
    a : (M, N) array_like
        "Coefficient" matrix.
    b : {(M,), (M, K)} array_like
        Ordinate or "dependent variable" values. If `b` is two-dimensional,
        the least-squares solution is calculated for each of the `K` columns
        of `b`.
    rcond : float, optional
        Cut-off ratio for small singular values of `a`.
        For the purposes of rank determination, singular values are treated
        as zero if they are smaller than `rcond` times the largest singular
        value of `a`.
    Returns
    -------
    x : {(N,), (N, K)} ndarray
        Least-squares solution. If `b` is two-dimensional,
        the solutions are in the `K` columns of `x`.
    residuals : {(), (1,), (K,)} ndarray
        Sums of residuals; squared Euclidean 2-norm for each column in
        ``b - a*x``.
        If the rank of `a` is < N or M <= N, this is an empty array.
        If `b` is 1-dimensional, this is a (1,) shape array.
        Otherwise the shape is (K,).
    rank : int
        Rank of matrix `a`.
    s : (min(M, N),) ndarray
        Singular values of `a`.
    Raises
    ------
    LinAlgError
        If computation does not converge.
    Notes
    -----
    If `b` is a matrix, then all array results are returned as matrices.
    Examples
    --------
    Fit a line, ``y = mx + c``, through some noisy data-points:
    >>> x = np.array([0, 1, 2, 3])
    >>> y = np.array([-1, 0.2, 0.9, 2.1])
    By examining the coefficients, we see that the line should have a
    gradient of roughly 1 and cut the y-axis at, more or less, -1.
    We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
    and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
    >>> A = np.vstack([x, np.ones(len(x))]).T
    >>> A
    array([[ 0., 1.],
           [ 1., 1.],
           [ 2., 1.],
           [ 3., 1.]])
    >>> m, c = np.linalg.lstsq(A, y)[0]
    >>> print(m, c)
    1.0 -0.95
    Plot the data along with the fitted line:
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(x, y, 'o', label='Original data', markersize=10)
    >>> plt.plot(x, m*x + c, 'r', label='Fitted line')
    >>> plt.legend()
    >>> plt.show()
    """
    import math
    a, _ = _makearray(a)
    b, wrap = _makearray(b)
    # A 1-D `b` is promoted to a single-column matrix here and squeezed
    # back to 1-D just before returning.
    is_1d = b.ndim == 1
    if is_1d:
        b = b[:, newaxis]
    _assertRank2(a, b)
    _assertNoEmpty2d(a, b)  # TODO: relax this constraint
    m = a.shape[0]
    n = a.shape[1]
    n_rhs = b.shape[1]
    ldb = max(n, m)
    if m != b.shape[0]:
        raise LinAlgError('Incompatible dimensions')
    t, result_t = _commonType(a, b)
    result_real_t = _realType(result_t)
    real_t = _linalgRealType(t)
    # xGELSD overwrites its right-hand side with the solution and needs it
    # to have max(m, n) rows, so copy b into a zero-padded work array.
    bstar = zeros((ldb, n_rhs), t)
    bstar[:b.shape[0], :n_rhs] = b.copy()
    # LAPACK wants Fortran (column-major) layout in native byte order.
    a, bstar = _fastCopyAndTranspose(t, a, bstar)
    a, bstar = _to_native_byte_order(a, bstar)
    s = zeros((min(m, n),), real_t)
    # This line:
    # * is incorrect, according to the LAPACK documentation
    # * raises a ValueError if min(m,n) == 0
    # * should not be calculated here anyway, as LAPACK should calculate
    #   `liwork` for us. But that only works if our version of lapack does
    #   not have this bug:
    #       http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00899.html
    #   Lapack_lite does have that bug...
    nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
    iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
    if isComplexType(t):
        lapack_routine = lapack_lite.zgelsd
        lwork = 1
        rwork = zeros((lwork,), real_t)
        work = zeros((lwork,), t)
        # Workspace query: lwork=-1 asks zgelsd for the optimal `work` size
        # without solving anything.
        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
                                 0, work, -1, rwork, iwork, 0)
        lwork = int(abs(work[0]))
        rwork = zeros((lwork,), real_t)
        a_real = zeros((m, n), real_t)
        bstar_real = zeros((ldb, n_rhs,), real_t)
        # Second query against the real routine sizes `rwork`, since
        # lapack_lite's zgelsd does not report lrwork itself.
        results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
                                     bstar_real, ldb, s, rcond,
                                     0, rwork, -1, iwork, 0)
        lrwork = int(rwork[0])
        work = zeros((lwork,), t)
        rwork = zeros((lrwork,), real_t)
        # Actual solve with the properly sized workspaces.
        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
                                 0, work, lwork, rwork, iwork, 0)
    else:
        lapack_routine = lapack_lite.dgelsd
        lwork = 1
        work = zeros((lwork,), t)
        # Workspace query (lwork=-1), then the actual solve.
        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
                                 0, work, -1, iwork, 0)
        lwork = int(work[0])
        work = zeros((lwork,), t)
        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
                                 0, work, lwork, iwork, 0)
    if results['info'] > 0:
        raise LinAlgError('SVD did not converge in Linear Least Squares')
    resids = array([], result_real_t)
    if is_1d:
        # Solution occupies the first n entries of the (overwritten) RHS.
        x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
        # Residuals exist only for overdetermined, full-rank systems; the
        # trailing m-n entries of the RHS hold the residual components.
        if results['rank'] == n and m > n:
            if isComplexType(t):
                resids = array([sum(abs(ravel(bstar)[n:])**2)],
                               dtype=result_real_t)
            else:
                resids = array([sum((ravel(bstar)[n:])**2)],
                               dtype=result_real_t)
    else:
        x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
        if results['rank'] == n and m > n:
            if isComplexType(t):
                resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
                    result_real_t, copy=False)
            else:
                resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
                    result_real_t, copy=False)
    st = s[:min(n, m)].astype(result_real_t, copy=True)
    return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
    """
    Matrix or vector norm.
    This function is able to return one of eight different matrix norms,
    or one of an infinite number of vector norms (described below), depending
    on the value of the ``ord`` parameter.
    Parameters
    ----------
    x : array_like
        Input array. If `axis` is None, `x` must be 1-D or 2-D.
    ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.
    axis : {int, 2-tuple of ints, None}, optional
        If `axis` is an integer, it specifies the axis of `x` along which to
        compute the vector norms. If `axis` is a 2-tuple, it specifies the
        axes that hold 2-D matrices, and the matrix norms of these matrices
        are computed. If `axis` is None then either a vector norm (when `x`
        is 1-D) or a matrix norm (when `x` is 2-D) is returned.
    keepdims : bool, optional
        If this is set to True, the axes which are normed over are left in the
        result as dimensions with size one. With this option the result will
        broadcast correctly against the original `x`.
        .. versionadded:: 1.10.0
    Returns
    -------
    n : float or ndarray
        Norm of the matrix or vector(s).
    Notes
    -----
    For values of ``ord <= 0``, the result is, strictly speaking, not a
    mathematical 'norm', but it may still be useful for various numerical
    purposes.
    The following norms can be calculated:
    =====  ============================  ==========================
    ord    norm for matrices             norm for vectors
    =====  ============================  ==========================
    None   Frobenius norm                2-norm
    'fro'  Frobenius norm                --
    'nuc'  nuclear norm                  --
    inf    max(sum(abs(x), axis=1))      max(abs(x))
    -inf   min(sum(abs(x), axis=1))      min(abs(x))
    0      --                            sum(x != 0)
    1      max(sum(abs(x), axis=0))      as below
    -1     min(sum(abs(x), axis=0))      as below
    2      2-norm (largest sing. value)  as below
    -2     smallest singular value       as below
    other  --                            sum(abs(x)**ord)**(1./ord)
    =====  ============================  ==========================
    The Frobenius norm is given by [1]_:
        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
    The nuclear norm is the sum of the singular values.
    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
           Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.arange(9) - 4
    >>> a
    array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4, -3, -2],
           [-1, 0, 1],
           [ 2, 3, 4]])
    >>> LA.norm(a)
    7.745966692414834
    >>> LA.norm(b)
    7.745966692414834
    >>> LA.norm(b, 'fro')
    7.745966692414834
    >>> LA.norm(a, np.inf)
    4.0
    >>> LA.norm(b, np.inf)
    9.0
    >>> LA.norm(a, -np.inf)
    0.0
    >>> LA.norm(b, -np.inf)
    2.0
    >>> LA.norm(a, 1)
    20.0
    >>> LA.norm(b, 1)
    7.0
    >>> LA.norm(a, -1)
    -4.6566128774142013e-010
    >>> LA.norm(b, -1)
    6.0
    >>> LA.norm(a, 2)
    7.745966692414834
    >>> LA.norm(b, 2)
    7.3484692283495345
    >>> LA.norm(a, -2)
    nan
    >>> LA.norm(b, -2)
    1.8570331885190563e-016
    >>> LA.norm(a, 3)
    5.8480354764257312
    >>> LA.norm(a, -3)
    nan
    Using the `axis` argument to compute vector norms:
    >>> c = np.array([[ 1, 2, 3],
    ...               [-1, 1, 4]])
    >>> LA.norm(c, axis=0)
    array([ 1.41421356, 2.23606798, 5. ])
    >>> LA.norm(c, axis=1)
    array([ 3.74165739, 4.24264069])
    >>> LA.norm(c, ord=1, axis=1)
    array([ 6., 6.])
    Using the `axis` argument to compute matrix norms:
    >>> m = np.arange(8).reshape(2,2,2)
    >>> LA.norm(m, axis=(1,2))
    array([ 3.74165739, 11.22497216])
    >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
    (3.7416573867739413, 11.224972160321824)
    """
    x = asarray(x)
    if not issubclass(x.dtype.type, (inexact, object_)):
        # Integer/bool input is promoted to float so the result type (and
        # any intermediate squaring) behaves like a real norm.
        x = x.astype(float)
    # Immediately handle some default, simple, fast, and common cases.
    if axis is None:
        ndim = x.ndim
        if ((ord is None) or
            (ord in ('f', 'fro') and ndim == 2) or
            (ord == 2 and ndim == 1)):
            # Flatten and use dot() on the (real) components: avoids the
            # temporaries that abs()/** would allocate.
            x = x.ravel(order='K')
            if isComplexType(x.dtype.type):
                sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
            else:
                sqnorm = dot(x, x)
            ret = sqrt(sqnorm)
            if keepdims:
                ret = ret.reshape(ndim*[1])
            return ret
    # Normalize the `axis` argument to a tuple.
    nd = x.ndim
    if axis is None:
        axis = tuple(range(nd))
    elif not isinstance(axis, tuple):
        try:
            axis = int(axis)
        except:
            raise TypeError("'axis' must be None, an integer or a tuple of integers")
        axis = (axis,)
    if len(axis) == 1:
        # --- Vector norms over a single axis ---
        if ord == Inf:
            return abs(x).max(axis=axis, keepdims=keepdims)
        elif ord == -Inf:
            return abs(x).min(axis=axis, keepdims=keepdims)
        elif ord == 0:
            # Zero norm
            return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
        elif ord == 1:
            # special case for speedup
            return add.reduce(abs(x), axis=axis, keepdims=keepdims)
        elif ord is None or ord == 2:
            # special case for speedup
            s = (x.conj() * x).real
            return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
        else:
            try:
                # Rejects non-numeric orders such as 'fro' on a vector.
                ord + 1
            except TypeError:
                raise ValueError("Invalid norm order for vectors.")
            if x.dtype.type is longdouble:
                # Convert to a float type, so integer arrays give
                # float results. Don't apply asfarray to longdouble arrays,
                # because it will downcast to float64.
                absx = abs(x)
            else:
                absx = x if isComplexType(x.dtype.type) else asfarray(x)
                if absx.dtype is x.dtype:
                    absx = abs(absx)
                else:
                    # if the type changed, we can safely overwrite absx
                    abs(absx, out=absx)
            absx **= ord
            return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
    elif len(axis) == 2:
        # --- Matrix norms over a pair of axes ---
        row_axis, col_axis = axis
        row_axis = normalize_axis_index(row_axis, nd)
        col_axis = normalize_axis_index(col_axis, nd)
        if row_axis == col_axis:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amax)
        elif ord == -2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            # Reducing over row_axis first shifts col_axis left by one when
            # it lies to the right of row_axis (and symmetrically below).
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
        elif ord == Inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
        elif ord == -1:
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
        elif ord == -Inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
        elif ord in [None, 'fro', 'f']:
            ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
        elif ord == 'nuc':
            ret = _multi_svd_norm(x, row_axis, col_axis, sum)
        else:
            raise ValueError("Invalid norm order for matrices.")
        if keepdims:
            # Re-insert the reduced axes as size-1 dimensions.
            ret_shape = list(x.shape)
            ret_shape[axis[0]] = 1
            ret_shape[axis[1]] = 1
            ret = ret.reshape(ret_shape)
        return ret
    else:
        raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
    """
    Compute the dot product of two or more arrays in a single function call,
    while automatically selecting the fastest evaluation order.

    `multi_dot` chains `numpy.dot` using the optimal parenthesization of
    the matrix chain [1]_ [2]_, which can be dramatically faster than a
    naive left-to-right reduction depending on the shapes involved.
    Think of it as::

        def multi_dot(arrays): return functools.reduce(np.dot, arrays)

    Parameters
    ----------
    arrays : sequence of array_like
        If the first argument is 1-D it is treated as a row vector.
        If the last argument is 1-D it is treated as a column vector.
        The other arguments must be 2-D.

    Returns
    -------
    output : ndarray
        Returns the dot product of the supplied arrays.

    See Also
    --------
    dot : dot multiplication with two arguments.

    References
    ----------
    .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
    .. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication

    Notes
    -----
    The cost of multiplying A (m x k) by B (k x n) is ``m * k * n`` scalar
    multiplications; the parenthesization chosen minimizes the total cost
    over the whole chain.
    """
    n = len(arrays)
    # Ordering optimization only pays off for three or more factors.
    if n < 2:
        raise ValueError("Expecting at least two arrays.")
    if n == 2:
        return dot(arrays[0], arrays[1])

    arrays = [asanyarray(a) for a in arrays]

    # Remember the original boundary ranks so the result can be squeezed
    # back to the caller's shape at the end.
    ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
    # Promote boundary vectors to 2-D: a row vector in front, a column
    # vector at the back; interior arguments must already be 2-D.
    if arrays[0].ndim == 1:
        arrays[0] = atleast_2d(arrays[0])
    if arrays[-1].ndim == 1:
        arrays[-1] = atleast_2d(arrays[-1]).T
    _assertRank2(*arrays)

    if n == 3:
        # Closed-form cost comparison beats building the DP table.
        result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
    else:
        order = _multi_dot_matrix_chain_order(arrays)
        result = _multi_dot(arrays, order, 0, n - 1)

    # Undo the vector promotion.
    if ndim_first == 1 and ndim_last == 1:
        return result[0, 0]  # scalar
    if ndim_first == 1 or ndim_last == 1:
        return result.ravel()  # 1-D
    return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return a np.array that encodes the optimal order of mutiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| gpl-3.0 |
manashmndl/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
                  n_features=2,
                  centers=4,
                  cluster_std=1,
                  center_box=(-10.0, 10.0),
                  shuffle=True,
                  random_state=1)  # For reproducibility
# Candidate cluster counts to evaluate; one figure is produced per value.
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
    # Create a subplot with 1 row and 2 columns
    fig, (ax1, ax2) = plt.subplots(1, 2)
    fig.set_size_inches(18, 7)
    # The 1st subplot is the silhouette plot
    # The silhouette coefficient can range from -1, 1 but in this example all
    # lie within [-0.1, 1]
    ax1.set_xlim([-0.1, 1])
    # The (n_clusters+1)*10 is for inserting blank space between silhouette
    # plots of individual clusters, to demarcate them clearly.
    ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
    # Initialize the clusterer with n_clusters value and a random generator
    # seed of 10 for reproducibility.
    clusterer = KMeans(n_clusters=n_clusters, random_state=10)
    cluster_labels = clusterer.fit_predict(X)
    # The silhouette_score gives the average value for all the samples.
    # This gives a perspective into the density and separation of the formed
    # clusters
    silhouette_avg = silhouette_score(X, cluster_labels)
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", silhouette_avg)
    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(X, cluster_labels)
    # Vertical offset of the next cluster's band; starts with a 10-px gap.
    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[cluster_labels == i]
        ith_cluster_silhouette_values.sort()
        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i
        # NOTE(review): cm.spectral was removed in matplotlib >= 2.2;
        # newer versions need cm.nipy_spectral — confirm target version.
        color = cm.spectral(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values,
                          facecolor=color, edgecolor=color, alpha=0.7)
        # Label the silhouette plots with their cluster numbers at the middle
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
        # Compute the new y_lower for next plot
        y_lower = y_upper + 10  # 10 for the 0 samples
    ax1.set_title("The silhouette plot for the various clusters.")
    ax1.set_xlabel("The silhouette coefficient values")
    ax1.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
    ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
    ax1.set_yticks([])  # Clear the yaxis labels / ticks
    ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
    # 2nd Plot showing the actual clusters formed
    colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
    ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
                c=colors)
    # Labeling the clusters
    centers = clusterer.cluster_centers_
    # Draw white circles at cluster centers
    ax2.scatter(centers[:, 0], centers[:, 1],
                marker='o', c="white", alpha=1, s=200)
    for i, c in enumerate(centers):
        ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
    ax2.set_title("The visualization of the clustered data.")
    ax2.set_xlabel("Feature space for the 1st feature")
    ax2.set_ylabel("Feature space for the 2nd feature")
    plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
                  "with n_clusters = %d" % n_clusters),
                 fontsize=14, fontweight='bold')
    plt.show()
| bsd-3-clause |
daemonmaker/pylearn2 | pylearn2/gui/tangent_plot.py | 44 | 1730 | """
Code for plotting curves with tangent lines.
"""
__author__ = "Ian Goodfellow"
try:
from matplotlib import pyplot
except Exception:
pyplot = None
from theano.compat.six.moves import xrange
def tangent_plot(x, y, s):
    """
    Plots a curve with tangent lines.

    Parameters
    ----------
    x : list
        List of x coordinates.
        Assumed to be sorted into ascending order, so that the tangent
        lines occupy 80 percent of the horizontal space between each pair
        of points.
    y : list
        List of y coordinates
    s : list
        List of slopes

    Raises
    ------
    RuntimeError
        If matplotlib's pyplot could not be imported at module load time.
    """
    assert isinstance(x, list)
    assert isinstance(y, list)
    assert isinstance(s, list)
    n = len(x)
    assert len(y) == n
    assert len(s) == n
    if pyplot is None:
        raise RuntimeError("Could not import pyplot, can't run this code.")
    pyplot.plot(x, y, color='b')
    if n == 0:
        pyplot.show()
        return
    pyplot.hold(True)
    # Add dummy entries so that the for loop can use the same code on every
    # entry
    if n == 1:
        # BUG FIX: the original wrote `[x[0] - 1] + x[0] + [x[0] + 1.]`,
        # concatenating a list with the scalar x[0], which raised a
        # TypeError whenever n == 1. The middle term must be the list `x`.
        x = [x[0] - 1] + x + [x[0] + 1.]
    else:
        x = [x[0] - (x[1] - x[0])] + x + [x[-2] + (x[-1] - x[-2])]
    y = [0.] + y + [0]
    s = [0.] + s + [0]
    for i in xrange(1, n + 1):
        # Each tangent segment spans 40% of the gap to each neighbour,
        # i.e. 80% of the available horizontal space around point i.
        ld = 0.4 * (x[i] - x[i - 1])
        lx = x[i] - ld
        ly = y[i] - ld * s[i]
        rd = 0.4 * (x[i + 1] - x[i])
        rx = x[i] + rd
        ry = y[i] + rd * s[i]
        pyplot.plot([lx, rx], [ly, ry], color='g')
    pyplot.show()
if __name__ == "__main__":
    # Demo: plot f(x) = x^2 / 2, whose derivative is x, so the slope
    # list passed to tangent_plot is simply the x list itself.
    import numpy as np
    xs = np.arange(-5., 5., .1)
    ys = 0.5 * (xs ** 2)
    tangent_plot(list(xs), list(ys), list(xs))
| bsd-3-clause |
SudipSinha/edu | MathMods/Thesis/code/timings.high.py | 1 | 7626 | from math import exp
import matplotlib.pyplot as plt
m = range( 1, 1001 )
time = [0.000469,0.000473,0.000484,0.000494,0.000502,0.000517,0.00113,0.000534,0.00116,0.000553,0.00117,0.000588,0.00119,0.000598,0.00133,0.00062,0.00136,0.000638,0.00138,0.000661,0.0014,0.000686,0.00142,0.000712,0.00145,0.000735,0.0016,0.00137,0.00166,0.00155,0.00378,0.00155,0.00178,0.0015,0.00173,0.00151,0.00178,0.00153,0.00182,0.00156,0.00184,0.00161,0.00187,0.00163,0.00191,0.00167,0.00194,0.00171,0.00194,0.00173,0.00198,0.00176,0.00201,0.00179,0.00205,0.00183,0.00202,0.00187,0.00206,0.00199,0.00378,0.00205,0.00391,0.00213,0.00393,0.00217,0.00401,0.00221,0.00404,0.00224,0.00405,0.00227,0.00411,0.00232,0.00416,0.00238,0.00421,0.0024,0.00411,0.00246,0.00413,0.00249,0.00442,0.00254,0.00442,0.00257,0.0045,0.00265,0.00454,0.00267,0.00459,0.00273,0.00466,0.00279,0.00469,0.00282,0.00473,0.00287,0.00479,0.0029,0.00485,0.00294,0.00467,0.00286,0.00458,0.00313,0.00495,0.00319,0.00525,0.00474,0.00531,0.00474,0.00537,0.00493,0.00546,0.00496,0.00552,0.00504,0.00557,0.00509,0.00562,0.00513,0.00567,0.00523,0.00572,0.00525,0.00577,0.0053,0.00584,0.00536,0.00587,0.00541,0.00593,0.00543,0.0061,0.00551,0.00622,0.00558,0.00626,0.00561,0.00632,0.00568,0.00643,0.00574,0.00648,0.0058,0.00656,0.00587,0.00657,0.00595,0.00662,0.00601,0.00672,0.00606,0.00677,0.00615,0.00669,0.00622,0.00662,0.00629,0.00666,0.00635,0.00672,0.00639,0.00659,0.00661,0.00689,0.00676,0.00943,0.00688,0.0106,0.0069,0.0109,0.00701,0.0109,0.00708,0.011,0.00713,0.0111,0.00724,0.0111,0.00729,0.0112,0.00741,0.0113,0.0075,0.0115,0.00759,0.0115,0.00766,0.0116,0.00772,0.0117,0.00789,0.0117,0.00791,0.0118,0.00796,0.0119,0.00825,0.0122,0.00815,0.0125,0.00828,0.0127,0.00836,0.0129,0.00844,0.0129,0.00852,0.0131,0.00862,0.0131,0.00871,0.0132,0.0088,0.0133,0.0089,0.0134,0.00888,0.0135,0.00902,0.0136,0.00913,0.0137,0.00916,0.0138,0.00925,0.0135,0.00936,0.0128,0.00946,0.013,0.00941,0.0131,0.00964,0.0136,0.00977,0.0135,0.00985,0.0142,0.0102,0.0143,0.0127,0.0144,0.0133,0.0154,0.0136,0.0156,0.0135,0.0157,0.0137,0.0159,0.0139,0.0161,0.
0144,0.0162,0.0145,0.0164,0.0145,0.0164,0.0148,0.0165,0.0149,0.0167,0.0151,0.0169,0.0152,0.0169,0.0153,0.0171,0.0156,0.0172,0.0157,0.0173,0.0157,0.0175,0.0159,0.0179,0.016,0.018,0.0162,0.0181,0.0163,0.0183,0.0165,0.0184,0.0166,0.0186,0.0167,0.0186,0.0168,0.0188,0.017,0.019,0.0171,0.0191,0.0172,0.0193,0.0174,0.0192,0.0175,0.0193,0.0177,0.0194,0.0176,0.0184,0.0178,0.0186,0.0175,0.0187,0.0176,0.0187,0.0178,0.0189,0.018,0.019,0.018,0.0191,0.0182,0.0189,0.0184,0.019,0.0188,0.0189,0.0187,0.0195,0.0189,0.0197,0.0191,0.0226,0.0192,0.0246,0.0194,0.0263,0.0197,0.0269,0.0197,0.027,0.0201,0.028,0.0202,0.0286,0.0206,0.0289,0.0211,0.0285,0.0207,0.0287,0.021,0.0288,0.0211,0.029,0.0213,0.0292,0.0216,0.0294,0.0217,0.0297,0.0219,0.0299,0.0222,0.0299,0.0224,0.0301,0.0223,0.0302,0.0226,0.0305,0.0228,0.0306,0.0229,0.0311,0.023,0.0313,0.0233,0.0318,0.0234,0.0319,0.0237,0.0321,0.0238,0.0324,0.0241,0.0326,0.024,0.033,0.0245,0.0331,0.0244,0.0333,0.0247,0.0335,0.0251,0.0333,0.025,0.0334,0.0252,0.0334,0.0254,0.0337,0.0257,0.0339,0.0258,0.0334,0.0262,0.0333,0.0263,0.0337,0.0266,0.0329,0.0268,0.0331,0.0269,0.0333,0.0271,0.0337,0.0269,0.0338,0.0271,0.0353,0.0273,0.0353,0.0273,0.0349,0.0277,0.0354,0.0282,0.0351,0.0285,0.0353,0.0293,0.0367,0.0331,0.0371,0.0337,0.0381,0.034,0.038,0.0341,0.0382,0.0354,0.0387,0.0356,0.039,0.0358,0.0396,0.0363,0.0409,0.0365,0.0411,0.0369,0.041,0.0371,0.0413,0.0375,0.0414,0.0376,0.0417,0.0378,0.042,0.0382,0.0425,0.0384,0.043,0.0387,0.0432,0.0388,0.0433,0.0391,0.0436,0.0395,0.0437,0.04,0.044,0.04,0.0442,0.0404,0.0448,0.0404,0.0449,0.0409,0.0454,0.0408,0.0453,0.0415,0.0457,0.0414,0.0463,0.0416,0.0455,0.0421,0.0452,0.042,0.0456,0.0423,0.0458,0.0426,0.046,0.0429,0.0463,0.0434,0.0468,0.0434,0.0469,0.0437,0.0471,0.0441,0.0475,0.0442,0.0475,0.0444,0.0476,0.0447,0.0481,0.0451,0.0484,0.0452,0.0487,0.0456,0.0491,0.0459,0.0492,0.0461,0.0492,0.0463,0.0498,0.0466,0.0502,0.0471,0.0487,0.0476,0.0481,0.0481,0.049,0.0486,0.0487,0.0489,0.0502,0.0494,0.0505,0.0496,0.0512,0.0501,0.0516,0.
0504,0.052,0.0508,0.0594,0.0509,0.0603,0.0515,0.0611,0.0518,0.0617,0.0518,0.0619,0.0522,0.0623,0.0526,0.0625,0.0529,0.0654,0.0532,0.0661,0.0535,0.0663,0.0537,0.0664,0.0541,0.067,0.0544,0.0674,0.0547,0.0678,0.0551,0.0676,0.0555,0.068,0.0558,0.0684,0.0561,0.0695,0.0563,0.0704,0.0569,0.0703,0.0568,0.0711,0.0572,0.0714,0.0573,0.0721,0.0578,0.0729,0.0574,0.0728,0.0581,0.0736,0.0582,0.0741,0.0585,0.0738,0.059,0.0717,0.0591,0.0728,0.0594,0.0729,0.06,0.0738,0.0602,0.0735,0.0605,0.074,0.0595,0.0744,0.0598,0.0751,0.0604,0.0751,0.0604,0.0756,0.061,0.0753,0.0614,0.0761,0.0616,0.076,0.062,0.077,0.0623,0.0768,0.063,0.0771,0.063,0.0777,0.0637,0.078,0.0638,0.0767,0.0643,0.077,0.0643,0.0765,0.0649,0.0769,0.0652,0.0775,0.0659,0.0778,0.0652,0.0783,0.0653,0.0788,0.0648,0.0784,0.0643,0.0786,0.0662,0.081,0.0663,0.081,0.0667,0.0813,0.0671,0.0825,0.0683,0.0827,0.0691,0.0822,0.0697,0.0828,0.0702,0.0828,0.0757,0.0833,0.0769,0.0837,0.0773,0.0843,0.0783,0.0838,0.079,0.0842,0.0796,0.0848,0.08,0.0865,0.0803,0.0865,0.0806,0.0868,0.0809,0.0874,0.0816,0.0881,0.082,0.0882,0.0825,0.0891,0.0831,0.0892,0.0837,0.0893,0.0846,0.09,0.0853,0.0912,0.086,0.0916,0.086,0.0922,0.0865,0.0925,0.0867,0.0933,0.0874,0.094,0.0877,0.0937,0.0881,0.094,0.0886,0.0951,0.0896,0.0952,0.0897,0.0963,0.09,0.0966,0.0903,0.0972,0.0907,0.0979,0.0913,0.0982,0.0918,0.0985,0.0927,0.0993,0.0925,0.0992,0.0929,0.0998,0.0934,0.0998,0.0937,0.101,0.0945,0.101,0.0949,0.102,0.0953,0.102,0.0958,0.103,0.0961,0.103,0.0968,0.103,0.0973,0.104,0.0975,0.105,0.0986,0.105,0.0988,0.105,0.0988,0.106,0.0995,0.106,0.1,0.107,0.1,0.107,0.101,0.108,0.102,0.109,0.102,0.109,0.102,0.108,0.103,0.108,0.104,0.109,0.104,0.108,0.104,0.109,0.105,0.106,0.106,0.107,0.107,0.108,0.107,0.108,0.107,0.108,0.108,0.111,0.109,0.111,0.11,0.112,0.11,0.112,0.11,0.113,0.111,0.114,0.112,0.115,0.112,0.125,0.112,0.127,0.113,0.128,0.114,0.129,0.114,0.131,0.115,0.132,0.115,0.132,0.116,0.132,0.116,0.134,0.117,0.135,0.118,0.135,0.118,0.136,0.119,0.136,0.119,0.136,0.12,0.137,0.12,0.138,0
.121,0.139,0.122,0.139,0.122,0.141,0.123,0.141,0.123,0.141,0.124,0.143,0.124,0.143,0.125,0.143,0.126,0.144,0.126,0.145,0.127,0.146,0.128,0.147,0.128,0.148,0.129,0.148,0.129,0.148,0.13,0.149,0.131,0.15,0.131,0.15,0.132,0.151,0.133,0.152,0.133,0.153,0.134,0.153,0.134,0.154,0.135,0.155,0.136,0.155,0.137,0.156,0.137,0.157,0.138,0.157,0.138,0.158,0.139,0.159,0.143,0.159,0.14,0.158,0.141,0.159,0.142,0.16,0.142,0.161,0.143,0.161,0.143,0.161,0.144,0.163,0.144,0.163,0.144,0.164,0.145,0.164,0.146,0.165,0.16,0.169,0.148,0.176,0.15,0.172,0.148,0.17,0.149,0.169,0.147,0.169,0.146,0.17,0.147,0.171,0.151,0.174,0.151,0.177,0.154,0.177,0.161,0.177,0.153,0.185,0.16,0.182,0.155,0.181,0.154,0.177,0.155,0.177,0.155,0.18,0.164,0.185,0.16,0.185,0.178,0.181,0.169,0.182,0.17,0.182,0.172,0.182,0.171,0.183,0.172,0.184,0.174,0.185,0.175,0.188,0.18,0.189,0.177,0.191,0.178,0.192,0.18,0.192,0.18,0.193,0.181]
# Quadratic trend curve fitted to the measured timings (coefficients were
# chosen by the author; see the legend label below).
trend = [(2e-7 * i * i + 1e-5 * i + 0.004) for i in m]
# Exponential comparison curve for the same m range.
expplot = [(0.004 * 2 ** (0.007*i)) for i in m]
# NOTE(review): `time` here is the measured-timings list defined above
# (it shadows the stdlib `time` module name); the list literal appears to
# have been line-wrapped by extraction — verify it parses in the original.
pts, = plt.plot(m, time, color = 'b', marker = '.', markersize = 1e-3)
quad, = plt.plot(m, trend, color = 'g', linewidth = 2.0)
expt, = plt.plot(m, expplot, color = 'r', linestyle = '-', linewidth = 1.5)
plt.axis([0, 1000, 0, 0.3])
plt.legend([pts, quad, expt], ['points', r'Quadratic: $ t = 2 \cdot 10^{-7} m^2 + 10^{-5} m + 0.004 $', r'Exponential: $ t = 0.004 \cdot 2^{0.007 m} $'], loc=2)
plt.tight_layout()
plt.xlabel('m')
plt.ylabel('time (s)')
plt.title('Timing the singular points method ' + r'$ ( \sigma = 0.2 ) $' )
plt.grid(True)
plt.show()
| mit |
kbrose/article-tagging | lib/tagnews/crimetype/tag.py | 2 | 6188 | import os
import pickle
import glob
import time
import pandas as pd
# not used explicitly, but this needs to be imported like this
# for unpickling to work.
from ..utils.model_helpers import LemmaTokenizer # noqa
"""
Contains the CrimeTags class that allows tagging of articles.
"""
# Default directory holding the pickled model/vectorizer pairs.
MODEL_LOCATION = os.path.join(os.path.split(__file__)[0],
                              'models',
                              'binary_stemmed_logistic')

# Crime-type tag codes, in the column order emitted by the classifier.
TAGS = ['OEMC', 'CPD', 'SAO', 'CCCC', 'CCJ', 'CCSP',
        'CPUB', 'IDOC', 'DOMV', 'SEXA', 'POLB', 'POLM',
        'GUNV', 'GLBTQ', 'JUVE', 'REEN', 'VIOL', 'BEAT',
        'PROB', 'PARL', 'CPLY', 'DRUG', 'CPS', 'GANG', 'ILSP',
        'HOMI', 'IPRA', 'CPBD', 'IMMG', 'ENVI', 'UNSPC',
        'ILSC', 'ARSN', 'BURG', 'DUI', 'FRUD', 'ROBB', 'TASR']


def load_model(location=MODEL_LOCATION):
    """
    Load the newest model/vectorizer pair from the folder `location`.

    The folder must contain at least one file named model-TIME.pkl and a
    matching vectorizer-TIME.pkl, where TIME is a YYYYmmdd-HHMMSS stamp.
    The pair with the most recent stamp is unpickled and returned as
    (clf, vectorizer).
    """
    candidates = glob.glob(os.path.join(location, 'model*.pkl'))
    if not candidates:
        raise RuntimeError(('No models to load. Run'
                            ' "python -m tagnews.crimetype.models.'
                            'binary_stemmed_logistic.save_model"'))

    def _stamp(path):
        # The 15 characters just before ".pkl" encode the save time.
        return time.strptime(path[-19:-4], '%Y%m%d-%H%M%S')

    newest = max(candidates, key=_stamp)
    with open(newest, 'rb') as f:
        clf = pickle.load(f)
    vectorizer_path = os.path.join(location,
                                   'vectorizer-' + newest[-19:-4] + '.pkl')
    with open(vectorizer_path, 'rb') as f:
        vectorizer = pickle.load(f)
    return clf, vectorizer
class CrimeTags():
    """
    CrimeTags let you tag articles. Neat!
    """
    def __init__(self,
                 model_directory=MODEL_LOCATION,
                 clf=None,
                 vectorizer=None):
        """
        Load a model from `model_directory` (see `load_model`), unless a
        classifier/vectorizer pair is supplied directly. Supplying only
        one of the pair is an error.
        """
        if clf is not None and vectorizer is not None:
            self.clf, self.vectorizer = clf, vectorizer
        elif clf is not None or vectorizer is not None:
            raise ValueError(('clf and vectorizer must both be None,'
                              ' or both be not None'))
        else:
            self.clf, self.vectorizer = load_model(model_directory)

    def tagtext_proba(self, text):
        """
        Compute the probability each tag applies to the given text.

        inputs:
            text: A python string.
        returns:
            pred_proba: A pandas series indexed by the tag name,
                sorted by descending probability.
        """
        features = self.vectorizer.transform([text])
        probabilities = self.clf.predict_proba(features)
        frame = pd.DataFrame(probabilities)
        frame.columns = TAGS
        # One input document -> the first (only) column after transposing.
        return frame.T.iloc[:, 0].sort_values(ascending=False)

    def tagtext(self, text, prob_thresh=0.5):
        """
        Tag a string with labels.

        inputs:
            text: A python string.
            prob_thresh: Probability threshold above which a tag applies.
        returns:
            preds: List of tags with probability > prob_thresh.
        """
        probabilities = self.tagtext_proba(text)
        above_threshold = probabilities[probabilities > prob_thresh]
        return above_threshold.index.values.tolist()

    def relevant_proba(self, text):
        """
        Probability that the given text is relevant, computed naively as
        the maximum of the per-tag probabilities. A more nuanced method
        would compute a joint probability.

        inputs:
            text: A python string.
        returns:
            relevant_proba: Probability the text is relevant.
        """
        return max(self.tagtext_proba(text))

    def relevant(self, text, prob_thresh=0.05):
        """
        Whether any tag has more than `prob_thresh` chance of applying
        to the text according to the model.

        inputs:
            text: A python string.
            prob_thresh: Relevance threshold on per-tag probability.
        returns:
            relevant: Boolean. Is the text "relevant"?
        """
        return bool(self.tagtext(text, prob_thresh))

    def get_contributions(self, text):
        """
        Rank the words in the text by their contribution to each
        category. Requires `clf` to expose `coef_` and `vectorizer`
        to expose `inverse_transform`.

        inputs:
            text: A python string.
        returns:
            contributions: Pandas panel keyed off [category, word].

        Example:
        >>> s = 'This is an article about drugs and gangs.'
        >>> p = tagger.get_contributions(s)
        >>> p['DRUG'].sort_values('weight', ascending=False).head(3)
                       weight
        drug         5.549870
        copyright    0.366905
        gang         0.194773
        """
        vec = self.vectorizer.transform([text])
        words = self.vectorizer.inverse_transform(vec)
        nonzero_columns = vec.nonzero()[1]
        per_tag = {}
        for tag_index, tag in enumerate(TAGS):
            per_tag[tag] = pd.DataFrame(
                index=words,
                data={'weight': self.clf.coef_[tag_index, nonzero_columns]}
            )
        # NOTE(review): pd.Panel was removed in pandas 1.0 — this method
        # requires an older pandas. Confirm the pinned pandas version.
        return pd.Panel(per_tag)
| mit |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/linear_model/tests/test_ransac.py | 22 | 20592 | from scipy import sparse
import numpy as np
from scipy import sparse
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
# (fixed seed so the corrupted rows are reproducible across runs)
rng = np.random.RandomState(1000)
# `outliers` holds the sorted, de-duplicated row indices to corrupt;
# it is shared by the tests below to build expected inlier masks.
outliers = np.unique(rng.randint(len(X), size=200))
data[outliers, :] += 50 + rng.rand(len(outliers), 2) * 10
# Reshape into the (n_samples, n_features) / (n_samples,) forms that
# the estimators expect.
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
    # Fit RANSAC on the corrupted line and check that exactly the known
    # outlier rows are excluded from the consensus set.
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(X, y)

    # Expected mask: every sample is an inlier except the injected outliers.
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_is_data_valid():
    # An is_data_valid callback that always rejects the subsample must make
    # fit() fail with a ValueError; the callback also sanity-checks that
    # each candidate subsample has exactly min_samples rows.
    def is_data_valid(X, y):
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    rng = np.random.RandomState(0)
    X_small = rng.rand(10, 2)
    y_small = rng.rand(10, 1)

    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5,
                             is_data_valid=is_data_valid,
                             random_state=0)
    assert_raises(ValueError, ransac.fit, X_small, y_small)
def test_ransac_is_model_valid():
    # An is_model_valid callback that always vetoes the fitted sub-model
    # must make fit() fail with a ValueError.
    def is_model_valid(estimator, X, y):
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5,
                             is_model_valid=is_model_valid,
                             random_state=0)
    assert_raises(ValueError, ransac.fit, X, y)
def test_ransac_max_trials():
    # max_trials=0 is invalid and must raise at fit time.
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, max_trials=0,
                             random_state=0)
    assert_raises(ValueError, ransac.fit, X, y)

    # With stop probability 1 - 1e-9 the dynamically computed trial bound
    # holds for any seed (1e-2 was observed to be too loose in practice).
    # 2 == X.shape[1] + 1, which is RANSAC's default min_samples.
    max_trials = _dynamic_max_trials(
        len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9)
    ransac = RANSACRegressor(LinearRegression(), min_samples=2)
    for seed in range(50):
        ransac.set_params(min_samples=2, random_state=seed)
        ransac.fit(X, y)
        assert_less(ransac.n_trials_, max_trials + 1)
def test_ransac_stop_n_inliers():
    # stop_n_inliers=2 is reached by the very first candidate model,
    # so RANSAC terminates after a single trial.
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, stop_n_inliers=2,
                             random_state=0)
    ransac.fit(X, y)
    assert_equal(ransac.n_trials_, 1)
def test_ransac_stop_score():
    # stop_score=0 is beaten by the first candidate model,
    # so RANSAC terminates after a single trial.
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, stop_score=0,
                             random_state=0)
    ransac.fit(X, y)
    assert_equal(ransac.n_trials_, 1)
def test_ransac_score():
    # All samples lie on y == 0 except two leverage points; the consensus
    # model ignores them, scoring perfectly off the outliers and
    # imperfectly on them.
    X_line = np.arange(100)[:, None]
    y_line = np.zeros((100, ))
    y_line[0] = 1
    y_line[1] = 100

    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=0.5, random_state=0)
    ransac.fit(X_line, y_line)

    assert_equal(ransac.score(X_line[2:], y_line[2:]), 1)
    assert_less(ransac.score(X_line[:2], y_line[:2]), 1)
def test_ransac_predict():
    # With two leverage points excluded from the consensus set, the fitted
    # model predicts the flat zero line everywhere.
    X_line = np.arange(100)[:, None]
    y_line = np.zeros((100, ))
    y_line[0] = 1
    y_line[1] = 100

    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=0.5, random_state=0)
    ransac.fit(X_line, y_line)
    assert_equal(ransac.predict(X_line), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
    # residual_threshold=0.0 leaves no inliers at all: fit() must raise a
    # ValueError and every skip must be attributed to "no inliers".
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=0.0, random_state=0,
                             max_trials=5)
    msg = ("RANSAC could not find a valid consensus set")
    assert_raises_regexp(ValueError, msg, ransac.fit, X, y)

    assert_equal(ransac.n_skips_no_inliers_, 5)
    assert_equal(ransac.n_skips_invalid_data_, 0)
    assert_equal(ransac.n_skips_invalid_model_, 0)
def test_ransac_no_valid_data():
    # A callback that rejects every subsample: fit() must raise, and all
    # 5 trials must be counted as invalid-data skips.
    def is_data_valid(X, y):
        return False

    ransac = RANSACRegressor(LinearRegression(),
                             is_data_valid=is_data_valid,
                             max_trials=5)
    msg = ("RANSAC could not find a valid consensus set")
    assert_raises_regexp(ValueError, msg, ransac.fit, X, y)

    assert_equal(ransac.n_skips_no_inliers_, 0)
    assert_equal(ransac.n_skips_invalid_data_, 5)
    assert_equal(ransac.n_skips_invalid_model_, 0)
def test_ransac_no_valid_model():
    # A callback that vetoes every fitted sub-model: fit() must raise, and
    # all 5 trials must be counted as invalid-model skips.
    def is_model_valid(estimator, X, y):
        return False

    ransac = RANSACRegressor(LinearRegression(),
                             is_model_valid=is_model_valid,
                             max_trials=5)
    msg = ("RANSAC could not find a valid consensus set")
    assert_raises_regexp(ValueError, msg, ransac.fit, X, y)

    assert_equal(ransac.n_skips_no_inliers_, 0)
    assert_equal(ransac.n_skips_invalid_data_, 0)
    assert_equal(ransac.n_skips_invalid_model_, 5)
def test_ransac_exceed_max_skips():
    # With max_skips=3 < max_trials=5, RANSAC aborts once the skip budget
    # is exceeded (after the 4th rejected subsample) with a dedicated
    # error message.
    def is_data_valid(X, y):
        return False

    ransac = RANSACRegressor(LinearRegression(),
                             is_data_valid=is_data_valid,
                             max_trials=5,
                             max_skips=3)
    msg = ("RANSAC skipped more iterations than `max_skips`")
    assert_raises_regexp(ValueError, msg, ransac.fit, X, y)

    assert_equal(ransac.n_skips_no_inliers_, 0)
    assert_equal(ransac.n_skips_invalid_data_, 4)
    assert_equal(ransac.n_skips_invalid_model_, 0)
def test_ransac_warn_exceed_max_skips():
    # Uses a module-level flag so the callback accepts the first subsample
    # and rejects every later one: fit() then finds one valid model but
    # exhausts `max_skips`, which must emit a UserWarning (not an error).
    global cause_skip
    cause_skip = False
    def is_data_valid(X, y):
        global cause_skip
        if not cause_skip:
            cause_skip = True
            return True
        else:
            return False
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator,
                                       is_data_valid=is_data_valid,
                                       max_skips=3,
                                       max_trials=5)
    assert_warns(UserWarning, ransac_estimator.fit, X, y)
    # One accepted trial, then 4 rejected subsamples before the abort.
    assert_equal(ransac_estimator.n_skips_no_inliers_, 0)
    assert_equal(ransac_estimator.n_skips_invalid_data_, 4)
    assert_equal(ransac_estimator.n_skips_invalid_model_, 0)
def test_ransac_sparse_coo():
    # Fitting on COO sparse input must give the same inlier mask as the
    # dense fit.
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.coo_matrix(X), y)

    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_sparse_csr():
    # Fitting on CSR sparse input must give the same inlier mask as the
    # dense fit.
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.csr_matrix(X), y)

    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_sparse_csc():
    # Fitting on CSC sparse input must give the same inlier mask as the
    # dense fit.
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.csc_matrix(X), y)

    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_none_estimator():
    # Passing base_estimator=None must fall back to a default estimator
    # whose predictions match an explicit LinearRegression.
    ransac_explicit = RANSACRegressor(LinearRegression(), min_samples=2,
                                      residual_threshold=5, random_state=0)
    ransac_default = RANSACRegressor(None, 2, 5, random_state=0)

    ransac_explicit.fit(X, y)
    ransac_default.fit(X, y)

    assert_array_almost_equal(ransac_explicit.predict(X),
                              ransac_default.predict(X))
def test_ransac_min_n_samples():
    # `min_samples` may be given as an absolute count (int, or an integral
    # float >= 1) or as a fraction of n_samples in (0, 1); equivalent
    # forms must match, and out-of-range forms must raise.
    base_estimator = LinearRegression()
    ransac_abs_int = RANSACRegressor(base_estimator, min_samples=2,
                                     residual_threshold=5, random_state=0)
    ransac_fraction = RANSACRegressor(base_estimator,
                                      min_samples=2. / X.shape[0],
                                      residual_threshold=5, random_state=0)
    ransac_negative = RANSACRegressor(base_estimator, min_samples=-1,
                                      residual_threshold=5, random_state=0)
    ransac_bad_float = RANSACRegressor(base_estimator, min_samples=5.2,
                                       residual_threshold=5, random_state=0)
    ransac_abs_float = RANSACRegressor(base_estimator, min_samples=2.0,
                                       residual_threshold=5, random_state=0)
    ransac_default = RANSACRegressor(base_estimator,
                                     residual_threshold=5, random_state=0)
    ransac_too_many = RANSACRegressor(base_estimator,
                                      min_samples=X.shape[0] + 1,
                                      residual_threshold=5, random_state=0)

    ransac_abs_int.fit(X, y)
    ransac_fraction.fit(X, y)
    ransac_abs_float.fit(X, y)
    ransac_default.fit(X, y)

    # Equivalent specifications produce identical predictions.
    assert_array_almost_equal(ransac_abs_int.predict(X),
                              ransac_fraction.predict(X))
    assert_array_almost_equal(ransac_abs_int.predict(X),
                              ransac_abs_float.predict(X))
    assert_array_almost_equal(ransac_abs_int.predict(X),
                              ransac_default.predict(X))

    # Invalid specifications must raise at fit time.
    assert_raises(ValueError, ransac_negative.fit, X, y)
    assert_raises(ValueError, ransac_bad_float.fit, X, y)
    assert_raises(ValueError, ransac_too_many.fit, X, y)
def test_ransac_multi_dimensional_targets():
    # Stacking y three times gives a 2-D (n_samples, 3) target; the
    # inlier mask must be unchanged from the 1-D case.
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)

    yyy = np.column_stack([y, y, y])
    ransac.fit(X, yyy)

    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
# XXX: Remove in 0.20
def test_ransac_residual_metric():
    # The deprecated `residual_metric` parameter must emit a
    # DeprecationWarning while still matching the default fit.
    # PEP 8 (E731): define the metrics with def rather than lambda
    # assignments.
    def residual_metric1(dy):
        return np.sum(np.abs(dy), axis=1)

    def residual_metric2(dy):
        return np.sum(dy ** 2, axis=1)

    yyy = np.column_stack([y, y, y])

    base_estimator = LinearRegression()
    ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        residual_metric=residual_metric1)
    ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        residual_metric=residual_metric2)

    # multi-dimensional
    ransac_estimator0.fit(X, yyy)
    assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy)
    assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator1.predict(X))
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))

    # one-dimensional
    ransac_estimator0.fit(X, y)
    assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
def test_ransac_residual_loss():
    # Custom `loss` callables (and the "squared_loss" string) must all
    # reproduce the default fit.
    # PEP 8 (E731): define the losses with def rather than lambda
    # assignments.
    def loss_multi1(y_true, y_pred):
        return np.sum(np.abs(y_true - y_pred), axis=1)

    def loss_multi2(y_true, y_pred):
        return np.sum((y_true - y_pred) ** 2, axis=1)

    def loss_mono(y_true, y_pred):
        return np.abs(y_true - y_pred)

    yyy = np.column_stack([y, y, y])

    base_estimator = LinearRegression()
    ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss=loss_multi1)
    ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss=loss_multi2)

    # multi-dimensional
    ransac_estimator0.fit(X, yyy)
    ransac_estimator1.fit(X, yyy)
    ransac_estimator2.fit(X, yyy)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator1.predict(X))
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))

    # one-dimensional
    ransac_estimator0.fit(X, y)
    ransac_estimator2.loss = loss_mono
    ransac_estimator2.fit(X, y)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))

    ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        loss="squared_loss")
    ransac_estimator3.fit(X, y)
    # BUG FIX: the original re-compared estimator0 with estimator2 here,
    # leaving the `loss="squared_loss"` estimator completely unchecked.
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator3.predict(X))
def test_ransac_default_residual_threshold():
    """With residual_threshold left unset, RANSAC must still flag the
    known corrupted samples as outliers."""
    estimator = RANSACRegressor(LinearRegression(), min_samples=2,
                                random_state=0)
    # Estimate parameters of the corrupted data.
    estimator.fit(X, y)
    # Reference mask: everything is an inlier except the corrupted indices.
    expected_mask = np.ones_like(estimator.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(estimator.inlier_mask_, expected_mask)
def test_ransac_dynamic_max_trials():
    """Check _dynamic_max_trials against the hand-computed reference.

    Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
    Hartley, R.~I. and Zisserman, A., 2004, Multiple View Geometry in
    Computer Vision, Second Edition, Cambridge University Press,
    ISBN: 0521540518.
    """
    # (n_inliers, n_samples, min_samples, probability, expected trials)
    reference_table = [
        (100, 100, 2, 0.99, 1),        # e = 0%, min_samples = 2
        (95, 100, 2, 0.99, 2),         # e = 5%
        (90, 100, 2, 0.99, 3),         # e = 10%
        (70, 100, 2, 0.99, 7),         # e = 30%
        (50, 100, 2, 0.99, 17),        # e = 50%
        (95, 100, 8, 0.99, 5),         # e = 5%, min_samples = 8
        (90, 100, 8, 0.99, 9),         # e = 10%
        (70, 100, 8, 0.99, 78),        # e = 30%
        (50, 100, 8, 0.99, 1177),      # e = 50%
        (1, 100, 10, 0, 0),            # probability 0: no trials needed
        (1, 100, 10, 1, float('inf')), # probability 1: never enough trials
    ]
    for n_inliers, n_samples, min_samples, prob, expected in reference_table:
        assert_equal(
            _dynamic_max_trials(n_inliers, n_samples, min_samples, prob),
            expected)
    # stop_probability outside [0, 1] must raise at fit time.
    base_estimator = LinearRegression()
    for bad_probability in (-0.1, 1.1):
        ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                           stop_probability=bad_probability)
        assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_fit_sample_weight():
    """Check sample_weight support in RANSACRegressor.fit.

    Verifies that (a) fitting with unit weights reproduces the reference
    inlier mask, (b) fit(X, y, sample_weight=w) with integer weights is
    equivalent to fitting on a dataset where each row is repeated w times,
    and (c) a base estimator without sample_weight support raises.
    """
    ransac_estimator = RANSACRegressor(random_state=0)
    n_samples = y.shape[0]
    weights = np.ones(n_samples)
    ransac_estimator.fit(X, y, weights)
    # sanity check: one mask entry per sample
    assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)
    ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
                                   ).astype(np.bool_)
    ref_inlier_mask[outliers] = False
    # check that mask is correct
    assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
    # check that fit(X) = fit([X1, X2, X3], sample_weight=[n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times and so forth
    random_state = check_random_state(0)
    X_ = random_state.randint(0, 200, [10, 1])
    y_ = np.ndarray.flatten(0.2 * X_ + 2)
    sample_weight = random_state.randint(0, 10, 10)
    outlier_X = random_state.randint(0, 1000, [1, 1])
    outlier_weight = random_state.randint(0, 10, 1)
    outlier_y = random_state.randint(-1000, 0, 1)
    # Build the "expanded" dataset: each row repeated by its integer weight.
    X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
                       np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
    y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
                                np.repeat(outlier_y, outlier_weight, axis=0),
                                axis=0))
    ransac_estimator.fit(X_flat, y_flat)
    ref_coef_ = ransac_estimator.estimator_.coef_
    sample_weight = np.append(sample_weight, outlier_weight)
    X_ = np.append(X_, outlier_X, axis=0)
    y_ = np.append(y_, outlier_y)
    ransac_estimator.fit(X_, y_, sample_weight)
    # The weighted fit must reproduce the coefficients of the expanded fit.
    assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)
    # check that if base_estimator.fit doesn't support
    # sample_weight, an error is raised
    base_estimator = Lasso()
    ransac_estimator = RANSACRegressor(base_estimator)
    assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
| mit |
weixuanfu/tpot | tpot/tpot.py | 1 | 3465 | # -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson (rso@randalolson.com)
- Weixuan Fu (weixuanf@upenn.edu)
- Daniel Angell (dpa34@drexel.edu)
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
from sklearn.model_selection import train_test_split
from sklearn.utils import _safe_indexing
import numpy as np
from .base import TPOTBase
from .config.classifier import classifier_config_dict
from .config.regressor import regressor_config_dict
class TPOTClassifier(TPOTBase):
    """TPOT estimator for classification problems."""
    scoring_function = 'accuracy'  # Classification scoring
    default_config_dict = classifier_config_dict  # Classification dictionary
    classification = True
    regression = False
    def _init_pretest(self, features, target):
        """Set the sample of data used to verify pipelines work
        with the passed data set.
        This is not intended for anything other than perfunctory dataset
        pipeline compatibility testing.
        """
        num_unique_target = len(np.unique(target))
        # make sure train_size is at least num_unique_target so stratified
        # subsampling can represent every class
        train_size=max(min(50,int(0.9*features.shape[0])), num_unique_target)
        self.pretest_X, _, self.pretest_y, _ = \
            train_test_split(
                features,
                target,
                random_state=self.random_state,
                test_size=None,
                train_size=train_size
            )
        # Make sure there is at least one example from each class
        # for this evaluative test sample: if the split lost a class,
        # overwrite the first rows of pretest_y with one sample per class.
        if not np.array_equal(np.unique(target), np.unique(self.pretest_y)):
            unique_target_idx = np.unique(target,return_index=True)[1]
            self.pretest_y[0:unique_target_idx.shape[0]] = \
                _safe_indexing(target, unique_target_idx)
class TPOTRegressor(TPOTBase):
    """TPOT estimator for regression problems."""
    # Negated MSE so that, per sklearn convention, larger scores are better.
    scoring_function = 'neg_mean_squared_error'  # Regression scoring
    default_config_dict = regressor_config_dict  # Regression dictionary
    classification = False
    regression = True
    def _init_pretest(self, features, target):
        """Pick a small training subsample used only to smoke-test that
        candidate pipelines are compatible with the passed data set.
        """
        # At most 50 samples, but never more than 90% of the data.
        subsample_size = min(50, int(0.9 * features.shape[0]))
        split = train_test_split(features,
                                 target,
                                 random_state=self.random_state,
                                 test_size=None,
                                 train_size=subsample_size)
        self.pretest_X, _, self.pretest_y, _ = split
| lgpl-3.0 |
deeplook/bokeh | bokeh/sampledata/gapminder.py | 41 | 2655 | from __future__ import absolute_import
import pandas as pd
from os.path import join
import sys
from . import _data_dir
'''
This module provides a pandas DataFrame instance of four
of the datasets from gapminder.org.
These are read in from csvs that have been downloaded from Bokeh's
sample data on S3. But the original code that generated the csvs from the
raw gapminder data is available at the bottom of this file.
'''
data_dir = _data_dir()
# Each name below is exposed as a module-level pandas DataFrame attribute
# (indexed by 'Country'), loaded from the downloaded sample-data CSVs.
datasets = [
    'fertility',
    'life_expectancy',
    'population',
    'regions',
]
for dataset in datasets:
    filename = join(data_dir, 'gapminder_%s.csv' % dataset)
    try:
        # setattr on this module makes e.g. `gapminder.fertility` importable.
        setattr(
            sys.modules[__name__],
            dataset,
            pd.read_csv(filename, index_col='Country')
        )
    except (IOError, OSError):
        raise RuntimeError('Could not load gapminder data file "%s". Please execute bokeh.sampledata.download()' % filename)
__all__ = datasets
# ====================================================
# Original data is from Gapminder - www.gapminder.org.
# The google docs links are maintained by gapminder
# The following script was used to get the data from gapminder
# and process it into the csvs stored in bokeh's sampledata.
"""
population_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls"
fertility_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls"
life_expectancy_url = "http://spreadsheets.google.com/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls"
regions_url = "https://docs.google.com/spreadsheets/d/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os/pub?gid=1&output=xls"
def _get_data(url):
# Get the data from the url and return only 1962 - 2013
df = pd.read_excel(url, index_col=0)
df = df.unstack().unstack()
df = df[(df.index >= 1964) & (df.index <= 2013)]
df = df.unstack().unstack()
return df
fertility_df = _get_data(fertility_url)
life_expectancy_df = _get_data(life_expectancy_url)
population_df = _get_data(population_url)
regions_df = pd.read_excel(regions_url, index_col=0)
# have common countries across all data
fertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))
population_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))
regions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))
fertility_df.to_csv('gapminder_fertility.csv')
population_df.to_csv('gapminder_population.csv')
life_expectancy_df.to_csv('gapminder_life_expectancy.csv')
regions_df.to_csv('gapminder_regions.csv')
"""
# ======================================================
| bsd-3-clause |
arranger1044/spyn | visualize.py | 2 | 5751 | import numpy
import matplotlib
import matplotlib.pyplot as pyplot
from matplotlib.backends.backend_pdf import PdfPages
import seaborn
#
# changing font size
# Module-level theming applied at import time: "poster" context with
# enlarged fonts and LaTeX text rendering for all subsequent plots.
seaborn.set_context("poster", font_scale=1.7, rc={'font.size': 32,
                                                  # 'axes.labelsize': fontSize,
                                                  # 'xtick.labelsize': fontSize,
                                                  # 'ytick.labelsize': fontSize,
                                                  # 'legend.fontsize': fontSize,
                                                  'text.usetex': True
                                                  })
# matplotlib.rcParams.update({'font.size': 22})
def beautify_with_seaborn():
    """Apply a uniform seaborn look to the current figure:
    white background, trimmed (despined) axes, 'poster' sizing context."""
    seaborn.set_style('white')
    seaborn.despine(trim=True)
    seaborn.set_context('poster')
def visualize_curves(curves,
                     output=None,
                     labels=None,
                     lines=None,
                     linestyles=None,
                     linewidths=None,
                     palette='hls',
                     markers=None,
                     loc=None):
    """Plot a collection of (x, y) curves, optionally with horizontal
    reference lines.

    Parameters
    ----------
    curves : list of (x_values, y_values) pairs.
    output : file path; if given the figure is saved there, else shown.
    labels : optional legend labels, one per curve.
    lines : optional iterable of y-values drawn as dashed horizontal lines.
    linestyles, linewidths : optional per-curve style overrides.
    palette : seaborn palette name used for curve colors.
    markers : kept for interface compatibility; currently unused
        (the marker kwarg below is commented out).
    loc : matplotlib legend location code (default 3, lower left).
    """
    seaborn.set_style('white')
    # seaborn.set_context('poster')
    n_curves = len(curves)
    # BUG FIX: `lines` defaults to None and len(None) raised a TypeError,
    # breaking every call without reference lines (e.g. the __main__ demo).
    n_lines = len(lines) if lines is not None else 0
    #
    # default legend location, lower left (matplotlib loc=3)
    if loc is None:
        loc = 3
    #
    # setting the palette
    seaborn.set_palette(palette, n_colors=(n_curves + n_lines))
    #
    # default linestyle
    default_linestyle = '-'
    if linestyles is None:
        linestyles = [default_linestyle for i in range(n_curves)]
    default_width = 5
    if linewidths is None:
        linewidths = [default_width for i in range(n_curves)]
    if markers is None:
        markers = ['o', 'v', '1', '2', '3']
    for i, curve in enumerate(curves):
        curve_x, curve_y = curve
        if labels is not None:
            label = labels[i]
            line = pyplot.plot(curve_x, curve_y,
                               label=label,
                               linestyle=linestyles[i],
                               linewidth=linewidths[i],
                               # marker=markers[i]
                               )
        else:
            line = pyplot.plot(curve_x, curve_y,
                               linestyle=linestyles[i],
                               linewidth=linewidths[i],
                               # marker=markers[i]
                               )
    #
    # lastly plotting straight lines, if present
    if lines is not None:
        default_linestyle = '--'
        for i, line_y in enumerate(lines):
            #
            # this feels a little bit hackish, assuming all share the same axis
            prototypical_x_axis = curves[0][0]
            start_x = prototypical_x_axis[0]
            end_x = prototypical_x_axis[-1]
            pyplot.plot([start_x, end_x],
                        [line_y, line_y],
                        linestyle=default_linestyle,
                        linewidth=default_width)  # linestyles[i + n_curves])
    #
    # setting up the legend
    if labels is not None:
        legend = pyplot.legend(labels, loc=loc)
    seaborn.despine()
    if output is not None:
        fig = pyplot.gcf()
        fig_width = 18.5
        fig_height = 10.5
        dpi = 100
        fig.set_size_inches(fig_width, fig_height)
        fig.savefig(output,
                    # additional_artists=[legend],
                    dpi=dpi,
                    bbox_inches='tight')
        pyplot.close(fig)
    else:
        #
        # shall this be mutually exclusive with file saving?
        pyplot.show()
# Default x-tick labels for visualize_histograms — presumably the standard
# density-estimation benchmark dataset names, in canonical order (verify
# against the experiment scripts that produce the histograms).
DATASET_LIST = ['nltcs', 'msnbc', 'kdd',
                'plants', 'baudio', 'jester', 'bnetflix',
                'accidents', 'tretail', 'pumsb_star',
                'dna', 'kosarek', 'msweb',
                'book', 'tmovie', 'cwebkb',
                'cr52', 'c20ng', 'bbc', 'ad']
def visualize_histograms(histograms,
                         output=None,
                         labels=DATASET_LIST,
                         linestyles=None,
                         rotation=90,
                         legend=None,
                         y_log=False,
                         colors=['seagreen', 'orange', 'cornflowerblue']):
    """Plot several histograms side by side as grouped bars.

    Parameters
    ----------
    histograms : list of equal-length value sequences, one bar group per tick.
    output : optional PDF path; when given the figure is saved there.
    labels : x-tick labels (default: the benchmark DATASET_LIST).
    linestyles : kept for interface compatibility; currently unused.
    rotation : x-tick label rotation in degrees, or None to leave upright.
    legend : optional legend entries, one per histogram.
    y_log : use a logarithmic y axis for the bars.
    colors : bar face colors, one per histogram.
    """
    n_histograms = len(histograms)
    #
    # assuming homogeneous data lengths
    # TODO: better error checking
    n_ticks = len(histograms[0])
    bin_width = 1 / (n_histograms + 1)
    bins = [[i + j * bin_width for i in range(n_ticks)]
            for j in range(1, n_histograms + 1)]
    #
    # setting up seaborn
    seaborn.set_style("white")
    seaborn.set_context("poster")
    fig, ax = pyplot.subplots()
    if legend is not None:
        _legend = pyplot.legend(legend)
    #
    # center the tick labels under the middle bar of each group
    middle_histogram = n_histograms // 2 + 1  # if n_histograms > 1 else 0
    # BUG FIX: the labels parameter was ignored (DATASET_LIST hard-coded).
    pyplot.xticks(bins[middle_histogram], labels)
    if rotation is not None:
        # BUG FIX: honor the rotation argument (was hard-coded to 90) and
        # don't clobber the `labels` parameter with the tick handles.
        locs, tick_labels = pyplot.xticks()
        pyplot.setp(tick_labels, rotation=rotation)
    #
    # actual plotting
    print(histograms)
    for i, histogram in enumerate(histograms):
        ax.bar(bins[i], histogram, width=bin_width,
               facecolor=colors[i], edgecolor="none",
               log=y_log)
    seaborn.despine()
    if output is not None:
        pp = PdfPages(output)
        pp.savefig(fig)
        pp.close()
if __name__ == '__main__':
    # Smoke test: plot exp(x) for integer x in [-10, 10).
    labels = [i for i in range(-10, 10)]
    points = [numpy.exp(i) for i in labels]
    visualize_curves([(labels, points)], labels=['a', 'b'])
| gpl-3.0 |
prlz77/dm4l | plugins/plot/plugin.py | 1 | 2743 | import StringIO
import logging
import urllib
from copy import deepcopy
import numpy as np
from misc import LogStatus
from plugins.abstract_plugin import AbstractPlugin
import matplotlib
#matplotlib.use('QT4Agg')
try:
import seaborn as sns
except ImportError:
logging.getLogger('dm4l').info('Install seaborn for nice looking plots.')
import pylab
class Plugin(AbstractPlugin):
    """Live line-plot plugin.

    Draws the configured y-fields of every non-errored log handler
    against the configured x-field, with optional title, axis limits and
    legend taken from the plugin config.
    """
    def __init__(self, dm4l, config):
        super(Plugin, self).__init__(dm4l, config)
        if self.config['frontend']:
            # Interactive mode off + hold so update() redraws in place.
            pylab.ioff()
            pylab.hold(True)
    def update(self, ids=None):
        """Redraw the plot for the given handler ids (default: all).

        Returns True on success; False if any selected handler is in an
        error state (the plot is left partially drawn in that case).
        """
        pylab.cla()
        if len(self.dm4l.get_handlers().keys()) == 0:
            return True
        if ids is None:
            ids = self.dm4l.get_handlers().keys()
        assert isinstance(self.config['y'], list)
        assert isinstance(ids, list)
        l_legend = []
        for handler_id in ids:
            if self.dm4l.get_handlers()[handler_id].status != LogStatus.ERROR:
                data = self.dm4l.get_handlers()[handler_id].get_data()
                x = np.array(data[self.config['x']])
                for y_field in self.config['y']:
                    y = self._process_y(np.array(data[y_field]))
                    pylab.plot(x, y)
                    pylab.xlabel(self.config['x'])
                    # Single y-field: legend entries are handler ids only.
                    if len(set(self.config['y'])) == 1:
                        l_legend += [handler_id]
                    else:
                        l_legend += [y_field + ' ' + handler_id]
            else:
                return False
        if len(set(self.config['y'])) == 1:
            pylab.ylabel(y_field)
        if self.config['title'] is not None:
            pylab.title(self.config['title'])
        if self.config['y_min'] != 'auto':
            pylab.ylim(ymin=self.config['y_min'])
        # BUG FIX: this was an `elif`, which silently ignored y_max whenever
        # y_min was set; the two limits are independent.
        if self.config['y_max'] != 'auto':
            pylab.ylim(ymax=self.config['y_max'])
        if self.config['legend']:
            pylab.legend(l_legend).draggable()
        self._update_gui()
        return True
    def _process_y(self, y):
        """Rescale raw percentage values to the configured scale and,
        for 'err' scores, convert accuracy into error (scale - value)."""
        y2 = y.copy()
        max_value = 100. / self.config['scale']
        y2 /= max_value
        if self.config['score'] == 'err':
            y2 = self.config['scale'] - y2
        return y2
    def _update_gui(self):
        """Refresh the window: non-blocking pause when auto-refreshing,
        blocking show() otherwise."""
        if self.dm4l.refresh > 0:
            pylab.pause(0.0000001)
        else:
            pylab.show()
    def _get_img(self):
        """Render the current figure to a base64 data-URI PNG string."""
        import base64  # BUG FIX: base64 was used below but never imported
        imgdata = StringIO.StringIO()
        pylab.gcf().savefig(imgdata, format='png')
        imgdata.seek(0)  # rewind the data
        return 'data:image/png;base64,' + urllib.quote(base64.b64encode(imgdata.buf))
def _save(self, path):
pylab.savefig(path) | mit |
tiagolbiotech/BioCompass | BioCompass/feature_gen.py | 2 | 3384 | from Bio import SeqIO
import pandas as pd
import re
from Bio.SeqUtils import GC
from sys import argv
# Build a per-BGC feature table (category, origin, organism, gene count,
# size, GC content, completeness) for the query strain's clusters and
# their BLAST hits, and write it to <strain>_features.txt (TSV).
# Usage: feature_gen.py <strain_name> <edges_file>
script, strain_name, edges_file = argv
edges_df = pd.read_csv(edges_file, sep='\t')
ref_list = edges_df['BGC'].drop_duplicates(inplace=False)
hits_list = edges_df['BLAST_hit'].drop_duplicates(inplace=False)
# Parallel output columns (one row per cluster record appended below).
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
# Query-strain clusters: read each GenBank record plus its antiSMASH table.
for item in ref_list:
    gb_record = SeqIO.read(open('../tables/%s.gbk'%item,"r"), "genbank")
    table_1 = pd.read_csv('../tables/%s_table1.csv'%item, sep='\t')
    for feature in gb_record.features:
        if feature.type == "cluster":
            col1.append(item)
            col2.append(feature.qualifiers['product'])
            col3.append('%s'%strain_name)
            col4.append(gb_record.description)
            # Gene count comes from the companion table for query clusters.
            col5.append(len(table_1))
            # Cluster size: end coordinate of the feature location string.
            size = re.search(r'(\d*):(\d*)(.*)','%s'%feature.location)
            col6.append(size.group(2))
            GC_cont = GC(str(gb_record.seq))
            col7.append(round(GC_cont,2))
            # Any 'N' in the sequence marks an incomplete assembly.
            if 'N' in gb_record.seq:
                col8.append('incomplete')
            else:
                col8.append('complete')
# BLAST hits: either database clusters (<genome>_NNN) or MIBiG (BGC...)
# entries; hits belonging to the query strain itself are skipped.
for item in hits_list:
    l = re.search(r'^%s(.*)$'%strain_name, str(item))
    m = re.search(r'^BGC(.*)$', str(item))
    n = re.search(r'(\S*)_\d\d\d', str(item))
    if n != None and l == None:
        gb_record = SeqIO.read(open('../../database_clusters/%s/%s.gbk'%(n.group(1),item),"r"), "genbank")
        for feature in gb_record.features:
            if feature.type == "cluster":
                col1.append(item)
                col2.append(feature.qualifiers['product'])
                col3.append('%s'%n.group(1))
                col4.append(gb_record.description)
                # NOTE(review): the inner loop reuses the name `feature`,
                # clobbering the outer loop variable — confirm intended.
                CDSs = []
                for feature in gb_record.features:
                    if feature.type == 'CDS':
                        CDSs.append(feature)
                col5.append(len(CDSs))
                col6.append(len(gb_record.seq))
                GC_cont = GC(str(gb_record.seq))
                col7.append(round(GC_cont,2))
                if 'N' in gb_record.seq:
                    col8.append('incomplete')
                else:
                    col8.append('complete')
    if m != None:
        gb_record = SeqIO.read(open('../../database_clusters/MIBiG/%s.gbk'%item,"r"), "genbank")
        col1.append(item)
        col2.append('MIBiG_%s'%gb_record.description)
        col3.append('MIBiG')
        col4.append(gb_record.annotations['organism'])
        CDSs = []
        for feature in gb_record.features:
            if feature.type == 'CDS':
                CDSs.append(feature)
        col5.append(len(CDSs))
        col6.append(len(gb_record.seq))
        GC_cont = GC(str(gb_record.seq))
        col7.append(round(GC_cont,2))
        if 'N' in gb_record.seq:
            col8.append('incomplete')
        else:
            col8.append('complete')
# Assemble and write the final TSV feature table.
frames = {'BGC':col1,'Category':col2, 'Origin':col3, 'Organism':col4, 'Number_of_genes':col5, 'Size(bp)':col6, 'GC_content':col7, 'Completness':col8}
features_df = pd.DataFrame(frames, index=None)
features_handle = open('%s_features.txt' % strain_name, "w")
features_df.to_csv(features_handle, sep='\t', index=False)
features_handle.close()
| bsd-3-clause |
kcavagnolo/astroML | book_figures/chapter9/fig_photoz_tree.py | 3 | 3637 | """
Photometric Redshifts by Decision Trees
---------------------------------------
Figure 9.14
Photometric redshift estimation using decision-tree regression. The data is
described in Section 1.5.5. The training set consists of u, g , r, i, z
magnitudes of 60,000 galaxies from the SDSS spectroscopic sample.
Cross-validation is performed on an additional 6000 galaxies. The left panel
shows training error and cross-validation error as a function of the maximum
depth of the tree. For a number of nodes N > 13, overfitting is evident.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from astroML.datasets import fetch_sdss_specgals
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch data and prepare it for the computation
data = fetch_sdss_specgals()
# put magnitudes in a matrix: one column per SDSS band (u, g, r, i, z)
mag = np.vstack([data['modelMag_%s' % f] for f in 'ugriz']).T
z = data['z']
# train on ~60,000 points (every 10th sample)
mag_train = mag[::10]
z_train = z[::10]
# test on ~6,000 separate points (offset by 1, so disjoint from training)
mag_test = mag[1::100]
z_test = z[1::100]
#------------------------------------------------------------
# Compute the cross-validation scores for several tree depths
depth = np.arange(1, 21)
rms_test = np.zeros(len(depth))
rms_train = np.zeros(len(depth))
i_best = 0
z_fit_best = None
for i, d in enumerate(depth):
    clf = DecisionTreeRegressor(max_depth=d, random_state=0)
    clf.fit(mag_train, z_train)
    z_fit_train = clf.predict(mag_train)
    z_fit = clf.predict(mag_test)
    # NOTE(review): mean(sqrt(diff**2)) is the mean *absolute* error,
    # not the RMS error despite the variable names/labels — confirm
    # against the book figure before changing.
    rms_train[i] = np.mean(np.sqrt((z_fit_train - z_train) ** 2))
    rms_test[i] = np.mean(np.sqrt((z_fit - z_test) ** 2))
    if rms_test[i] <= rms_test[i_best]:
        i_best = i
        z_fit_best = z_fit
best_depth = depth[i_best]
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(wspace=0.25,
                    left=0.1, right=0.95,
                    bottom=0.15, top=0.9)
# first panel: cross-validation error vs tree depth
ax = fig.add_subplot(121)
ax.plot(depth, rms_test, '-k', label='cross-validation')
ax.plot(depth, rms_train, '--k', label='training set')
ax.set_xlabel('depth of tree')
ax.set_ylabel('rms error')
ax.yaxis.set_major_locator(plt.MultipleLocator(0.01))
ax.set_xlim(0, 21)
ax.set_ylim(0.009,  0.04)
ax.legend(loc=1)
# second panel: predicted vs true redshift at the best depth
ax = fig.add_subplot(122)
ax.scatter(z_test, z_fit_best, s=1, lw=0, c='k')
ax.plot([-0.1, 0.4], [-0.1, 0.4], ':k')
ax.text(0.04, 0.96, "depth = %i\nrms = %.3f" % (best_depth, rms_test[i_best]),
        ha='left', va='top', transform=ax.transAxes)
ax.set_xlabel(r'$z_{\rm true}$')
ax.set_ylabel(r'$z_{\rm fit}$')
ax.set_xlim(-0.02, 0.4001)
ax.set_ylim(-0.02, 0.4001)
ax.xaxis.set_major_locator(plt.MultipleLocator(0.1))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.1))
plt.show()
| bsd-2-clause |
severinson/coded-computing-tools | overhead_performance_plots.py | 2 | 1489 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import model
import overhead
from plot import get_parameters_size, get_parameters_size_2
def unique_rows_plot():
    """Plot the unique-row count, normalized by the number of source rows,
    against the number of source rows for each parameter set."""
    params_list = get_parameters_size_2()
    plt.subplot('111')
    records = list()
    for params in params_list:
        record = {
            'rows': overhead.rows_from_q(parameters=params),
            'num_source_rows': params.num_source_rows,
        }
        records.append(record)
        print(record)
    frame = pd.DataFrame(records)
    plt.plot(frame['num_source_rows'], frame['rows'] / frame['num_source_rows'])
    plt.grid()
    plt.show()
    return
def main():
    """Plot computational delay, normalized by the uncoded baseline, as a
    function of coding overhead for a range of system sizes."""
    overheads = np.linspace(1.2, 1.4, 10)
    parameters = get_parameters_size()[5:10]
    plt.subplot('111')
    for p in parameters:
        results = list()
        # BUG FIX: the loop variable used to be named `overhead`, shadowing
        # the imported `overhead` module and making the call below fail with
        # an AttributeError on a float.
        for overhead_value in overheads:
            df = overhead.performance_from_overhead(
                parameters=p,
                overhead=overhead_value,
            )
            result = dict()
            result['overhead'] = overhead_value
            result['baseline'] = p.computational_delay()
            # Average each performance column over the samples.
            for label in df:
                result[label] = df[label].mean()
            results.append(result)
            print(result)
        df = pd.DataFrame(results)
        plt.plot(df['overhead'], df['delay'] / df['baseline'], label=p.num_servers)
    plt.grid()
    plt.legend()
    plt.show()
    return
if __name__ == '__main__':
    # NOTE: runs the unique-rows experiment; call main() instead for the
    # overhead-vs-delay plots defined above.
    unique_rows_plot()
frank-tancf/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 93 | 3243 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
    """Returns the number of non-zero columns in a CSR matrix X."""
    # nonzero() yields (row_indices, col_indices) of the stored entries;
    # counting distinct column indices gives the non-empty columns.
    _, col_indices = X.nonzero()
    return np.unique(col_indices).shape[0]
def tokens(doc):
    """Extract tokens from doc.

    This uses a simple regex to break strings into tokens. For a more
    principled approach, see CountVectorizer or TfidfVectorizer.
    """
    # Lazily yield each lowercased run of word characters.
    for word in re.findall(r"\w+", doc):
        yield word.lower()
def token_freqs(doc):
    """Extract a dict mapping tokens from doc to their frequencies."""
    counts = defaultdict(int)
    for term in tokens(doc):
        counts[term] = counts[term] + 1
    return counts
# Newsgroup categories used for the small benchmark corpus.
categories = [
    'alt.atheism',
    'comp.graphics',
    'comp.sys.ibm.pc.hardware',
    'misc.forsale',
    'rec.autos',
    'sci.space',
    'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print("    The default number of features is 2**18.")
print()
# Optional CLI argument: number of hashing features.
try:
    n_features = int(sys.argv[1])
except IndexError:
    n_features = 2 ** 18
except ValueError:
    print("not a valid number of features: %r" % sys.argv[1])
    sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
# Benchmark 1: DictVectorizer over precomputed term-frequency dicts.
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
# Benchmark 2: FeatureHasher over the same frequency dicts.
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
# Benchmark 3: FeatureHasher directly over raw token streams.
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
vybstat/scikit-learn | sklearn/metrics/tests/test_classification.py | 53 | 49781 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Make some classification predictions on a toy dataset using a SVC

    If binary is True restrict to a binary classification problem instead of a
    multiclass classification problem

    Returns (y_true, y_pred, probas_pred) for the held-out second half of
    the (shuffled) dataset; defaults to the iris dataset.
    """
    if dataset is None:
        # import some data to play with
        dataset = datasets.load_iris()
    X = dataset.data
    y = dataset.target
    if binary:
        # restrict to a binary classification task
        X, y = X[y < 2], y[y < 2]
    n_samples, n_features = X.shape
    p = np.arange(n_samples)
    # Fixed-seed shuffle so the train/test split is deterministic.
    rng = check_random_state(37)
    rng.shuffle(p)
    X, y = X[p], y[p]
    half = int(n_samples / 2)
    # add noisy features to make the problem harder and avoid perfect results
    rng = np.random.RandomState(0)
    X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
    # run classifier, get class probabilities and label predictions
    clf = svm.SVC(kernel='linear', probability=True, random_state=0)
    probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
    if binary:
        # only interested in probabilities of the positive case
        # XXX: do we really want a special API for the binary case?
        probas_pred = probas_pred[:, 1]
    y_pred = clf.predict(X[half:])
    y_true = y[half:]
    return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
    """Subset accuracy on dense label-indicator matrices."""
    # Dense label indicator matrix format
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])
    # Exactly one of the two rows matches.
    assert_equal(accuracy_score(y1, y2), 0.5)
    for y in (y1, y2):
        # Perfect agreement scores 1 ...
        assert_equal(accuracy_score(y, y), 1)
        # ... while complementing every label, or predicting all zeros,
        # leaves no exactly-matching row.
        assert_equal(accuracy_score(y, np.logical_not(y)), 0)
        assert_equal(accuracy_score(y, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
    # Test Precision Recall and F1 Score for binary classification task
    y_true, y_pred, _ = make_prediction(binary=True)
    # detailed measures for each class
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.73, 0.85], 2)
    assert_array_almost_equal(r, [0.88, 0.68], 2)
    assert_array_almost_equal(f, [0.80, 0.76], 2)
    assert_array_equal(s, [25, 25])
    # individual scoring function that can be used for grid search: in the
    # binary class case the score is the value of the measure for the positive
    # class (e.g. label == 1). This is deprecated for average != 'binary'.
    assert_dep_warning = partial(assert_warns, DeprecationWarning)
    # Each (kwargs, assertion) pair checks both the score value and whether
    # the call emits the expected deprecation warning.
    for kwargs, my_assert in [({}, assert_no_warnings),
                              ({'average': 'binary'}, assert_no_warnings),
                              ({'average': 'micro'}, assert_dep_warning)]:
        ps = my_assert(precision_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(ps, 0.85, 2)
        rs = my_assert(recall_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(rs, 0.68, 2)
        fs = my_assert(f1_score, y_true, y_pred, **kwargs)
        assert_array_almost_equal(fs, 0.76, 2)
        # F-beta (beta=2) must match its closed-form in terms of P and R.
        assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
                                      **kwargs),
                            (1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
    """Precision, recall and F1 must behave when only a single positive or
    negative class is present.

    Such a case may occur with non-stratified cross-validation.
    """
    for metric in (precision_score, recall_score, f1_score):
        # All-positive labels: the positive class is predicted perfectly.
        assert_equal(1., metric([1, 1], [1, 1]))
        # All-negative labels: there is no positive class to score.
        assert_equal(0., metric([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
    # Test handling of explicit additional (not in input) labels to PRF
    y_true = [1, 3, 3, 2]
    y_pred = [1, 1, 3, 2]
    y_true_bin = label_binarize(y_true, classes=np.arange(5))
    y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
    # Run the same checks on the multiclass and the binarized representation.
    data = [(y_true, y_pred),
            (y_true_bin, y_pred_bin)]
    for i, (y_true, y_pred) in enumerate(data):
        # No average: zeros in array for the labels absent from the input
        actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
                              average=None)
        assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
        # Macro average is changed (extra labels dilute the mean)
        actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
                              average='macro')
        assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
        for average in ['micro', 'weighted', 'samples']:
            # 'samples' averaging is only defined for the multilabel case.
            if average == 'samples' and i == 0:
                continue
            assert_almost_equal(recall_score(y_true, y_pred,
                                             labels=[0, 1, 2, 3, 4],
                                             average=average),
                                recall_score(y_true, y_pred, labels=None,
                                             average=average))
    # Error when introducing invalid label in multilabel case
    # (although it would only affect performance if average='macro'/None)
    for average in [None, 'macro', 'micro', 'samples']:
        assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
                      labels=np.arange(6), average=average)
        assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
                      labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
    """Restricting ``labels`` to a subset changes every averaged score."""
    # Test a subset of labels may be requested for PRF
    y_true = [1, 1, 2, 3]
    y_pred = [1, 3, 3, 3]
    y_true_bin = label_binarize(y_true, classes=np.arange(5))
    y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
    data = [(y_true, y_pred),
            (y_true_bin, y_pred_bin)]

    for i, (y_true, y_pred) in enumerate(data):
        recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
        recall_all = partial(recall_score, y_true, y_pred, labels=None)

        # recall on labels 1 and 3 only: [.5, 1.]
        assert_array_almost_equal([.5, 1.], recall_13(average=None))
        assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
        assert_almost_equal((.5 * 2 + 1. * 1) / 3,
                            recall_13(average='weighted'))
        assert_almost_equal(2. / 3, recall_13(average='micro'))

        # ensure the above were meaningful tests:
        for average in ['macro', 'weighted', 'micro']:
            assert_not_equal(recall_13(average=average),
                             recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
    """average_precision_score must reject multiclass targets."""
    # Test that average_precision_score function returns an error when trying
    # to compute average_precision_score for multiclass task.
    rng = check_random_state(404)
    y_pred = rng.rand(10)

    # y_true contains three different class values
    y_true = rng.randint(0, 3, size=10)
    assert_raise_message(ValueError, "multiclass format is not supported",
                         average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
    """A perfect ranking with tied scores still gives an AP of exactly 1."""
    # Duplicate values with precision-recall require a different
    # processing than when computing the AUC of a ROC, because the
    # precision-recall curve is a decreasing curve
    # The following situation corresponds to a perfect
    # test statistic, the average_precision_score should be 1
    y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
    y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
    assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    """Ties across the class boundary must make the AP strictly less than 1."""
    # Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
    # values have the same score (0.5) and so the first two values
    # could be swapped around, creating an imperfect sorting. This
    # imperfection should come through in the end score, making it less
    # than one.
    y_true = [0, 1, 1]
    y_score = [.5, .5, .6]
    assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
    """Invalid ``beta``, ``pos_label`` and ``average`` raise ValueError."""
    y_true, y_pred, _ = make_prediction(binary=True)

    # Bad beta
    assert_raises(ValueError, precision_recall_fscore_support,
                  y_true, y_pred, beta=0.0)

    # Bad pos_label
    assert_raises(ValueError, precision_recall_fscore_support,
                  y_true, y_pred, pos_label=2, average='macro')

    # Bad average option
    assert_raises(ValueError, precision_recall_fscore_support,
                  [0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
    """Binary confusion matrix, and MCC recomputed by hand from its cells,
    for both integer and string labels."""
    # Test confusion matrix - binary classification case
    y_true, y_pred, _ = make_prediction(binary=True)

    def test(y_true, y_pred):
        cm = confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[22, 3], [8, 17]])

        # Matthews correlation coefficient from the four cells.
        tp, fp, fn, tn = cm.flatten()
        num = (tp * tn - fp * fn)
        den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))

        true_mcc = 0 if den == 0 else num / den
        mcc = matthews_corrcoef(y_true, y_pred)
        assert_array_almost_equal(mcc, true_mcc, decimal=2)
        assert_array_almost_equal(mcc, 0.57, decimal=2)

    test(y_true, y_pred)
    test([str(y) for y in y_true],
         [str(y) for y in y_pred])
def test_cohen_kappa():
    """Cohen's kappa against published values from Artstein & Poesio (2008)."""
    # These label vectors reproduce the contingency matrix from Artstein and
    # Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
    y1 = np.array([0] * 40 + [1] * 60)
    y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
    kappa = cohen_kappa_score(y1, y2)
    assert_almost_equal(kappa, .348, decimal=3)
    # kappa is symmetric in its two arguments
    assert_equal(kappa, cohen_kappa_score(y2, y1))

    # Add spurious labels and ignore them.
    y1 = np.append(y1, [2] * 4)
    y2 = np.append(y2, [2] * 4)
    assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)

    # perfect agreement gives kappa of 1
    assert_almost_equal(cohen_kappa_score(y1, y1), 1.)

    # Multiclass example: Artstein and Poesio, Table 4.
    y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
    y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
    assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
    """Degenerate inputs must give an MCC of 0.0 rather than NaN."""
    assert_equal(matthews_corrcoef([0], [1]), 0.0)
    assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
    """Per-class and averaged P/R/F1 on multiclass data; 'samples' averaging
    is rejected; explicit label ordering permutes the per-class results."""
    # Test Precision Recall and F1 Score for multiclass classification task
    y_true, y_pred, _ = make_prediction(binary=False)

    # compute scores with default labels introspection
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
    assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
    assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
    assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
    assert_array_equal(s, [24, 31, 20])

    # averaging tests
    ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
    assert_array_almost_equal(ps, 0.53, 2)

    rs = recall_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(rs, 0.53, 2)

    fs = f1_score(y_true, y_pred, average='micro')
    assert_array_almost_equal(fs, 0.53, 2)

    ps = precision_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(ps, 0.53, 2)

    rs = recall_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(rs, 0.60, 2)

    fs = f1_score(y_true, y_pred, average='macro')
    assert_array_almost_equal(fs, 0.51, 2)

    ps = precision_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(ps, 0.51, 2)

    rs = recall_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(rs, 0.53, 2)

    fs = f1_score(y_true, y_pred, average='weighted')
    assert_array_almost_equal(fs, 0.47, 2)

    # 'samples' averaging is only defined for multilabel input
    assert_raises(ValueError, precision_score, y_true, y_pred,
                  average="samples")
    assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
    assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
    assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
                  beta=0.5)

    # same prediction but with an explicit label ordering
    p, r, f, s = precision_recall_fscore_support(
        y_true, y_pred, labels=[0, 2, 1], average=None)
    assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
    assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
    assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
    assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
    """Unsorted ``labels`` are accepted in the multilabel case.

    NOTE(review): "refcall" in the function name is a typo for "recall";
    kept unchanged so the test keeps its historical identifier.
    """
    # test that labels need not be sorted in the multilabel case
    y_true = np.array([[1, 1, 0, 0]])
    y_pred = np.array([[0, 0, 1, 1]])
    for average in ['samples', 'micro', 'macro', 'weighted', None]:
        p, r, f, s = precision_recall_fscore_support(
            y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
        # Truth and prediction are disjoint, so every score is 0.
        assert_array_equal(p, 0)
        assert_array_equal(r, 0)
        assert_array_equal(f, 0)
        if average is None:
            # support follows the requested label order [3, 0, 1, 2]
            assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
    """Smoke test: pos_label=None with 'weighted' averaging must not raise.

    No value assertions — this is a regression test for GH Issue #1296 and
    only checks the call succeeds.
    """
    # Test Precision Recall and F1 Score for multiclass classification task
    # GH Issue #1296
    # initialize data
    y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
    y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])

    # compute scores with default labels introspection
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 pos_label=None,
                                                 average='weighted')
def test_zero_precision_recall():
    """Pathological fully-wrong predictions yield 0 scores, never NaN.

    np.seterr(all='raise') turns any internal divide-by-zero into an error,
    so a NaN-producing implementation would fail loudly here.
    """
    # Check that pathological cases do not bring NaNs
    old_error_settings = np.seterr(all='raise')

    try:
        y_true = np.array([0, 1, 2, 0, 1, 2])
        y_pred = np.array([2, 0, 1, 1, 2, 0])

        assert_almost_equal(precision_score(y_true, y_pred,
                                            average='weighted'), 0.0, 2)
        assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
                            0.0, 2)
        assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
                            0.0, 2)
    finally:
        # always restore the process-wide numpy error state
        np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
    """Multiclass confusion matrix with default and explicit label ordering,
    for both integer and string labels."""
    # Test confusion matrix - multi-class case
    y_true, y_pred, _ = make_prediction(binary=False)

    def test(y_true, y_pred, string_type=False):
        # compute confusion matrix with default labels introspection
        cm = confusion_matrix(y_true, y_pred)
        assert_array_equal(cm, [[19, 4, 1],
                                [4, 3, 24],
                                [0, 2, 18]])

        # compute confusion matrix with explicit label ordering
        labels = ['0', '2', '1'] if string_type else [0, 2, 1]
        cm = confusion_matrix(y_true,
                              y_pred,
                              labels=labels)
        # rows/columns permuted according to the requested label order
        assert_array_equal(cm, [[19, 1, 4],
                                [0, 18, 2],
                                [4, 24, 3]])

    test(y_true, y_pred)
    test(list(str(y) for y in y_true),
         list(str(y) for y in y_pred),
         string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
    """Confusion matrix restricted to a subset of the observed labels."""
    # Test confusion matrix - multi-class case with subset of labels
    y_true, y_pred, _ = make_prediction(binary=False)

    # compute confusion matrix with only first two labels considered
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
    assert_array_equal(cm, [[19, 4],
                            [4, 3]])

    # compute confusion matrix with explicit label ordering for only subset
    # of labels
    cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
    assert_array_equal(cm, [[18, 2],
                            [24, 3]])
def test_classification_report_multiclass():
    """classification_report text for iris, with and without target names.

    NOTE(review): the expected report text must match classification_report's
    output byte-for-byte, including column spacing — verify the whitespace in
    these literals survived any reformatting of this file.
    """
    # Test performance report
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)

    # print classification report with class names
    expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names)
    assert_equal(report, expected_report)

    # print classification report with label detection
    expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
    """classification_report honours the ``digits`` precision parameter.

    NOTE(review): expected text must match classification_report's output
    byte-for-byte, including column spacing — verify the whitespace in these
    literals survived any reformatting of this file.
    """
    # Test performance report with added digits in floating point values
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)

    # print classification report with class names
    expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names, digits=5)
    assert_equal(report, expected_report)

    # print classification report with label detection (default digits)
    expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
    """classification_report with string class labels, with and without
    ``target_names`` overriding them.

    NOTE(review): expected text must match classification_report's output
    byte-for-byte, including column spacing — verify the whitespace in these
    literals survived any reformatting of this file.
    """
    y_true, y_pred, _ = make_prediction(binary=False)

    y_true = np.array(["blue", "green", "red"])[y_true]
    y_pred = np.array(["blue", "green", "red"])[y_pred]

    expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)

    expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
    report = classification_report(y_true, y_pred,
                                   target_names=["a", "b", "c"])
    assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
    """Unicode class labels work, or raise a clear error on NumPy < 1.7.0.

    NOTE(review): expected text must match classification_report's output
    byte-for-byte, including column spacing — verify the whitespace in this
    literal survived any reformatting of this file.
    """
    y_true, y_pred, _ = make_prediction(binary=False)

    labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
    y_true = labels[y_true]
    y_pred = labels[y_pred]

    expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
    if np_version[:3] < (1, 7, 0):
        # old numpy cannot searchsorted on unicode arrays
        expected_message = ("NumPy < 1.7.0 does not implement"
                            " searchsorted on unicode data correctly.")
        assert_raise_message(RuntimeError, expected_message,
                             classification_report, y_true, y_pred)
    else:
        report = classification_report(y_true, y_pred)
        assert_equal(report, expected_report)
def test_multilabel_classification_report():
    """classification_report on multilabel indicator targets.

    NOTE(review): expected text must match classification_report's output
    byte-for-byte, including column spacing — verify the whitespace in this
    literal survived any reformatting of this file.
    """
    n_classes = 4
    n_samples = 50

    _, y_true = make_multilabel_classification(n_features=1,
                                               n_samples=n_samples,
                                               n_classes=n_classes,
                                               random_state=0)

    _, y_pred = make_multilabel_classification(n_features=1,
                                               n_samples=n_samples,
                                               n_classes=n_classes,
                                               random_state=1)

    expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
    """Subset zero-one loss on dense multilabel indicator matrices."""
    # Dense label indicator matrix format
    ind_a = np.array([[0, 1, 1], [1, 0, 1]])
    ind_b = np.array([[0, 0, 1], [1, 0, 1]])

    # Exactly one of the two rows differs -> loss of one half.
    assert_equal(zero_one_loss(ind_a, ind_b), 0.5)

    # Perfect agreement incurs no loss.
    for ind in (ind_a, ind_b):
        assert_equal(zero_one_loss(ind, ind), 0)

    # Every row differs from the truth -> maximal loss of one.
    assert_equal(zero_one_loss(ind_b, np.logical_not(ind_b)), 1)
    assert_equal(zero_one_loss(ind_a, np.logical_not(ind_a)), 1)
    assert_equal(zero_one_loss(ind_a, np.zeros(ind_a.shape)), 1)
    assert_equal(zero_one_loss(ind_b, np.zeros(ind_a.shape)), 1)
def test_multilabel_hamming_loss():
    """Hamming loss on dense multilabel indicator matrices."""
    # Dense label indicator matrix format
    ind_a = np.array([[0, 1, 1], [1, 0, 1]])
    ind_b = np.array([[0, 0, 1], [1, 0, 1]])

    # Exactly one of the six entries differs.
    assert_equal(hamming_loss(ind_a, ind_b), 1 / 6)

    # Identical matrices incur no loss.
    for ind in (ind_a, ind_b):
        assert_equal(hamming_loss(ind, ind), 0)

    # Complementing every entry flips all labels: maximal loss.
    assert_equal(hamming_loss(ind_b, 1 - ind_b), 1)
    assert_equal(hamming_loss(ind_a, 1 - ind_a), 1)

    # Predicting all-zero misses exactly the positive entries.
    assert_equal(hamming_loss(ind_a, np.zeros(ind_a.shape)), 4 / 6)
    assert_equal(hamming_loss(ind_b, np.zeros(ind_a.shape)), 0.5)
def test_multilabel_jaccard_similarity_score():
    """Jaccard similarity on dense multilabel indicator matrices."""
    # Dense label indicator matrix format
    ind_a = np.array([[0, 1, 1], [1, 0, 1]])
    ind_b = np.array([[0, 0, 1], [1, 0, 1]])

    # size(ind_a \inter ind_b) = [1, 2]
    # size(ind_a \union ind_b) = [2, 2]
    # mean of [1/2, 2/2] = 0.75
    assert_equal(jaccard_similarity_score(ind_a, ind_b), 0.75)

    # Self-similarity is always perfect.
    for ind in (ind_a, ind_b):
        assert_equal(jaccard_similarity_score(ind, ind), 1)

    # Predictions sharing nothing with the truth score zero.
    assert_equal(jaccard_similarity_score(ind_b, np.logical_not(ind_b)), 0)
    assert_equal(jaccard_similarity_score(ind_a, np.logical_not(ind_a)), 0)
    assert_equal(jaccard_similarity_score(ind_a, np.zeros(ind_a.shape)), 0)
    assert_equal(jaccard_similarity_score(ind_b, np.zeros(ind_a.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
    """P/R/F1/F2 on a crafted multilabel example, under every averaging
    scheme (None, macro, micro, weighted, samples)."""
    # Test precision_recall_f1_score on a crafted multilabel example
    # First crafted example
    y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
    y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)

    # tp = [0, 1, 1, 0]
    # fn = [1, 0, 0, 1]
    # fp = [1, 1, 0, 0]
    # Check per class
    assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
    assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
    assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
    assert_array_almost_equal(s, [1, 1, 1, 1], 2)

    f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
    # keep per-class support before s is clobbered by the averaged calls
    support = s
    assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)

    # Check macro
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="macro")
    assert_almost_equal(p, 1.5 / 4)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 2.5 / 1.5 * 0.25)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
                        np.mean(f2))

    # Check micro
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="micro")
    assert_almost_equal(p, 0.5)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 0.5)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="micro"),
                        (1 + 4) * p * r / (4 * p + r))

    # Check weighted
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="weighted")
    assert_almost_equal(p, 1.5 / 4)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 2.5 / 1.5 * 0.25)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="weighted"),
                        np.average(f2, weights=support))

    # Check samples
    # |h(x_i) inter y_i | = [0, 1, 1]
    # |y_i| = [1, 1, 2]
    # |h(x_i)| = [1, 1, 2]
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="samples")
    assert_almost_equal(p, 0.5)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 0.5)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
                        0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
    """P/R/F1/F2 on a second crafted multilabel example, under every
    averaging scheme (None, micro, macro, weighted, samples)."""
    # Test precision_recall_f1_score on a crafted multilabel example 2
    # Second crafted example
    y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
    y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])

    # tp = [ 0.  1.  0.  0.]
    # fp = [ 1.  0.  0.  2.]
    # fn = [ 1.  1.  1.  0.]
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average=None)
    assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
    assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
    assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
    assert_array_almost_equal(s, [1, 2, 1, 0], 2)

    f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
    # keep per-class support before s is clobbered by the averaged calls
    support = s
    assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="micro")
    assert_almost_equal(p, 0.25)
    assert_almost_equal(r, 0.25)
    assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="micro"),
                        (1 + 4) * p * r / (4 * p + r))

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="macro")
    assert_almost_equal(p, 0.25)
    assert_almost_equal(r, 0.125)
    assert_almost_equal(f, 2 / 12)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="macro"),
                        np.mean(f2))

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="weighted")
    assert_almost_equal(p, 2 / 4)
    assert_almost_equal(r, 1 / 4)
    assert_almost_equal(f, 2 / 3 * 2 / 4)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="weighted"),
                        np.average(f2, weights=support))

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="samples")
    # Check samples
    # |h(x_i) inter y_i | = [0, 0, 1]
    # |y_i| = [1, 1, 2]
    # |h(x_i)| = [1, 1, 2]
    assert_almost_equal(p, 1 / 6)
    assert_almost_equal(r, 1 / 6)
    assert_almost_equal(f, 2 / 4 * 1 / 3)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="samples"),
                        0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
    """P/R/F1/F2 when one sample has no predicted labels at all."""
    y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
    y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])

    # true_pos = [ 0.  1.  1.  0.]
    # false_pos = [ 0.  0.  0.  1.]
    # false_neg = [ 1.  1.  0.  0.]
    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average=None)
    assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
    assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
    assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
    assert_array_almost_equal(s, [1, 2, 1, 0], 2)

    f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
    # keep per-class support before s is clobbered by the averaged calls
    support = s
    assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="macro")
    assert_almost_equal(p, 0.5)
    assert_almost_equal(r, 1.5 / 4)
    assert_almost_equal(f, 2.5 / (4 * 1.5))
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="macro"),
                        np.mean(f2))

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="micro")
    assert_almost_equal(p, 2 / 3)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="micro"),
                        (1 + 4) * p * r / (4 * p + r))

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="weighted")
    assert_almost_equal(p, 3 / 4)
    assert_almost_equal(r, 0.5)
    assert_almost_equal(f, (2 / 1.5 + 1) / 4)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="weighted"),
                        np.average(f2, weights=support))

    p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
                                                 average="samples")
    # |h(x_i) inter y_i | = [0, 0, 2]
    # |y_i| = [1, 1, 2]
    # |h(x_i)| = [0, 1, 2]
    assert_almost_equal(p, 1 / 3)
    assert_almost_equal(r, 1 / 3)
    assert_almost_equal(f, 1 / 3)
    assert_equal(s, None)
    assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
                                    average="samples"),
                        0.333, 2)
def test_precision_recall_f1_no_labels():
    """All-zero truth and prediction: every score is 0 and an
    UndefinedMetricWarning is emitted, for every averaging scheme."""
    y_true = np.zeros((20, 3))
    y_pred = np.zeros_like(y_true)

    # tp = [0, 0, 0]
    # fn = [0, 0, 0]
    # fp = [0, 0, 0]
    # support = [0, 0, 0]
    # |y_hat_i inter y_i | = [0, 0, 0]
    # |y_i| = [0, 0, 0]
    # |y_hat_i| = [0, 0, 0]

    for beta in [1]:
        p, r, f, s = assert_warns(UndefinedMetricWarning,
                                  precision_recall_fscore_support,
                                  y_true, y_pred, average=None, beta=beta)
        assert_array_almost_equal(p, [0, 0, 0], 2)
        assert_array_almost_equal(r, [0, 0, 0], 2)
        assert_array_almost_equal(f, [0, 0, 0], 2)
        assert_array_almost_equal(s, [0, 0, 0], 2)

        fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                             y_true, y_pred, beta=beta, average=None)
        assert_array_almost_equal(fbeta, [0, 0, 0], 2)

        for average in ["macro", "micro", "weighted", "samples"]:
            p, r, f, s = assert_warns(UndefinedMetricWarning,
                                      precision_recall_fscore_support,
                                      y_true, y_pred, average=average,
                                      beta=beta)
            assert_almost_equal(p, 0)
            assert_almost_equal(r, 0)
            assert_almost_equal(f, 0)
            assert_equal(s, None)

            fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
                                 y_true, y_pred,
                                 beta=beta, average=average)
            assert_almost_equal(fbeta, 0)
def test_prf_warnings():
    """Exact UndefinedMetricWarning message for each ill-defined PRF case."""
    # average of per-label scores
    f, w = precision_recall_fscore_support, UndefinedMetricWarning
    my_assert = assert_warns_message
    for average in [None, 'weighted', 'macro']:
        msg = ('Precision and F-score are ill-defined and '
               'being set to 0.0 in labels with no predicted samples.')
        my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)

        msg = ('Recall and F-score are ill-defined and '
               'being set to 0.0 in labels with no true samples.')
        my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)

    # average of per-sample scores
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 in samples with no predicted labels.')
    my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
              np.array([[1, 0], [0, 0]]), average='samples')

    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 in samples with no true labels.')
    my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
              np.array([[1, 0], [1, 0]]),
              average='samples')

    # single score: micro-average
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 due to no predicted samples.')
    my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
              np.array([[0, 0], [0, 0]]), average='micro')

    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 due to no true samples.')
    my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
              np.array([[1, 1], [1, 1]]), average='micro')

    # single positive label
    msg = ('Precision and F-score are ill-defined and '
           'being set to 0.0 due to no predicted samples.')
    my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')

    msg = ('Recall and F-score are ill-defined and '
           'being set to 0.0 due to no true samples.')
    my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
    """Recall warns only when there are no true samples."""
    # no true samples in prediction direction -> no warning
    assert_no_warnings(recall_score,
                       np.array([[1, 1], [1, 1]]),
                       np.array([[0, 0], [0, 0]]),
                       average='micro')
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')
        recall_score(np.array([[0, 0], [0, 0]]),
                     np.array([[1, 1], [1, 1]]),
                     average='micro')
        assert_equal(str(record.pop().message),
                     'Recall is ill-defined and '
                     'being set to 0.0 due to no true samples.')
def test_precision_warnings():
    """Precision warns only when there are no predicted samples."""
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')
        precision_score(np.array([[1, 1], [1, 1]]),
                        np.array([[0, 0], [0, 0]]),
                        average='micro')
        assert_equal(str(record.pop().message),
                     'Precision is ill-defined and '
                     'being set to 0.0 due to no predicted samples.')

    # no predicted samples in truth direction -> no warning
    assert_no_warnings(precision_score,
                       np.array([[0, 0], [0, 0]]),
                       np.array([[1, 1], [1, 1]]),
                       average='micro')
def test_fscore_warnings():
    """F1 and F-beta warn for both no-predicted and no-true samples."""
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter('always')

        for score in [f1_score, partial(fbeta_score, beta=2)]:
            score(np.array([[1, 1], [1, 1]]),
                  np.array([[0, 0], [0, 0]]),
                  average='micro')
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no predicted samples.')
            score(np.array([[0, 0], [0, 0]]),
                  np.array([[1, 1], [1, 1]]),
                  average='micro')
            assert_equal(str(record.pop().message),
                         'F-score is ill-defined and '
                         'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
    """An implicit ``average`` on multiclass input warns (DeprecationWarning)
    and behaves exactly like average='weighted'; binary input is silent."""
    # Ensure warning if f1_score et al.'s average is implicit for multiclass
    y_true = [1, 2, 3, 3]
    y_pred = [1, 2, 3, 1]
    y_true_bin = [0, 1, 1]
    y_pred_bin = [0, 1, 0]

    for metric in [precision_score, recall_score, f1_score,
                   partial(fbeta_score, beta=2)]:
        score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
        score_weighted = assert_no_warnings(metric, y_true, y_pred,
                                            average='weighted')
        assert_equal(score, score_weighted,
                     'average does not act like "weighted" by default')

        # check binary passes without warning
        assert_no_warnings(metric, y_true_bin, y_pred_bin)

        # but binary with pos_label=None should behave like multiclass
        score = assert_warns(DeprecationWarning, metric,
                             y_true_bin, y_pred_bin, pos_label=None)
        score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
                                            pos_label=None, average='weighted')
        assert_equal(score, score_weighted,
                     'average does not act like "weighted" by default with '
                     'binary data and pos_label=None')
def test__check_targets():
    """_check_targets merges target types, squeezes output, and raises on
    unsupported types, mixed types, and mismatched lengths."""
    # Check that _check_targets correctly merges target types, squeezes
    # output and fails if input lengths differ.
    IND = 'multilabel-indicator'
    MC = 'multiclass'
    BIN = 'binary'
    CNT = 'continuous'
    MMC = 'multiclass-multioutput'
    MCN = 'continuous-multioutput'

    # all of length 3
    EXAMPLES = [
        (IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
        # must not be considered binary
        (IND, np.array([[0, 1], [1, 0], [1, 1]])),
        (MC, [2, 3, 1]),
        (BIN, [0, 1, 1]),
        (CNT, [0., 1.5, 1.]),
        (MC, np.array([[2], [3], [1]])),
        (BIN, np.array([[0], [1], [1]])),
        (CNT, np.array([[0.], [1.5], [1.]])),
        (MMC, np.array([[0, 2], [1, 3], [2, 3]])),
        (MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
    ]

    # expected type given input types, or None for error
    # (types will be tried in either order)
    EXPECTED = {
        (IND, IND): IND,
        (MC, MC): MC,
        (BIN, BIN): BIN,

        (MC, IND): None,
        (BIN, IND): None,
        (BIN, MC): MC,

        # Disallowed types
        (CNT, CNT): None,
        (MMC, MMC): None,
        (MCN, MCN): None,
        (IND, CNT): None,
        (MC, CNT): None,
        (BIN, CNT): None,
        (MMC, CNT): None,
        (MCN, CNT): None,
        (IND, MMC): None,
        (MC, MMC): None,
        (BIN, MMC): None,
        (MCN, MMC): None,
        (IND, MCN): None,
        (MC, MCN): None,
        (BIN, MCN): None,
    }

    for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
        # EXPECTED holds each unordered pair once; look up both orders
        try:
            expected = EXPECTED[type1, type2]
        except KeyError:
            expected = EXPECTED[type2, type1]
        if expected is None:
            assert_raises(ValueError, _check_targets, y1, y2)

            if type1 != type2:
                assert_raise_message(
                    ValueError,
                    "Can't handle mix of {0} and {1}".format(type1, type2),
                    _check_targets, y1, y2)

            else:
                if type1 not in (BIN, MC, IND):
                    assert_raise_message(ValueError,
                                         "{0} is not supported".format(type1),
                                         _check_targets, y1, y2)

        else:
            merged_type, y1out, y2out = _check_targets(y1, y2)
            assert_equal(merged_type, expected)
            if merged_type.startswith('multilabel'):
                # multilabel outputs are converted to CSR sparse matrices
                assert_equal(y1out.format, 'csr')
                assert_equal(y2out.format, 'csr')
            else:
                assert_array_equal(y1out, np.squeeze(y1))
                assert_array_equal(y2out, np.squeeze(y2))
            # length mismatch must raise
            assert_raises(ValueError, _check_targets, y1[:-1], y2)

    # Make sure seq of seq is not supported
    y1 = [(1, 2,), (0, 2, 3)]
    y2 = [(2,), (0, 2,)]
    msg = ('You appear to be using a legacy multi-label data representation. '
           'Sequence of sequences are no longer supported; use a binary array'
           ' or sparse matrix instead.')
    assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
    """Binary hinge loss is the same for {-1, 1} and {0, 2} label encodings
    given identical decision values."""
    pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
    for encoded_labels in ([-1, 1, 1, -1], [0, 2, 2, 0]):
        assert_equal(hinge_loss(np.array(encoded_labels), pred_decision),
                     1.2 / 4)
def test_hinge_loss_multiclass():
    """Multiclass hinge loss matches a hand-computed margin formula."""
    pred_decision = np.array([
        [+0.36, -0.17, -0.58, -0.99],
        [-0.54, -0.37, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.54, -0.38, -0.48, -0.58],
        [-2.36, -0.79, -0.27, +0.24],
        [-1.45, -0.58, -0.38, -0.17]
    ])
    y_true = np.array([0, 1, 2, 1, 3, 2])
    # per-sample loss: 1 - decision[true class] + best competing decision
    dummy_losses = np.array([
        1 - pred_decision[0][0] + pred_decision[0][1],
        1 - pred_decision[1][1] + pred_decision[1][2],
        1 - pred_decision[2][2] + pred_decision[2][3],
        1 - pred_decision[3][1] + pred_decision[3][2],
        1 - pred_decision[4][3] + pred_decision[4][2],
        1 - pred_decision[5][2] + pred_decision[5][3]
    ])
    # hinge: clip negative margins to zero
    dummy_losses[dummy_losses <= 0] = 0
    dummy_hinge_loss = np.mean(dummy_losses)
    assert_equal(hinge_loss(y_true, pred_decision),
                 dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
    """A class present in the decision matrix but absent from y_true must
    raise when ``labels`` is not given."""
    y_true = np.array([0, 1, 2, 2])
    pred_decision = np.array([
        [+1.27, 0.034, -0.68, -1.40],
        [-1.45, -0.58, -0.38, -0.17],
        [-2.36, -0.79, -0.27, +0.24],
        [-2.36, -0.79, -0.27, +0.24]
    ])
    error_message = ("Please include all labels in y_true "
                     "or pass labels as third argument")
    assert_raise_message(ValueError,
                         error_message,
                         hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
    """Passing explicit ``labels`` allows classes absent from y_true."""
    pred_decision = np.array([
        [+0.36, -0.17, -0.58, -0.99],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17]
    ])
    y_true = np.array([0, 1, 2, 1, 2])
    labels = np.array([0, 1, 2, 3])
    # per-sample loss: 1 - decision[true class] + best competing decision
    dummy_losses = np.array([
        1 - pred_decision[0][0] + pred_decision[0][1],
        1 - pred_decision[1][1] + pred_decision[1][2],
        1 - pred_decision[2][2] + pred_decision[2][3],
        1 - pred_decision[3][1] + pred_decision[3][2],
        1 - pred_decision[4][2] + pred_decision[4][3]
    ])
    # hinge: clip negative margins to zero
    dummy_losses[dummy_losses <= 0] = 0
    dummy_hinge_loss = np.mean(dummy_losses)
    assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
                 dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
    # Currently, invariance of string and integer labels cannot be tested
    # in common invariance tests because invariance tests for multiclass
    # decision functions is not implemented yet.
    y_true = ['blue', 'green', 'red',
              'green', 'white', 'red']
    pred_decision = [
        [+0.36, -0.17, -0.58, -0.99],
        [-0.55, -0.38, -0.48, -0.58],
        [-1.45, -0.58, -0.38, -0.17],
        [-0.55, -0.38, -0.48, -0.58],
        [-2.36, -0.79, -0.27, +0.24],
        [-1.45, -0.58, -0.38, -0.17]]
    # Alphabetically sorted string labels map onto the decision columns.
    label_index = {'blue': 0, 'green': 1, 'red': 2, 'white': 3}
    scores = np.asarray(pred_decision)
    idx = np.array([label_index[label] for label in y_true])
    rows = np.arange(len(y_true))
    true_scores = scores[rows, idx]
    rest = scores.copy()
    rest[rows, idx] = -np.inf
    expected = np.mean(np.maximum(1.0 - true_scores + rest.max(axis=1), 0.0))
    assert_equal(hinge_loss(y_true, pred_decision), expected)
def test_log_loss():
    # Binary case with symbolic labels ("no" < "yes").
    y_true = ["no", "no", "no", "yes", "yes", "yes"]
    y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
                       [0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
    assert_almost_equal(log_loss(y_true, y_pred), 1.8817971)

    # Multiclass case; adapted from http://bit.ly/RJJHWA
    y_true = [1, 0, 2]
    y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
    assert_almost_equal(log_loss(y_true, y_pred, normalize=True), 0.6904911)

    # Check that we got all the shapes and axes right by doubling the
    # length of y_true and y_pred.
    y_true = y_true * 2
    y_pred = y_pred * 2
    assert_almost_equal(log_loss(y_true, y_pred, normalize=False),
                        0.6904911 * 6, decimal=6)

    # Check eps and handling of absolute zero and one probabilities.
    y_pred = np.asarray(y_pred) > .5
    loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
    assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))

    # Raise an error if the number of classes differs between arguments.
    y_true = [1, 0, 2]
    y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
    assert_raises(ValueError, log_loss, y_true, y_pred)

    # Case when y_true is a string array object.
    y_true = ["ham", "spam", "spam", "ham"]
    y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
    assert_almost_equal(log_loss(y_true, y_pred), 1.0383217, decimal=6)
def test_brier_score_loss():
    # Check brier_score_loss function
    y_true = np.array([0, 1, 1, 0, 1, 1])
    y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
    # Reference value: mean squared error between labels and probabilities
    # (squared Euclidean norm divided by the number of samples).
    true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)

    # Perfect predictions give zero loss.
    assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
    assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
    # Shifting or rescaling the label encoding must not change the loss.
    assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
                        true_score)
    assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
                        true_score)
    # Mismatched lengths and out-of-range probabilities are rejected.
    assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
    assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
    assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
| bsd-3-clause |
jakobj/nest-simulator | pynest/examples/plot_weight_matrices.py | 9 | 6702 | # -*- coding: utf-8 -*-
#
# plot_weight_matrices.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Plot weight matrices example
----------------------------
This example demonstrates how to extract the connection strength
for all the synapses among two populations of neurons and gather
these values in weight matrices for further analysis and visualization.
All connection types between these populations are considered, i.e.,
four weight matrices are created and plotted.
"""
###############################################################################
# First, we import all necessary modules to extract, handle and plot
# the connectivity matrices
import numpy as np
import matplotlib.pyplot as plt
import nest
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
###############################################################################
# We now specify a function to extract and plot weight matrices for all
# connections among `E_neurons` and `I_neurons`.
#
# We initialize all the matrices, whose dimensionality is determined by the
# number of elements in each population.
# Since in this example, we have 2 populations (E/I), :math:`2^2` possible
# synaptic connections exist (EE, EI, IE, II).
def plot_weight_matrices(E_neurons, I_neurons):
    """Extract and plot the weight matrices for all four connection types
    (EE, EI, IE, II) among the two populations.

    Each matrix entry ``W[i, j]`` accumulates the weights of connections
    whose source/target node_ids map to ``i``/``j`` after shifting by the
    minimum node_id of the corresponding population (so indices start at 0).
    """
    W_EE = np.zeros([len(E_neurons), len(E_neurons)])
    W_EI = np.zeros([len(I_neurons), len(E_neurons)])
    W_IE = np.zeros([len(E_neurons), len(I_neurons)])
    W_II = np.zeros([len(I_neurons), len(I_neurons)])

    # Obtain the connection objects and their weights for each pairing.
    a_EE = nest.GetConnections(E_neurons, E_neurons)
    c_EE = a_EE.weight
    a_EI = nest.GetConnections(I_neurons, E_neurons)
    c_EI = a_EI.weight
    a_IE = nest.GetConnections(E_neurons, I_neurons)
    c_IE = a_IE.weight
    a_II = nest.GetConnections(I_neurons, I_neurons)
    c_II = a_II.weight

    # Source/target node_ids for each connection list.
    a_EE_src = a_EE.source
    a_EE_trg = a_EE.target
    a_EI_src = a_EI.source
    a_EI_trg = a_EI.target
    a_IE_src = a_IE.source
    a_IE_trg = a_IE.target
    a_II_src = a_II.source
    a_II_trg = a_II.target

    # Accumulate weights into the matrices (multiple connections between
    # the same pair of nodes add up).
    for idx in range(len(a_EE)):
        W_EE[a_EE_src[idx] - min(E_neurons),
             a_EE_trg[idx] - min(E_neurons)] += c_EE[idx]
    for idx in range(len(a_EI)):
        W_EI[a_EI_src[idx] - min(I_neurons),
             a_EI_trg[idx] - min(E_neurons)] += c_EI[idx]
    for idx in range(len(a_IE)):
        W_IE[a_IE_src[idx] - min(E_neurons),
             a_IE_trg[idx] - min(I_neurons)] += c_IE[idx]
    for idx in range(len(a_II)):
        W_II[a_II_src[idx] - min(I_neurons),
             a_II_trg[idx] - min(I_neurons)] += c_II[idx]

    fig = plt.figure()
    # FIX: matplotlib figures have no ``subtitle`` method; ``suptitle`` is
    # the correct API call (the original raised AttributeError at runtime).
    fig.suptitle('Weight matrices', fontsize=14)
    gs = gridspec.GridSpec(4, 4)
    ax1 = plt.subplot(gs[:-1, :-1])
    ax2 = plt.subplot(gs[:-1, -1])
    ax3 = plt.subplot(gs[-1, :-1])
    ax4 = plt.subplot(gs[-1, -1])

    # NOTE(review): ax2 displays W_IE but is titled 'W_{EI}' (and vice versa
    # for ax3) -- confirm the intended title/matrix pairing upstream.
    plt1 = ax1.imshow(W_EE, cmap='jet')
    divider = make_axes_locatable(ax1)
    cax = divider.append_axes("right", "5%", pad="3%")
    plt.colorbar(plt1, cax=cax)
    ax1.set_title('W_{EE}')
    plt.tight_layout()

    plt2 = ax2.imshow(W_IE)
    plt2.set_cmap('jet')
    divider = make_axes_locatable(ax2)
    cax = divider.append_axes("right", "5%", pad="3%")
    plt.colorbar(plt2, cax=cax)
    ax2.set_title('W_{EI}')
    plt.tight_layout()

    plt3 = ax3.imshow(W_EI)
    plt3.set_cmap('jet')
    divider = make_axes_locatable(ax3)
    cax = divider.append_axes("right", "5%", pad="3%")
    plt.colorbar(plt3, cax=cax)
    ax3.set_title('W_{IE}')
    plt.tight_layout()

    plt4 = ax4.imshow(W_II)
    plt4.set_cmap('jet')
    divider = make_axes_locatable(ax4)
    cax = divider.append_axes("right", "5%", pad="3%")
    plt.colorbar(plt4, cax=cax)
    ax4.set_title('W_{II}')
    plt.tight_layout()
#################################################################################
# The script iterates through the list of all connections of each type.
# To populate the corresponding weight matrix, we identify the source-node_id
# (first element of each connection object, `n[0]`) and the target-node_id (second
# element of each connection object, `n[1]`).
# For each `node_id`, we subtract the minimum `node_id` within the corresponding
# population, to assure the matrix indices range from 0 to the size of the
# population.
#
# After determining the matrix indices `[i, j]`, for each connection object, the
# corresponding weight is added to the entry `W[i,j]`. The procedure is then
# repeated for all the different connection types.
#
# We then plot the figure, specifying the properties we want. For example, we
# can display all the weight matrices in a single figure, which requires us to
# use ``GridSpec`` to specify the spatial arrangement of the axes.
# A subplot is subsequently created for each connection type. Using ``imshow``,
# we can visualize the weight matrix in the corresponding axis. We can also
# specify the colormap for this image.
# Using the ``axis_divider`` module from ``mpl_toolkits``, we can allocate a small
# extra space on the right of the current axis, which we reserve for a
# colorbar.
# We can set the title of each axis and adjust the axis subplot parameters.
# Finally, the last three steps are repeated for each synapse type.
| gpl-2.0 |
Prasad9/incubator-mxnet | example/autoencoder/mnist_sae.py | 15 | 4165 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import mxnet as mx
import numpy as np
import logging
import data
from autoencoder import AutoEncoderModel
# Command line interface for the MNIST stacked auto-encoder example.
parser = argparse.ArgumentParser(description='Train an auto-encoder model for mnist dataset.')
parser.add_argument('--print-every', type=int, default=1000,
                    help='the interval of printing during training.')
parser.add_argument('--batch-size', type=int, default=256,
                    help='the batch size used for training.')
parser.add_argument('--pretrain-num-iter', type=int, default=50000,
                    help='the number of iterations for pretraining.')
parser.add_argument('--finetune-num-iter', type=int, default=100000,
                    help='the number of iterations for fine-tuning.')
parser.add_argument('--visualize', action='store_true',
                    help='whether to visualize the original image and the reconstructed one.')
parser.add_argument('--num-units', type=str, default="784,500,500,2000,10",
                    help='the number of hidden units for the layers of the encoder.' \
                    'The decoder layers are created in the reverse order.')
# set to INFO to see less information during training
logging.basicConfig(level=logging.DEBUG)
opt = parser.parse_args()
logging.info(opt)

# Unpack the parsed options into module-level configuration values.
print_every = opt.print_every
batch_size = opt.batch_size
pretrain_num_iter = opt.pretrain_num_iter
finetune_num_iter = opt.finetune_num_iter
visualize = opt.visualize
# Encoder layer sizes, e.g. [784, 500, 500, 2000, 10].
layers = [int(i) for i in opt.num_units.split(',')]
if __name__ == '__main__':
    # Build the stacked auto-encoder on CPU from the layer specification.
    ae_model = AutoEncoderModel(mx.cpu(0), layers, pt_dropout=0.2,
                                internal_act='relu', output_act='relu')

    X, _ = data.get_mnist()
    train_X = X[:60000]
    val_X = X[60000:]

    # Greedy layer-wise pretraining followed by end-to-end fine-tuning.
    # NOTE(review): ``mx.misc.FactorScheduler`` may be
    # ``mx.lr_scheduler.FactorScheduler`` in newer MXNet -- confirm against
    # the installed version.
    ae_model.layerwise_pretrain(train_X, batch_size, pretrain_num_iter, 'sgd',
                                l_rate=0.1, decay=0.0,
                                lr_scheduler=mx.misc.FactorScheduler(20000, 0.1),
                                print_every=print_every)
    ae_model.finetune(train_X, batch_size, finetune_num_iter, 'sgd',
                      l_rate=0.1, decay=0.0,
                      lr_scheduler=mx.misc.FactorScheduler(20000, 0.1),
                      print_every=print_every)

    # Persist and reload the trained parameters, then report errors.
    ae_model.save('mnist_pt.arg')
    ae_model.load('mnist_pt.arg')
    print("Training error:", ae_model.eval(train_X))
    print("Validation error:", ae_model.eval(val_X))

    if visualize:
        try:
            from matplotlib import pyplot as plt
            from model import extract_feature
            # sample a random image
            original_image = X[np.random.choice(X.shape[0]), :].reshape(1, 784)
            data_iter = mx.io.NDArrayIter({'data': original_image},
                                          batch_size=1, shuffle=False,
                                          last_batch_handle='pad')
            # Reconstruct the image. FIX: ``dict.values()`` is not indexable
            # on Python 3 (the original ``.values()[0]`` raised TypeError);
            # materialize it as a list first, which also works on Python 2.
            reconstructed_image = list(extract_feature(
                ae_model.decoder, ae_model.args, ae_model.auxs, data_iter, 1,
                ae_model.xpu).values())[0]
            print("original image")
            plt.imshow(original_image.reshape((28, 28)))
            plt.show()
            print("reconstructed image")
            plt.imshow(reconstructed_image.reshape((28, 28)))
            plt.show()
        except ImportError:
            logging.info("matplotlib is required for visualization")
| apache-2.0 |
cangermueller/deepcpg | scripts/dcpg_eval.py | 1 | 8067 | #!/usr/bin/env python
"""Evaluate the prediction performance of a DeepCpG model.
Imputes missing methylation states and evaluates model on observed states.
``--out_report`` will write evaluation metrics to a TSV file using.
``--out_data`` will write predicted and observed methylation state to a HDF5
file with following structure:
* ``chromo``: The chromosome of the CpG site.
* ``pos``: The position of the CpG site on the chromosome.
* ``outputs``: The input methylation state of each cell and CpG site, which \
can either observed or missing (-1).
* ``preds``: The predicted methylation state of each cell and CpG site.
Examples
--------
.. code:: bash
dcpg_eval.py
./data/*.h5
--model_files ./model
--out_data ./eval/data.h5
--out_report ./eval/report.tsv
"""
from __future__ import print_function
from __future__ import division
import os
import random
import sys
import argparse
import h5py as h5
import logging
import numpy as np
import pandas as pd
import six
from deepcpg import data as dat
from deepcpg import evaluation as ev
from deepcpg import models as mod
from deepcpg.data import hdf
from deepcpg.utils import ProgressBar, to_list
class H5Writer(object):
    """Incrementally writes batches of arrays into an HDF5 file.

    Datasets are created lazily on first write with a fixed total length of
    ``nb_sample``; an internal cursor tracks how many rows have been filled.
    """

    def __init__(self, filename, nb_sample):
        self.out_file = h5.File(filename, 'w')
        self.nb_sample = nb_sample
        self.idx = 0  # next free row in every dataset

    def __call__(self, name, data, dtype=None, compression='gzip', stay=False):
        """Append ``data`` to dataset ``name``, creating it on first use.

        With ``stay=True`` the cursor is not advanced, which allows writing
        several datasets for the same batch of rows.
        """
        if name not in self.out_file:
            self.out_file.create_dataset(
                name=name,
                shape=[self.nb_sample] + list(data.shape[1:]),
                dtype=data.dtype if dtype is None else dtype,
                compression=compression)
        nb_data = len(data)
        self.out_file[name][self.idx:(self.idx + nb_data)] = data
        if not stay:
            self.idx += nb_data

    def write_dict(self, data, name='', level=0, *args, **kwargs):
        """Recursively write a (possibly nested) dict of equally long arrays.

        All leaf arrays are written with a fixed cursor; only the outermost
        call (``level == 0``) advances the cursor by the batch size.
        """
        size = None
        for key, value in six.iteritems(data):
            child_name = '%s/%s' % (name, key)
            if isinstance(value, dict):
                self.write_dict(value, name=child_name, level=level + 1,
                                *args, **kwargs)
                continue
            if size:
                assert size == len(value)
            else:
                size = len(value)
            self(child_name, value, stay=True, *args, **kwargs)
        if level == 0:
            self.idx += size

    def close(self):
        self.out_file.close()
class App(object):
    """Command line application that evaluates a DeepCpG model."""

    def run(self, args):
        # ``args`` is sys.argv-like: args[0] is the program name.
        name = os.path.basename(args[0])
        parser = self.create_parser(name)
        opts = parser.parse_args(args[1:])
        return self.main(name, opts)

    def create_parser(self, name):
        """Build the command line argument parser."""
        p = argparse.ArgumentParser(
            prog=name,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            description='Evaluates prediction performance of a DeepCpG model')
        p.add_argument(
            'data_files',
            help='Input data files for evaluation',
            nargs='+')
        p.add_argument(
            '--model_files',
            help='Model files',
            nargs='+')
        p.add_argument(
            '-o', '--out_report',
            help='Output report file with evaluation metrics')
        p.add_argument(
            '--out_data',
            help='Output file with predictions and labels')
        p.add_argument(
            '--replicate_names',
            help='Regex to select replicates',
            nargs='+')
        p.add_argument(
            '--nb_replicate',
            type=int,
            help='Maximum number of replicates')
        p.add_argument(
            '--eval_size',
            help='Maximum number of samples that are kept in memory for'
            ' batch-wise evaluation. If zero, evaluate on entire data set.',
            type=int,
            default=100000)
        p.add_argument(
            '--batch_size',
            help='Batch size',
            type=int,
            default=128)
        p.add_argument(
            '--seed',
            help='Seed of random number generator',
            type=int,
            default=0)
        p.add_argument(
            '--nb_sample',
            help='Number of samples',
            type=int)
        p.add_argument(
            '--verbose',
            help='More detailed log messages',
            action='store_true')
        p.add_argument(
            '--log_file',
            help='Write log messages to file')
        return p

    def main(self, name, opts):
        """Run the evaluation; returns 0 on success.

        Raises ValueError if no model files were provided.
        """
        logging.basicConfig(filename=opts.log_file,
                            format='%(levelname)s (%(asctime)s): %(message)s')
        log = logging.getLogger(name)
        if opts.verbose:
            log.setLevel(logging.DEBUG)
        else:
            log.setLevel(logging.INFO)

        if not opts.model_files:
            raise ValueError('No model files provided!')

        log.info('Loading model ...')
        model = mod.load_model(opts.model_files)

        log.info('Loading data ...')
        nb_sample = dat.get_nb_sample(opts.data_files, opts.nb_sample)
        replicate_names = dat.get_replicate_names(
            opts.data_files[0],
            regex=opts.replicate_names,
            nb_key=opts.nb_replicate)
        data_reader = mod.data_reader_from_model(
            model, replicate_names, replicate_names=replicate_names)

        # Seed used since unobserved input CpG states are randomly sampled
        if opts.seed is not None:
            np.random.seed(opts.seed)
            random.seed(opts.seed)

        data_reader = data_reader(opts.data_files,
                                  nb_sample=nb_sample,
                                  batch_size=opts.batch_size,
                                  loop=False, shuffle=False)
        # Separate reader for chromosome/position metadata, iterated in
        # lock-step with the data reader below.
        meta_reader = hdf.reader(opts.data_files, ['chromo', 'pos'],
                                 nb_sample=nb_sample,
                                 batch_size=opts.batch_size,
                                 loop=False, shuffle=False)

        writer = None
        if opts.out_data:
            writer = H5Writer(opts.out_data, nb_sample)

        log.info('Predicting ...')
        nb_tot = 0   # total number of samples processed so far
        nb_eval = 0  # samples accumulated since the last chunk evaluation
        data_eval = dict()
        perf_eval = []
        progbar = ProgressBar(nb_sample, log.info)
        for inputs, outputs, weights in data_reader:
            batch_size = len(list(inputs.values())[0])
            nb_tot += batch_size
            progbar.update(batch_size)

            preds = to_list(model.predict(inputs))

            data_batch = dict()
            data_batch['preds'] = dict()
            data_batch['outputs'] = dict()
            # NOTE: ``name`` here shadows the method argument ``name``.
            for i, name in enumerate(model.output_names):
                data_batch['preds'][name] = preds[i].squeeze()
                data_batch['outputs'][name] = outputs[name].squeeze()

            for name, value in six.iteritems(next(meta_reader)):
                data_batch[name] = value

            if writer:
                writer.write_dict(data_batch)

            nb_eval += batch_size
            dat.add_to_dict(data_batch, data_eval)

            # Evaluate in chunks of at most ``eval_size`` samples to bound
            # memory usage (or once at the end if eval_size is zero).
            if nb_tot >= nb_sample or \
                    (opts.eval_size and nb_eval >= opts.eval_size):
                data_eval = dat.stack_dict(data_eval)
                perf_eval.append(ev.evaluate_outputs(data_eval['outputs'],
                                                     data_eval['preds']))
                data_eval = dict()
                nb_eval = 0
        progbar.close()

        if writer:
            writer.close()

        # Average the per-chunk metrics over all evaluation chunks.
        report = pd.concat(perf_eval)
        report = report.groupby(['metric', 'output']).mean().reset_index()

        if opts.out_report:
            report.to_csv(opts.out_report, sep='\t', index=False)

        report = ev.unstack_report(report)
        print(report.to_string())

        log.info('Done!')

        return 0
if __name__ == '__main__':
    # Entry point: run the evaluation application with the CLI arguments.
    app = App()
    app.run(sys.argv)
| mit |
Delosari/dazer | bin/lib/ssp_functions/dazer_SSP_example.py | 1 | 2247 | from ssp_synthesis_tools import ssp_fitter
import matplotlib.pyplot as plt
import numpy as np
from timeit import default_timer as timer
dz = ssp_fitter()
#Read parameters from command line
command_dict = dz.load_command_params()
#Read parameters from config file
conf_file_address = 'auto_ssp_V500_several_Hb.config'
config_dict = dz.load_config_params(conf_file_address)
#Update the fit configuration giving preference to the values from the command line
config_dict.update(command_dict)
#Preload stellar bases
dz.load_stellar_bases(dz.ssp_folder, 'FIT3D_example', config_dict)
#We use prepare synthetic observation to compare the quality of the fit
coeff_theo = np.loadtxt(dz.ssp_folder + 'bases_coeff.txt')
spectrum_matrix = np.loadtxt(dz.ssp_folder + config_dict['input_spec'])
obs_wave, obs_flux, obs_fluxVar = spectrum_matrix[:,1], spectrum_matrix[:,2], spectrum_matrix[:,3]
obs_fluxErr = np.sqrt(abs(obs_fluxVar))
synth_obs = dz.generate_synthObs(obs_wave, coeff_theo, dz.sspLib_dict['basesWave'], dz.sspLib_dict['fluxBases'],
config_dict['input_Av'],
config_dict['input_z'],
config_dict['input_sigma'])
#Import input data: spectrum, masks, emision line loc, stellar bases...
dz.preload_fitData(obs_wave, synth_obs, config_dict, obs_fluxErr)
start = timer()
fitting_data_dict = dz.fit_ssp(dz.sspFit_dict['input_z'], dz.sspFit_dict['input_sigma'], dz.sspFit_dict['input_Av'])
end = timer()
print 'bicho', ' time ', (end - start)
#Plot input and output data
fig, axis = plt.subplots(1, 1)
axis.plot(dz.sspFit_dict['obs_wave'], synth_obs, label = 'Synthetic spectrum')
axis.plot(fitting_data_dict['obs_wave'], fitting_data_dict['flux_sspFit'], label='SSP synthesis product')
#Stellar components components
coeff_output = fitting_data_dict['weight_coeffs']
idx_coefs = np.where(coeff_output != 0)
for idx in idx_coefs[0]:
label = 'Component {}'.format(round(coeff_output[idx]),4)
axis.plot(dz.sspFit_dict['obs_wave'], fitting_data_dict['flux_components'][:,idx], label=label)
axis.update({'xlabel':'Wavelength', 'ylabel':'Flux', 'title':'SSP synthesis fitting with 102 bases'})
axis.legend()
plt.show()
| mit |
Chilipp/psyplot_gui | psyplot_gui/console.py | 1 | 11139 | """
An example of opening up an RichJupyterWidget in a PyQT Application, this can
execute either stand-alone or by importing this file and calling
inprocess_qtconsole.show().
Based on the earlier example in the IPython repository, this has
been updated to use qtconsole.
"""
import re
import sys
try:
from qtconsole.inprocess import QtInProcessRichJupyterWidget
except ImportError:
from qtconsole.rich_jupyter_widget import (
RichJupyterWidget as QtInProcessRichJupyterWidget)
import ipykernel
from tornado import ioloop
from zmq.eventloop import ioloop as zmq_ioloop
from qtconsole.inprocess import QtInProcessKernelManager
from psyplot_gui.compat.qtcompat import (
with_qt5, QtCore, Qt, QTextEdit, QTextCursor, QKeySequence, asstring)
from psyplot_gui.common import StreamToLogger
import psyplot
import psyplot_gui
from psyplot_gui import rcParams
from psyplot_gui.common import DockMixin
import psyplot.project as psy
from psyplot.docstring import dedents
import logging
#: HACK: Boolean that is True if the prompt should be used. This unfortunately
#: is necessary for qtconsole >= 4.3 when running the tests
_with_prompt = True
modules2import = [
('psyplot.project', 'psy'),
('xarray', 'xr'),
('pandas', 'pd'),
('numpy', 'np')]
symbols_patt = re.compile(r"[^\'\"a-zA-Z0-9_.]")
logger = logging.getLogger(__name__)
class IPythonControl(QTextEdit):
    """A modified control to show the help of objects in the help explorer"""

    def keyPressEvent(self, event):
        """Reimplement Qt Method - Basic keypress event handler"""
        pressed = event.key()
        if pressed in (Qt.Key_Question, Qt.Key_ParenLeft):
            # '?' or '(' -> show help for the object at the cursor
            self.parentWidget().show_current_help()
        elif pressed == Qt.Key_I and (event.modifiers() & Qt.ControlModifier):
            # Ctrl+I -> force help display up to the end of the line
            self.parentWidget().show_current_help(True, True)
        # Let the parent widget handle the key press event
        QTextEdit.keyPressEvent(self, event)
class ConsoleWidget(QtInProcessRichJupyterWidget, DockMixin):
    """A console widget to access an inprocess shell"""

    # use the subclassed control so '?' and Ctrl+I open the help explorer
    custom_control = IPythonControl

    # DockMixin configuration
    dock_position = Qt.RightDockWidgetArea
    title = 'Console'

    # console specific rcParams (keys stripped of the 'console.' prefix)
    rc = rcParams.find_and_replace(
        'console.', pattern_base='console\.')

    intro_msg = ''

    # signals used to trigger script/command execution from other widgets
    run_script = QtCore.pyqtSignal(list)
    run_command = QtCore.pyqtSignal(list)

    # whether this widget (and its kernel channels) have been closed
    _closed = True

    def __init__(self, main, *args, **kwargs):
        """
        Parameters
        ----------
        help_explorer: psyplot_gui.help_explorer.HelpExplorer or None
            A widget that can be used to show the documentation of an object
        ``*args,**kwargs``
            Any other keyword argument for the
            :class:`qtconsole.rich_jupyter_widget.RichJupyterWidget`
        """
        self._closed = False
        kernel_manager = QtInProcessKernelManager()
        # on windows, sys.stdout may be None when using pythonw.exe. Therefore
        # we just us a StringIO for security
        orig_stdout = sys.stdout
        if sys.stdout is None:
            sys.stdout = StreamToLogger(logger)
        orig_stderr = sys.stderr
        if sys.stderr is None:
            sys.stderr = StreamToLogger(logger)
        kernel_manager.start_kernel(show_banner=False)
        if ipykernel.__version__ < '5.1.1':
            # monkey patch to fix
            # https://github.com/ipython/ipykernel/issues/370
            def _abort_queues(kernel):
                pass
            kernel_manager.kernel._abort_queues = _abort_queues
        # restore the original streams after kernel startup
        sys.stdout = orig_stdout
        sys.stderr = orig_stderr
        kernel = kernel_manager.kernel
        kernel.gui = 'qt4' if not with_qt5 else 'qt'
        kernel_client = kernel_manager.client()
        if rcParams['console.start_channels']:
            kernel_client.start_channels()
        self.help_explorer = kwargs.pop('help_explorer', None)
        super(ConsoleWidget, self).__init__(*args, parent=main, **kwargs)
        self.intro_msg = dedents("""
            psyplot version: %s
            gui version: %s
            The console provides you the full access to the current project and
            plots.
            To make your life easier, the following modules have been imported
            - %s
            Furthermore, each time you change the selection or the content in the
            plot objects viewer, the `sp` (the selection) and `mp` (all arrays)
            variables in the console are adjusted. To disable this behaviour, set::
            >>> import psyplot_gui
            >>> psyplot_gui.rcParams['console.auto_set_mp'] = False
            >>> psyplot_gui.rcParams['console.auto_set_sp'] = False
            To inspect and object in the console and display it's documentation in
            the help explorer, type 'Ctrl + I' or a '?' after the object""") % (
                psyplot.__version__, psyplot_gui.__version__,
                '\n - '.join('%s as %s' % t for t in modules2import))
        self.kernel_manager = kernel_manager
        self.kernel_client = kernel_client
        # import the convenience modules (psy, xr, pd, np) into the shell
        self.run_command_in_shell(
            '\n'.join('import %s as %s' % t for t in modules2import))
        self.exit_requested.connect(self._close_mainwindow)
        self.exit_requested.connect(QtCore.QCoreApplication.instance().quit)
        # we overwrite the short cut here because the 'Ctrl+S' shortcut is
        # reserved for mainwindows save action
        try:
            main.register_shortcut(
                self.export_action, QKeySequence(
                    'Ctrl+Alt+S', QKeySequence.NativeText))
        except AttributeError:
            pass
        # keep the `mp` and `sp` shell variables in sync with the project
        psy.Project.oncpchange.connect(self.update_mp)
        psy.Project.oncpchange.connect(self.update_sp)
        self.run_script.connect(self._run_script_in_shell)
        self.run_command.connect(self._run_command_in_shell)
        # HACK: we set the IOloop for the InProcessKernel here manually without
        # starting it (not necessary because QApplication has a blocking
        # IOLoop). However, we need this because the ZMQInteractiveShell wants
        # to call
        # loop = self.kernel.io_loop
        # loop.call_later(0.1, loop.stop)``
        zmq_ioloop.install()
        self.kernel_manager.kernel.io_loop = ioloop.IOLoop.current()

    def update_mp(self, project):
        """Update the `mp` variable in the shell is
        ``rcParams['console.auto_set_mp']`` with a main project"""
        if self.rc['auto_set_mp'] and project is not None and project.is_main:
            self.run_command_in_shell('mp = psy.gcp(True)')

    def update_sp(self, project):
        """Update the `sp` variable in the shell is
        ``rcParams['console.auto_set_sp']`` with a sub project"""
        if self.rc['auto_set_sp'] and (project is None or not project.is_main):
            self.run_command_in_shell('sp = psy.gcp()')

    def show_current_help(self, to_end=False, force=False):
        """Show the help of the object at the cursor position if
        ``rcParams['console.connect_to_help']`` is set"""
        if not force and not self.rc['connect_to_help']:
            return
        obj_text = self.get_current_object(to_end)
        if obj_text is not None and self.help_explorer is not None:
            found, obj = self.get_obj(obj_text)
            if found:
                self.help_explorer.show_help(obj, obj_text)
                # return keyboard focus to the console control
                self._control.setFocus()

    def get_obj(self, obj_text):
        """
        Get the object from the shell specified by `obj_text`

        Parameters
        ----------
        obj_text: str
            The name of the variable as it is stored in the shell

        Returns
        -------
        bool
            True, if the object could be found
        object or None
            The requested object or None if it could not be found"""
        info = self.kernel_manager.kernel.shell._object_find(
            obj_text)
        if info.found:
            return True, info.obj
        else:
            return False, None

    def get_current_object(self, to_end=False):
        """Get the name of the object at cursor position"""
        c = self._control
        if not _with_prompt:
            try:  # qtconsole >4.3 uses the _prompt_cursor attribute
                cursor = self._prompt_cursor
            except AttributeError:
                cursor = c.textCursor()
        else:
            cursor = c.textCursor()
        curr = cursor.position()
        # text of the current line from the line start up to the cursor
        start = curr - cursor.positionInBlock()
        txt = c.toPlainText()[start:curr]
        eol = ''
        if to_end:
            # also take the rest of the line up to the next symbol character
            cursor.movePosition(QTextCursor.EndOfBlock)
            end = cursor.position()
            if end > curr:
                eol = c.toPlainText()[curr:end]
                m = symbols_patt.search(eol)
                if m:
                    eol = eol[:m.start()]
        if not txt:
            return txt
        txt = asstring(txt)
        txt = txt.rsplit('\n', 1)[-1]
        txt_end = ""
        # strip (but remember) a trailing call or index expression
        for startchar, endchar in ["[]", "()"]:
            if txt.endswith(endchar):
                pos = txt.rfind(startchar)
                if pos:
                    txt_end = txt[pos:]
                    txt = txt[:pos]
        tokens = symbols_patt.split(txt)
        token = None
        try:
            # take the last real (non-symbol) token before the cursor
            while token is None or symbols_patt.match(token):
                token = tokens.pop()
            if token.endswith('.'):
                token = token[:-1]
            if token.startswith('.'):
                # Invalid object name
                return None
            token += txt_end
            if token:
                return token + eol
        except IndexError:
            return None

    def _run_script_in_shell(self, args):
        # slot for the `run_script` signal
        self.run_script_in_shell(args[0][0])

    def run_script_in_shell(self, script):
        """Run a script in the shell"""
        self.kernel_manager.kernel.shell.run_line_magic('run', script)

    def _run_command_in_shell(self, args):
        # slot for the `run_command` signal
        # 0: filenames
        # 1: project
        # 2: command
        self.run_command_in_shell(args[2])

    def run_command_in_shell(self, code, *args, **kwargs):
        """Run a script in the shell"""
        ret = self.kernel_manager.kernel.shell.run_code(code, *args, **kwargs)
        import IPython
        if IPython.__version__ < '7.0':  # run_code is an asyncio.coroutine
            return ret
        else:
            # in IPython >= 7 run_code is a coroutine that must be awaited
            import asyncio
            gathered = asyncio.gather(ret)
            loop = asyncio.get_event_loop()
            ret = loop.run_until_complete(gathered)
            return ret[0]

    def _close_mainwindow(self):
        from psyplot_gui.main import mainwindow
        if mainwindow is not None:
            mainwindow.close()
        else:
            self.close()

    def close(self):
        # stop the kernel channels before closing the widget
        if self.kernel_client.channels_running:
            self.kernel_client.stop_channels()
        self._closed = True
        return super(ConsoleWidget, self).close()

    def eventFilter(self, *args, **kwargs):
        # ignore events once the widget has been closed
        if self._closed:
            return False
        return super().eventFilter(*args, **kwargs)
dandanvidi/in-vivo-enzyme-kinetics | scripts/helper.py | 3 | 13207 | import cPickle as pickle
import pandas as pd
from trees import Tree
import csv, re
from matplotlib_venn import venn2
import matplotlib.pyplot as plt
from copy import deepcopy
import numpy as np
import seaborn as sb
from collections import defaultdict
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible
from cobra.flux_analysis.parsimonious import optimize_minimal_flux
# Paths and shared data tables loaded once at import time.
ppath = "../../proteomics-collection/"
# protein copy numbers per fL of cell volume across growth conditions
proteomics = pd.DataFrame.from_csv(ppath+"meta_abundance[copies_fL].csv")
# parsimonious FBA fluxes and flux variability ranges [mmol/gCDW/h]
pFBA = pd.DataFrame.from_csv("../data/flux[mmol_gCDW_h]_projected.csv")
pFVA = pd.DataFrame.from_csv("../data/flux_variability_[mmol_gCDW_h].csv", header=[0,1]).T
protein_info = pd.read_csv('../data/protein_abundance_info.csv', sep='\t')
# growth-condition metadata (carbon sources, growth rates, cell volumes)
gc = pd.DataFrame.from_csv("../data/carbon_sources.csv")
#gc = gc[gc.reference=='Schmidt et al. 2015']
gr = gc['growth rate [h-1]'][gc.index]
fL_cell = gc['single cell volume [fL]'] /2 # fL (cell volumes are overestimated by a factor of 1.7)
fg_cell_old = pd.read_csv('../data/protein_abundance_[fg_cell].csv')
copies_cell_persist = pd.read_csv('../data/protein_abundance_persistors[copies_cell].csv')
# genome-scale metabolic model (iJO1366) with reversible reactions split
# into irreversible forward/backward pairs
model = create_cobra_model_from_sbml_file('../data/iJO1366.xml')
convert_to_irreversible(model)
rxns = {r.id:r for r in model.reactions}
def map_proteomics(df):
    """Map UniProt identifiers in ``df`` to E. coli b-numbers, in place.

    Sets the mapped column as the index (renamed to 'bnumber'), drops
    proteins without a known mapping and sorts by index.
    """
    # UniProt -> b-number lookup parsed from fixed-width columns of the
    # gene annotation file.
    uni_to_b = {row[48:54]:row[0:5].split(';')[0].strip()
                for row in open("../data/all_ecoli_genes.txt", 'r')}
    # NOTE(review): this replace targets a column 'UPID' while the second
    # replace and set_index below use 'upid' -- confirm the actual column
    # name; one of the two spellings is likely a no-op.
    df.replace(to_replace={'UPID':uni_to_b}, inplace=True)

    # Manually curated mappings missing from the annotation file.
    manual_replacememnts = {
        'D0EX67':'b1107',
        'D4HZR9':'b2755',
        'P00452-2':'b2234',
        'P02919-2':'b0149',
        'Q2A0K9':'b2011',
        'Q5H772':'b1302',
        'Q5H776':'b1298',
        'Q5H777':'b1297',
        'Q6E0U3':'b3183'}
    df.replace(to_replace={'upid':manual_replacememnts}, inplace=True)

    df.set_index('upid', inplace=True)
    df.index.name = 'bnumber'

    # UniProt IDs that could not be mapped to any b-number.
    not_identified = ['B8LFD5','D8FH86','D9IX93','E1MTY0','P0CE60','P23477']
    df.drop(not_identified, axis=0, inplace=True)
    df.sort_index(inplace=True)
def genes_by_function(name):
    """Return the set of b-numbers annotated under KEGG category `name`.

    Reads the KO hierarchy and the eco gene->KO mapping from ../data and
    keeps genes whose KO falls under the requested hierarchy node.
    """
    tree = Tree.FromTMS(open('../data/KO_gene_hierarchy_general.tms', 'r'), 4)
    f_KEGG = tree.GetNode(name).children
    reader = csv.reader(open('../data/eco_mapping.csv', 'r'), delimiter='\t')
    # column 0 is the b-number, column 2 the KO identifier
    b_to_KEGG = {row[0]:row[2] for row in reader}
    return {b for b,ko in b_to_KEGG.iteritems() if ko in f_KEGG}
def convert_copies_fL_to_mmol_gCDW(copies_fL):
    """Convert an abundance in copies per fL to mmol per gram cell dry weight."""
    cell_density = 1100      # average cell density [g/L]
    dry_mass_fraction = 0.3  # dry weight as a fraction of total cell mass
    avogadro_mantissa = 6.02214129  # Avogadro's number without its 1e23 exponent
    # copies/fL -> mmol/L: * 1e15 fL/L / 6.022e23 mol^-1 * 1e3 mmol/mol,
    # which collapses to dividing by 6.022e5
    concentration_mmol_L = copies_fL / (avogadro_mantissa * 1e5)
    # mmol/L -> mmol/gCDW: divide by grams of dry weight per liter of cells
    return concentration_mmol_L / (cell_density * dry_mass_fraction)
def convert_mmol_gCDW_to_mg_gCDW(mmol_gCDW):
    """Convert mmol/gCDW abundances to mg/gCDW using per-gene molecular weights.

    Weights (g/mol == mg/mmol) are read from ../data/ecoli_genome_info.tsv
    and aligned on the gene index; genes with no known weight end up as 0
    rather than NaN.
    """
    protein_info = pd.DataFrame.from_csv('../data/ecoli_genome_info.tsv', sep='\t')
    protein_g_mol = protein_info['molecular_weight[Da]']
    # mmol/gCDW * mg/mmol -> mg/gCDW
    mg_gCDW = mmol_gCDW.mul(protein_g_mol,axis=0)
    mg_gCDW.replace(np.nan, 0, inplace=True)
    return mg_gCDW
def get_complex_molecular_weight(model):
    """Estimate a molecular weight [Da] for the enzyme(s) of every reaction.

    Subunit stoichiometries are parsed from ../data/enzyme_complexes.csv
    (strings like 'b0001(2) b0002(4)'); genes not listed in any complex are
    assumed to occur once. Each reaction's gene_reaction_rule is split into
    isozymes, an isozyme weight is the stoichiometry-weighted sum of its
    subunit weights, and the mean over isozymes is returned.

    Returns a dict {reaction id: weight}; NaN when a subunit weight is
    unknown.
    """
    complexes = pd.DataFrame.from_csv('../data/enzyme_complexes.csv')
    comp = list(complexes['Gene composition'].values)
    # e.g. 'b0001(2) b0002(4)' -> {'b0001': '2', 'b0002': '4'}
    comp = [dict(zip(re.findall(r"b[0-9]+", s),re.findall(r"\(([0-9]+)\)", s))) for s in comp]
    protein_info = pd.DataFrame.from_csv('../data/ecoli_genome_info.tsv', sep='\t')
    protein_g_mol = protein_info['molecular_weight[Da]']
    all_genes = defaultdict(list)
    for s in comp:
        for k,v in s.iteritems():
            all_genes[k].append(float(v))
    # default stoichiometry of 1 for genes not part of any listed complex
    for bnumber in protein_g_mol.index:
        if bnumber not in all_genes.keys():
            all_genes[bnumber].append(1.0)
    # genes appearing in several complexes get the mean of their counts
    subunit_comp = {k:np.mean(v) for k,v in all_genes.iteritems()}
    r_to_weights = {}
    for r in model.reactions:
        # NOTE(review): splitting on the bare substring 'or' (not ' or ')
        # works here only because gene ids are b-numbers; confirm before
        # reusing with other models.
        isozymes = r.gene_reaction_rule.split('or')
        isozymes = [re.findall(r"b[0-9]+", iso) for iso in isozymes]
        weights = [sum([subunit_comp[b]*protein_g_mol[b] if b in protein_g_mol.index else np.nan
                        for b in iso]) for iso in isozymes]
        r_to_weights[r.id] = np.mean(weights)
    return r_to_weights
def convert_copies_fL_to_mg_gCDW(E):
    """Convert copies/fL abundances to mg/gCDW (via mmol/gCDW)."""
    return convert_mmol_gCDW_to_mg_gCDW(convert_copies_fL_to_mmol_gCDW(E))
def get_umol_gCDW_min_from_pFVA(pFVA):
    """Extract per-condition maximal fluxes and convert mmol/gCDW/h to umol/gCDW/min.

    `pFVA` is indexed by a (condition, 'minimum'/'maximum') MultiIndex; only
    the 'maximum' rows are kept. Numerically-zero fluxes are masked to NaN.
    """
    conds = pFVA.index.levels[0]
    x = pFVA.loc[[(c, 'maximum') for c in conds]]
    x.set_index(conds, inplace=True)
    x = x[x>1e-10]  # treat numerically-zero fluxes as missing
    # mmol/h -> umol/min: * 1000 / 60
    return (x * 1000) / 60
def gene_to_flux_carrying_rxns(V,model,use_cache=False):
    """For every condition, map each gene to the flux-carrying reactions it serves.

    V : DataFrame of fluxes (reactions x conditions).
    Returns {condition: {gene id: set of reaction ids with flux > 0}}.
    The result is pickled to ../cache; with use_cache=True the cached
    pickle is returned without recomputation.
    """
    if use_cache:
        with open('../cache/genes_to_flux_carrying_reactions.p', 'rb') as fp:
            return pickle.load(fp)
    out = {}
    for c in V.columns:
        out[c] = {}
        vc = V[c]
        vc = vc[vc>0]  # keep only reactions that actually carry flux
        for g in model.genes:
            # NOTE: this local name shadows the module-level `rxns` dict
            rxns = {r.id for r in list(g.reactions)} & set(vc.index)
            if len(rxns)>0:
                out[c][g.id] = rxns
    with open('../cache/genes_to_flux_carrying_reactions.p', 'wb') as fp:
        pickle.dump(out, fp)
    return out
def convert_SA_to_kcat(SA, MW):
    """Convert specific activity to kcat.

    MW is in units of kDa; dividing the per-minute product by 60 yields a
    per-second turnover number.
    """
    per_minute = SA.mul(MW)
    return per_minute / 60
def flux_carrying_reactions_to_enzymes(V,E,model,use_cache=False):
    """For each condition, map flux-carrying reactions to their genes.

    Returns {condition: {reaction id: {gene id: n}}} where n is the number
    of flux-carrying reactions the gene participates in (used downstream to
    split a gene's abundance across its reactions). The result is pickled;
    use_cache=True returns the cached pickle instead.
    """
    if use_cache:
        with open('../cache/flux_carrying_reactions_to_enzymes.p', 'rb') as fp:
            return pickle.load(fp)
    try:
        V = V.drop('flux_counter')
    except ValueError:
        # NOTE(review): newer pandas raises KeyError here, so this guard may
        # never trigger -- confirm against the pandas version in use.
        print "flux couter already removed"
    mapper = {}
    for c in V.columns:
        mapper[c] = {}
        #use only flux carrying reactions in a given condition
        vc = V[c]
        vc = vc[vc>0]
        reactions = map(str,model.reactions)
        for rid in vc.index:
            if rid not in reactions:
                continue
            r = model.reactions.get_by_id(rid)
            genes = {g.id:g for g in r.genes}
            # annoing gene in the model - just ignore the reaction it carries
            if 's0001' in genes: continue
            mapper[c][r.id] = {}
            # the enumerate index i is unused
            for i, (gid, g) in enumerate(genes.iteritems()):
                rxns = {r.id for r in list(g.reactions)} & set(vc.index)
                mapper[c][rid][gid] = float(len(rxns))
    with open('../cache/flux_carrying_reactions_to_enzymes.p', 'wb') as fp:
        pickle.dump(mapper, fp)
    return mapper
def specific_activity(V,E,model):
    """Per-reaction specific activity: flux divided by attributed enzyme mass.

    Each gene's abundance is split evenly over the flux-carrying reactions
    it serves (counts come from flux_carrying_reactions_to_enzymes).
    Returns a DataFrame (reactions x conditions); NaN when the attributed
    enzyme weight is non-positive or not finite.
    """
    mapper = flux_carrying_reactions_to_enzymes(V,E,model)
    V = V.to_dict()
    E = E.to_dict()
    SA = {}
    for c,reactions in V.iteritems():
        SA[c] = {}
        for r,v in reactions.iteritems():
            if r in mapper[c]:
                genes = mapper[c][r]
                abundance = E[c]
                # each gene contributes its abundance divided by the number
                # of reactions it serves in this condition
                weight = sum([abundance[e] / genes[e] for e in genes])
                if np.isfinite(weight) and weight > 0:
                    SA[c][r] = V[c][r] / weight
                else:
                    SA[c][r] = np.nan
    SA = pd.DataFrame.from_dict(SA)
    return SA
def enzyme_capacity_usage(SA):
    """Fraction of each enzyme's maximal observed rate used per condition.

    The maximum of each row (reaction) across conditions serves as the
    enzyme's capacity; every entry is normalized by that row maximum.
    """
    max_rate_per_reaction = SA.max(axis=1)
    return SA.div(max_rate_per_reaction, axis=0)
def metabolic_capacity(V,E,model):
    """Total abundance of flux-carrying enzymes, per condition (pd.Series)."""
    active_genes = gene_to_flux_carrying_rxns(V, model)
    totals = {c: E.loc[active_genes[c].keys()][c].sum() for c in V.columns}
    return pd.Series(totals)
def metabolic_capacity_usage(V,E,model):
    """Overall fraction of the flux-carrying enzyme pool that is utilized.

    Computes the enzyme-mass-weighted mean of the per-reaction capacity
    usage, normalized by the total metabolic capacity of each condition.
    """
    capacity = metabolic_capacity(V,E,model)
    SA = specific_activity(V,E,model)
    ECU = enzyme_capacity_usage(SA)
    # infer the enzyme mass attributed to each reaction from flux/activity;
    # deliberately rebinds the parameter E (the raw E is no longer needed)
    E = (V/SA).loc[SA.index]
    return (ECU.mul(E)).sum() / capacity
def bootstrap_capacity_usage_error(V,E,model,iterations=10):
    """Bootstrap the capacity-usage estimate by resampling enzyme abundances.

    For each iteration, the nonzero abundances of every condition in E are
    resampled with replacement and the capacity usage is recomputed.
    Returns the standard deviation over iterations as a pd.Series indexed
    by condition.
    """
    UC = pd.DataFrame(index=range(iterations), columns=V.columns)
    for i in xrange(iterations):
        newE = pd.DataFrame(index=E.index, columns=E.columns)
        for c in V.columns:
            x = E[c]
            x = x[x > 0]  # resample only enzymes that were detected
            rand = np.random.choice(x.values, len(x), replace=True)
            newE[c][x.index] = rand
        newE.replace(np.nan, 0, inplace=True)
        # BUG FIX: the original called the undefined name get_capacity_usage();
        # the function defined in this module is metabolic_capacity_usage().
        UC.loc[i] = metabolic_capacity_usage(V, newE, model)
    return UC.std()
#def get_foldchange(V,E,gc):
#
# gr = gc['growth rate [h-1]']
#
# combs_all = [(i,j) for (i,j) in combinations(gc.index, 2) if gr[j] > gr[i]]
# delta_mu = pd.Series(data = map(lambda x: np.log2(gr[x[1]]/gr[x[0]]), combs_all),
# index = combs_all)
# delta_p = pd.DataFrame(index=reactions, columns=combs)
# delta_v = pd.DataFrame(index=reactions, columns=combs)
# for (i, j) in combs:
# delta_p[(i,j)] = np.log2(p[j] / p[i])
# delta_v[(i,j)] = np.log2(v[j] / v[i])
# return delta_p, delta_v, delta_mu
def get_surface_to_volume_ratio(length,width):
    """Surface area, volume and S/V ratio of a spherocylindrical (rod) cell.

    The cell is modelled as a cylinder of length (length - width) capped by
    two hemispheres of diameter `width` (i.e. cylinder + sphere); dimensions
    in um. Returns (surface [um^2], volume [um^3], surface/volume [1/um]).
    """
    radius = width / 2.0  # float literal: `width/2` truncates for ints on Python 2
    # BUG FIX: under Python 2, 4/3 evaluates to 1 (integer division), which
    # silently shrank the spherical-cap volume; use float literals instead.
    volume = np.pi*(length-width)*radius**2 + 4.0/3.0*np.pi*radius**3  # um^3
    surface = 2*np.pi*(length-width)*radius + 4*np.pi*radius**2  # um^2
    return surface, volume, surface/volume
def optimize_growth(model, cs):
    """Maximize wild-type biomass with `cs` as sole carbon source (in place).

    Falls back to glucose when the requested exchange reaction is missing
    from the model. The solution is stored on the model by model.optimize();
    nothing is returned.
    """
    rxns = {r.id:r for r in model.reactions}
    rxns['EX_glc_e'].lower_bound = 0 # uptake of carbon source reaction is initialized
    try:
        rxns['EX_' + cs + '_e'].lower_bound = -1000 # redefine sole carbon source uptake reaction in mmol/gr/h
    except KeyError:
        print "%s is not in the model, using glucose instead" %cs
        rxns['EX_glc_e'].lower_bound = -1000
    # objective: wild-type biomass (core biomass disabled)
    rxns['Ec_biomass_iJO1366_core_53p95M'].objective_coefficient = 0
    rxns['Ec_biomass_iJO1366_WT_53p95M'].objective_coefficient = 1
    model.optimize()
    return
def get_maximal_growth_rate(model, Vmax, condition):
    """Maximal growth rate when reactions are capped at measured Vmax values.

    Vmax is given in umol/gCDW/min and converted to mmol/gCDW/h before
    being applied as upper bounds; the original bounds are restored before
    returning. Returns the optimal biomass flux (model.solution.f).
    """
    Vmax = Vmax[condition].copy()
    Vmax = Vmax.dropna()
    Vmax = Vmax * 60 / 1000 # convert to units of mmol/gCDW/h
    rxns = {r.id:r for r in model.reactions}
    initial_bound = {}
    for r in Vmax.index:
        # remember the original bound so the model can be restored
        initial_bound[rxns[r]] = rxns[r].upper_bound
        rxns[r].upper_bound = Vmax[r]
    optimize_growth(model, gc['media_key'][condition])
    # restore the original bounds so the model is left unchanged
    for r,ub in initial_bound.iteritems():
        r.upper_bound = ub
    return model.solution.f
def get_rand_ECU(ECU,model):
    """Random capacity-usage matrix with per-condition gamma-distributed values.

    NOTE(review): np.random.gamma(shape, scale, size) is called with the
    (mean, std) of the observed values, which are not the gamma shape and
    scale parameters -- confirm this parameterization is intentional.
    """
    reactions = [str(r) for r in model.reactions]
    conds = ECU.columns
    rand_ECU = pd.DataFrame(columns=conds, index=reactions)
    for c in conds:
        tmp = ECU[c].dropna()
        rand_ECU[c] = np.random.gamma(tmp.mean(),tmp.std(),len(reactions))
    return rand_ECU
def perform_pFBA(condition):
cs = gc['media_key'].loc[condition]
gr = gc['growth rate [h-1]'].loc[condition]
m = create_cobra_model_from_sbml_file('../data/iJO1366.xml')
m.reactions.get_by_id('EX_glc_e').lower_bound = 0
convert_to_irreversible(m)
reac = dict([(r.id, r) for r in m.reactions])
try:
reac['EX_' + cs + '_e'].lower_bound = -1000 # redefine sole carbon source uptake reaction in mmol/gr/h
except KeyError:
raise 'media key not in model'
reac['Ec_biomass_iJO1366_core_53p95M'].objective_coefficient = 0
reac['Ec_biomass_iJO1366_WT_53p95M'].objective_coefficient = 1
reac['Ec_biomass_iJO1366_WT_53p95M'].upper_bound = gr
print "solving pFBA for %s" %condition
optimize_minimal_flux(m, already_irreversible=True)
return pd.Series(m.solution.x_dict)
#conditions = gc.dropna(subset=['media_key']).index
#gr = gc['growth rate [h-1]'][conditions]
#gr.sort()
#conditions = gr.index
#
#mmol_gCDW_h = pd.DataFrame(columns=conditions, index=rxns.keys())
#for c in conditions:
# mmol_gCDW_h[c] = perform_pFBA(c)
#mmol_gCDW_h.to_csv("../data/flux[mmol_gCDW_h].csv")
#
#if __name__ == "__main__":
# model_fname = "../data/iJO1366.xml"
# model = create_cobra_model_from_sbml_file(model_fname)
# convert_to_irreversible(model)
# reactions = map(lambda x: x.id, model.reactions)
# fluxes = perform_pFBA(model, 'glc', 0.5, 18.5)
'''
x = x.dropna()
w = w.dropna()
ix = x.index & w.index
x = x[ix].values
w = w[ix].values
Mw = np.zeros(1000)
for i in xrange(1000):
rand = np.random.choice(range(len(x)), len(x), replace=True)
newx = x[rand]
neww = w[rand]
Mw[i] = sum(newx*neww)/sum(neww)
return np.std(Mw)
'''
# print len(fva.keys())
# return fva
#map_proteomics(copies_cell_persist)
#map_proteomics(protein_info)
#map_proteomics(fg_cell_old)
#
#x = copies_cell_persist[new_conditions]
#y = copies_cell_persist['Protein molecular weight']
#fg_cell_persist = x.mul(y,axis=0) / (6.022*1e8)
#
#fg_cell = fg_cell_old.join(fg_cell_persist, how='outer')
#fg_fL = fg_cell.div(fL_cell)
#
#mg_gCDW = fg_fL[gr.index]/(1100/3)*1000 # cell density is 1100 g/L; DW fraction is 1/3
##mg_gCDW.to_csv('../data/mg_gCDW.csv')
##
#out = protein_info.join(mg_gCDW)
#out.to_csv('../data/protein_abundance[mg_gCDW].csv', sep='\t')
#plt.figure()
#ax = plt.axes()
#old = fg_cell_old.index
#new = copies_cell_persist.index
#venn2([old, new], set_labels=('Schmidt et al.', 'Persisters'),set_colors=('#4a6b8a','#801515'),ax=ax)
#plt.tight_layout()
#plt.savefig('../res/comparing coverage.svg')
| mit |
JeffsanC/uavs | src/rpg_svo/svo_analysis/src/svo_analysis/analyse_timing.py | 17 | 3476 | #!/usr/bin/python
import os
import numpy as np
import matplotlib.pyplot as plt
def analyse_timing(D, trace_dir):
    """Plot SVO processing-time statistics and save them as PDFs in trace_dir.

    D is a dict-like trace (e.g. a numpy record array) with per-measurement
    timing fields. Writes timing.pdf (total-time curve), timing_boxplot.pdf
    (pipeline stages) and timing_reprojection.pdf (reprojection sub-steps).
    """
    # identify measurements which result from normal frames and which from keyframes
    # (keyframe-only rows are assumed to have repr_n_mps < 0 -- TODO confirm)
    is_frame = np.argwhere(D['repr_n_mps'] >= 0)
    n_frames = len(is_frame)

    # set initial time to zero
    D['timestamp'] = D['timestamp'] - D['timestamp'][0]

    # ----------------------------------------------------------------------------
    # plot total time for frame processing
    avg_time = np.mean(D['tot_time'][is_frame])*1000;
    fig = plt.figure(figsize=(8, 3))
    ax = fig.add_subplot(111, ylabel='processing time [ms]', xlabel='time [s]')
    ax.plot(D['timestamp'][is_frame], D['tot_time'][is_frame]*1000, 'g-', label='total time [ms]')
    ax.plot(D['timestamp'][is_frame], np.ones(n_frames)*avg_time, 'b--', label=str('%(time).1fms mean time' % {'time': avg_time}))
    ax.legend()
    fig.tight_layout()
    fig.savefig(os.path.join(trace_dir,'timing.pdf'), bbox_inches="tight")

    # ----------------------------------------------------------------------------
    # plot boxplot
    fig = plt.figure(figsize=(6,2))
    ax = fig.add_subplot(111, xlabel='Processing time [ms]')
    ax.boxplot([
        D['tot_time'][is_frame]*1000,
        # D['t_local_ba'][is_kf]*1000,
        D['pose_optimizer'][is_frame]*1000 + D['point_optimizer'][is_frame]*1000,
        D['reproject'][is_frame]*1000,
        D['sparse_img_align'][is_frame]*1000,
        D['pyramid_creation'][is_frame]*1000
    ], 0,'', vert=0)
    boxplot_labels = [
        r'\textbf{Total Motion Estimation: %2.2fms}' % np.median(D['tot_time'][is_frame]*1000),
        # 'Local BA (KF only): %.2fms ' % np.median(D['local_ba'][is_kf]*1000),
        'Refinement: %2.2fms' % np.median(D['pose_optimizer'][is_frame]*1000 + D['point_optimizer'][is_frame]*1000),
        'Feature Alignment: %2.2fms' % np.median(D['reproject'][is_frame]*1000),
        'Sparse Image Alignment: %2.2fms' % np.median(D['sparse_img_align'][is_frame]*1000),
        'Pyramid Creation: %2.2fms' % np.median(D['pyramid_creation'][is_frame]*1000) ]
    # boxplot positions start at 1, hence the +1 for the tick locations
    ax.set_yticks(np.arange(len(boxplot_labels))+1)
    ax.set_yticklabels(boxplot_labels)
    fig.tight_layout()
    fig.savefig(os.path.join(trace_dir,'timing_boxplot.pdf'), bbox_inches="tight")

    # ----------------------------------------------------------------------------
    # plot boxplot reprojection
    fig = plt.figure(figsize=(6,2))
    ax = fig.add_subplot(111, xlabel='Processing time [ms]')
    ax.boxplot([ D['reproject'][is_frame]*1000,
                 D['feature_align'][is_frame]*1000,
                 D['reproject_candidates'][is_frame]*1000,
                 D['reproject_kfs'][is_frame]*1000 ], 0, '', vert=0)
    boxplot_labels = [r'\textbf{Total Reprojection: %2.2fms}' % np.median(D['reproject'][is_frame]*1000),
                      'Feature Alignment: %2.2fms' % np.median(D['feature_align'][is_frame]*1000),
                      'Reproject Candidates: %2.2fms' % np.median(D['reproject_candidates'][is_frame]*1000),
                      'Reproject Keyframes: %2.2fms' % np.median(D['reproject_kfs'][is_frame]*1000) ]
    ax.set_yticks(np.arange(len(boxplot_labels))+1)
    ax.set_yticklabels(boxplot_labels)
    fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'timing_reprojection.pdf'), bbox_inches="tight") | gpl-2.0 |
changbindu/rufeng-finance | src/tushare/tushare/internet/boxoffice.py | 7 | 7205 | # -*- coding:utf-8 -*-
"""
电影票房
Created on 2015/12/24
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
import pandas as pd
from tushare.stock import cons as ct
from tushare.util import dateu as du
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
import time
import json
def realtime_boxoffice(retry_count=3,pause=0.001):
    """
    Fetch real-time movie box-office data.
    Source: EBOT EntGroup box-office database.

    Parameters
    ------
        retry_count : int, default 3
            number of retries when network problems occur
        pause : int, default 0
            seconds to pause between repeated requests, to avoid
            problems caused by requesting too frequently

    return
    -------
        DataFrame
            BoxOffice     real-time box office (10k CNY)
            Irank         rank
            MovieName     film title
            boxPer        box-office share (%)
            movieDay      days since release
            sumBoxOffice  cumulative box office (10k CNY)
            time          time the data was fetched
    """
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            request = Request(ct.MOVIE_BOX%(ct.P_TYPE['http'], ct.DOMAINS['mbox'],
                                            ct.BOX, _random()))
            lines = urlopen(request, timeout = 10).read()
            if len(lines) < 15: #no data
                return None
        except Exception as e:
            print(e)
        else:
            js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
            df = pd.DataFrame(js['data2'])
            df = df.drop(['MovieImg','mId'], axis=1)
            df['time'] = du.get_now()
            return df
def day_boxoffice(date=None, retry_count=3, pause=0.001):
    """
    Fetch single-day movie box-office data.
    Source: EBOT EntGroup box-office database.

    Parameters
    ------
        date : date, defaults to the previous day
        retry_count : int, default 3
            number of retries when network problems occur
        pause : int, default 0
            seconds to pause between repeated requests

    return
    -------
        DataFrame
            AvgPrice      average ticket price
            AvpPeoPle     average attendance per screening
            BoxOffice     single-day box office (10k CNY)
            BoxOffice_Up  day-over-day change (%)
            IRank         rank
            MovieDay      days since release
            MovieName     film title
            SumBoxOffice  cumulative box office (10k CNY)
            WomIndex      word-of-mouth index
    """
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            if date is None:
                date = 0
            else:
                # the API takes an offset in days counted back from today
                date = int(du.diff_day(du.today(), date)) + 1
            request = Request(ct.BOXOFFICE_DAY%(ct.P_TYPE['http'], ct.DOMAINS['mbox'],
                                                ct.BOX, date, _random()))
            lines = urlopen(request, timeout = 10).read()
            if len(lines) < 15: #no data
                return None
        except Exception as e:
            print(e)
        else:
            js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
            df = pd.DataFrame(js['data1'])
            df = df.drop(['MovieImg', 'BoxOffice1', 'MovieID', 'Director', 'IRank_pro'], axis=1)
            return df
def month_boxoffice(date=None, retry_count=3, pause=0.001):
    """
    Fetch single-month movie box-office data.
    Source: EBOT EntGroup box-office database.

    Parameters
    ------
        date : month in format YYYY-MM, defaults to the previous month
        retry_count : int, default 3
            number of retries when network problems occur
        pause : int, default 0
            seconds to pause between repeated requests

    return
    -------
        DataFrame
            Irank         rank
            MovieName     film title
            WomIndex      word-of-mouth index
            avgboxoffice  average ticket price
            avgshowcount  average attendance per screening
            box_pro       share of the month's total box office
            boxoffice     monthly box office (10k CNY)
            days          days shown within the month
            releaseTime   release date
    """
    if date is None:
        date = du.day_last_week(-30)[0:7]
    elif len(date)>8:
        print(ct.BOX_INPUT_ERR_MSG)
        return
    date += '-01'  # the API expects a full YYYY-MM-01 date
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            request = Request(ct.BOXOFFICE_MONTH%(ct.P_TYPE['http'], ct.DOMAINS['mbox'],
                                                  ct.BOX, date))
            lines = urlopen(request, timeout = 10).read()
            if len(lines) < 15: #no data
                return None
        except Exception as e:
            print(e)
        else:
            js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
            df = pd.DataFrame(js['data1'])
            df = df.drop(['defaultImage', 'EnMovieID'], axis=1)
            return df
def day_cinema(date=None, retry_count=3, pause=0.001):
    """
    Fetch the daily box-office ranking of cinemas.
    Source: EBOT EntGroup box-office database.

    Parameters
    ------
        date : date, defaults to the previous day
        retry_count : int, default 3
            number of retries when network problems occur
        pause : int, default 0
            seconds to pause between repeated requests

    return
    -------
        DataFrame
            Attendance          seat occupancy
            AvgPeople           average attendance per screening
            CinemaName          cinema name
            RowNum              rank
            TodayAudienceCount  number of viewers that day
            TodayBox            box office that day
            TodayShowCount      screenings that day
            price               average ticket price (CNY)
    """
    if date is None:
        date = du.day_last_week(-1)
    data = pd.DataFrame()
    ct._write_head()
    # the ranking is paged; fetch the first 10 pages and concatenate them
    for x in range(1, 11):
        df = _day_cinema(date, x, retry_count,
                         pause)
        if df is not None:
            data = pd.concat([data, df])
    data = data.drop_duplicates()
    return data.reset_index(drop=True)
def _day_cinema(date=None, pNo=1, retry_count=3, pause=0.001):
    """Fetch one page (pNo) of the daily cinema ranking; None on failure."""
    ct._write_console()
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            request = Request(ct.BOXOFFICE_CBD%(ct.P_TYPE['http'], ct.DOMAINS['mbox'],
                                                ct.BOX, pNo, date))
            lines = urlopen(request, timeout = 10).read()
            if len(lines) < 15: #no data
                return None
        except Exception as e:
            print(e)
        else:
            js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
            df = pd.DataFrame(js['data1'])
            df = df.drop(['CinemaID'], axis=1)
            return df
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
| lgpl-3.0 |
ryanbressler/ClassWar | Clin/sklrf.py | 6 | 1280 | import sys
from sklearn.datasets import load_svmlight_file
from sklearn.ensemble import RandomForestClassifier
from time import time
import numpy as np
def dumptree(atree, fn):
    """Write a Graphviz .dot rendering of a fitted sklearn tree to file fn."""
    from sklearn import tree
    # use a context manager so the file is closed even if export_graphviz
    # raises (the original leaked the handle in that case)
    with open(fn, "w") as f:
        tree.export_graphviz(atree, out_file=f)
# def main():
# Train a 500-tree random forest on an svmlight-format dataset (argv[1])
# and report accuracy, either on the training data itself or on a held-out
# file given as argv[2].
fn = sys.argv[1]
X,Y = load_svmlight_file(fn)

rf_parameters = {
    "n_estimators": 500,
    "n_jobs": 1
}
clf = RandomForestClassifier(**rf_parameters)
# the classifier needs a dense array, not the scipy sparse matrix
X = X.toarray()

print clf
print "Starting Training"
t0 = time()
clf.fit(X, Y)
train_time = time() - t0
print "Training on %s took %s"%(fn, train_time)
print "Total training time (seconds): %s"%(train_time)

if len(sys.argv) == 2:
    # no test file given: report accuracy on the training data itself
    score = clf.score(X, Y)
    count = np.sum(clf.predict(X)==Y)
    print "Score: %s, %s / %s "%(score, count, len(Y))
else:
    # evaluate on the held-out svmlight file given as argv[2]
    fn = sys.argv[2]
    X,Y = load_svmlight_file(fn)
    X = X.toarray()
    score = clf.score(X, Y)
    count = np.sum(clf.predict(X)==Y)
    # per-class correct counts, used for a balanced (macro-averaged) accuracy
    c1 = np.sum(clf.predict(X[Y==1])==Y[Y==1] )
    c0 = np.sum(clf.predict(X[Y==0])==Y[Y==0] )
    l = len(Y)
    print "Error: %s"%(1-(float(c1)/float(sum(Y==1))+float(c0)/float(sum(Y==0)))/2.0)
    print "Testing Score: %s, %s / %s, %s, %s, %s "%(score, count, l, c1, c0, (float(c1)/float(sum(Y==1))+float(c0)/float(sum(Y==0)))/2.0)

# if __name__ == '__main__':
#     main()
| bsd-3-clause |
rgommers/statsmodels | statsmodels/graphics/tests/test_dotplot.py | 1 | 14629 | import numpy as np
from statsmodels.graphics.dotplots import dot_plot
import pandas as pd
from numpy.testing import dec
# If true, the output is written to a multi-page pdf file.
pdf_output = False

try:
    import matplotlib.pyplot as plt
    import matplotlib
    if matplotlib.__version__ < '1':
        raise  # bare raise jumps to the except below to disable matplotlib
    have_matplotlib = True
except:
    # NOTE(review): bare except deliberately swallows both the import error
    # and the version bail-out; have_matplotlib gates the tests via dec.skipif.
    have_matplotlib = False
def close_or_save(pdf, fig):
    """Append fig to the multi-page PDF when pdf_output is set, else close it."""
    if not pdf_output:
        plt.close(fig)
    else:
        pdf.savefig(fig)
@dec.skipif(not have_matplotlib)
def test_all():
    """Smoke-test dot_plot() across its option space.

    Each section builds one figure and hands it to close_or_save(), which
    either appends it to a multi-page PDF (pdf_output=True) or closes it.
    There are no numeric assertions -- the test passes when nothing raises.
    """
    if pdf_output:
        from matplotlib.backends.backend_pdf import PdfPages
        pdf = PdfPages("test_dotplot.pdf")
    else:
        pdf = None

    # Basic dotplot with points only
    plt.clf()
    points = range(20)
    ax = plt.axes()
    fig = dot_plot(points, ax=ax)
    ax.set_title("Basic horizontal dotplot")
    close_or_save(pdf, fig)

    # Basic vertical dotplot
    plt.clf()
    points = range(20)
    ax = plt.axes()
    fig = dot_plot(points, ax=ax, horizontal=False)
    ax.set_title("Basic vertical dotplot")
    close_or_save(pdf, fig)

    # Tall and skinny
    plt.figure(figsize=(4,12))
    ax = plt.axes()
    # NOTE(review): `vals` is assigned but unused here and in the next
    # section; the plots reuse `points` from above -- possibly `vals` was
    # meant to be plotted.
    vals = np.arange(40)
    fig = dot_plot(points, ax=ax)
    ax.set_title("Tall and skinny dotplot")
    ax.set_xlabel("x axis label")
    close_or_save(pdf, fig)

    # Short and wide
    plt.figure(figsize=(12,4))
    ax = plt.axes()
    vals = np.arange(40)
    fig = dot_plot(points, ax=ax, horizontal=False)
    ax.set_title("Short and wide dotplot")
    ax.set_ylabel("y axis label")
    close_or_save(pdf, fig)

    # Tall and skinny striped dotplot
    plt.figure(figsize=(4,12))
    ax = plt.axes()
    points = np.arange(40)
    fig = dot_plot(points, ax=ax, striped=True)
    ax.set_title("Tall and skinny striped dotplot")
    ax.set_xlim(-10, 50)
    close_or_save(pdf, fig)

    # Short and wide striped
    plt.figure(figsize=(12,4))
    ax = plt.axes()
    points = np.arange(40)
    fig = dot_plot(points, ax=ax, striped=True, horizontal=False)
    ax.set_title("Short and wide striped dotplot")
    ax.set_ylim(-10, 50)
    close_or_save(pdf, fig)

    # Basic dotplot with few points
    plt.figure()
    ax = plt.axes()
    points = np.arange(4)
    fig = dot_plot(points, ax=ax)
    ax.set_title("Basic horizontal dotplot with few lines")
    close_or_save(pdf, fig)

    # Basic dotplot with few points
    plt.figure()
    ax = plt.axes()
    points = np.arange(4)
    fig = dot_plot(points, ax=ax, horizontal=False)
    ax.set_title("Basic vertical dotplot with few lines")
    close_or_save(pdf, fig)

    # Manually set the x axis limits
    plt.figure()
    ax = plt.axes()
    points = np.arange(20)
    fig = dot_plot(points, ax=ax)
    ax.set_xlim(-10, 30)
    ax.set_title("Dotplot with adjusted horizontal range")
    close_or_save(pdf, fig)

    # Left row labels
    plt.clf()
    ax = plt.axes()
    lines = ["ABCDEFGH"[np.random.randint(0, 8)] for k in range(20)]
    points = np.random.normal(size=20)
    fig = dot_plot(points, lines=lines, ax=ax)
    ax.set_title("Dotplot with user-supplied labels in the left margin")
    close_or_save(pdf, fig)

    # Left and right row labels
    plt.clf()
    ax = plt.axes()
    points = np.random.normal(size=20)
    lines = ["ABCDEFGH"[np.random.randint(0, 8)] + "::" + str(k+1)
             for k in range(20)]
    fig = dot_plot(points, lines=lines, ax=ax, split_names="::")
    ax.set_title("Dotplot with user-supplied labels in both margins")
    close_or_save(pdf, fig)

    # Both sides row labels
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.88, 0.8])
    points = np.random.normal(size=20)
    lines = ["ABCDEFGH"[np.random.randint(0, 8)] + "::" + str(k+1)
             for k in range(20)]
    fig = dot_plot(points, lines=lines, ax=ax, split_names="::",
                   horizontal=False)
    txt = ax.set_title("Vertical dotplot with user-supplied labels in both margins")
    txt.set_position((0.5, 1.06))
    close_or_save(pdf, fig)

    # Custom colors and symbols
    plt.clf()
    ax = plt.axes([0.1, 0.07, 0.78, 0.85])
    points = np.random.normal(size=20)
    lines = np.kron(range(5), np.ones(4)).astype(np.int32)
    styles = np.kron(np.ones(5), range(4)).astype(np.int32)
    #marker_props = {k: {"color": "rgbc"[k], "marker": "osvp"[k],
    #                    "ms": 7, "alpha": 0.6} for k in range(4)}
    # python 2.6 compat, can be removed later
    marker_props = dict((k, {"color": "rgbc"[k], "marker": "osvp"[k],
                             "ms": 7, "alpha": 0.6}) for k in range(4))
    fig = dot_plot(points, lines=lines, styles=styles, ax=ax,
                   marker_props=marker_props)
    ax.set_title("Dotplot with custom colors and symbols")
    close_or_save(pdf, fig)

    # Basic dotplot with symmetric intervals
    plt.clf()
    ax = plt.axes()
    points = range(20)
    fig = dot_plot(points, intervals=np.ones(20), ax=ax)
    ax.set_title("Dotplot with symmetric intervals")
    close_or_save(pdf, fig)

    # Basic dotplot with symmetric intervals, pandas inputs.
    plt.clf()
    ax = plt.axes()
    points = pd.Series(range(20))
    intervals = pd.Series(np.ones(20))
    fig = dot_plot(points, intervals=intervals, ax=ax)
    ax.set_title("Dotplot with symmetric intervals (Pandas inputs)")
    close_or_save(pdf, fig)

    # Basic dotplot with nonsymmetric intervals
    plt.clf()
    ax = plt.axes()
    points = np.arange(20)
    intervals = [(1, 3) for i in range(20)]
    fig = dot_plot(points, intervals=intervals, ax=ax)
    ax.set_title("Dotplot with nonsymmetric intervals")
    close_or_save(pdf, fig)

    # Vertical dotplot with nonsymmetric intervals
    plt.clf()
    ax = plt.axes()
    points = np.arange(20)
    intervals = [(1, 3) for i in range(20)]
    fig = dot_plot(points, intervals=intervals, ax=ax, horizontal=False)
    ax.set_title("Vertical dotplot with nonsymmetric intervals")
    close_or_save(pdf, fig)

    # Dotplot with nonsymmetric intervals, adjust line properties
    plt.clf()
    ax = plt.axes()
    points = np.arange(20)
    intervals = [(1, 3) for x in range(20)]
    line_props = {0: {"color": "lightgrey",
                      "solid_capstyle": "round"}}
    fig = dot_plot(points, intervals=intervals, line_props=line_props, ax=ax)
    ax.set_title("Dotplot with custom line properties")
    close_or_save(pdf, fig)

    # Dotplot with two points per line and a legend
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    points = 5*np.random.normal(size=40)
    lines = np.kron(range(20), (1,1))
    intervals = [(1,3) for k in range(40)]
    styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
    styles = [["Cat", "Dog"][i] for i in styles]
    fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
                   ax=ax, stacked=True)
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Dotplot with two points per line")
    close_or_save(pdf, fig)

    # Dotplot with two points per line and a legend
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    fig = dot_plot(points, intervals=intervals, lines=lines,
                   styles=styles, ax=ax, stacked=True,
                   styles_order=["Dog", "Cat"])
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Dotplot with two points per line (reverse order)")
    close_or_save(pdf, fig)

    # Vertical dotplot with two points per line and a legend
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    points = 5*np.random.normal(size=40)
    lines = np.kron(range(20), (1,1))
    intervals = [(1,3) for k in range(40)]
    styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
    styles = [["Cat", "Dog"][i] for i in styles]
    fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
                   ax=ax, stacked=True, horizontal=False)
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Vertical dotplot with two points per line")
    close_or_save(pdf, fig)

    # Vertical dotplot with two points per line and a legend
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    styles_order = ["Dog", "Cat"]
    fig = dot_plot(points, intervals=intervals, lines=lines,
                   styles=styles, ax=ax, stacked=True,
                   horizontal=False, styles_order=styles_order)
    handles, labels = ax.get_legend_handles_labels()
    # reorder the legend handles to match styles_order
    lh = dict(zip(labels, handles))
    handles = [lh[l] for l in styles_order]
    leg = plt.figlegend(handles, styles_order, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Vertical dotplot with two points per line (reverse order)")
    close_or_save(pdf, fig)

    # Vertical dotplot with two points per line and a legend
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    points = 5*np.random.normal(size=40)
    lines = np.kron(range(20), (1,1))
    intervals = [(1,3) for k in range(40)]
    styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
    styles = [["Cat", "Dog"][i] for i in styles]
    fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
                   ax=ax, stacked=True, striped=True, horizontal=False)
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    plt.ylim(-20, 20)
    ax.set_title("Vertical dotplot with two points per line")
    close_or_save(pdf, fig)

    # Dotplot with color-matched points and intervals
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    points = 5*np.random.normal(size=40)
    lines = np.kron(range(20), (1,1))
    intervals = [(1,3) for k in range(40)]
    styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
    styles = [["Cat", "Dog"][i] for i in styles]
    marker_props = {"Cat": {"color": "orange"},
                    "Dog": {"color": "purple"}}
    line_props = {"Cat": {"color": "orange"},
                  "Dog": {"color": "purple"}}
    fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
                   ax=ax, stacked=True, marker_props=marker_props,
                   line_props=line_props)
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Dotplot with color-matched points and intervals")
    close_or_save(pdf, fig)

    # Dotplot with color-matched points and intervals
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    points = 5*np.random.normal(size=40)
    lines = np.kron(range(20), (1,1))
    intervals = [(1,3) for k in range(40)]
    styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
    styles = [["Cat", "Dog"][i] for i in styles]
    marker_props = {"Cat": {"color": "orange"},
                    "Dog": {"color": "purple"}}
    line_props = {"Cat": {"color": "orange"},
                  "Dog": {"color": "purple"}}
    fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
                   ax=ax, stacked=True, marker_props=marker_props,
                   line_props=line_props, horizontal=False)
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Dotplot with color-matched points and intervals")
    close_or_save(pdf, fig)

    # Dotplot with sections
    plt.clf()
    ax = plt.axes()
    points = range(30)
    lines = np.kron(range(15), (1,1)).astype(np.int32)
    styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
    sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
    sections = [["Axx", "Byy", "Czz"][k] for k in sections]
    fig = dot_plot(points, lines=lines, styles=styles, sections=sections, ax=ax)
    ax.set_title("Dotplot with sections")
    close_or_save(pdf, fig)

    # Vertical dotplot with sections
    plt.clf()
    ax = plt.axes([0.1,0.1,0.9,0.75])
    points = range(30)
    lines = np.kron(range(15), (1,1)).astype(np.int32)
    styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
    sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
    sections = [["Axx", "Byy", "Czz"][k] for k in sections]
    fig = dot_plot(points, lines=lines, styles=styles,
                   sections=sections, ax=ax, horizontal=False)
    txt = ax.set_title("Vertical dotplot with sections")
    txt.set_position((0.5, 1.08))
    close_or_save(pdf, fig)

    # Reorder sections
    plt.clf()
    ax = plt.axes()
    points = range(30)
    lines = np.kron(range(15), (1,1)).astype(np.int32)
    styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
    sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
    sections = [["Axx", "Byy", "Czz"][k] for k in sections]
    fig = dot_plot(points, lines=lines, styles=styles, sections=sections, ax=ax,
                   section_order=["Byy", "Axx", "Czz"])
    ax.set_title("Dotplot with sections in specified order")
    close_or_save(pdf, fig)

    # Reorder the lines.
    plt.figure()
    ax = plt.axes()
    points = np.arange(4)
    lines = ["A", "B", "C", "D"]
    line_order = ["B", "C", "A", "D"]
    fig = dot_plot(points, lines=lines, line_order=line_order, ax=ax)
    ax.set_title("Dotplot with reordered lines")
    close_or_save(pdf, fig)

    # Dotplot with different numbers of points per line
    plt.clf()
    ax = plt.axes([0.1, 0.1, 0.75, 0.8])
    points = 5*np.random.normal(size=40)
    lines = []
    ii = 0
    # grow `lines` in runs of random length 1-3 until there is at least one
    # label per point
    while len(lines) < 40:
        for k in range(np.random.randint(1, 4)):
            lines.append(ii)
        ii += 1
    styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
    styles = [["Cat", "Dog"][i] for i in styles]
    fig = dot_plot(points, lines=lines, styles=styles,
                   ax=ax, stacked=True)
    handles, labels = ax.get_legend_handles_labels()
    leg = plt.figlegend(handles, labels, "center right", numpoints=1,
                        handletextpad=0.0001)
    leg.draw_frame(False)
    ax.set_title("Dotplot with different numbers of points per line")
    close_or_save(pdf, fig)

    if pdf_output:
        pdf.close()
| bsd-3-clause |
RobertABT/heightmap | build/matplotlib/examples/user_interfaces/embedding_in_tk.py | 9 | 1419 | #!/usr/bin/env python
import matplotlib
matplotlib.use('TkAgg')
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import sys
if sys.version_info[0] < 3:
import Tkinter as Tk
else:
import tkinter as Tk
root = Tk.Tk()
root.wm_title("Embedding in TK")

# build a simple sine-wave figure to embed in the Tk window
f = Figure(figsize=(5,4), dpi=100)
a = f.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)

a.plot(t,s)

# a tk.DrawingArea
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)

# standard matplotlib navigation toolbar, packed below the canvas
toolbar = NavigationToolbar2TkAgg( canvas, root )
toolbar.update()
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def on_key_event(event):
    """Echo the pressed key, then delegate to matplotlib's default key bindings."""
    message = 'you pressed %s' % event.key
    print(message)
    key_press_handler(event, canvas, toolbar)
# route key presses on the canvas to the handler defined above
canvas.mpl_connect('key_press_event', on_key_event)
def _quit():
root.quit() # stops mainloop
root.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
button = Tk.Button(master=root, text='Quit', command=_quit)
button.pack(side=Tk.BOTTOM)
Tk.mainloop()
# If you put root.destroy() here, it will cause an error if
# the window is closed with the window manager.
| mit |
jongyeob/swpy | swpy/backup/dst.py | 1 | 7373 | '''
Author : Jongyeob Park (pjystar@gmail.com)
Seonghwan Choi (shchoi@kasi.re.kr)
'''
import os
import re
from swpy import utils
from swpy.utils import config, download as dl
from swpy.utils import datetime as dt
DATA_DIR = 'data/kyoto/dst/%Y/'
DATA_FILE = 'dst_%Y%m.txt'
DST_KEYS = ['datetime','dst']
LOG = utils.get_logger(__name__)
_RE_WEBFILE = ''
def initialize(**kwargs):
    # Overwrite module-level settings (e.g. DATA_DIR, DATA_FILE) from kwargs;
    # the actual merge logic lives in swpy.utils.config.set.
    utils.config.set(globals(), **kwargs)
def request(start, end=''):
    """Stage the monthly Dst data files covering [start, end] for retrieval.

    *start*/*end* are datetime-like strings understood by ``dt.parse``;
    an empty *end* means "same as start".
    """
    stime = dt.parse(start)
    etime = stime
    # Bug fix: the original used ``if not end``, which parsed the EMPTY
    # string when no end was given and silently ignored a supplied end
    # time.  Parse *end* only when it is actually provided.
    if end:
        etime = dt.parse(end)
    utils.filepath.request_files(DATA_DIR + DATA_FILE, stime, etime)
def download(start, end='', overwrite=False):
    # Thin convenience wrapper: the Kyoto web-page scraper is the default
    # download source (see download_cgi for the CGI alternative).
    download_web(start, end, overwrite)
def download_web(begin, end="", overwrite=False):
    '''
    Download from kyoto web pages.

    For every month in [begin, end] the final, provisional and real-time
    Dst products are tried in that order; the data table inside the first
    page found is extracted and written to the local monthly file
    (DATA_DIR + DATA_FILE).  An empty *end* means a single month.
    '''
    begin_dt = dt.trim(begin, 3, 'start')
    end_dt = begin_dt
    if end != '':
        end_dt = dt.trim(end, 3, 'end')

    #
    now_dt = begin_dt
    while ( now_dt <= end_dt ):
        contents = ''

        # NOTE(review): 'suffix' is assigned in each branch but never used
        # afterwards - possibly meant to become part of the file name; confirm.
        suffix = "fnl"
        src = "http://wdc.kugi.kyoto-u.ac.jp/dst_final/%(yyyy)04d%(mm)02d/index.html"%{"yyyy":now_dt.year, "mm":now_dt.month}
        contents = dl.download_http_file(src, overwrite=overwrite)

        if contents == '':
            suffix = "prv";
            src = "http://wdc.kugi.kyoto-u.ac.jp/dst_provisional/%(yyyy)04d%(mm)02d/index.html"%{"yyyy":now_dt.year, "mm":now_dt.month}
            contents = dl.download_http_file(src, overwrite=overwrite)

        if contents == '':
            suffix = "rt";
            src = "http://wdc.kugi.kyoto-u.ac.jp/dst_realtime/%(yyyy)04d%(mm)02d/index.html"%{"yyyy":now_dt.year, "mm":now_dt.month}
            contents = dl.download_http_file(src, overwrite=overwrite)

        if contents == '':
            # Nothing available for this month - advance to the next one.
            mr = dt.monthrange(now_dt.year, now_dt.month)
            now_dt = now_dt + dt.timedelta(days=mr[1])
            continue
        else:
            print ("Downloaded %s."%src)

        dstpath = dt.replace(DATA_DIR + DATA_FILE, now_dt)

        # Cut the data table out of the HTML: it lies between the '-->'
        # following <pre class="data"> and the last '<!--' before </pre>.
        i1 = contents.find("<pre class=\"data\">")
        i1 = i1 + "<pre class=\"data\">".__len__()
        i1 = contents.find("-->", i1)
        i2 = contents.find("</pre>")
        i2 = contents.rfind("<!--", i1, i2)

        if (i1 != -1 and i2 != -1):
            i1 = i1+3  # skip past the '-->' marker itself
            i2 = i2-1
            contents = contents[i1:i2]
            contents = contents.lstrip("\r\n")

            # write a new dst
            utils.filepath.mkpath(dstpath)
            fw = open(dstpath, "wt")
            fw.write(contents)
            fw.close()

        # Advance to the first day of the next month.
        mr = dt.monthrange(now_dt.year, now_dt.month)
        now_dt = now_dt + dt.timedelta(days=mr[1])
def download_cgi(begin, end='', overwrite=False):
    '''
    Download Dst data through the Kyoto WDC CGI interface
    (one WDC-format file per month covering [begin, end]).
    '''
    if not end:
        end = begin
    begin_dt, end_dt = dt.trim(begin, 3, 'start'), dt.trim(end, 3, 'end')

    now_dt = begin_dt
    while ( now_dt <= end_dt ):
        year = now_dt.strftime("%Y")
        # Century / decade / year digits are split out for the CGI query.
        url_cgi = "http://wdc.kugi.kyoto-u.ac.jp/cgi-bin/dstae-cgi?SCent=%(cent)s&STens=%(tens)s&SYear=%(year)s&SMonth=%(month)s&ECent=%(cent)s&ETens=%(tens)s&EYear=%(year)s&EMonth=%(month)s&Image+type=GIF&COLOR=COLOR&AE+Sensitivity=0&Dst+Sensitivity=0&Output=DST&Out+format=WDC&Email=code@swpy.org"%{
            "cent":year[0:2],
            "tens":year[2:3],
            "year":year[3:4],
            "month":now_dt.month}

        # download Dst file
        file_path = dt.replace(DATA_DIR + DATA_FILE, now_dt)
        utils.filepath.mkpath(file_path)
        rv = dl.download_http_file(url_cgi, file_path, overwrite=overwrite)

        # Bug fix: the original used Python 2 ``print`` *statements* here
        # while the rest of the module uses the parenthesised form (which
        # is a syntax error on Python 3).  The single-argument call below
        # behaves identically on Python 2 and 3.
        if (rv == False):
            print("Fail to download %s."%(file_path))
        else:
            print("Download %s."%(file_path))

        # Advance to the first day of the next month.
        mr = dt.monthrange(now_dt.year, now_dt.month)
        now_dt = now_dt + dt.timedelta(days=mr[1])
def load(begin, end=""):
    """Load Dst records between *begin* and *end* (an empty *end* means
    a single instant equal to *begin*).  Missing monthly files are logged
    and skipped; the combined records are filtered to the requested range.
    """
    if end == "":
        end = begin
    t0 = dt.parse(begin)
    t1 = dt.parse(end)

    collected = []
    for month in dt.datetime_range(t0, t1, months=1):
        path = dt.replace(DATA_DIR + DATA_FILE, month)
        if os.path.exists(path):
            collected.extend(load_webfile(path))
        else:
            LOG.warn("File is not exist - {}".format(path))

    return dt.filter(collected, t0, t1)
def load_webfile(file_path):
    '''
    Load a monthly Dst file saved by download_web() and return a list of
    [timestamp-string ("%Y%m%d_%H%M%S"), value] records.
    '''
    records = []
    with open(file_path, "r") as f:
        contents = f.read()

    lines = contents.splitlines()
    # Line index 2 of the saved table holds the month name and year
    # (e.g. "January 2015"), parsed with the "%B %Y" format.
    m,y = lines[2].split()
    my = dt.datetime.strptime("{} {}".format(m,y),"%B %Y")
    # NOTE(review): strptime raises ValueError on bad input and never
    # returns None, so this guard looks unreachable - confirm.
    if my is None:
        return records

    width = 4  # each hourly value is 4 characters wide
    for line in lines[6:]:  # data rows start after the header block
        if line == '' : #blank line
            continue

        cur = 2 # cursor: first two characters are the day of month
        day = int(line[0:cur])
        cur += 1
        # 24 hourly values per day, laid out as 3 groups of 8 columns with
        # one extra separator character after each group.
        for i in range(3):
            for j in range(8):
                ymdh = my.replace(day=day, hour=i*8 + j)
                val = line[cur:cur+width]
                if val == '9999':
                    # '9999' is presumably a missing-value sentinel.
                    # NOTE(review): this 'continue' also skips the cursor
                    # advance below, which would desynchronise parsing of
                    # the rest of the row unless '9999' only ever appears
                    # as trailing fill - confirm against the file format.
                    continue
                records.append([ymdh.strftime("%Y%m%d_%H%M%S"), float(val)])
                cur += width
            cur += 1
    return records
def load_cgifile(file_path):
    '''
    Load a WDC-format Dst file saved by download_cgi() and return a list
    of [timestamp-string, version, value] records (one per hour).
    '''
    records = []
    with open(file_path, "r") as f:
        contents = f.read()

    for line in contents.splitlines():
        # Index name: only 'DST' records carry the data we want.
        if (line[0:3] != "DST"):
            continue

        # 3-4  : the last two digits of the year
        # 14-15: top two digits of the year (19 or space for 19xx, 20 from 2000)
        y = int(line[14:16] + line[3:5])
        if (y < 100): y=y+1900  # space-padded century -> assume 19xx
        m = int(line[5:7])   # month
        d = int(line[8:10])  # day
        version = int(line[13:14])  # data version digit

        # 24 hourly values, 4 characters each, starting at column 20.
        for h in range(0, 24):
            v1 = dt.datetime(y, m, d, h, 0, 0).strftime('%Y%m%d_%H%M%S')
            v2 = version
            v3 = int(line[20+h*4:20+h*4+4])
            records.append([v1,v2,v3])
    return records
def draw(dstdata, file_path=""):
    '''
    Plot the Dst index curve.

    dstdata   : sequence of Dst values (passed straight to pyplot.plot)
    file_path : when empty, show the figure interactively;
                otherwise save the figure to this path.
    Returns the matplotlib Figure.
    '''
    from matplotlib import pyplot as plt

    color = ['#3366cc', '#dc3912', '#ff9900', '#109618', '#990099']

    # Figure
    fig = plt.figure(facecolor='white')

    # ticks
    plt.rc('xtick.major', pad=12);
    plt.rc('xtick.major', size=6);
    plt.rc('ytick.major', pad=12);
    plt.rc('ytick.major', size=8);
    plt.rc('ytick.minor', size=4);

    # Title
    plt.title("Dst Index")

    # Plot
    plt.plot(dstdata, color=color[0])

    # Scale
    plt.yscale('linear')

    # Limitation - typical storm-time Dst range
    #plt.xlim(tick_dt[0], tick_dt[days-1])
    plt.ylim([-200, 50])

    # Labels for X and Y axis
    #plt.xlabel("%s ~ %s [UTC]"% (tick_dt[0][0:10],tick_dt[days-1][0:10]),fontsize=14)
    plt.ylabel("Dst Index")

    # Grid
    plt.grid(True)

    # Show or Save
    if (file_path == ""):
        plt.show()
    else:
        fig.savefig(file_path)

    return fig
| gpl-2.0 |
fspaolo/scikit-learn | examples/mixture/plot_gmm_sin.py | 12 | 2726 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 gaussian components, finds too-small components and very
little structure. The fits by the dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools

import numpy as np
from scipy import linalg
import pylab as pl
import matplotlib as mpl

from sklearn import mixture
from sklearn.externals.six.moves import xrange

# Number of samples per component
n_samples = 100

# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples

for i in xrange(X.shape[0]):
    x = i * step - 6
    X[i, 0] = x + np.random.normal(0, 0.1)
    X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))

color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])

# Fit three models on the same data and stack their plots vertically.
for i, (clf, title) in enumerate([
        (mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
         "Expectation-maximization"),
        (mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
                       n_iter=100),
         "Dirichlet Process,alpha=0.01"),
        (mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
                       n_iter=100),
         "Dirichlet Process,alpha=100.")]):
    clf.fit(X)
    splot = pl.subplot(3, 1, 1 + i)
    Y_ = clf.predict(X)
    # NOTE(review): the inner loop reuses (shadows) the outer loop
    # variable 'i'; harmless here because the outer 'i' is not read
    # again afterwards, but fragile.
    for i, (mean, covar, color) in enumerate(zip(
            clf.means_, clf._get_covars(), color_iter)):
        v, w = linalg.eigh(covar)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        pl.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)

        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180 * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)

    pl.xlim(-6, 4 * np.pi - 6)
    pl.ylim(-5, 5)
    pl.title(title)
    pl.xticks(())
    pl.yticks(())

pl.show()
| bsd-3-clause |
TitasNandi/Summer_Project | yodaqa/data/ml/fbpath/fbpath_train_logistic.py | 3 | 2964 | #!/usr/bin/python
#
# Train a Naive Bayes classifier to predict which Freebase
# property paths would match answers given the question features.
#
# Usage: fbpath_train_logistic.py TRAIN.JSON MODEL.JSON
import json
import numpy as np
from fbpathtrain import VectorizedData
import random
import re
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
import sys
import time
def dump_cfier(cfier, Xdict, Ydict):
    """Print the fitted one-vs-rest model as a JSON-like map.

    For every class whose estimator has at least one non-zero coefficient,
    emit a line mapping the fbpath label to its feature weights; the
    intercept is stored under the special key '_'.
    """
    print('/// Model is %s' % (re.sub('\n\\s*', ' ', str(cfier)),))
    print('{')
    last_cls = cfier.classes_[-1]
    for cls, est in zip(cfier.classes_, cfier.estimators_):
        coefs = est.coef_[0]
        # Keep only the features with a non-zero weight.
        weights = {Xdict.feature_names_[feat]: coefs[feat]
                   for feat in np.nonzero(coefs != 0)[0]}
        if not weights:
            continue  # classifier degenerated to the intercept alone - skip
        weights['_'] = est.intercept_[0]
        trailer = ',' if cls != last_cls else ''
        print(' "%s": %s%s' % (Ydict.classes_[cls], json.dumps(weights), trailer))
    print('}')
if __name__ == "__main__":
    # Usage: fbpath_train_logistic.py TRAIN.JSON VAL.JSON
    trainfile, valfile = sys.argv[1:]

    # Seed always to the same number to get reproducible builds
    # TODO: Make this configurable on the command line or in the environment
    random.seed(17151713)

    print('/// The weights of individual question features for each fbpath.')
    print('/// Missing features have weight zero. Classifiers with no features are skipped.')
    print('// These weights are output by data/ml/fbpath/fbpath-train-logistic.py as this:')
    print('//')

    ## Training
    with open(trainfile, 'r') as f:
        traindata = VectorizedData(json.load(f))
    print('// traindata: %d questions, %d features, %d fbpaths' % (
        np.size(traindata.X, axis=0), np.size(traindata.X, axis=1), np.size(traindata.Y, axis=1)))
    sys.stdout.flush()

    # class_weight='auto' produces reduced performance, val mrr 0.574 -> 0.527
    # (see the notebook)
    # We use L1 regularization mainly to minimize the output model size,
    # though it seems to yield better precision+recall too.
    # NOTE(review): time.clock() was removed in Python 3.8; this script
    # targets the Python 2 era (use time.process_time() on modern Python).
    t_start = time.clock()
    cfier = OneVsRestClassifier(LogisticRegression(penalty='l1'), n_jobs=4)
    cfier.fit(traindata.X, traindata.Y)
    t_end = time.clock()
    print('// training took %d seconds' % (t_end-t_start,))
    sys.stdout.flush()

    ## Benchmarking
    with open(valfile, 'r') as f:
        valdata = VectorizedData(json.load(f), traindata.Xdict, traindata.Ydict)
    print('// valdata: %d questions' % (np.size(valdata.X, axis=0),))
    sys.stdout.flush()
    val_score = valdata.cfier_score(cfier, lambda cfier, X: cfier.predict_proba(X))
    print('// val sklScore %.3f, qRecallAny %.3f, qRecallAll %.3f, pathPrec %.3f, [qScoreMRR %.3f]' % (
        val_score['sklScore'],
        val_score['qRecallAny'], val_score['qRecallAll'], val_score['pPrec'],
        val_score['qScoreMRR']))
    sys.stdout.flush()

    ## Data Dump
    dump_cfier(cfier, traindata.Xdict, traindata.Ydict)
| apache-2.0 |
igormarfin/trading-with-python | lib/functions.py | 76 | 11627 | # -*- coding: utf-8 -*-
"""
twp support functions
@author: Jev Kuznetsov
Licence: GPL v2
"""
from scipy import polyfit, polyval
import datetime as dt
#from datetime import datetime, date
from pandas import DataFrame, Index, Series
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def nans(shape, dtype=float):
    """Return an uninitialised array of *shape*/*dtype* with every element NaN."""
    out = np.empty(shape, dtype)
    out[...] = np.nan
    return out
def plotCorrelationMatrix(price, thresh = None):
    ''' plot a correlation matrix as a heatmap image
    inputs:
        price: prices DataFrame
        thresh: correlation threshold to use for checking, default None
    returns:
        the correlation DataFrame that was plotted (boolean mask of
        corr > thresh when a threshold is given)
    '''
    symbols = price.columns.tolist()
    R = price.pct_change()  # daily percentage returns
    correlationMatrix = R.corr()
    if thresh is not None:
        # Turn the matrix into a boolean mask: True where corr exceeds thresh.
        correlationMatrix = correlationMatrix > thresh
    plt.imshow(abs(correlationMatrix.values), interpolation='none')
    plt.xticks(range(len(symbols)), symbols)
    plt.yticks(range(len(symbols)), symbols)
    plt.colorbar()
    plt.title('Correlation matrix')
    return correlationMatrix
def pca(A):
    """ performs principal components analysis
        (PCA) on the n-by-p DataFrame A
        Rows of A correspond to observations, columns to variables.

        Returns :
          coeff : principal components, column-wise
          transform: A in principal component space
          latent : eigenvalues (sorted descending, largest first)
    """
    # computing eigenvalues and eigenvectors of covariance matrix
    M = (A - A.mean()).T  # subtract the mean (along columns)
    [latent, coeff] = np.linalg.eig(np.cov(M))  # attention: not always sorted
    idx = np.argsort(latent)  # sort eigenvalues
    idx = idx[::-1]  # reverse -> descending order, largest eigenvalue first
    coeff = coeff[:, idx]
    latent = latent[idx]

    # NOTE(review): the projection below uses the raw A, not the
    # mean-centred M - confirm that this offset is intentional.
    score = np.dot(coeff.T, A.T)  # projection of the data in the new space
    transform = DataFrame(index=A.index, data=score.T)

    return coeff, transform, latent
def pos2pnl(price, position, ibTransactionCost=False):
    """
    calculate pnl based on price and position

    Inputs:
    ---------
    price: series or dataframe of price
    position: number of shares at each time. Column names must be same as in price
    ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share

    Returns a portfolio DataFrame with 'cash', 'stock', 'tc' and 'total' columns.
    """
    delta = position.diff()  # shares traded at each timestep
    port = DataFrame(index=price.index)
    if isinstance(price, Series):  # no need to sum along 1 for series
        port['cash'] = (-delta*price).cumsum()
        port['stock'] = (position*price)
    else:  # dealing with DataFrame here
        port['cash'] = (-delta*price).sum(axis=1).cumsum()
        port['stock'] = (position*price).sum(axis=1)

    if ibTransactionCost:
        tc = -0.005*position.diff().abs()  # basic transaction cost
        tc[(tc>-1) & (tc<0)] = -1  # everything under 1$ will be ceil'd to 1$
        if isinstance(price, DataFrame):
            tc = tc.sum(axis=1)
        port['tc'] = tc.cumsum()
    else:
        port['tc'] = 0.

    port['total'] = port['stock']+port['cash']+port['tc']
    return port
def tradeBracket(price, entryBar, maxTradeLength, bracket):
    '''
    trade a symmetrical bracket on price series, return price delta and exit bar #

    Input
    ------
    price : series of price values
    entryBar: entry bar number
    maxTradeLength : max trade duration in bars
    bracket : allowed price deviation

    Returns (priceDelta, exitBar).
    '''
    lastBar = min(entryBar+maxTradeLength, len(price)-1)
    p = price[entryBar:lastBar]-price[entryBar]  # deltas relative to entry price
    idxOutOfBound = np.nonzero(abs(p)>bracket)  # find indices where price comes out of bracket

    if idxOutOfBound[0].any():  # found match: exit at the first breach
        priceDelta = p[idxOutOfBound[0][0]]
        exitBar = idxOutOfBound[0][0]+entryBar
    else:  # all in bracket, exiting based on time
        # NOTE(review): the slice above excludes lastBar itself, so on a
        # time-based exit priceDelta is taken at lastBar-1 while exitBar
        # reports lastBar - looks like an off-by-one; confirm intent.
        priceDelta = p[-1]
        exitBar = lastBar

    return priceDelta, exitBar
def estimateBeta(priceY, priceX, algo='standard'):
    '''
    estimate stock Y vs stock X beta using iterative linear
    regression. Outliers outside 3 sigma boundary are filtered out

    Parameters
    --------
    priceX : price series of x (usually market)
    priceY : price series of y (estimate beta of this price)
    algo   : 'returns'  - iterative outlier-filtered fit on pct returns
             'log'      - single linear fit on log prices
             'standard' - cov/var of log returns (default)

    Returns
    --------
    beta : stockY beta relative to stock X
    '''
    X = DataFrame({'x':priceX,'y':priceY})
    if algo=='returns':
        ret = (X/X.shift(1)-1).dropna().values  # pct returns, NaNs dropped
        #print len(ret)
        x = ret[:,0]
        y = ret[:,1]

        # filter high values: keep only the 20th-80th percentile band of x
        low = np.percentile(x,20)
        high = np.percentile(x,80)
        iValid = (x>low) & (x<high)
        x = x[iValid]
        y = y[iValid]

        # Iteratively refit, dropping points more than 3 sigma from the
        # regression line, until no outliers remain (max 10 passes).
        iteration = 1
        nrOutliers = 1
        while iteration < 10 and nrOutliers > 0 :
            (a,b) = polyfit(x,y,1)
            yf = polyval([a,b],x)
            #plot(x,y,'x',x,yf,'r-')
            err = yf-y
            idxOutlier = abs(err) > 3*np.std(err)
            nrOutliers =sum(idxOutlier)
            beta = a
            #print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
            x = x[~idxOutlier]
            y = y[~idxOutlier]
            iteration += 1

    elif algo=='log':
        x = np.log(X['x'])
        y = np.log(X['y'])
        (a,b) = polyfit(x,y,1)
        beta = a

    elif algo=='standard':
        ret =np.log(X).diff().dropna()  # log returns
        beta = ret['x'].cov(ret['y'])/ret['x'].var()

    else:
        raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'")

    return beta
def estimateVolatility(ohlc, N=10, algo='YangZhang'):
    """
    Volatility estimation
    Possible algorithms: ['YangZhang', 'CC']

    ohlc : DataFrame with 'open', 'high', 'low', 'close' columns
    N    : rolling window length in bars
    Returns annualised volatility in percent (252 trading days assumed).

    NOTE(review): pd.rolling_sum was removed in pandas >= 0.18, so this
    only runs against the old pandas API (modern equivalent:
    Series.rolling(N).sum()).
    """
    cc = np.log(ohlc.close/ohlc.close.shift(1))  # close-to-close log return
    if algo == 'YangZhang':  # Yang-Zhang volatility
        ho = np.log(ohlc.high/ohlc.open)
        lo = np.log(ohlc.low/ohlc.open)
        co = np.log(ohlc.close/ohlc.open)

        oc = np.log(ohlc.open/ohlc.close.shift(1))  # overnight log return
        oc_sq = oc**2

        cc_sq = cc**2

        rs = ho*(ho-co)+lo*(lo-co)  # Rogers-Satchell term

        close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0))
        open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0))
        window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0))

        # 0.164333 / 0.835667 are the estimator's empirical weighting constants.
        result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252)

        result[:N-1] = np.nan  # window not yet full -> undefined

    elif algo == 'CC':  # standard close-close estimator
        result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N))

    else:
        raise ValueError('Unknown algo type.')

    return result*100
def rank(current, past):
    """Relative rank in [0, 1]: fraction of *past* values strictly below *current*."""
    beaten = (current > past).sum()
    return beaten / float(past.count())
def returns(df):
    """Simple (arithmetic) returns: price[t] / price[t-1] - 1."""
    previous = df.shift(1)
    return df / previous - 1
def logReturns(df):
    """Log returns: ln(price[t]) - ln(price[t-1])."""
    logged = np.log(df)
    return logged.diff()
def dateTimeToDate(idx):
    """Convert an iterable of datetimes to a list of plain dates."""
    return [stamp.date() for stamp in idx]
def readBiggerScreener(fName):
    ''' import data from Bigger Capital screener

    Returns a DataFrame with the CSV's header as columns; each cell is
    parsed as float where possible, kept as string otherwise.

    NOTE(review): opening in 'rb' and feeding bytes to csv.reader only
    works on Python 2; Python 3 needs text mode with newline="".
    '''
    with open(fName,'rb') as f:
        reader = csv.reader(f)
        rows = [row for row in reader]

    header = rows[0]
    # One column-list per header entry, filled row by row.
    data = [[] for i in range(len(header))]
    for row in rows[1:]:
        for i,elm in enumerate(row):
            try:
                data[i].append(float(elm))  # numeric cell
            except Exception:
                data[i].append(str(elm))    # fall back to raw string

    return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header]
def sharpe(pnl):
    """Annualised Sharpe ratio of a daily pnl series (250 trading days)."""
    annualisation = np.sqrt(250)
    return annualisation * pnl.mean() / pnl.std()
def drawdown(s):
    """
    calculate max drawdown and duration

    Input:
        s, price or cumulative pnl curve $ (pandas Series or numpy array)
    Returns:
        drawdown : vector of drawdown values
        duration : vector of drawdown duration (bars since the last peak)
    """
    # convert to array if got pandas series, 10x speedup
    if isinstance(s, pd.Series):
        idx = s.index
        s = s.values
        returnSeries = True
    else:
        returnSeries = False

    if s.min() < 0:  # offset if signal minimum is less than zero
        s = s - s.min()

    # Running maximum of the curve so far.  Bug fix: the original loop
    # initialised highwatermark[0] to 0 instead of s[0], so a drawdown
    # measured from the very first sample was under-reported whenever the
    # series started at its peak.  np.maximum.accumulate seeds the running
    # max with s[0] and is also O(n) in C.
    highwatermark = np.maximum.accumulate(s)
    drawdown = highwatermark - s

    # Duration: consecutive bars spent below the running maximum.
    drawdowndur = np.zeros(len(s))
    for t in range(1, len(s)):
        drawdowndur[t] = (0 if drawdown[t] == 0 else drawdowndur[t-1] + 1)

    if returnSeries:
        return pd.Series(index=idx, data=drawdown), pd.Series(index=idx, data=drawdowndur)
    else:
        return drawdown, drawdowndur
def profitRatio(pnl):
    '''
    calculate profit ratio as sum(pnl)/drawdown
    Input: pnl  - daily pnl, Series or DataFrame

    Returns a scalar for a Series, or a per-column Series for a DataFrame.
    '''
    def processVector(pnl):  # process a single column
        s = pnl.fillna(0)
        dd = drawdown(s)[0]  # drawdown curve; ratio uses its maximum
        p = s.sum()/dd.max()
        return p

    if isinstance(pnl, Series):
        return processVector(pnl)
    elif isinstance(pnl, DataFrame):
        p = Series(index=pnl.columns)
        for col in pnl.columns:
            p[col] = processVector(pnl[col])
        return p
    else:
        raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl)))
def candlestick(df, width=0.5, colorup='b', colordown='r'):
    ''' plot a candlestick chart of a dataframe

    df must have 'open', 'high', 'low' and 'close' columns; the DataFrame
    index is used as the x axis.  Rising candles (close > open) are drawn
    in *colorup*, falling ones in *colordown*.
    '''
    O = df['open'].values
    H = df['high'].values
    L = df['low'].values
    C = df['close'].values

    fig = plt.gcf()
    ax = plt.axes()
    #ax.hold(True)

    X = df.index

    # plot high and low: thin vertical bars are the candle wicks
    ax.bar(X, height=H-L, bottom=L, width=0.1, color='k')

    idxUp = C>O  # rising candles: body from open up to close
    ax.bar(X[idxUp], height=(C-O)[idxUp], bottom=O[idxUp], width=width, color=colorup)

    idxDown = C<=O  # falling candles: body from close up to open
    ax.bar(X[idxDown], height=(O-C)[idxDown], bottom=C[idxDown], width=width, color=colordown)

    try:
        fig.autofmt_xdate()
    except Exception:  # pragma: no cover
        pass

    ax.grid(True)
    #ax.bar(x,height=H-L,bottom=L,width=0.01,color='k')
def datetime2matlab(t):
    """Convert a Python datetime into a MATLAB serial date number
    (days since year 0; MATLAB's epoch is 366 days before Python's ordinal 1)."""
    SECONDS_PER_DAY = 24.0 * 60.0 * 60.0
    midnight = dt.datetime(t.year, t.month, t.day, 0, 0, 0)
    day_fraction = (t - midnight).seconds / SECONDS_PER_DAY
    matlab_ordinal = (t + dt.timedelta(days=366)).toordinal()
    return matlab_ordinal + day_fraction
def getDataSources(fName = None):
    ''' return data sources directories for this machine.
    directories are defined in datasources.ini or provided filepath

    The ini file has one section per host name; raises NameError when the
    current host has no section.  Returns a {option: path} dict.
    '''
    import socket
    from ConfigParser import ConfigParser  # Python 2 name; 'configparser' on Python 3

    pcName = socket.gethostname()
    p = ConfigParser()
    p.optionxform = str  # keep option names case-sensitive
    if fName is None:
        fName = 'datasources.ini'
    p.read(fName)

    if pcName not in p.sections():
        raise NameError('Host name section %s not found in file %s' %(pcName,fName))

    dataSources = {}
    for option in p.options(pcName):
        dataSources[option] = p.get(pcName,option)

    return dataSources
if __name__ == '__main__':
    # Quick visual smoke test: draw a three-bar candlestick chart.
    df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]})
    plt.clf()
    candlestick(df)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.