repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
zaxtax/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 52 | 17482 | from scipy import sparse
import numpy as np
from scipy import sparse
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_almost_equal
from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0)
assert_raises_regexp(ValueError,
"No inliers.*residual_threshold.*0\.0",
ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# XXX: Remove in 0.20
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_residual_loss():
loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
loss_mono = lambda y_true, y_pred : np.abs(y_true - y_pred)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.loss = loss_mono
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss="squared_loss")
ransac_estimator3.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator3.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
# e = 0%, min_samples = 2
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# stop_probability edge cases: 0 yields 0 trials, 1 yields infinitely many
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
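# Hedged illustration, not part of the original test module: the expected
# values above follow the standard RANSAC trial-count formula
#     N = ceil(log(1 - p) / log(1 - w ** m))
# with inlier ratio w, minimal sample size m and success probability p.
# The helper name below is made up for this sketch.
def _illustrate_dynamic_max_trials(w=0.7, m=2, p=0.99):
    import math
    # e = 30%, min_samples = 2 row of the table above: expect 7 trials
    return int(math.ceil(math.log(1 - p) / math.log(1 - w ** m)))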
def test_ransac_fit_sample_weight():
ransac_estimator = RANSACRegressor(random_state=0)
n_samples = y.shape[0]
weights = np.ones(n_samples)
ransac_estimator.fit(X, y, weights)
# sanity check
assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
# check that mask is correct
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
X_ = random_state.randint(0, 200, [10, 1])
y_ = np.ndarray.flatten(0.2 * X_ + 2)
sample_weight = random_state.randint(0, 10, 10)
outlier_X = random_state.randint(0, 1000, [1, 1])
outlier_weight = random_state.randint(0, 10, 1)
outlier_y = random_state.randint(-1000, 0, 1)
X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
np.repeat(outlier_y, outlier_weight, axis=0),
axis=0))
ransac_estimator.fit(X_flat, y_flat)
ref_coef_ = ransac_estimator.estimator_.coef_
sample_weight = np.append(sample_weight, outlier_weight)
X_ = np.append(X_, outlier_X, axis=0)
y_ = np.append(y_, outlier_y)
ransac_estimator.fit(X_, y_, sample_weight)
assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)
# check that if base_estimator.fit doesn't support
# sample_weight, raises error
base_estimator = Lasso()
ransac_estimator = RANSACRegressor(base_estimator)
assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
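# Hedged mini-example, not part of the original tests: the equivalence checked
# above -- integer sample_weight behaving like row repetition -- can be seen
# with a plain LinearRegression as well (helper name made up for illustration).
def _illustrate_sample_weight_as_repetition():
    X_small = np.array([[0.], [1.], [2.]])
    y_small = np.array([0., 1., 4.])
    w_small = np.array([3, 1, 2])
    weighted = LinearRegression().fit(X_small, y_small, sample_weight=w_small)
    repeated = LinearRegression().fit(np.repeat(X_small, w_small, axis=0),
                                      np.repeat(y_small, w_small))
    return np.allclose(weighted.coef_, repeated.coef_)  # True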
| bsd-3-clause |
daodaoliang/neural-network-animation | matplotlib/docstring.py | 23 | 3995 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import cbook
import sys
import types
class Substitution(object):
"""
A decorator to take a function's docstring and perform string
substitution on it.
This decorator should be robust even if func.__doc__ is None
(for example, if -OO was passed to the interpreter)
Usage: construct a docstring.Substitution with a sequence or
dictionary suitable for performing substitution; then
decorate a suitable function with the constructed object. e.g.
sub_author_name = Substitution(author='Jason')
@sub_author_name
def some_function(x):
"%(author)s wrote this function"
# note that some_function.__doc__ is now "Jason wrote this function"
One can also use positional arguments.
sub_first_last_names = Substitution('Edgar Allan', 'Poe')
@sub_first_last_names
def some_function(x):
"%s %s wrote the Raven"
"""
def __init__(self, *args, **kwargs):
assert not (len(args) and len(kwargs)), \
"Only positional or keyword args are allowed"
self.params = args or kwargs
def __call__(self, func):
func.__doc__ = func.__doc__ and func.__doc__ % self.params
return func
def update(self, *args, **kwargs):
"Assume self.params is a dict and update it with supplied args"
self.params.update(*args, **kwargs)
@classmethod
def from_params(cls, params):
"""
In the case where the params is a mutable sequence (list or
dictionary) and it may change before this class is called, one may
explicitly use a reference to the params rather than using *args or
**kwargs which will copy the values and not reference them.
"""
result = cls()
result.params = params
return result
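# Hedged usage sketch, not part of the original module: a runnable version of
# the keyword-substitution pattern described in the docstring above (the
# function and parameter names below are made up for illustration).
def _demo_substitution():
    sub_units = Substitution(units='points')

    @sub_units
    def set_size(value):
        """Set the size in %(units)s."""
        return value

    return set_size.__doc__  # "Set the size in points."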
class Appender(object):
"""
A function decorator that will append an addendum to the docstring
of the target function.
This decorator should be robust even if func.__doc__ is None
(for example, if -OO was passed to the interpreter).
Usage: construct a docstring.Appender with a string to be joined to
the original docstring. An optional 'join' parameter may be supplied
which will be used to join the docstring and addendum. e.g.
add_copyright = Appender("Copyright (c) 2009", join='\n')
@add_copyright
def my_dog(has='fleas'):
"This docstring will have a copyright below"
pass
"""
def __init__(self, addendum, join=''):
self.addendum = addendum
self.join = join
def __call__(self, func):
docitems = [func.__doc__, self.addendum]
func.__doc__ = func.__doc__ and self.join.join(docitems)
return func
def dedent(func):
"Dedent a docstring (if present)"
func.__doc__ = func.__doc__ and cbook.dedent(func.__doc__)
return func
def copy(source):
"Copy a docstring from another source function (if present)"
def do_copy(target):
if source.__doc__:
target.__doc__ = source.__doc__
return target
return do_copy
# create a decorator that will house the various documentation that
# is reused throughout matplotlib
interpd = Substitution()
def dedent_interpd(func):
"""A special case of the interpd that first performs a dedent on
the incoming docstring"""
if isinstance(func, types.MethodType) and not six.PY3:
func = func.im_func
return interpd(dedent(func))
def copy_dedent(source):
"""A decorator that will copy the docstring from the source and
then dedent it"""
# note the following is ugly because "Python is not a functional
# language" - GVR. Perhaps one day, functools.compose will exist.
# or perhaps not.
# http://mail.python.org/pipermail/patches/2007-February/021687.html
return lambda target: dedent(copy(source)(target))
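# Hedged usage sketch, not part of the original module: how the shared
# `interpd` object defined above is typically filled and then applied through
# `dedent_interpd` (the key and function names below are made up).
def _demo_interpd():
    interpd.update(Artist_kwdoc="alpha, zorder, visible, ...")

    @dedent_interpd
    def set_props(**kwargs):
        """
        Set artist properties.

        Supported keyword arguments: %(Artist_kwdoc)s
        """
        return kwargs

    return set_props.__doc__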
| mit |
ptkool/spark | python/pyspark/sql/context.py | 5 | 18889 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since, _NoValue
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import IntegerType, Row, StringType
from pyspark.sql.udf import UDFRegistration
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@property
def _conf(self):
"""Accessor for the JVM SQL-specific configurations"""
return self.sparkSession._jsparkSession.sessionState().conf()
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=_NoValue):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is set, return
defaultValue. If the key is not set and defaultValue is not set, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return self.sparkSession.udf
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@since(1.2)
def registerFunction(self, name, f, returnType=None):
"""An alias for :func:`spark.udf.register`.
See :meth:`pyspark.sql.UDFRegistration.register`.
.. note:: Deprecated in 2.3.0. Use :func:`spark.udf.register` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.register instead.",
DeprecationWarning)
return self.sparkSession.udf.register(name, f, returnType)
@since(2.1)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""An alias for :func:`spark.udf.registerJavaFunction`.
See :meth:`pyspark.sql.UDFRegistration.registerJavaFunction`.
.. note:: Deprecated in 2.3.0. Use :func:`spark.udf.registerJavaFunction` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.registerJavaFunction instead.",
DeprecationWarning)
return self.sparkSession.udf.registerJavaFunction(name, javaClassName, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temporary table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table or view as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database=u'', tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
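# Hedged usage note, not part of the original module: since Spark 2.0 the
# class above simply forwards to SparkSession, so the following two snippets
# are expected to behave the same (the data and column names are illustrative):
#
#   sqlContext = SQLContext(sc)
#   df = sqlContext.createDataFrame([(1, "a")], ["id", "val"])
#
#   spark = SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([(1, "a")], ["id", "val"])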
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", "field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
cdegroc/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 1 | 28259 | # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD Style.
"""Implementation of Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
import warnings
from ..externals.joblib import Parallel, delayed
from ..base import RegressorMixin
from ..base import ClassifierMixin
from ..feature_selection.selector_mixin import SelectorMixin
from .base import BaseSGD
from ..utils import atleast2d_or_csr, check_arrays
from ..utils.extmath import safe_sparse_dot
from ..utils import safe_asarray
from ..utils import deprecated
from .sgd_fast import plain_sgd as plain_sgd
from .sgd_fast import ArrayDataset, CSRDataset
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
def _make_dataset(X, y_i, sample_weight):
"""Returns Dataset object + intercept_decay"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = 0.01
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _tocsr(X):
"""Convert X to CSR matrix, preventing a copy if possible"""
if sp.isspmatrix_csr(X) and X.dtype == np.float64:
return X
else:
return sp.csr_matrix(X, dtype=np.float64)
class SGDClassifier(BaseSGD, ClassifierMixin, SelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD.
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Parameters
----------
loss : str, 'hinge' or 'log' or 'modified_huber'
The loss function to be used. Defaults to 'hinge'. The hinge loss is
a margin loss used by standard linear SVM models. The 'log' loss is
the loss of logistic regression models and can be used for
probability estimation in binary classifiers. 'modified_huber'
is another smooth loss that brings tolerance to outliers.
penalty : str, 'l2' or 'l1' or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
rho : float
The Elastic Net mixing parameter, with 0 < rho <= 1.
Defaults to 0.85.
fit_intercept: bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter: int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle: bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to False.
seed: int, optional
The seed of the pseudo random number generator to use when
shuffling the data.
verbose: integer, optional
The verbosity level
n_jobs: integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(t+t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
eta0 : double
The initial learning rate [default 0.01].
power_t : double
The exponent for inverse scaling learning rate [default 0.25].
class_weight : dict, {class_label : weight} or "auto" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "auto" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
`coef_` : array, shape = [1, n_features] if n_classes == 2 else [n_classes,
n_features]
Weights assigned to the features.
`intercept_` : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, class_weight=None, eta0=0.0,
fit_intercept=True, learning_rate='optimal', loss='hinge',
n_iter=5, n_jobs=1, penalty='l2', power_t=0.5, rho=0.85, seed=0,
shuffle=False, verbose=0, warm_start=False)
>>> print clf.predict([[-0.8, -1]])
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001,
rho=0.85, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, n_jobs=1, seed=0, learning_rate="optimal",
eta0=0.0, power_t=0.5, class_weight=None, warm_start=False):
super(SGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, rho=rho,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose, seed=seed,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
@property
@deprecated("to be removed in v0.12; use ``classes_`` instead.")
def classes(self):
return self.classes_
def _set_loss_function(self, loss):
"""Set concrete LossFunction."""
loss_functions = {
"hinge": Hinge(1.0),
"perceptron": Hinge(0.0),
"log": Log(),
"modified_huber": ModifiedHuber(),
}
try:
self.loss_function = loss_functions[loss]
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _set_class_weight(self, class_weight, classes, y):
"""Estimate class weights for unbalanced datasets."""
if class_weight is None:
# keep the old class_weight if none provided
class_weight = self.class_weight
if class_weight is None or len(class_weight) == 0:
# uniform class weights
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
elif class_weight == 'auto':
# inversely proportional to the number of samples in the class
weight = np.array([1.0 / np.sum(y == i) for i in classes],
dtype=np.float64, order='C')
weight *= classes.shape[0] / np.sum(weight)
else:
# user-defined dictionary
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
if not isinstance(class_weight, dict):
raise ValueError("class_weight must be dict, 'auto', or None,"
" got: %r" % class_weight)
for c in class_weight:
i = np.searchsorted(classes, c)
if classes[i] != c:
raise ValueError("Class label %d not present." % c)
else:
weight[i] = class_weight[c]
self._expanded_class_weight = weight
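# Hedged numeric illustration, not part of the original module, of the 'auto'
# branch above: with a 90/10 two-class split the raw weights are 1/90 and
# 1/10, and the rescaling makes them sum to n_classes:
#     weight = [1/90, 1/10]            -> [0.0111..., 0.1]
#     weight *= 2 / (1/90 + 1/10)      -> [0.2, 1.8]
# so the minority class ends up weighted 9x more than the majority class.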
def _partial_fit(self, X, y, n_iter, classes=None, sample_weight=None):
X = safe_asarray(X, dtype=np.float64, order="C")
y = np.asarray(y)
n_samples, n_features = X.shape
self._check_fit_data(X, y)
if self.classes_ is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None and self.classes_ is not None:
if not np.all(self.classes_ == np.unique(classes)):
raise ValueError("`classes` is not the same as on last call "
"to partial_fit.")
elif classes is not None:
self.classes_ = classes
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._set_class_weight(self.class_weight, self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init=None, intercept_init=None)
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, sample_weight, n_iter)
elif n_classes == 2:
self._fit_binary(X, y, sample_weight, n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
self.t_ += n_iter * n_samples
return self
def partial_fit(self, X, y, classes=None,
class_weight=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if class_weight != None:
warnings.warn("Using 'class_weight' as a parameter to the 'fit'"
"method is deprecated. Set it on initialization instead.",
DeprecationWarning)
self.class_weight = class_weight
return self._partial_fit(X, y, n_iter=1, classes=classes,
sample_weight=sample_weight)
def fit(self, X, y, coef_init=None, intercept_init=None,
class_weight=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if class_weight != None:
warnings.warn("Using 'class_weight' as a parameter to the 'fit'"
"method is deprecated. Set it on initialization instead.",
DeprecationWarning)
self.class_weight = class_weight
X = safe_asarray(X, dtype=np.float64, order="C")
y = np.asarray(y)
n_samples, n_features = X.shape
self._check_fit_data(X, y)
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
n_classes = classes.shape[0]
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
# Allocate datastructures from input arguments.
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
# Need to re-initialize in case of multiple call to fit.
self._init_t()
self._partial_fit(X, y, self.n_iter,
classes=classes,
sample_weight=sample_weight)
# fitting is over, we can now transform coef_ to fortran order
# for faster predictions
self._set_coef(self.coef_)
return self
def decision_function(self, X):
"""Predict signed 'distance' to the hyperplane (aka confidence score)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples] if n_classes == 2 else [n_samples,n_classes]
The signed 'distances' to the hyperplane(s).
"""
X = atleast2d_or_csr(X)
scores = safe_sparse_dot(X, self.coef_.T) + self.intercept_
if self.classes_.shape[0] == 2:
return np.ravel(scores)
else:
return scores
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples]
Array containing the predicted class labels.
"""
scores = self.decision_function(X)
if self.classes_.shape[0] == 2:
indices = np.array(scores > 0, dtype=np.int)
else:
indices = scores.argmax(axis=1)
return self.classes_[np.ravel(indices)]
def predict_proba(self, X):
"""Predict class membership probability
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples] if n_classes == 2 else [n_samples,
n_classes]
Contains the membership probabilities of the positive class.
"""
if len(self.classes_) != 2:
raise NotImplementedError("predict_(log_)proba only supported"
" for binary classification")
elif not isinstance(self.loss_function, Log):
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' (%s given)" % self.loss)
return 1.0 / (1.0 + np.exp(-self.decision_function(X)))
def _fit_binary(self, X, y, sample_weight, n_iter):
if sp.issparse(X):
X = _tocsr(X)
coef, intercept = fit_binary(self, 1, X, y, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
# need to be 2d
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
if sp.issparse(X):
X = _tocsr(X)
# Use joblib to fit OvA in parallel
result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, n_iter,
self._expanded_class_weight[i], 1.,
sample_weight)
for i in xrange(len(self.classes_)))
for i, (coef, intercept) in enumerate(result):
self.coef_[i] = coef
self.intercept_[i] = intercept
def _prepare_fit_binary(est, y, i):
"""Common initialization for _fit_binary_{dense,sparse}.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
if len(est.classes_) == 2:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.coef_[i]
intercept = est.intercept_[i]
return y_i, coef, intercept
def fit_binary(est, i, X, y, n_iter, pos_weight, neg_weight,
sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
y_i, coef, intercept = _prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
return plain_sgd(coef, intercept, est.loss_function,
est.penalty_type, est.alpha, est.rho,
dataset, n_iter, est.fit_intercept,
est.verbose, est.shuffle, est.seed,
pos_weight, neg_weight,
est.learning_rate_code, est.eta0,
est.power_t, est.t_, intercept_decay)
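# Hedged sketch, not part of the original module: the OVA scheme used by
# _fit_multiclass/fit_binary above trains one +1/-1 problem per class and
# predicts with the argmax over the per-class decision scores (helper name
# made up for illustration).
def _illustrate_ova_argmax():
    classes = np.array([0, 1, 2])
    # one row of decision_function scores per sample, one column per class
    scores = np.array([[-1.2, 0.3, 2.1],
                       [0.7, -0.5, -2.0]])
    return classes[scores.argmax(axis=1)]  # array([2, 0])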
class SGDRegressor(BaseSGD, RegressorMixin, SelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Parameters
----------
loss : str, 'squared_loss' or 'huber'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' is an epsilon insensitive
loss function for robust regression.
penalty : str, 'l2' or 'l1' or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
rho : float
The Elastic Net mixing parameter, with 0 < rho <= 1.
Defaults to 0.85.
fit_intercept: bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter: int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle: bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to False.
seed: int, optional
The seed of the pseudo random number generator to use when
shuffling the data.
verbose: integer, optional
The verbosity level.
p : float
Epsilon in the epsilon-insensitive huber loss function;
only if `loss=='huber'`.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(t+t0)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
`coef_` : array, shape = [n_features]
Weights assigned to the features.
`intercept_` : array, shape = [1]
The intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
SGDRegressor(alpha=0.0001, eta0=0.01, fit_intercept=True,
learning_rate='invscaling', loss='squared_loss', n_iter=5, p=0.1,
penalty='l2', power_t=0.25, rho=0.85, seed=0, shuffle=False,
verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
rho=0.85, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, p=0.1, seed=0, learning_rate="invscaling",
eta0=0.01, power_t=0.25, warm_start=False):
self.p = float(p)
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, rho=rho,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose, seed=seed,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=False)
def _set_loss_function(self, loss):
"""Get concrete LossFunction"""
loss_functions = {
"squared_loss": SquaredLoss(),
"huber": Huber(self.p),
}
try:
self.loss_function = loss_functions[loss]
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _partial_fit(self, X, y, n_iter, sample_weight=None,
coef_init=None, intercept_init=None):
X, y = check_arrays(X, y, sparse_format="csr", copy=False,
check_ccontiguous=True, dtype=np.float64)
y = np.asarray(y, dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._check_fit_data(X, y)
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
self._fit_regressor(X, y, sample_weight, n_iter)
self.t_ += n_iter * n_samples
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, n_iter=1, sample_weight=sample_weight)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
# Need to re-initialize in case of multiple call to fit.
self._init_t()
return self._partial_fit(X, y, self.n_iter, sample_weight,
coef_init, intercept_init)
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples]
Predicted target values per element in X.
"""
X = atleast2d_or_csr(X)
scores = safe_sparse_dot(X, self.coef_) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples]
Predicted target values per element in X.
"""
return self.decision_function(X)
def _fit_regressor(self, X, y, sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
self.coef_, intercept = plain_sgd(self.coef_,
self.intercept_[0],
self.loss_function,
self.penalty_type,
self.alpha, self.rho,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
self.seed,
1.0, 1.0,
self.learning_rate_code,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.intercept_ = np.atleast_1d(intercept)
| bsd-3-clause |
jplourenco/bokeh | examples/compat/mpl/subplots.py | 34 | 1826 | """
Edward Tufte uses this example from Anscombe to show 4 datasets of x
and y that have the same mean, standard deviation, and regression
line, but which are qualitatively different.
matplotlib fun for a rainy day
"""
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y1 = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
y2 = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
y3 = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
x4 = np.array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])
y4 = np.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])
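# Hedged check, not part of the original example: the four series really do
# share (to two decimals) the same mean and population variance, which is the
# point of the four panels plotted below.
for _ys in (y1, y2, y3, y4):
    assert abs(np.mean(_ys) - 7.50) < 0.01
    assert abs(np.var(_ys) - 3.75) < 0.01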
def fit(x):
return 3 + 0.5 * x
xfit = np.linspace(np.amin(x), np.amax(x), len(x))
plt.subplot(221)
plt.plot(x, y1, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.ylabel('I', fontsize=20)
plt.subplot(222)
plt.plot(x, y2, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), yticklabels=[], xticks=(0, 10, 20))
plt.ylabel('II', fontsize=20)
plt.subplot(223)
plt.plot(x, y3, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.ylabel('III', fontsize=20)
plt.setp(plt.gca(), yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.subplot(224)
xfit = np.array([np.amin(x4), np.amax(x4)])
plt.plot(x4, y4, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), yticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.ylabel('IV', fontsize=20)
# We create the figure in matplotlib and then we "pass it" to Bokeh
output_file("subplots.html")
show(mpl.to_bokeh())
| bsd-3-clause |
Myoldmopar/SolarCalculations | demos/SolarAngles.py | 1 | 6462 | #!/usr/bin/env python
# add the solar directory to the path so we can import it
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'solar'))
# import the datetime library so we construct proper datetime instances
from datetime import datetime
# import the solar library
import solar
# import the plotting library for demonstration -- pip install matplotlib should suffice
import matplotlib.pyplot as plt
import numpy
# calculate times in Stillwater, OK -- to demonstrate the effect of longitude not lining up with the stdmeridian
longitude = 97.05
stdmeridian = 90
latitude = 36.11
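# Rough sanity check (illustrative, not part of the original demo): the Earth
# rotates 15 degrees of longitude per hour, i.e. 4 minutes of time per degree,
# so the ~7 degree gap between Stillwater and its standard meridian should
# show up as roughly a half-hour offset between clock time and civil time.
longitude_offset_minutes = 4.0 * (longitude - stdmeridian)  # ~28.2 minutes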
x = []
lct = []
lst = []
for hour in range(0,24): # gives zero-based hours as expected in the datetime constructor
x.append(hour)
dt = datetime(2001, 6, 21, hour, 00, 00)
lct.append(solar.localCivilTime(dt, True, longitude, stdmeridian))
lst.append(solar.localSolarTime(dt, True, longitude, stdmeridian))
plt.plot(x, 'black', label='Clock Time')
plt.plot(x, lct, 'grey', label='Civil Time', linewidth=6)
plt.plot(x, lst, 'yellow', label='Solar Time')
plt.xlim([0,23])
plt.suptitle("Time Values for Stillwater, OK on June 21", fontsize=14, fontweight='bold')
plt.xlabel("Hour of Day -- Clock Time")
plt.ylabel("Time [hours]")
plt.grid(True, axis='both')
plt.legend()
plt.savefig(os.path.join(os.path.dirname(__file__), '..', '..', 'SolarCalculations.wiki/DemoSolarAnglesCivilSolarTime.png'))
#### reset
plt.close()
# calculate hour angle for a summer day in Golden, CO
longitude = 105.2
stdmeridian = 105
latitude = 39.75
x = []
hours = []
for hour in range(0,24): # gives zero-based hours as expected in the datetime constructor
x.append(hour)
dt = datetime(2001, 6, 21, hour, 00, 00)
hours.append(solar.hourAngle(dt, True, longitude, stdmeridian).degrees)
plt.plot(x, hours, 'b', label='Hour Angle')
plt.xlim([0,23])
plt.suptitle("Hour Angle", fontsize=14, fontweight='bold')
plt.xlabel("Hour of Day -- Clock Time")
plt.ylabel("Angle [degrees]")
plt.grid(True, axis='both')
plt.legend()
plt.savefig(os.path.join(os.path.dirname(__file__), '..', '..', 'SolarCalculations.wiki/DemoSolarAnglesHour.png'))
#### reset
plt.close()
# calculate solar altitude angles for Winter and Summer days in Golden, CO
longitude = 105.2
stdmeridian = 105
latitude = 39.75
x = []
beta_winter = []
beta_summer = []
for hour in range(0,24): # gives zero-based hours as expected in the datetime constructor
x.append(hour)
dt = datetime(2001, 12, 21, hour, 00, 00)
beta_winter.append(solar.altitudeAngle(dt, False, longitude, stdmeridian, latitude).degrees)
dt = datetime(2001, 6, 21, hour, 00, 00)
beta_summer.append(solar.altitudeAngle(dt, True, longitude, stdmeridian, latitude).degrees)
plt.plot(x, beta_winter, 'b', label='Winter')
plt.plot(x, beta_summer, 'r', label='Summer')
plt.xlim([0,23])
plt.suptitle("Solar Altitude Angle", fontsize=14, fontweight='bold')
plt.xlabel("Hour of Day -- Clock Time")
plt.ylabel("Angle [degrees]")
plt.grid(True, axis='both')
plt.legend()
plt.savefig(os.path.join(os.path.dirname(__file__), '..', '..', 'SolarCalculations.wiki/DemoSolarAngles1.png'))
#### reset
plt.close()
# calculate solar azimuth angle for a summer day in Golden, CO
longitude = 105.2
stdmeridian = 105
latitude = 39.75
x = []
solar_az = []
for hour in range(0,24): # gives zero-based hours as expected in the datetime constructor
x.append(hour)
dt = datetime(2001, 6, 21, hour, 00, 00)
solar_az.append(solar.solarAzimuthAngle(dt, True, longitude, stdmeridian, latitude).degrees)
plt.plot(x, solar_az, 'b', label='Solar Azimuth Angle')
plt.xlim([0,23])
plt.suptitle("Solar Azimuth Angle", fontsize=14, fontweight='bold')
plt.xlabel("Hour of Day -- Clock Time")
plt.ylabel("Angle [degrees]")
plt.grid(True, axis='both')
plt.legend()
plt.savefig(os.path.join(os.path.dirname(__file__), '..', '..', 'SolarCalculations.wiki/DemoSolarAnglesSolarAzimuth.png'))
### reset
plt.close()
# calculate wall azimuth angles for a summer day in Golden, CO
longitude = 105.2
stdmeridian = 105
latitude = 39.75
x = []
east_wall_normal_from_north = 90
east_az = []
south_wall_normal_from_north = 180
south_az = []
west_wall_normal_from_north = 270
west_az = []
for hour in range(0,24): # gives zero-based hours as expected in the datetime constructor
x.append(hour)
dt = datetime(2001, 6, 21, hour, 00, 00)
east_az.append(solar.wallAzimuthAngle(dt, True, longitude, stdmeridian, latitude, east_wall_normal_from_north).degrees)
south_az.append(solar.wallAzimuthAngle(dt, True, longitude, stdmeridian, latitude, south_wall_normal_from_north).degrees)
west_az.append(solar.wallAzimuthAngle(dt, True, longitude, stdmeridian, latitude, west_wall_normal_from_north).degrees)
plt.plot(x, east_az, 'r', label='East Wall Azimuth Angle')
plt.plot(x, south_az, 'g', label='South Wall Azimuth Angle')
plt.plot(x, west_az, 'b', label='West Wall Azimuth Angle')
plt.xlim([0,23])
plt.ylim([-90,180])
plt.suptitle("Wall Azimuth Angles", fontsize=14, fontweight='bold')
plt.xlabel("Hour of Day -- Clock Time")
plt.ylabel("Angle [degrees]")
plt.grid(True, axis='both')
plt.legend()
plt.savefig(os.path.join(os.path.dirname(__file__), '..', '..', 'SolarCalculations.wiki/DemoSolarAnglesWallAzimuths.png'))
### reset
plt.close()
# calculate solar angle of incidence for a summer day in Golden, CO
longitude = 105.2
stdmeridian = 105
latitude = 39.75
x = []
east_wall_normal_from_north = 90
east_theta = []
east_az = []
alt = []
for hour in range(0,24): # gives zero-based hours as expected in the datetime constructor
x.append(hour)
dt = datetime(2001, 6, 21, hour, 00, 00)
east_az.append(solar.wallAzimuthAngle(dt, True, longitude, stdmeridian, latitude, east_wall_normal_from_north).degrees)
east_theta.append(solar.solarAngleOfIncidence(dt, True, longitude, stdmeridian, latitude, east_wall_normal_from_north).degrees)
alt.append(solar.altitudeAngle(dt, True, longitude, stdmeridian, latitude).degrees)
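# Note (illustrative, not from the original demo): for a vertical surface the
# standard solar-geometry relation is
#   cos(incidence) = cos(altitude) * cos(wall_solar_azimuth),
# which is what ties together the three curves plotted below.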
plt.plot(x, alt, 'r', label='Solar Altitude Angle')
plt.plot(x, east_az, 'g', label='East Wall Azimuth Angle')
plt.plot(x, east_theta, 'b', label='East Wall Incidence Angle')
plt.xlim([0,23])
plt.ylim([-90,180])
plt.suptitle("Wall Solar Incidence Angles", fontsize=14, fontweight='bold')
plt.xlabel("Hour of Day -- Clock Time")
plt.ylabel("Angle [degrees]")
plt.grid(True, axis='both')
plt.legend()
plt.savefig(os.path.join(os.path.dirname(__file__), '..', '..', 'SolarCalculations.wiki/DemoSolarAnglesSolarIncidence.png'))
| mit |
jmetzen/scikit-learn | benchmarks/bench_isolation_forest.py | 40 | 3136 | """
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
np.random.seed(1)
datasets = ['http']#, 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
for dat in datasets:
# loading and vectorization
print('loading data')
if dat in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True)
X = dataset.data
y = dataset.target
if dat == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
X, y = sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dat == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dat == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dat == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dat == 'http' or dat == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = np.shape(X)
n_samples_train = n_samples // 2
n_samples_test = n_samples - n_samples_train
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
print('IsolationForest processing...')
model = IsolationForest(bootstrap=True, n_jobs=-1)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = model.predict(X_test) # the lower, the more normal
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
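# Minimal illustration of the roc_curve/auc contract on toy values (not part
# of the original benchmark): higher scores mean "more likely positive", which
# matches the scoring convention used above (higher = more abnormal).
#   fpr_toy, tpr_toy, _ = roc_curve([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])
#   auc(fpr_toy, tpr_toy)  # -> 0.75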
plt.plot(fpr, tpr, lw=1, label='ROC for %s (area = %0.3f, train-time: %0.2fs, test-time: %0.2fs)' % (dat, AUC, fit_time, predict_time))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
beni55/dipy | doc/examples/reconst_shore_metrics.py | 13 | 3275 | """
===========================
Calculate SHORE scalar maps
===========================
We show how to calculate two SHORE-based scalar maps: return to origin
probability (rtop) [Descoteaux2011]_ and mean square displacement (msd)
[Wu2007]_, [Wu2008]_ on your data. SHORE can be used with any multiple b-value
dataset like multi-shell or DSI.
First import the necessary modules:
"""
import nibabel as nib
import numpy as np
import matplotlib.pyplot as plt
from dipy.data import fetch_taiwan_ntu_dsi, read_taiwan_ntu_dsi, get_sphere
from dipy.data import get_data, dsi_voxels
from dipy.reconst.shore import ShoreModel
"""
Download and read the data for this tutorial.
"""
fetch_taiwan_ntu_dsi()
img, gtab = read_taiwan_ntu_dsi()
"""
img contains a nibabel Nifti1Image object (the data) and gtab contains a
GradientTable object (the gradient information, e.g. b-values). For example,
to read the b-values you can write print(gtab.bvals).
Load the raw diffusion data and the affine.
"""
data = img.get_data()
affine = img.get_affine()
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
Instantiate the Model.
"""
asm = ShoreModel(gtab)
"""
Let's use just one slice of the data.
"""
dataslice = data[30:70, 20:80, data.shape[2] // 2]
"""
Fit the signal with the model and calculate the SHORE coefficients.
"""
asmfit = asm.fit(dataslice)
"""
Calculate the analytical rtop on the signal, which corresponds to the
integral of the signal.
"""
print('Calculating... rtop_signal')
rtop_signal = asmfit.rtop_signal()
"""
Now we calculate the analytical rtop on the propagator, which corresponds
to its central value.
"""
print('Calculating... rtop_pdf')
rtop_pdf = asmfit.rtop_pdf()
"""
In theory, these two measures should be equal; to verify this, we calculate
the mean squared error between them.
"""
mse = np.sum((rtop_signal - rtop_pdf) ** 2) / rtop_signal.size
print("mse = %f" % mse)
"""
mse = 0.000000
Let's calculate the analytical mean square displacement on the propagator.
"""
print('Calculating... msd')
msd = asmfit.msd()
"""
Show the maps and save them in SHORE_maps.png.
"""
fig = plt.figure(figsize=(6, 6))
ax1 = fig.add_subplot(2, 2, 1, title='rtop_signal')
ax1.set_axis_off()
ind = ax1.imshow(rtop_signal.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax2 = fig.add_subplot(2, 2, 2, title='rtop_pdf')
ax2.set_axis_off()
ind = ax2.imshow(rtop_pdf.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax3 = fig.add_subplot(2, 2, 3, title='msd')
ax3.set_axis_off()
ind = ax3.imshow(msd.T, interpolation='nearest', origin='lower', vmin=0)
plt.colorbar(ind)
plt.savefig('SHORE_maps.png')
"""
.. figure:: SHORE_maps.png
:align: center
**rtop and msd calculated using the SHORE model**.
.. [Descoteaux2011] Descoteaux M. et. al , "Multiple q-shell diffusion
propagator imaging", Medical Image Analysis, vol 15,
No. 4, p. 603-621, 2011.
.. [Wu2007] Wu Y. et. al, "Hybrid diffusion imaging", NeuroImage, vol 36,
p. 617-629, 2007.
.. [Wu2008] Wu Y. et. al, "Computation of Diffusion Function Measures
in q -Space Using Magnetic Resonance Hybrid Diffusion Imaging",
IEEE TRANSACTIONS ON MEDICAL IMAGING, vol. 27, No. 6, p. 858-865,
2008.
.. include:: ../links_names.inc
"""
| bsd-3-clause |
efulet/pca | pca/main.py | 1 | 3043 | """
@created_at 2014-07-15
@author Exequiel Fuentes <efulet@gmail.com>
@author Brian Keith <briankeithn@gmail.com>
"""
# It is recommended to follow these standards:
# 1. For coding: PEP 8 - Style Guide for Python Code (http://legacy.python.org/dev/peps/pep-0008/)
# 2. For documentation: PEP 257 - Docstring Conventions (http://legacy.python.org/dev/peps/pep-0257/)
import os
import traceback
import sys
from lib import *
def check_version():
"""Python v2.7 es requerida por el curso, entonces verificamos la version"""
if sys.version_info[:2] != (2, 7):
raise Exception("Parece que python v2.7 no esta instalado en el sistema")
def db_path():
"""Retorna el path de las base de datos"""
pathfile = os.path.dirname(os.path.abspath(__file__))
return os.path.join(pathfile, "db")
if __name__ == "__main__":
try:
# Check the Python version
check_version()
# Load the data
my_pca_lda = FKSkLearn(os.path.join(db_path(), "datos_diabetes.npz"))
# Prepare the data for validation
my_pca_lda.fk_train_test_split()
# Train the PCA + LDA classifier with the optimal dimension.
my_pca_lda.fk_pca_lda()
# Build the Bayes classifier using the sklearn library
my_pca_lda.fk_bayes_classifier()
print("**************")
print("sklearn_Bayes:")
print("Number of mislabeled points : %d" % (my_pca_lda.fk_get_y_test() != my_pca_lda.fk_get_y_pred()).sum())
print("Accuracy: ", my_pca_lda.fk_score())
print("**************")
# Custom implementation of the classifier.
fknb = FKNaiveBayesClassifier()
fknb.fit(my_pca_lda.fk_get_lda_train(), my_pca_lda.fk_get_y_train())
y_pred_FK = fknb.predict(my_pca_lda.fk_get_lda_test())
print("FK_Bayes")
print("Number of mislabeled points : %d" % (my_pca_lda.fk_get_y_test() != y_pred_FK).sum())
print("Accuracy: ", fknb.score(my_pca_lda.fk_get_lda_test(), my_pca_lda.fk_get_y_test()))
print("**************")
# This verifies that the two sets of predictions are equal.
print("...testing equality...")
y_pred_SK = [int(i) for i in my_pca_lda.fk_get_y_pred()]
#print y_pred_SK
#print y_pred_FK
# Check whether the two prediction lists are equal.
if y_pred_SK == y_pred_FK:
print "Son iguales los dos metodos!"
else:
print "No son iguales. :("
# Plot the information.
graph = Graph(my_pca_lda.fk_get_lda_train(), my_pca_lda.fk_get_y_train())
graph.frequencies_histogram()
graph.probability_density_functions()
graph.conditional_probability(my_pca_lda.fk_get_lda_train(), my_pca_lda.fk_get_y_prob())
graph.show_graphs()
except Exception, err:
print traceback.format_exc()
finally:
sys.exit()
| mit |
pcm17/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 5 | 9272 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
# Backtrack a little bit to avoid skipping words at the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
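# Note (illustrative, not part of the original tutorial): with skip_window=1
# and num_skips=2, every center word contributes two (center -> neighbour)
# pairs, so this batch of 8 covers 4 consecutive center words, conceptually:
#   ... w1 [w2] w3 ...  ->  (w2 -> w1), (w2 -> w3)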
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
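# Note (illustrative, not part of the original tutorial): because
# `normalized_embeddings` has unit-length rows, this matmul already equals the
# cosine similarity. The equivalent NumPy computation for un-normalized
# matrices a (m, d) and b (n, d) would be roughly:
#   sim = a.dot(b.T) / (np.linalg.norm(a, axis=1, keepdims=True) *
#                       np.linalg.norm(b, axis=1))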
# Add variable initializer.
init = tf.initialize_all_variables()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run())
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
| apache-2.0 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/indexes/base.py | 1 | 120893 | import datetime
import warnings
import operator
import numpy as np
import pandas.tslib as tslib
import pandas.lib as lib
import pandas.algos as _algos
import pandas.index as _index
from pandas.lib import Timestamp, Timedelta, is_datetime_array
from pandas.compat import range, u
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.base import (PandasObject, FrozenList, FrozenNDArray,
IndexOpsMixin)
import pandas.core.base as base
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate, deprecate_kwarg)
import pandas.core.common as com
import pandas.types.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
from pandas.formats.printing import pprint_thing
from pandas.core.common import (isnull, array_equivalent,
is_object_dtype, is_datetimetz, ABCSeries,
ABCPeriodIndex, ABCMultiIndex,
_values_from_object, is_float, is_integer,
is_iterator, is_categorical_dtype,
_ensure_object, _ensure_int64, is_bool_indexer,
is_list_like, is_bool_dtype,
is_integer_dtype, is_float_dtype,
needs_i8_conversion)
from pandas.core.strings import StringAccessorMixin
from pandas.core.config import get_option
# simplify
default_pprint = lambda x, max_seq_items=None: \
pprint_thing(x, escape_chars=('\t', '\r', '\n'), quote_strings=True,
max_seq_items=max_seq_items)
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='', duplicated='np.array')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, StringAccessorMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_groupby = _algos.groupby_object
_arrmap = _algos.arrmap_object
_left_indexer_unique = _algos.left_join_indexer_unique_object
_left_indexer = _algos.left_join_indexer_object
_inner_indexer = _algos.inner_join_indexer_object
_outer_indexer = _algos.outer_join_indexer_object
_box_scalars = False
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_allow_index_ops = True
_allow_datetime_index_ops = False
_allow_period_index_ops = False
_is_numeric_dtype = False
_can_hold_na = True
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = _index.ObjectEngine
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, copy=copy, name=name, **kwargs)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (issubclass(data.dtype.type, np.datetime64) or
is_datetimetz(data)):
from pandas.tseries.index import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif issubclass(data.dtype.type, np.timedelta64):
from pandas.tseries.tdi import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
data = np.array(data, copy=copy, dtype=dtype)
elif inferred in ['floating', 'mixed-integer-float']:
# if we are actually all equal to integers
# then coerce to integer
from .numeric import Int64Index, Float64Index
try:
res = data.astype('i8')
if (res == data).all():
return Int64Index(res, copy=copy,
name=name)
except (TypeError, ValueError):
pass
# return an actual float index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError):
pass
# maybe coerce to a sub-class
from pandas.tseries.period import PeriodIndex
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if issubclass(data.dtype.type, np.integer):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.floating):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
from .numeric import Int64Index
return Int64Index(subarr.astype('i8'), copy=copy,
name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if (inferred.startswith('datetime') or
tslib.is_timestamp_array(subarr)):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.tseries.index import DatetimeIndex
return DatetimeIndex(subarr, copy=copy, name=name,
**kwargs)
elif (inferred.startswith('timedelta') or
lib.is_timedelta_array(subarr)):
from pandas.tseries.tdi import TimedeltaIndex
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
return PeriodIndex(subarr, name=name, **kwargs)
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or lib.isscalar(data):
cls._scalar_data_error(data)
else:
if (tupleize_cols and isinstance(data, list) and data and
isinstance(data[0], tuple)):
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
try:
# must be orderable in py3
if compat.PY3:
sorted(data)
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
except (TypeError, KeyError):
# python2 - MultiIndex fails on mixed types
pass
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
we require that we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if values is None and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
def get_values(self):
""" return the underlying data as an ndarray """
return self.values
# ops compat
def tolist(self):
"""
return a list of the Index values
"""
return list(self.values)
def repeat(self, n, *args, **kwargs):
"""
Repeat elements of an Index. Refer to `numpy.ndarray.repeat`
for more information about the `n` argument.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(n))
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._values.ravel(order=order)
# construction helpers
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or lib.isscalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return dict([(k, getattr(self, k, None)) for k in self._attributes])
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
return Index([item], dtype=self.dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
names = kwargs.get('names')
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
if deep:
from copy import deepcopy
new_index = self._shallow_copy(self._data.copy())
name = name or deepcopy(self.name)
else:
new_index = self._shallow_copy()
name = self.name
if name is not None:
names = [name]
if names:
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
__copy__ = copy
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join([u("%s=%s") % (k, v) for k, v in attrs])
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatted data as a unicode string
"""
return default_pprint
def _format_data(self):
"""
Return the formatted data as a unicode string
"""
from pandas.formats.format import get_console_size, _get_adjustment
display_width, _ = get_console_size()
if display_width is None:
display_width = get_option('display.width') or 80
space1 = "\n%s" % (' ' * (len(self.__class__.__name__) + 1))
space2 = "\n%s" % (' ' * (len(self.__class__.__name__) + 2))
n = len(self)
sep = ','
max_seq_items = get_option('display.max_seq_items') or n
formatter = self._formatter_func
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
# are we a truncated display
is_truncated = n > max_seq_items
# adj can optionally handle unicode East Asian width
adj = _get_adjustment()
def _extend_line(s, line, value, display_width, next_line_prefix):
if (adj.len(line.rstrip()) + adj.len(value.rstrip()) >=
display_width):
s += line.rstrip()
line = next_line_prefix
line += value
return s, line
def best_len(values):
if values:
return max([adj.len(x) for x in values])
else:
return 0
if n == 0:
summary = '[], '
elif n == 1:
first = formatter(self[0])
summary = '[%s], ' % first
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = '[%s, %s], ' % (first, last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
else:
head = []
tail = [formatter(x) for x in self]
# adjust all values to max length if needed
if is_justify:
# however, if we are not truncated and we are only a single
# line, then don't justify
if (is_truncated or
not (len(', '.join(head)) < display_width and
len(', '.join(tail)) < display_width)):
max_len = max(best_len(head), best_len(tail))
head = [x.rjust(max_len) for x in head]
tail = [x.rjust(max_len) for x in tail]
summary = ""
line = space2
for i in range(len(head)):
word = head[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
if is_truncated:
# remove trailing space of last line
summary += line.rstrip() + space2 + '...'
line = space2
for i in range(len(tail) - 1):
word = tail[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
# last value: no sep added + 1 space of width used for trailing ','
summary, line = _extend_line(summary, line, tail[-1],
display_width - 2, space2)
summary += line
summary += '],'
if len(summary) > (display_width):
summary += space1
else: # one row
summary += ' '
# remove initial space
summary = '[' + summary[len(space2):]
return summary
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
attrs = []
attrs.append(('dtype', "'%s'" % self.dtype))
if self.name is not None:
attrs.append(('name', default_pprint(self.name)))
max_seq_items = get_option('display.max_seq_items') or len(self)
if len(self) > max_seq_items:
attrs.append(('length', len(self)))
return attrs
def to_series(self, **kwargs):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
from pandas import Series
return Series(self._to_embed(), index=self, name=self.name)
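# Example (illustrative, not part of the original source):
#   Index(['a', 'b', 'c']).to_series()
# returns a Series whose index and values are both ['a', 'b', 'c'], which is
# convenient for .map-based lookups keyed on the index labels.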
def _to_embed(self, keep_tz=False):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
return self.values.copy()
def astype(self, dtype):
return Index(self.values.astype(dtype), name=self.name, dtype=dtype)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def to_datetime(self, dayfirst=False):
"""
For an Index containing strings or datetime.datetime objects, attempt
conversion to DatetimeIndex
"""
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'string':
from dateutil.parser import parse
parser = lambda x: parse(x, dayfirst=dayfirst)
parsed = lib.try_parse_dates(self.values, parser=parser)
return DatetimeIndex(parsed)
else:
return DatetimeIndex(self.values)
def _assert_can_do_setop(self, other):
if not com.is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
if level is not None and self.nlevels == 1:
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def summary(self, name=None):
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self._engine.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
return self._engine.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly(allow_setting=True)
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
return self.inferred_type in ['categorical']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
# validate / convert indexers
def _convert_scalar_indexer(self, key, kind=None):
"""
convert a scalar indexer
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
if len(self) and not isinstance(self, ABCMultiIndex,):
# we can raise here if we are definitive that this
# is positional indexing (eg. .ix on with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['getitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
# on anything but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
def _convert_slice_indexer(self, key, kind=None):
"""
convert a slice indexer. disallow floats in the start/stop/step
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'getitem':
"""
called from the getitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used anywhere
if start is not None:
i = self.get_loc(start) # noqa
if stop is not None:
j = self.get_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step, kind=kind)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_list_indexer(self, keyarr, kind=None):
"""
passed a key that is tuplesafe that is integer based
and we have a mixed index (e.g. number/labels). figure out
the indexer. return None if we can't help
"""
if (kind in [None, 'iloc', 'ix'] and
is_integer_dtype(keyarr) and not self.is_floating() and
not isinstance(keyarr, ABCPeriodIndex)):
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
# missing values are flagged as -1 by get_indexer and negative
# indices are already converted to positive indices in the
# above if-statement, so the negative flags are changed to
# values outside the range of indices so as to trigger an
# IndexError in maybe_convert_indices
indexer[indexer < 0] = len(self)
from pandas.core.indexing import maybe_convert_indices
return maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)
return keyarr
return None
def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(
form=form, klass=type(self), key=key,
kind=type(key)))
def get_duplicates(self):
from collections import defaultdict
counter = defaultdict(lambda: 0)
for k in self.values:
counter[k] += 1
return sorted(k for k, v in compat.iteritems(counter) if v > 1)
_get_duplicates = get_duplicates
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _constructor(self):
return type(self)
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self.values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level, ))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)' %
(level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def is_type_compatible(self, kind):
return kind == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __iter__(self):
return iter(self.values)
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
def __contains__(self, key):
hash(key)
# work around some kind of odd cython bug
try:
return key in self._engine
except TypeError:
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if lib.isscalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if is_bool_indexer(key):
key = np.asarray(key)
key = _values_from_object(key)
result = getitem(key)
if not lib.isscalar(result):
return promote(result)
else:
return result
def _ensure_compat_append(self, other):
"""
prepare the append
Returns
-------
list of to_concat, name of result Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if (isinstance(obj, Index) and obj.name != name and
obj.name is not None):
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
to_concat = [x._values if isinstance(x, Index) else x
for x in to_concat]
return to_concat, name
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
to_concat, name = self._ensure_compat_append(other)
attribs = self._get_attributes_dict()
attribs['name'] = name
return self._shallow_copy_with_infer(
np.concatenate(to_concat), **attribs)
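    # A hedged sketch of append (illustrative values; assumes pd.Index):
    #
    #   pd.Index(['a', 'b']).append(pd.Index(['c']))   -> Index(['a', 'b', 'c'])
    #
    # A list or tuple of indexes is concatenated in order; the result keeps a
    # name only when every input shares that name.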
@staticmethod
def _ensure_compat_concat(indexes):
from pandas.tseries.api import (DatetimeIndex, PeriodIndex,
TimedeltaIndex)
klasses = DatetimeIndex, PeriodIndex, TimedeltaIndex
is_ts = [isinstance(idx, klasses) for idx in indexes]
if any(is_ts) and not all(is_ts):
return [_maybe_box(idx) for idx in indexes]
return indexes
_index_shared_docs['take'] = """
return a new %(klass)s of the values selected by the indices
For internal compatibility with numpy arrays.
Parameters
----------
indices : list
Indices to be taken
axis : int, optional
The axis over which to select values, always 0.
allow_fill : bool, default True
fill_value : bool, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 is regarded as NA. If Index doesn't hold NA, raise ValueError
See also
--------
numpy.ndarray.take
"""
@Appender(_index_shared_docs['take'])
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = com._ensure_platform_int(indices)
if self._can_hold_na:
taken = self._assert_take_fillable(self.values, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=self._na_value)
else:
if allow_fill and fill_value is not None:
msg = 'Unable to fill values because {0} cannot contain NA'
raise ValueError(msg.format(self.__class__.__name__))
taken = self.values.take(indices)
return self._shallow_copy(taken)
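    # A small sketch of take (illustrative values; assumes pd.Index):
    #
    #   idx = pd.Index(['a', 'b', 'c'])
    #   idx.take([2, 0, 1])   -> Index(['c', 'a', 'b'])
    #
    # For indexes that can hold NA, allow_fill=True combined with a non-None
    # fill_value treats -1 indices as missing instead of wrapping around.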
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
""" Internal method to handle NA filling of take """
indices = com._ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = values.take(indices)
mask = indices == -1
if mask.any():
taken[mask] = na_value
else:
taken = values.take(indices)
return taken
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
if self._can_hold_na:
return isnull(self)
else:
            # shouldn't reach this branch if hasnans is checked beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def _nan_idxs(self):
if self._can_hold_na:
w, = self._isnan.nonzero()
return w
else:
return np.array([], dtype=np.int64)
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
if self._can_hold_na:
return self._isnan.any()
else:
return False
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not lib.isscalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
"""
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
return self._shallow_copy(values)
except (ValueError, TypeError):
# coerces to object
return self.astype(object).putmask(mask, value)
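    # A hedged sketch of putmask (illustrative values; assumes pd.Index):
    #
    #   pd.Index([1, 2, 3]).putmask([True, False, False], 0)   -> Index([0, 2, 3])
    #
    # If the value cannot be placed into the existing dtype, the operation
    # falls back to an object-dtype copy, as handled in the except clause above.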
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
from pandas.formats.format import format_array
if is_categorical_dtype(values.dtype):
values = np.array(values)
elif is_object_dtype(values.dtype):
values = lib.maybe_convert_objects(values, safe=1)
if is_object_dtype(values.dtype):
result = [pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = isnull(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
def to_native_types(self, slicer=None, **kwargs):
""" slice and dice then format """
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
mask = isnull(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
return array_equivalent(_values_from_object(self),
_values_from_object(other))
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
"""
return (self.equals(other) and
all((getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found.
See also
--------
get_loc : asof is a thin wrapper around get_loc with method='pad'
"""
try:
loc = self.get_loc(label, method='pad')
except KeyError:
return _get_na_value(self.dtype)
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
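    # A sketch of asof on a sorted index (illustrative values; assumes pd.Index):
    #
    #   idx = pd.Index([10, 20, 30])
    #   idx.asof(25)   -> 20          # most recent label at or before 25
    #   idx.asof(5)    -> nan         # nothing at or before 5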
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
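    # A sketch of sort_values (illustrative values; assumes pd.Index):
    #
    #   idx = pd.Index([3, 1, 2])
    #   idx.sort_values()                                       -> Index([1, 2, 3])
    #   idx.sort_values(return_indexer=True, ascending=False)
    #                                       -> (Index([3, 2, 1]), array([0, 2, 1]))
    #
    # The optional indexer is the argsort permutation used to build the copy.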
def order(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
DEPRECATED: use :meth:`Index.sort_values`
"""
warnings.warn("order is deprecated, use sort_values(...)",
FutureWarning, stacklevel=2)
return self.sort_values(return_indexer=return_indexer,
ascending=ascending)
def sort(self, *args, **kwargs):
raise TypeError("cannot sort an Index object in-place, use "
"sort_values instead")
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
        For internal compatibility with the Index API
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
-------
sorted_index : Index
"""
return self.sort_values(return_indexer=True, ascending=ascending)
def shift(self, periods=1, freq=None):
"""
Shift Index containing datetime objects by input number of periods and
DateOffset
Returns
-------
shifted : Index
"""
raise NotImplementedError("Not supported for type %s" %
type(self).__name__)
def argsort(self, *args, **kwargs):
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def __add__(self, other):
if com.is_list_like(other):
warnings.warn("using '+' to provide set union with Indexes is "
"deprecated, use '|' or .union()", FutureWarning,
stacklevel=2)
if isinstance(other, Index):
return self.union(other)
return Index(np.array(self) + other)
def __radd__(self, other):
if is_list_like(other):
warnings.warn("using '+' to provide set union with Indexes is "
"deprecated, use '|' or .union()", FutureWarning,
stacklevel=2)
return Index(other + np.array(self))
__iadd__ = __add__
def __sub__(self, other):
warnings.warn("using '-' to provide set differences with Indexes is "
"deprecated, use .difference()", FutureWarning,
stacklevel=2)
return self.difference(other)
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def _get_consensus_name(self, other):
"""
Given 2 indexes, give a consensus name meaning
we take the not None one, or None if the names differ.
Return a new object if we are resetting the name
"""
if self.name != other.name:
if self.name is None or other.name is None:
name = self.name or other.name
else:
name = None
if self.name != name:
return other._shallow_copy(name=name)
return self
def union(self, other):
"""
Form the union of two Index objects and sorts if possible.
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if len(other) == 0 or self.equals(other):
return self._get_consensus_name(other)
if len(self) == 0:
return other._get_consensus_name(self)
if not com.is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.union(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(self.values, other._values)[0]
except TypeError:
# incomparable objects
result = list(self.values)
# worth making this faster? a very unusual case
value_set = set(self.values)
result.extend([x for x in other._values if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
other_diff = algos.take_nd(other._values, indexer,
allow_fill=False)
result = _concat._concat_compat((self.values, other_diff))
try:
self.values[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = self.values
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
# for subclasses
return self._wrap_union_result(other, result)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(result, name=name)
def intersection(self, other):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`.
Sortedness of the result is not guaranteed.
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self._get_consensus_name(other)
if not com.is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.intersection(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(self.values, other._values)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = Index(self.values).get_indexer(other._values)
indexer = indexer.take((indexer != -1).nonzero()[0])
except:
# duplicates
indexer = Index(self.values).get_indexer_non_unique(
other._values)[0].unique()
indexer = indexer[indexer != -1]
taken = self.take(indexer)
if self.name != other.name:
taken.name = None
return taken
def difference(self, other):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the sorted set difference of two Index objects.
Parameters
----------
other : Index or array-like
Returns
-------
difference : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
"""
self._assert_can_do_setop(other)
if self.equals(other):
return Index([], name=self.name)
other, result_name = self._convert_can_do_setop(other)
        the_diff = sorted(set(self) - set(other))
        return Index(the_diff, name=result_name)
diff = deprecate('diff', difference)
def symmetric_difference(self, other, result_name=None):
"""
Compute the sorted symmetric difference of two Index objects.
Parameters
----------
other : Index or array-like
result_name : str
Returns
-------
symmetric_difference : Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``(idx1 - idx2) + (idx2 - idx1)`` with duplicates dropped.
The sorting of a result containing ``NaN`` values is not guaranteed
across Python versions. See GitHub issue #6444.
Examples
--------
>>> idx1 = Index([1, 2, 3, 4])
>>> idx2 = Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
the_diff = sorted(set((self.difference(other)).
union(other.difference(self))))
attribs = self._get_attributes_dict()
attribs['name'] = result_name
if 'freq' in attribs:
attribs['freq'] = None
return self._shallow_copy_with_infer(the_diff, **attribs)
sym_diff = deprecate('sym_diff', symmetric_difference)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : optional
Maximum distance from index value for inexact matches. The value of
            the index at the matching location must satisfy the equation
``abs(index[loc] - key) <= tolerance``.
.. versionadded:: 0.17.0
Returns
-------
loc : int if unique index, possibly slice or mask if not
"""
if method is None:
if tolerance is not None:
raise ValueError('tolerance argument only valid if using pad, '
'backfill or nearest lookups')
key = _values_from_object(key)
try:
return self._engine.get_loc(key)
except KeyError:
return self._engine.get_loc(self._maybe_cast_indexer(key))
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError('get_loc requires scalar valued input')
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
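    # A sketch of get_loc and its inexact lookup methods (illustrative values;
    # assumes pd.Index):
    #
    #   idx = pd.Index([10, 20, 30])
    #   idx.get_loc(20)                                  -> 1   # exact match
    #   idx.get_loc(25, method='pad')                    -> 1   # previous label
    #   idx.get_loc(25, method='backfill')               -> 2   # next label
    #   idx.get_loc(25, method='nearest', tolerance=10)  -> 2   # tie -> larger value
    #
    # tolerance is only accepted together with one of the inexact methods.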
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
# if we have something that is Index-like, then
# use this, e.g. DatetimeIndex
s = getattr(series, '_values', None)
if isinstance(s, Index) and lib.isscalar(key):
try:
return s[key]
except (IndexError, ValueError):
# invalid type as an indexer
pass
s = _values_from_object(series)
k = _values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
try:
return self._engine.get_value(s, k,
tz=getattr(series.dtype, 'tz', None))
except KeyError as e1:
if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
raise
try:
return tslib.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if lib.isscalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(_values_from_object(arr),
_values_from_object(key), value)
def get_level_values(self, level):
"""
Return vector of label values for requested level, equal to the length
of the index
Parameters
----------
level : int
Returns
-------
values : ndarray
"""
# checks that level number is actually just 1
self._validate_index_level(level)
return self
def get_indexer(self, target, method=None, limit=None, tolerance=None):
"""
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : Index
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
.. versionadded:: 0.17.0
Examples
--------
>>> indexer = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
"""
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance)
pself, ptarget = self._possibly_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit,
tolerance=tolerance)
if not com.is_dtype_equal(self.dtype, target.dtype):
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(target, method=method, limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad' or method == 'backfill':
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == 'nearest':
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if tolerance is not None:
raise ValueError('tolerance argument only valid if doing pad, '
'backfill or nearest reindexing')
if limit is not None:
raise ValueError('limit argument only valid if doing pad, '
'backfill or nearest reindexing')
indexer = self._engine.get_indexer(target._values)
return com._ensure_platform_int(indexer)
def _convert_tolerance(self, tolerance):
# override this method on subclasses
return tolerance
def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
if self.is_monotonic_increasing and target.is_monotonic_increasing:
method = (self._engine.get_pad_indexer if method == 'pad' else
self._engine.get_backfill_indexer)
indexer = method(target._values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method,
limit)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target._values, indexer,
tolerance)
return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets
"""
if limit is not None:
raise ValueError('limit argument for %r method only well-defined '
'if index and target are monotonic' % method)
side = 'left' if method == 'pad' else 'right'
target = np.asarray(target)
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = (indexer == -1)
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],
side)
if side == 'left':
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also mapped not found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, 'pad', limit=limit)
right_indexer = self.get_indexer(target, 'backfill', limit=limit)
target = np.asarray(target)
left_distances = abs(self.values[left_indexer] - target)
right_distances = abs(self.values[right_indexer] - target)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(op(left_distances, right_distances) |
(right_indexer == -1), left_indexer, right_indexer)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target, indexer,
tolerance)
return indexer
def _filter_indexer_tolerance(self, target, indexer, tolerance):
distance = abs(self.values[indexer] - target)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
def get_indexer_non_unique(self, target):
""" return an indexer suitable for taking from a non unique index
return the labels in the same order as the target, and
return a missing indexer into the target (missing are marked as -1
in the indexer); target must be an iterable """
target = _ensure_index(target)
pself, ptarget = self._possibly_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.is_all_dates:
self = Index(self.asi8)
tgt_values = target.asi8
else:
tgt_values = target._values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return Index(indexer), missing
def get_indexer_for(self, target, **kwargs):
""" guaranteed return of an indexer even when non-unique """
if self.is_unique:
return self.get_indexer(target, **kwargs)
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer
def _possibly_promote(self, other):
# A hack, but it works
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if not is_object_dtype(self.dtype):
return self.astype('object'), other.astype('object')
return self, other
def groupby(self, to_groupby):
"""
Group the index labels by a given array of values.
Parameters
----------
to_groupby : array
Values used to determine the groups.
Returns
-------
groups : dict
{group name -> group labels}
"""
return self._groupby(self.values, _values_from_object(to_groupby))
def map(self, mapper):
"""
Apply mapper function to its values.
Parameters
----------
mapper : callable
Function to be applied.
Returns
-------
applied : array
"""
return self._arrmap(self.values, mapper)
def isin(self, values, level=None):
"""
Compute boolean array of whether each index value is found in the
passed set of values.
Parameters
----------
values : set or list-like
Sought values.
.. versionadded:: 0.18.1
Support for values as a set
level : str or int, optional
Name or position of the index level to use (if the index is a
MultiIndex).
Notes
-----
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Returns
-------
is_contained : ndarray (boolean dtype)
"""
if level is not None:
self._validate_index_level(level)
return algos.isin(np.array(self), values)
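    # A sketch of isin (illustrative values; assumes pd.Index):
    #
    #   pd.Index(['a', 'b', 'c']).isin(['b', 'z'])   -> array([False, True, False])
    #
    # With a MultiIndex, level restricts the membership test to the labels of
    # a single level.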
def _can_reindex(self, indexer):
"""
*this is an internal non-public method*
Check if we are allowing reindexing with this particular indexer
Parameters
----------
indexer : an integer indexer
Raises
------
ValueError if its a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self.is_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'name')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = self._simple_new(None, dtype=self.dtype, **attrs)
else:
target = _ensure_index(target)
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
_, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
"with a method or limit")
indexer, missing = self.get_indexer_non_unique(target)
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy()
target.name = self.name
return target, indexer
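    # A sketch of reindex (illustrative values; assumes pd.Index):
    #
    #   idx = pd.Index(['a', 'b', 'c'])
    #   idx.reindex(['b', 'd'])   -> (Index(['b', 'd']), array([1, -1]))
    #
    # The new index always takes the target's values; the indexer marks target
    # labels that are absent from the original index with -1.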
def _reindex_non_unique(self, target):
"""
*this is an internal non-public method*
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
target = _ensure_index(target)
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
l = np.arange(len(indexer))
missing = com._ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = _ensure_int64(l[~check])
cur_labels = self.take(indexer[check])._values
cur_indexer = _ensure_int64(l[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer = indexer._values
indexer[~check] = 0
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
new_index = self._shallow_copy_with_infer(new_labels, freq=None)
return new_index, indexer, new_indexer
def join(self, other, how='left', level=None, return_indexers=False):
"""
*this is an internal non-public method*
Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
Returns
-------
join_index, (left_indexer, right_indexer)
"""
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# try to figure out the join level
# GH3662
if level is None and (self_is_mi or other_is_mi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how,
return_indexers=return_indexers)
# join on the level
if level is not None and (self_is_mi or other_is_mi):
return self._join_level(other, level, how=how,
return_indexers=return_indexers)
other = _ensure_index(other)
if len(other) == 0 and how in ('left', 'outer'):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ('right', 'outer'):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {'right': 'left', 'left': 'right'}.get(how, how)
result = other.join(self, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if not com.is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.join(other, how=how, return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
else:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
except TypeError:
pass
if how == 'left':
join_index = self
elif how == 'right':
join_index = other
elif how == 'inner':
join_index = self.intersection(other)
elif how == 'outer':
join_index = self.union(other)
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
def _join_multi(self, other, how, return_indexers=True):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
self_names = [n for n in self.names if n is not None]
other_names = [n for n in other.names if n is not None]
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
if not len(overlap):
raise ValueError("cannot join with no level specified and no "
"overlapping names")
if len(overlap) > 1:
raise NotImplementedError("merging with more than one level "
"overlap on a multi-index is not "
"implemented")
jl = overlap[0]
# make the indices into mi's that match
if not (self_is_mi and other_is_mi):
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
# flip if join method is right or left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
return_indexers=return_indexers)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
# 2 multi-indexes
raise NotImplementedError("merging with both multi-indexes is not "
"implemented")
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.tools.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers([self.values],
[other._values], how=how,
sort=True)
left_idx = com._ensure_platform_int(left_idx)
right_idx = com._ensure_platform_int(right_idx)
join_index = self.values.take(left_idx)
mask = left_idx == -1
np.putmask(join_index, mask, other._values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(self, other, level, how='left', return_indexers=False,
keep_order=True):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex. If `keep_order` == True, the
order of the data indexed by the MultiIndex will not be changed;
otherwise, it will tie out with `other`.
"""
from pandas.algos import groupsort_indexer
from .multi import MultiIndex
def _get_leaf_sorter(labels):
"""
returns sorter for the inner most level while preserving the
order of higher levels
"""
if labels[0].size == 0:
return np.empty(0, dtype='int64')
if len(labels) == 1:
lab = _ensure_int64(labels[0])
sorter, _ = groupsort_indexer(lab, 1 + lab.max())
return sorter
            # find indexers of beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = _ensure_int64(labels[-1])
return lib.get_level_sorter(lab, _ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError('Index._join_level on non-unique index '
'is not implemented')
new_level, left_lev_indexer, right_lev_indexer = \
old_level.join(right, how=how, return_indexers=True)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.labels[:level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = _ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
new_lev_labels = algos.take_nd(rev_indexer, left.labels[level],
allow_fill=False)
new_labels = list(left.labels)
new_labels[level] = new_lev_labels
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left))
mask = new_lev_labels != -1
if not mask.all():
new_labels = [lab[mask] for lab in new_labels]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
ngroups = 1 + new_lev_labels.max()
left_indexer, counts = groupsort_indexer(new_lev_labels,
ngroups)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0]:]
new_labels = [lab[left_indexer] for lab in new_labels]
else: # sort the leaves
mask = new_lev_labels != -1
mask_all = mask.all()
if not mask_all:
new_labels = [lab[mask] for lab in new_labels]
left_indexer = _get_leaf_sorter(new_labels[:level + 1])
new_labels = [lab[left_indexer] for lab in new_labels]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(levels=new_levels, labels=new_labels,
names=left.names, verify_integrity=False)
if right_lev_indexer is not None:
right_indexer = algos.take_nd(right_lev_indexer,
join_index.labels[level],
allow_fill=False)
else:
right_indexer = join_index.labels[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
return join_index, left_indexer, right_indexer
else:
return join_index
def _join_monotonic(self, other, how='left', return_indexers=False):
if self.equals(other):
ret_index = other if how == 'right' else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self.values
ov = other._values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == 'left':
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == 'right':
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == 'left':
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == 'right':
join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered Index, compute the slice indexer for input labels and
step
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, default None
kind : string, default None
Returns
-------
indexer : ndarray or slice
Notes
-----
This function assumes that the data is sorted, so use at your own peril
"""
start_slice, end_slice = self.slice_locs(start, end, step=step,
kind=kind)
# return a slice
if not lib.isscalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not lib.isscalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index
then try to cast to an int if equivalent
"""
if is_float(key) and not self.is_floating():
try:
ckey = int(key)
if ckey == key:
key = ckey
except (ValueError, TypeError):
pass
return key
def _validate_indexer(self, form, key, kind):
"""
        If we are a positional indexer, validate that we have appropriately
        typed bounds (must be an integer).
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc']
if key is None:
pass
elif is_integer(key):
pass
elif kind in ['iloc', 'getitem']:
self._invalid_indexer(form, key)
return key
def _maybe_cast_slice_bound(self, label, side, kind):
"""
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ['ix', 'loc', 'getitem', None]
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. Float64Index and
# datetimelike Indexes
# reject them
if is_float(label):
if not (kind in ['ix'] and (self.holds_integer() or
self.is_floating())):
self._invalid_indexer('slice', label)
# we are trying to find integer bounds on a non-integer based index
# this is rejected (generally .loc gets you here)
elif is_integer(label):
self._invalid_indexer('slice', label)
return label
def _searchsorted_monotonic(self, label, side='left'):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
            pos = self[::-1].searchsorted(label, side='right' if side == 'left'
                                          else 'left')
return len(self) - pos
raise ValueError('index must be monotonic increasing or decreasing')
def get_slice_bound(self, label, side, kind):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
"""
assert kind in ['ix', 'loc', 'getitem', None]
if side not in ('left', 'right'):
raise ValueError("Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" %
(side, ))
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side, kind)
# we need to look up the label
try:
slc = self.get_loc(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array or an array of indices, which
# is OK as long as they are representable by a slice.
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view('u1'))
else:
slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self))
if isinstance(slc, np.ndarray):
raise KeyError("Cannot get %s slice bound for non-unique "
"label: %r" % (side, original_label))
if isinstance(slc, slice):
if side == 'left':
return slc.start
else:
return slc.stop
else:
if side == 'right':
return slc + 1
else:
return slc
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, defaults None
If None, defaults to 1
kind : {'ix', 'loc', 'getitem'} or None
Returns
-------
start, end : int
"""
inc = (step is None or step >= 0)
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, 'left', kind)
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, 'right', kind)
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
            # i == -1 triggers ``len(self) + i`` selection that points to the
            # last element, not before-the-first one; subtracting len(self)
            # compensates for that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
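    # A sketch of slice_locs, including a reverse slice (illustrative values;
    # assumes pd.Index):
    #
    #   idx = pd.Index(['a', 'b', 'c', 'd'])
    #   idx.slice_locs(start='b', end='c')            -> (1, 3)
    #   idx.slice_locs(start='c', end='b', step=-1)   -> (2, 0)
    #
    # For a negative step the bounds are computed on the swapped labels and
    # then shifted so that idx[2:0:-1] yields 'c', 'b'.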
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._shallow_copy(np.delete(self._data, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
_self = np.asarray(self)
item = self._coerce_scalar_to_index(item)._values
idx = np.concatenate((_self[:loc], item, _self[loc:]))
return self._shallow_copy_with_infer(idx)
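    # A sketch of insert (illustrative values; assumes pd.Index):
    #
    #   pd.Index(['a', 'b', 'c']).insert(1, 'x')   -> Index(['a', 'x', 'b', 'c'])
    #
    # Negative locations behave like list.insert, placing the item before the
    # element counted from the end.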
def drop(self, labels, errors='raise'):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
"""
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise ValueError('labels %s not contained in axis' %
labels[mask])
indexer = indexer[~mask]
return self.delete(indexer)
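    # A sketch of drop (illustrative values; assumes pd.Index):
    #
    #   pd.Index(['a', 'b', 'c']).drop(['b'])   -> Index(['a', 'c'])
    #
    # Labels missing from the index raise ValueError unless errors='ignore'
    # is passed, in which case they are silently skipped.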
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
@Appender(base._shared_docs['drop_duplicates'] % _index_doc_kwargs)
def drop_duplicates(self, keep='first'):
return super(Index, self).drop_duplicates(keep=keep)
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
@Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
return super(Index, self).duplicated(keep=keep)
_index_shared_docs['fillna'] = """
Fill NA/NaN values with the specified value
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-likes.
downcast : dict, default is None
a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
Returns
-------
filled : %(klass)s
"""
@Appender(_index_shared_docs['fillna'])
def fillna(self, value=None, downcast=None):
self._assert_can_do_op(value)
if self.hasnans:
result = self.putmask(self._isnan, value)
if downcast is None:
# no need to care metadata other than name
# because it can't have freq if
return Index(result, name=self.name)
return self._shallow_copy()
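    # A sketch of fillna (illustrative values; assumes pd.Index and numpy as np):
    #
    #   pd.Index([1.0, np.nan, 3.0]).fillna(2.0)   -> Float64Index([1.0, 2.0, 3.0])
    #
    # Only scalar fill values are accepted; list-like values raise TypeError
    # via _assert_can_do_op.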
def _evaluate_with_timedelta_like(self, other, op, opstr):
raise TypeError("can only perform ops with timedelta like values")
def _evaluate_with_datetime_like(self, other, op, opstr):
raise TypeError("can only perform ops with datetime like values")
    def _evaluate_compare(self, other, op):
        raise base.AbstractMethodError(self)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
def _make_compare(op):
def _evaluate_compare(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
func = getattr(self.values, op)
result = func(np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
return _evaluate_compare
cls.__eq__ = _make_compare('__eq__')
cls.__ne__ = _make_compare('__ne__')
cls.__lt__ = _make_compare('__lt__')
cls.__gt__ = _make_compare('__gt__')
cls.__le__ = _make_compare('__le__')
cls.__ge__ = _make_compare('__ge__')
@classmethod
def _add_numericlike_set_methods_disabled(cls):
""" add in the numeric set-like methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self)))
invalid_op.__name__ = name
return invalid_op
        cls.__add__ = cls.__radd__ = cls.__iadd__ = _make_invalid_op('__add__')
        cls.__sub__ = cls.__isub__ = _make_invalid_op('__sub__')
@classmethod
def _add_numeric_methods_disabled(cls):
""" add in numeric methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self)))
invalid_op.__name__ = name
return invalid_op
cls.__pow__ = cls.__rpow__ = _make_invalid_op('__pow__')
cls.__mul__ = cls.__rmul__ = _make_invalid_op('__mul__')
cls.__floordiv__ = cls.__rfloordiv__ = _make_invalid_op('__floordiv__')
cls.__truediv__ = cls.__rtruediv__ = _make_invalid_op('__truediv__')
if not compat.PY3:
cls.__div__ = cls.__rdiv__ = _make_invalid_op('__div__')
cls.__neg__ = _make_invalid_op('__neg__')
cls.__pos__ = _make_invalid_op('__pos__')
cls.__abs__ = _make_invalid_op('__abs__')
cls.__inv__ = _make_invalid_op('__inv__')
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
return attrs
def _validate_for_numeric_unaryop(self, op, opstr):
""" validate if we can perform a numeric unary operation """
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op "
"{opstr} for type: {typ}".format(
opstr=opstr,
typ=type(self))
)
def _validate_for_numeric_binop(self, other, op, opstr):
"""
return valid other, evaluate or raise TypeError
if we are not of the appropriate type
internal method called by ops
"""
from pandas.tseries.offsets import DateOffset
# if we are an inheritor of numeric,
        # but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} "
"for type: {typ}".format(
opstr=opstr,
typ=type(self))
)
if isinstance(other, Index):
if not other._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op "
"{opstr} with type: {typ}".format(
                                    opstr=opstr,
typ=type(other))
)
elif isinstance(other, np.ndarray) and not other.ndim:
other = other.item()
if isinstance(other, (Index, ABCSeries, np.ndarray)):
if len(self) != len(other):
raise ValueError("cannot evaluate a numeric op with "
"unequal lengths")
other = _values_from_object(other)
if other.dtype.kind not in ['f', 'i']:
raise TypeError("cannot evaluate a numeric op "
"with a non-numeric dtype")
elif isinstance(other, (DateOffset, np.timedelta64,
Timedelta, datetime.timedelta)):
# higher up to handle
pass
elif isinstance(other, (Timestamp, np.datetime64)):
# higher up to handle
pass
else:
if not (is_float(other) or is_integer(other)):
raise TypeError("can only perform ops with scalar values")
return other
@classmethod
def _add_numeric_methods_binary(cls):
""" add in numeric methods """
def _make_evaluate_binop(op, opstr, reversed=False):
def _evaluate_numeric_binop(self, other):
from pandas.tseries.offsets import DateOffset
other = self._validate_for_numeric_binop(other, op, opstr)
# handle time-based others
if isinstance(other, (DateOffset, np.timedelta64,
Timedelta, datetime.timedelta)):
return self._evaluate_with_timedelta_like(other, op, opstr)
elif isinstance(other, (Timestamp, np.datetime64)):
return self._evaluate_with_datetime_like(other, op, opstr)
                # if we are a reversed non-commutative op
values = self.values
if reversed:
values, other = other, values
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(op(values, other), **attrs)
return _evaluate_numeric_binop
cls.__add__ = cls.__radd__ = _make_evaluate_binop(
operator.add, '__add__')
cls.__sub__ = _make_evaluate_binop(
operator.sub, '__sub__')
cls.__rsub__ = _make_evaluate_binop(
operator.sub, '__sub__', reversed=True)
cls.__mul__ = cls.__rmul__ = _make_evaluate_binop(
operator.mul, '__mul__')
cls.__pow__ = cls.__rpow__ = _make_evaluate_binop(
operator.pow, '__pow__')
cls.__mod__ = _make_evaluate_binop(
operator.mod, '__mod__')
cls.__floordiv__ = _make_evaluate_binop(
operator.floordiv, '__floordiv__')
cls.__rfloordiv__ = _make_evaluate_binop(
operator.floordiv, '__floordiv__', reversed=True)
cls.__truediv__ = _make_evaluate_binop(
operator.truediv, '__truediv__')
cls.__rtruediv__ = _make_evaluate_binop(
operator.truediv, '__truediv__', reversed=True)
if not compat.PY3:
cls.__div__ = _make_evaluate_binop(
operator.div, '__div__')
cls.__rdiv__ = _make_evaluate_binop(
operator.div, '__div__', reversed=True)
@classmethod
def _add_numeric_methods_unary(cls):
""" add in numeric unary methods """
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
self._validate_for_numeric_unaryop(op, opstr)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(op(self.values), **attrs)
return _evaluate_numeric_unary
cls.__neg__ = _make_evaluate_unary(lambda x: -x, '__neg__')
cls.__pos__ = _make_evaluate_unary(lambda x: x, '__pos__')
cls.__abs__ = _make_evaluate_unary(np.abs, '__abs__')
cls.__inv__ = _make_evaluate_unary(lambda x: -x, '__inv__')
@classmethod
def _add_numeric_methods(cls):
cls._add_numeric_methods_unary()
cls._add_numeric_methods_binary()
@classmethod
def _add_logical_methods(cls):
""" add in logical methods """
_doc = """
%(desc)s
Parameters
----------
All arguments to numpy.%(outname)s are accepted.
Returns
-------
%(outname)s : bool or array_like (if axis is specified)
A single element array_like may be converted to bool."""
def _make_logical_function(name, desc, f):
@Substitution(outname=name, desc=desc)
@Appender(_doc)
def logical_func(self, *args, **kwargs):
result = f(self.values)
if (isinstance(result, (np.ndarray, ABCSeries, Index)) and
result.ndim == 0):
# return NumPy type
return result.dtype.type(result.item())
else: # pragma: no cover
return result
logical_func.__name__ = name
return logical_func
cls.all = _make_logical_function('all', 'Return whether all elements '
'are True',
np.all)
cls.any = _make_logical_function('any',
'Return whether any element is True',
np.any)
@classmethod
def _add_logical_methods_disabled(cls):
""" add in logical methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self)))
invalid_op.__name__ = name
return invalid_op
cls.all = _make_invalid_op('all')
cls.any = _make_invalid_op('any')
Index._add_numeric_methods_disabled()
Index._add_logical_methods()
Index._add_comparison_methods()
def _ensure_index(index_like, copy=False):
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if hasattr(index_like, 'name'):
return Index(index_like, name=index_like.name, copy=copy)
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
if type(index_like) != list:
index_like = list(index_like)
# 2200 ?
converted, all_arrays = lib.clean_index_list(index_like)
if len(converted) > 0 and all_arrays:
from .multi import MultiIndex
return MultiIndex.from_arrays(converted)
else:
index_like = converted
else:
# clean_index_list does the equivalent of copying
# so only need to do this if not list instance
if copy:
from copy import copy
index_like = copy(index_like)
return Index(index_like)
def _get_na_value(dtype):
return {np.datetime64: tslib.NaT,
np.timedelta64: tslib.NaT}.get(dtype, np.nan)
def _ensure_frozen(array_like, categories, copy=False):
array_like = com._coerce_indexer_dtype(array_like, categories)
array_like = array_like.view(FrozenNDArray)
if copy:
array_like = array_like.copy()
return array_like
def _ensure_has_len(seq):
"""If seq is an iterator, put its values into a list."""
try:
len(seq)
except TypeError:
return list(seq)
else:
return seq
def _maybe_box(idx):
from pandas.tseries.api import DatetimeIndex, PeriodIndex, TimedeltaIndex
klasses = DatetimeIndex, PeriodIndex, TimedeltaIndex
if isinstance(idx, klasses):
return idx.asobject
return idx
def _trim_front(strings):
"""
    Trims leading whitespace that is common to all strings
"""
trimmed = strings
while len(strings) > 0 and all([x[0] == ' ' for x in trimmed]):
trimmed = [x[1:] for x in trimmed]
return trimmed
def _validate_join_method(method):
if method not in ['left', 'right', 'inner', 'outer']:
raise ValueError('do not recognize join method %s' % method)
| mit |
kaichogami/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
bzamecnik/sms-tools | lectures/03-Fourier-properties/plots-code/fft-zero-phase.py | 1 | 1147 | # matplotlib without any blocking GUI
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft, fftshift
from smst.utils import audio
(fs, x) = audio.read_wav('../../../sounds/oboe-A4.wav')
N = 512
M = 401
hN = N // 2
hM = (M + 1) // 2
start = int(.8 * fs)
xw = x[start - hM:start + hM - 1] * np.hamming(M)
plt.figure(1, figsize=(9.5, 6.5))
plt.subplot(411)
plt.plot(np.arange(-hM, hM - 1), xw, lw=1.5)
plt.axis([-hN, hN - 1, min(xw), max(xw)])
plt.title('x (oboe-A4.wav), M = 401')
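# zero-phase windowing: the second half of the windowed segment is placed at the
# start of the FFT buffer and the first half at its end, so the window centre
# lands at sample 0 and the linear-phase trend is removed from pX below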
fftbuffer = np.zeros(N)
fftbuffer[:hM] = xw[hM - 1:]
fftbuffer[N - hM + 1:] = xw[:hM - 1]
plt.subplot(412)
plt.plot(np.arange(0, N), fftbuffer, lw=1.5)
plt.axis([0, N, min(xw), max(xw)])
plt.title('fftbuffer: N = 512')
X = fftshift(fft(fftbuffer))
mX = 20 * np.log10(abs(X) / N)
pX = np.unwrap(np.angle(X))
plt.subplot(413)
plt.plot(np.arange(-hN, hN), mX, 'r', lw=1.5)
plt.axis([-hN, hN - 1, -100, max(mX)])
plt.title('mX')
plt.subplot(414)
plt.plot(np.arange(-hN, hN), pX, 'c', lw=1.5)
plt.axis([-hN, hN - 1, min(pX), max(pX)])
plt.title('pX')
plt.tight_layout()
plt.savefig('fft-zero-phase.png')
| agpl-3.0 |
neiltest/Neil_MyApp_Test_Appium | adb_bat/logviewer/test_001.py | 2 | 1669 | #!/usr/bin/env python
# coding: utf-8
"""
@Author: Well
@Date: 2015 - 06 - 01
"""
"""
http://matplotlib.org/
http://blog.csdn.net/daniel_ustc/article/details/9714163
https://github.com/pyinstaller/pyinstaller/wiki
"""
import os
import time
def get_now():
return time.strftime('%Y-%m-%d-%H_%M_%S', time.localtime(time.time()))
def get_cmd(cmd):
time.sleep(1)
f = open(cmd, "r")
lines = f.readlines()
for line in lines:
print line
if 'ro.build.version.release' in line:
version = line.split('=')[1]
if 'ro.product.model' in line:
model = line.split('=')[1]
if 'ro.product.brand' in line:
brand = line.split('=')[1]
return [version, model, brand]
log_path = os.path.dirname(__file__) + '\\log_txt\\' + get_now() + '.log'
print log_path
package_name = 'com.htinns'
os.popen('adb shell cat /system/build.prop > ' + log_path)
print get_cmd(log_path)
# use logcat to clear the existing log
os.popen("adb logcat -c")
print "wait"
time.sleep(2)
# logcat
monkey_log = os.path.dirname(__file__) + '\\log_txt\\' + get_now() + '_monkey.log'
cmd = "adb shell monkey -p %s -s 50 --ignore-timeouts --monitor-native-crashes -v -v 10000 >>%s" \
% (package_name, monkey_log)
os.popen(cmd)
# print"使用Logcat导出日志"
logcat_name = os.path.dirname(__file__) + '\\log_txt\\' + get_now() + "_logcat.log"
cmd2 = "adb logcat *:W | grep %s > %s" % (package_name, logcat_name)
os.popen(cmd2)
# print"导出traces文件"
traces_name = os.path.dirname(__file__) + '\\log_txt\\' + get_now() + "_traces.log"
cmd3 = "adb shell cat /data/anr/traces.txt>%s" % (traces_name)
os.popen(cmd3)
| unlicense |
dllllb/ds-tools | category_encoder/category_encoder_comparison.py | 1 | 5150 | import os
import pandas as pd
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.ensemble import RandomForestClassifier
from dstools.ml.categorical import empirical_bayes_encoder, multi_class_empirical_bayes_encoder
from dstools.ml.categorical import count_encoder
from dstools.ml.categorical import empirical_bayes_vibrant_encoder, mc_empirical_bayes_vibrant_encoder
from dstools.ml.categorical import yandex_mean_encoder, mc_yandex_mean_encoder
from dstools.ml.categorical import noisy_mean_encoder, mc_noisy_mean_encoder
from dstools.ml.categorical import kfold_target_mean_encoder
from dstools.ml.categorical import mc_kfold_target_mean_encoder
def default_estimator(params):
category_encoding = params['category_encoding']
multi_class = params['multi_class']
if category_encoding == 'onehot':
df2dict = FunctionTransformer(
lambda x: x.to_dict(orient='records'), validate=False)
transf = make_pipeline(
df2dict,
DictVectorizer(sparse=False),
)
elif category_encoding == 'empirical_bayes':
if multi_class:
transf = multi_class_empirical_bayes_encoder()
else:
transf = empirical_bayes_encoder()
elif category_encoding == 'empirical_bayes_vibrant':
if multi_class:
transf = mc_empirical_bayes_vibrant_encoder(prior_est_frac=.3)
else:
transf = empirical_bayes_vibrant_encoder(prior_est_frac=.3)
elif category_encoding == 'count':
transf = count_encoder()
elif category_encoding == 'yandex_mean':
if multi_class:
transf = mc_yandex_mean_encoder()
else:
transf = yandex_mean_encoder()
elif category_encoding == 'noisy_mean':
if multi_class:
transf = mc_noisy_mean_encoder()
else:
transf = noisy_mean_encoder()
elif category_encoding == 'kfold_mean':
if multi_class:
transf = mc_kfold_target_mean_encoder()
else:
transf = kfold_target_mean_encoder()
else:
        raise AssertionError(f'unknown category encoding: {category_encoding}')
transf = make_pipeline(transf, SimpleImputer(strategy=params['imputation']))
est_type = params['est_type']
if est_type == 'rf':
est = RandomForestClassifier(
n_estimators=params['n_estimators'],
max_features=params['max_features'],
max_depth=params['max_depth'],
random_state=1)
else:
        raise AssertionError(f'unknown estimator: {est_type}')
return make_pipeline(transf, est)
def titanic_dataset(_):
import os
df = pd.read_csv(f'{os.path.dirname(__file__)}/titanic.csv')
df = df.replace(r'\s+', np.nan, regex=True)
features = df.drop(['survived', 'alive'], axis=1)
return features, df.survived
def titanic_experiment(overrides):
titanic_params = {
'est_type': 'rf',
'valid_type': 'cv',
'n_folds': 5,
'n_jobs': 1,
'n_estimators': 50,
'max_depth': 4,
'max_features': 'auto',
'imputation': 'most_frequent',
'multi_class': False,
}
params = {**titanic_params, **overrides}
    results = run_experiment(default_estimator, titanic_dataset, 'roc_auc', params)
update_model_stats('titanic.json', params, results)
def beeline_dataset(_):
df = pd.read_csv(f'{os.path.dirname(__file__)}/beeline-ss20.csv.gz')
features = df.drop('y', axis=1)
return features, df.y
def beeline_experiment(overrides):
    beeline_params = {
'est_type': 'rf',
'valid_type': 'cv',
'n_folds': 5,
'n_jobs': 1,
'n_estimators': 50,
'max_depth': 4,
'max_features': 'auto',
'imputation': 'most_frequent',
'multi_class': True,
}
    params = {**beeline_params, **overrides}
results = run_experiment(default_estimator, beeline_dataset, 'accuracy', params)
update_model_stats('beeline.json', params, results)
def update_model_stats(stats_file, params, results):
import json
import os.path
if os.path.exists(stats_file):
with open(stats_file, 'r') as f:
stats = json.load(f)
else:
stats = []
stats.append({**results, **params})
with open(stats_file, 'w') as f:
json.dump(stats, f, indent=4)
def run_experiment(est, dataset, scorer, params):
import time
start = time.time()
if params['valid_type'] == 'cv':
cv = params['n_folds']
features, target = dataset(params)
scores = cv_test(est(params), features, target, scorer, cv)
exec_time = time.time() - start
return {**scores, 'exec-time-sec': exec_time}
def cv_test(est, features, target, scorer, cv):
scores = cross_val_score(est, features, target, scoring=scorer, cv=cv)
return {'score-mean': scores.mean(), 'score-std': scores.std()}
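# Illustrative usage (hypothetical override values; any encoding name handled in
# default_estimator above works the same way):
# titanic_experiment({'category_encoding': 'empirical_bayes'})
# beeline_experiment({'category_encoding': 'kfold_mean'})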
| apache-2.0 |
XiaoxiaoLiu/morphology_analysis | bigneuron/metrics_comparison.py | 1 | 9932 | __author__ = 'xiaoxiaol'
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import os
import os.path as path
import numpy as np
import recon_prescreening as rp
def calculate_similarities(neuron_distance_csv,metric='neuron_distance', output_similarity_csv =None):
df_nd = pd.read_csv(neuron_distance_csv)
all_images = np.unique(df_nd.image_id)
all_algorithms = np.unique(df_nd.algorithm)
print "\n\nCalculate similarity based on " +metric
print neuron_distance_csv + " has :"
print str(all_algorithms.size) + " algorithms"
print str(all_images.size) +" images"
#print all_algorithms
dfg = df_nd.groupby('image_id')
df_out = pd.DataFrame()
#sample_size_per_algorithm=[]
for image in all_images:
df_image = dfg.get_group(image)
#sample_size_per_image.append(df_image.shape[0])
# similarity == nan: metric reports nan
# similarity = 0 : missing entry ( missing recons)
#df_image['similarity'] = np.exp(-(df_image[metric] - df_nd[metric].min()+0.000000001)/(df_nd[metric].max()-df_nd[metric].min()+0.000000001))
df_image['similarity'] = (df_image[metric] - df_image[metric].min()+0.000000001)/(df_nd[metric].max()-df_nd[metric].min()+0.000000001)
# construct a complete table, and fill the valid results
df_image_filled_template = pd.DataFrame(columns = df_image.columns)
df_image_filled_template.algorithm = all_algorithms
df_image_filled_template.image_id = image
df_image_filled_template['similarity'] = 0.0
for i in range(df_image.shape[0]):
alg= df_image.iloc[i].algorithm
id = df_image_filled_template[df_image_filled_template.algorithm ==alg].index[0]
df_image_filled_template.ix[id] = df_image.iloc[i]
df_out = df_out.append(df_image_filled_template,ignore_index=True)
if not output_similarity_csv == None:
df_out.to_csv(output_similarity_csv, index=False)
print "output "+ output_similarity_csv
return df_out
def plot_similarities(neuron_distance_csv, outputDir,algorithms=None,metric='neuron_distance',CASE_BY_CASE_PLOT = 0, value_label=None):
df_nd_ori = pd.read_csv(neuron_distance_csv)
algorithm_name_mapping = rp.get_algorithm_name_dict()
df_nd = calculate_similarities(neuron_distance_csv,metric,output_similarity_csv=neuron_distance_csv+".similarity.csv")
all_images = np.unique(df_nd.image_id)
if not path.exists(outputDir):
os.mkdir(outputDir)
if algorithms is None:
algorithms= order_algorithms_by_size(df_nd_ori)
if CASE_BY_CASE_PLOT:
dfg = df_nd.groupby('image_id')
#sample_size_per_algorithm=[]
for image in all_images:
df_image_cur = dfg.get_group(image)
if df_image_cur.shape[0] > 0:
plt.figure()
plt.bar(range(df_image_cur.swc_file.size), df_image_cur['similarity'])
algorithm_names = [algorithm_name_mapping[x] for x in df_image_cur['algorithm']]
plt.xticks(range(df_image_cur.swc_file.size), algorithm_names,
rotation="90")
plt.ylabel(' Similarity (0~1) by ' +metric)
#plt.subplots_adjust(bottom=0.3)
plt.savefig(outputDir + '/sorted/figs/' + image.split('/')[-1] + '_'+metric+'_similarity.png', format='png')
#plt.show()
plt.close()
else:
print image+" has no valid reconstructions"
# dfg = df_nd.groupby('algorithm')
# rate_per_algorithm=[]
# for alg in algorithms:
# df_a = dfg.get_group(alg)
# sucessrate= float(np.count_nonzero(df_a['similarity']))/df_a.shape[0] * 100
# number = np.count_nonzero(df_a['similarity'])
# rate_per_algorithm.append(number)
dfg = df_nd_ori.groupby('algorithm')
sample_size_per_algorithm=[]
for alg in algorithms:
sample_size_per_algorithm.append(dfg.get_group(alg).shape[0])
plt.figure()
sb.set_context("talk", font_scale=0.7)
a=sb.barplot(y='algorithm', x='similarity', data=df_nd,order=algorithms)
algorithm_names = [algorithm_name_mapping[x] for x in algorithms]
a.set_yticklabels(['%s ($n$=%d )'%(algorithm_names[i], sample_size_per_algorithm[i]) for i in range(algorithms.size) ])
#sb.set_context("talk", font_scale=3.0)
#plt.xticks(rotation="90")
if value_label == None:
value_label = ' Similarity (0~1) by '+ metric
plt.ylabel('algorithm (n = # recons)')
plt.xlabel(value_label)
plt.subplots_adjust(left=0.4,right=0.9, bottom=0.1, top=0.9)
plt.savefig(outputDir + '/'+value_label+'.png', format='png')
#plt.show()
plt.close()
return
def plot_nblast_distance(neuron_distance_csv, outputDir,algorithms=None):
df_nd = pd.read_csv(neuron_distance_csv)
if not path.exists(outputDir):
os.mkdir(outputDir)
if algorithms is None:
algorithms= order_algorithms_by_size(df_nd)
### all algorithm plot
dfg = df_nd.groupby('algorithm')
sample_size_per_algorithm=[]
for alg in algorithms:
sample_size_per_algorithm.append(dfg.get_group(alg).shape[0])
#plot the average node distances
plt.figure()
sb.set_context("talk", font_scale=0.7)
a=sb.barplot(y='algorithm', x='nblast_bi_score', data=df_nd,order=algorithms)
algorithm_name_mapping = rp.get_algorithm_name_dict()
algorithm_names = [algorithm_name_mapping[x] for x in algorithms]
a.set_yticklabels(['%s ($n$=%d )'%(algorithm_names[i], sample_size_per_algorithm[i]) for i in range(algorithms.size) ])
#sb.set_context("talk", font_scale=3.0)
#plt.xticks(rotation="90")
plt.xlabel('Nblast Scores')
plt.subplots_adjust(left=0.6,right=0.95,bottom=0.1, top=0.9)
plt.savefig(outputDir + '/NBlast_Score.png', format='png')
#plt.show()
plt.close()
return
def plot_diadem_score(neuron_distance_csv, outputDir,algorithms=None):
df_nd = pd.read_csv(neuron_distance_csv)
if not path.exists(outputDir):
os.mkdir(outputDir)
if algorithms is None:
algorithms= order_algorithms_by_size(df_nd)
### all algorithm plot
dfg = df_nd.groupby('algorithm')
sample_size_per_algorithm=[]
for alg in algorithms:
sample_size_per_algorithm.append(dfg.get_group(alg).shape[0])
#plot the average node distances
plt.figure()
sb.set_context("talk", font_scale=0.7)
a=sb.barplot(y='algorithm', x='diadem_score', data=df_nd,order=algorithms)
algorithm_name_mapping = rp.get_algorithm_name_dict()
algorithm_names = [algorithm_name_mapping[x] for x in algorithms]
a.set_yticklabels(['%s ($n$=%d )'%(algorithm_names[i], sample_size_per_algorithm[i]) for i in range(algorithms.size) ])
#sb.set_context("talk", font_scale=3.0)
#plt.xticks(rotation="90")
plt.xlabel('Diadem Scores')
plt.subplots_adjust(left=0.5,right=0.95,bottom=0.1, top=0.9)
plt.savefig(outputDir + '/Diadem_score.png', format='png')
#plt.show()
plt.close()
return
def plot_all_score(neuron_distance_csv, outputDir,algorithms=None):
df_nd = pd.read_csv(neuron_distance_csv)
if not path.exists(outputDir):
os.mkdir(outputDir)
if algorithms is None:
algorithms= order_algorithms_by_size(df_nd)
### all algorithm plot
dfg = df_nd.groupby('algorithm')
sample_size_per_algorithm=[]
for alg in algorithms:
sample_size_per_algorithm.append(dfg.get_group(alg).shape[0])
#plot the average node distances
plt.figure()
sb.set_style("white")
#g = sb.lmplot(x="image_id", y="diadem_score", hue="algorithm", data=df_nd,fit_reg=False)
a=sb.barplot(y='algorithm', x=['diadem_score','nblast_bi_score'], data=df_nd,order=algorithms)
algorithm_name_mapping = rp.get_algorithm_name_dict()
algorithm_names = [algorithm_name_mapping[x] for x in algorithms]
a.set_yticklabels(['%s ($n$=%d )'%(algorithm_names[i], sample_size_per_algorithm[i]) for i in range(algorithms.size) ])
sb.set_context("talk", font_scale=3.0)
plt.xticks(rotation="90")
plt.xlabel('All Scores')
plt.subplots_adjust(left=0.5,right=0.95,bottom=0.1, top=0.9)
plt.savefig(outputDir + '/All_scores.png', format='png')
plt.show()
# plt.close()
return
def order_algorithms_by_size(df_data):
algorithms = np.unique(df_data.algorithm)
dfg = df_data.groupby('algorithm')
sample_size_per_algorithm=[]
for alg in algorithms:
sample_size_per_algorithm.append(dfg.get_group(alg).shape[0])
sorted_idex = np.argsort(np.array(sample_size_per_algorithm))
algorithms=algorithms[sorted_idex]
return algorithms
data_DIR="/data/mat/xiaoxiaol/data/big_neuron/BTU"
#output
result_csv_file = data_DIR+"/all_metrics.csv"
nblast_csv_file =data_DIR+"/nblast/nblast_score_with_meta.csv"
diadem_csv_file =data_DIR+"/diadem/diadem_score_with_meta.csv"
df_diadem = pd.read_csv(data_DIR+"/diadem/result_combined.csv")
df_diadem.dropna(inplace=True)
df_diadem = rp.parse_and_add_algorithm_info(df_diadem)
df_diadem.to_csv(diadem_csv_file,index=False)
df_nblast = pd.read_csv(data_DIR+"/nblast/nblast_score_after_postprocessing_6_29.csv")#
df_nblast.dropna(inplace=True)
df_nblast = rp.parse_and_add_algorithm_info(df_nblast)
df_nblast.to_csv(nblast_csv_file,index=False)
df_all= pd.merge(df_diadem, df_nblast, on=['swc_file_name','algorithm','image_id'])
df_all.dropna(inplace=True)
df_all.to_csv(result_csv_file,index=False)
plot_nblast_distance(nblast_csv_file, data_DIR+"/nblast")
plot_similarities(nblast_csv_file, data_DIR+"/nblast",metric='nblast_bi_score')
plot_diadem_score(diadem_csv_file, data_DIR+"/diadem")
plot_similarities(diadem_csv_file, data_DIR+"/diadem",metric='diadem_score')
#plot_all_score(data_DIR+"/all_metrics.csv", data_DIR)
| gpl-3.0 |
xu6148152/Binea_Python_Project | DataAnalysis/intro/movie_lens_test.py | 1 | 1230 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import pandas as pd
unames = ['user_id', 'gender', 'age', 'occupation', 'zip']
users = pd.read_table('users.dat', sep='::', header=None, names=unames)
rnames = ['user_id', 'movie_id', 'rating', 'timestamp']
ratings = pd.read_table('ratings.dat', sep='::', header=None, names=rnames)
mnames = ['movie_id', 'title', 'genres']
movies = pd.read_table('movies.dat', sep='::', header=None, names=mnames)
def test_users():
print(users[:5])
def test_rating():
print(ratings[:5])
def test_movies():
print(movies[:5])
def test_merge_data():
data = pd.merge(pd.merge(ratings, users), movies)
# print(data.ix[0])
mean_rating = data.pivot_table('rating', index='title', columns='gender', aggfunc='mean')
# print(mean_rating[:5])
ratings_by_title = data.groupby('title').size()
# print(ratings_by_title[:10])
active_titles = ratings_by_title.index[ratings_by_title >= 250]
# print(active_titles[:10])
rating_std_by_title = data.groupby('title')['rating'].std()
rating_std_by_title = rating_std_by_title.ix[active_titles]
print(rating_std_by_title.order(ascending=False)[:10])
if __name__ == '__main__':
test_merge_data()
| mit |
phoebe-project/phoebe2-docs | development/tutorials/spots.py | 2 | 3736 | #!/usr/bin/env python
# coding: utf-8
# Advanced: Spots
# ============================
#
# For an introduction features in general, see the [features](./features.ipynb) tutorial.
#
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[1]:
#!pip install -I "phoebe>=2.3,<2.4"
# As always, let's do imports and initialize a logger and a new bundle.
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
# Adding Spots
# ---------------------
# Let's add one spot to each of our stars in the binary.
#
# A spot is a feature, and needs to be attached directly to a component upon creation. Providing a tag for 'feature' is entirely optional - if one is not provided it will be created automatically.
# In[3]:
b.add_feature('spot', component='primary', feature='spot01')
# As a shortcut, we can also call add_spot directly.
# In[4]:
b.add_spot(component='secondary', feature='spot02')
# Relevant Parameters
# -----------------
# A spot is defined by the colatitude (where 0 is defined as the North (spin) Pole) and longitude (where 0 is defined as pointing towards the other star for a binary, or to the observer for a single star) of its center, its angular radius, and the ratio of temperature of the spot to the local intrinsic value.
# In[5]:
print(b.filter(feature='spot01'))
# In[6]:
b.set_value(qualifier='relteff', feature='spot01', value=0.9)
# In[7]:
b.set_value(qualifier='radius', feature='spot01', value=30)
# In[8]:
b.set_value(qualifier='colat', feature='spot01', value=45)
# In[9]:
b.set_value(qualifier='long', feature='spot01', value=90)
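# The four parameters above could equivalently be passed when the spot is created.
# A minimal sketch, assuming `add_spot` forwards parameter values as keyword
# arguments in the same way as `add_feature`:
# b.add_spot(component='primary', feature='spot01',
#            relteff=0.9, radius=30, colat=45, long=90)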
# To see the spot, add a mesh dataset and plot it.
# In[10]:
b.add_dataset('mesh', times=[0,0.25,0.5,0.75,1.0], columns=['teffs'])
# In[11]:
b.run_compute()
# In[12]:
afig, mplfig = b.filter(component='primary', time=0.75).plot(fc='teffs', show=True)
# Spot Corotation
# --------------------
#
# The positions (colat, long) of a spot are defined at t0 (note: t0@system, not necessarily t0_perpass or t0_supconj). If the stars are not synchronous, then the spots will corotate with the star. To illustrate this, let's set the syncpar > 1 and plot the mesh at three different phases from above.
# In[13]:
b.set_value(qualifier='syncpar', component='primary', value=1.5)
# In[14]:
b.run_compute(irrad_method='none')
# At time=t0=0, we can see that the spot is where defined: 45 degrees south of the north pole and 90 degree longitude (where longitude of 0 is defined as pointing towards the companion star at t0).
# In[15]:
print("t0 = {}".format(b.get_value(qualifier='t0', context='system')))
# In[16]:
afig, mplfig = b.plot(time=0, y='ws', fc='teffs', ec='None', show=True)
# At a later time, the spot is still technically at the same coordinates, but longitude of 0 no longer corresponds to pointing to the companion star. The coordinate system has rotated along with the asyncronous rotation of the star.
# In[17]:
afig, mplfig = b.plot(time=0.25, y='ws', fc='teffs', facecmap='YlOrRd', ec='None', show=True)
# In[18]:
afig, mplfig = b.plot(time=0.5, y='ws', fc='teffs', facecmap='YlOrRd', ec='None', show=True)
# In[19]:
ax, artists = b.plot(time=0.75, y='ws', fc='teffs', facecmap='YlOrRd', ec='None', show=True)
# Since the syncpar was set to 1.5, one full orbit later the star (and the spot) has made an extra half-rotation.
# In[20]:
ax, artists = b.plot(time=1.0, y='ws', fc='teffs', facecmap='YlOrRd', ec='None', show=True)
| gpl-3.0 |
abimannans/scikit-learn | sklearn/metrics/classification.py | 95 | 67713 | """Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 and inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta: float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pred
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
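# Illustrative sketch (not part of the original module): micro-averaging
# pools the counts over all labels before dividing. For the docstring
# example above, tp = 2 out of 6 predictions and 6 true labels, so micro
# precision, recall and F-score all equal 2/6, matching the doctest.
def _demo_micro_average():
    y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
    y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
    tp = np.sum(y_true == y_pred)            # 2 correct predictions
    micro_p = tp / float(y_pred.shape[0])    # 2 / 6
    micro_r = tp / float(y_true.shape[0])    # 2 / 6
    return micro_p, micro_r                  # (0.333..., 0.333...)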
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes the individual
labels.
The Hamming loss is upperbounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
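# Illustrative sketch (not part of the original module): the doctest value
# above is just the mean negative natural log of the probability assigned
# to each sample's true label.
def _demo_log_loss_by_hand():
    p_true = np.array([.9, .9, .8, .65])   # prob. given to the true class
    return np.mean(-np.log(p_true))        # ~0.21616, as in the doctest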
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
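# Illustrative sketch (not part of the original module): with +/-1 labels the
# per-sample binary hinge loss is max(0, 1 - y * decision), so predictions
# with margin >= 1 contribute nothing. The decision values are hypothetical.
def _demo_binary_hinge():
    y = np.array([-1, 1, 1])
    decision = np.array([-2.0, 2.0, 0.1])
    losses = np.clip(1 - y * decision, 0, None)   # [0., 0., 0.9]
    return losses.mean()                          # 0.3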
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
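# Illustrative sketch (not part of the original module): the Brier score is
# the mean squared gap between the predicted probability and the 0/1 outcome,
# so the docstring example can be reproduced directly.
def _demo_brier_by_hand():
    y_true = np.array([0, 1, 1, 0])
    y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    return np.mean((y_true - y_prob) ** 2)   # 0.0375, as in the doctest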
| bsd-3-clause |
gem/oq-engine | openquake/hazardlib/lt.py | 1 | 24981 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2020, GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import copy
import collections
import numpy
from openquake.baselib.general import CallableDict
from openquake.hazardlib import geo, source as ohs
from openquake.hazardlib.sourceconverter import (
split_coords_2d, split_coords_3d)
class LogicTreeError(Exception):
"""
Logic tree file contains a logic error.
:param node:
XML node object that causes fail. Used to determine
the affected line number.
All other constructor parameters are passed to :class:`superclass'
<LogicTreeError>` constructor.
"""
def __init__(self, node, filename, message):
self.filename = filename
self.message = message
self.lineno = node if isinstance(node, int) else getattr(
node, 'lineno', '?')
def __str__(self):
return "filename '%s', line %s: %s" % (
self.filename, self.lineno, self.message)
# parse_uncertainty #
def unknown(utype, node, filename):
try:
return float(node.text)
except (TypeError, ValueError):
raise LogicTreeError(node, filename, 'expected single float value')
parse_uncertainty = CallableDict(keymissing=unknown)
@parse_uncertainty.add('sourceModel', 'extendModel')
def smodel(utype, node, filename):
return node.text.strip()
@parse_uncertainty.add('abGRAbsolute')
def abGR(utype, node, filename):
try:
[a, b] = node.text.split()
return float(a), float(b)
except ValueError:
raise LogicTreeError(
node, filename, 'expected a pair of floats separated by space')
@parse_uncertainty.add('incrementalMFDAbsolute')
def incMFD(utype, node, filename):
min_mag, bin_width = (node.incrementalMFD["minMag"],
node.incrementalMFD["binWidth"])
return min_mag, bin_width, ~node.incrementalMFD.occurRates
@parse_uncertainty.add('truncatedGRFromSlipAbsolute')
def trucMFDFromSlip_absolute(utype, node, filename):
slip_rate, rigidity = (node.faultActivityData["slipRate"],
node.faultActivityData["rigidity"])
return slip_rate, rigidity
@parse_uncertainty.add('simpleFaultGeometryAbsolute')
def simpleGeom(utype, node, filename):
if hasattr(node, 'simpleFaultGeometry'):
node = node.simpleFaultGeometry
_validate_simple_fault_geometry(utype, node, filename)
spacing = node["spacing"]
usd, lsd, dip = (~node.upperSeismoDepth, ~node.lowerSeismoDepth,
~node.dip)
coords = split_coords_2d(~node.LineString.posList)
trace = geo.Line([geo.Point(*p) for p in coords])
return trace, usd, lsd, dip, spacing
@parse_uncertainty.add('complexFaultGeometryAbsolute')
def complexGeom(utype, node, filename):
if hasattr(node, 'complexFaultGeometry'):
node = node.complexFaultGeometry
_validate_complex_fault_geometry(utype, node, filename)
spacing = node["spacing"]
edges = []
for edge_node in node.nodes:
coords = split_coords_3d(~edge_node.LineString.posList)
edges.append(geo.Line([geo.Point(*p) for p in coords]))
return edges, spacing
@parse_uncertainty.add('characteristicFaultGeometryAbsolute')
def charGeom(utype, node, filename):
surfaces = []
for geom_node in node.surface:
if "simpleFaultGeometry" in geom_node.tag:
_validate_simple_fault_geometry(utype, geom_node, filename)
trace, usd, lsd, dip, spacing = parse_uncertainty(
'simpleFaultGeometryAbsolute', geom_node, filename)
surfaces.append(geo.SimpleFaultSurface.from_fault_data(
trace, usd, lsd, dip, spacing))
elif "complexFaultGeometry" in geom_node.tag:
_validate_complex_fault_geometry(utype, geom_node, filename)
edges, spacing = parse_uncertainty(
'complexFaultGeometryAbsolute', geom_node, filename)
surfaces.append(geo.ComplexFaultSurface.from_fault_data(
edges, spacing))
elif "planarSurface" in geom_node.tag:
_validate_planar_fault_geometry(utype, geom_node, filename)
nodes = []
for key in ["topLeft", "topRight", "bottomRight", "bottomLeft"]:
nodes.append(geo.Point(getattr(geom_node, key)["lon"],
getattr(geom_node, key)["lat"],
getattr(geom_node, key)["depth"]))
top_left, top_right, bottom_right, bottom_left = tuple(nodes)
surface = geo.PlanarSurface.from_corner_points(
top_left, top_right, bottom_right, bottom_left)
surfaces.append(surface)
else:
raise LogicTreeError(
geom_node, filename, "Surface geometry type not recognised")
if len(surfaces) > 1:
return geo.MultiSurface(surfaces)
else:
return surfaces[0]
# validations
def _validate_simple_fault_geometry(utype, node, filename):
try:
coords = split_coords_2d(~node.LineString.posList)
trace = geo.Line([geo.Point(*p) for p in coords])
except ValueError:
# If the geometry cannot be created then use the LogicTreeError
# to point the user to the incorrect node. Hence, if trace is
# compiled successfully then len(trace) is True, otherwise it is
# False
trace = []
if len(trace):
return
raise LogicTreeError(
node, filename, "'simpleFaultGeometry' node is not valid")
def _validate_complex_fault_geometry(utype, node, filename):
# NB: if the geometry does not conform to the Aki & Richards convention
# this will not be verified here, but will raise an error when the surface
# is created
valid_edges = []
for edge_node in node.nodes:
try:
coords = split_coords_3d(edge_node.LineString.posList.text)
edge = geo.Line([geo.Point(*p) for p in coords])
except ValueError:
# See use of validation error in simple geometry case
# The node is valid if all of the edges compile correctly
edge = []
if len(edge):
valid_edges.append(True)
else:
valid_edges.append(False)
if node["spacing"] and all(valid_edges):
return
raise LogicTreeError(
node, filename, "'complexFaultGeometry' node is not valid")
def _validate_planar_fault_geometry(utype, node, filename):
valid_spacing = node["spacing"]
for key in ["topLeft", "topRight", "bottomLeft", "bottomRight"]:
lon = getattr(node, key)["lon"]
lat = getattr(node, key)["lat"]
depth = getattr(node, key)["depth"]
valid_lon = (lon >= -180.0) and (lon <= 180.0)
valid_lat = (lat >= -90.0) and (lat <= 90.0)
valid_depth = (depth >= 0.0)
is_valid = valid_lon and valid_lat and valid_depth
if not is_valid or not valid_spacing:
raise LogicTreeError(
node, filename, "'planarFaultGeometry' node is not valid")
# apply_uncertainty #
apply_uncertainty = CallableDict()
@apply_uncertainty.add('simpleFaultDipRelative')
def _simple_fault_dip_relative(utype, source, value):
source.modify('adjust_dip', dict(increment=value))
@apply_uncertainty.add('simpleFaultDipAbsolute')
def _simple_fault_dip_absolute(bset, source, value):
source.modify('set_dip', dict(dip=value))
@apply_uncertainty.add('simpleFaultGeometryAbsolute')
def _simple_fault_geom_absolute(utype, source, value):
trace, usd, lsd, dip, spacing = value
source.modify(
'set_geometry',
dict(fault_trace=trace, upper_seismogenic_depth=usd,
lower_seismogenic_depth=lsd, dip=dip, spacing=spacing))
@apply_uncertainty.add('complexFaultGeometryAbsolute')
def _complex_fault_geom_absolute(utype, source, value):
edges, spacing = value
source.modify('set_geometry', dict(edges=edges, spacing=spacing))
@apply_uncertainty.add('characteristicFaultGeometryAbsolute')
def _char_fault_geom_absolute(utype, source, value):
source.modify('set_geometry', dict(surface=value))
@apply_uncertainty.add('abGRAbsolute')
def _abGR_absolute(utype, source, value):
a, b = value
source.mfd.modify('set_ab', dict(a_val=a, b_val=b))
@apply_uncertainty.add('bGRRelative')
def _abGR_relative(utype, source, value):
source.mfd.modify('increment_b', dict(value=value))
@apply_uncertainty.add('maxMagGRRelative')
def _maxmagGR_relative(utype, source, value):
source.mfd.modify('increment_max_mag', dict(value=value))
@apply_uncertainty.add('maxMagGRAbsolute')
def _maxmagGR_absolute(utype, source, value):
source.mfd.modify('set_max_mag', dict(value=value))
@apply_uncertainty.add('incrementalMFDAbsolute')
def _incMFD_absolute(utype, source, value):
min_mag, bin_width, occur_rates = value
source.mfd.modify('set_mfd', dict(min_mag=min_mag, bin_width=bin_width,
occurrence_rates=occur_rates))
@apply_uncertainty.add('truncatedGRFromSlipAbsolute')
def _trucMFDFromSlip_absolute(utype, source, value):
slip_rate, rigidity = value
source.modify('adjust_mfd_from_slip', dict(slip_rate=slip_rate,
rigidity=rigidity))
# ######################### apply_uncertainties ########################### #
def apply_uncertainties(bset_values, src_group):
"""
:param bset_values:
a list of pairs (branchset, value)
:param src_group:
SourceGroup instance
:returns:
A copy of the original group with possibly modified sources
"""
sg = copy.copy(src_group)
sg.sources = []
sg.changes = 0
for source in src_group:
oks = [bset.filter_source(source) for bset, value in bset_values]
if sum(oks): # source not filtered out
src = copy.deepcopy(source)
srcs = []
for (bset, value), ok in zip(bset_values, oks):
if ok and bset.collapsed:
if src.code == b'N':
raise NotImplementedError(
'Collapsing of the logic tree is not implemented '
'for %s' % src)
for br in bset.branches:
newsrc = copy.deepcopy(src)
newsrc.scaling_rate = br.weight
apply_uncertainty(
bset.uncertainty_type, newsrc, br.value)
srcs.append(newsrc)
sg.changes += len(srcs)
elif ok:
if not srcs: # only the first time
srcs.append(src)
apply_uncertainty(bset.uncertainty_type, src, value)
sg.changes += 1
else:
srcs = [copy.copy(source)] # this is ultra-fast
sg.sources.extend(srcs)
return sg
# ######################### sampling ######################## #
def random(size, seed, sampling_method='early_weights'):
"""
:param size: size of the returned array (integer or pair of integers)
:param seed: random seed
:param sampling_method: 'early_weights', 'early_latin', ...
:returns: an array of floats in the range 0..1
You can compare montecarlo sampling with latin square sampling with
the following code:
import matplotlib.pyplot as plt
samples, seed = 10, 42
x, y = random((samples, 2), seed, 'early_latin').T
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.scatter(x, y, color='green') # points on a latin square
x, y = random((samples, 2), seed, 'early_weights').T
plt.scatter(x, y, color='red') # points NOT on a latin square
for x in numpy.arange(0, 1, 1/samples):
for y in numpy.arange(0, 1, 1/samples):
plt.axvline(x)
plt.axhline(y)
plt.show()
"""
numpy.random.seed(seed)
xs = numpy.random.uniform(size=size)
if sampling_method.endswith('latin'):
# https://zmurchok.github.io/2019/03/15/Latin-Hypercube-Sampling.html
try:
s, d = size
except TypeError: # cannot unpack non-iterable int object
return (numpy.argsort(xs) + xs) / size
for i in range(d):
xs[:, i] = (numpy.argsort(xs[:, i]) + xs[:, i]) / s
return xs
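# Illustrative sketch (not part of the original file): with a '*_latin'
# sampling method each column of the result is stratified, i.e. it holds
# exactly one value in each of the `samples` equal-width bins of [0, 1).
def _demo_latin_stratification():
    samples = 5
    xs = random((samples, 2), seed=42, sampling_method='early_latin')
    bins = numpy.floor(xs * samples).astype(int)
    # each column should contain every bin index 0..samples-1 exactly once
    return [sorted(bins[:, i]) for i in range(2)]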
def _cdf(weighted_objects):
weights = []
for obj in weighted_objects:
w = obj.weight
if isinstance(obj.weight, float):
weights.append(w)
else:
weights.append(w['weight'])
return numpy.cumsum(weights)
def sample(weighted_objects, probabilities, sampling_method):
"""
Take random samples of a sequence of weighted objects
:param weighted_objects:
A finite sequence of N objects with a `.weight` attribute.
The weights must sum up to 1.
:param probabilities:
An array of S random numbers in the range 0..1
:return:
A list of S objects extracted randomly
"""
if sampling_method.startswith('early'): # consider the weights
idxs = numpy.searchsorted(_cdf(weighted_objects), probabilities)
elif sampling_method.startswith('late'):
n = len(weighted_objects) # consider all weights equal
idxs = numpy.searchsorted(numpy.arange(1/n, 1, 1/n), probabilities)
# NB: returning an array would break things
return [weighted_objects[idx] for idx in idxs]
Weighted = collections.namedtuple('Weighted', 'object weight')
# used in notebooks for teaching, not in the engine
def random_sample(branchsets, num_samples, seed, sampling_method):
"""
>>> bsets = [[('X', .4), ('Y', .6)], [('A', .2), ('B', .3), ('C', .5)]]
>>> paths = random_sample(bsets, 100, 42, 'early_weights')
>>> collections.Counter(paths)
Counter({'YC': 26, 'XC': 24, 'YB': 17, 'XA': 13, 'YA': 10, 'XB': 10})
>>> paths = random_sample(bsets, 100, 42, 'late_weights')
>>> collections.Counter(paths)
Counter({'XA': 20, 'YA': 18, 'XB': 17, 'XC': 15, 'YB': 15, 'YC': 15})
>>> paths = random_sample(bsets, 100, 42, 'early_latin')
>>> collections.Counter(paths)
Counter({'YC': 31, 'XC': 19, 'YB': 17, 'XB': 13, 'YA': 12, 'XA': 8})
>>> paths = random_sample(bsets, 100, 45, 'late_latin')
>>> collections.Counter(paths)
Counter({'YC': 18, 'XA': 18, 'XC': 16, 'YA': 16, 'XB': 16, 'YB': 16})
"""
probs = random((num_samples, len(branchsets)), seed, sampling_method)
arr = numpy.zeros((num_samples, len(branchsets)), object)
for b, bset in enumerate(branchsets):
arr[:, b] = sample([Weighted(*it) for it in bset], probs[:, b],
sampling_method)
return [''.join(w.object for w in row) for row in arr]
# ######################### branches and branchsets ######################## #
class Branch(object):
"""
Branch object, represents a ``<logicTreeBranch />`` element.
:param bs_id:
BranchSetID of the branchset to which the branch belongs
:param branch_id:
String identifier of the branch
:param weight:
float value of weight assigned to the branch. A text node contents
of ``<uncertaintyWeight />`` child node.
:param value:
The actual uncertainty parameter value. A text node contents
of ``<uncertaintyModel />`` child node. Type depends
on the branchset's uncertainty type.
"""
def __init__(self, bs_id, branch_id, weight, value):
self.bs_id = bs_id
self.branch_id = branch_id
self.weight = weight
self.value = value
self.bset = None
def __repr__(self):
if self.bset:
return '%s%s' % (self.branch_id, self.bset)
else:
return self.branch_id
class BranchSet(object):
"""
Branchset object, represents a ``<logicTreeBranchSet />`` element.
:param uncertainty_type:
String value. According to the spec one of:
gmpeModel
Branches contain references to different GMPEs. Values are parsed
as strings and are supposed to be one of supported GMPEs. See list
at :class:`GMPELogicTree`.
sourceModel
Branches contain references to different PSHA source models. Values
are treated as file names, relatively to base path.
maxMagGRRelative
Different values to add to Gutenberg-Richter ("GR") maximum
magnitude. Value should be interpretable as float.
bGRRelative
Values to add to GR "b" value. Parsed as float.
maxMagGRAbsolute
Values to replace GR maximum magnitude. Values expected to be
lists of floats separated by space, one float for each GR MFD
in a target source in order of appearance.
abGRAbsolute
Values to replace "a" and "b" values of GR MFD. Lists of pairs
of floats, one pair for one GR MFD in a target source.
incrementalMFDAbsolute
Replaces an evenly discretized MFD with the values provided
simpleFaultDipRelative
Increases or decreases the angle of fault dip from that given
in the original source model
simpleFaultDipAbsolute
Replaces the fault dip in the specified source(s)
simpleFaultGeometryAbsolute
Replaces the simple fault geometry (trace, upper seismogenic depth
lower seismogenic depth and dip) of a given source with the values
provided
complexFaultGeometryAbsolute
Replaces the complex fault geometry edges of a given source with
the values provided
characteristicFaultGeometryAbsolute
Replaces the complex fault geometry surface of a given source with
the values provided
truncatedGRFromSlipAbsolute
Updates a TruncatedGR using a slip rate and a rigidity
:param filters:
Dictionary, a set of filters to specify which sources should
the uncertainty be applied to. Represented as branchset element's
attributes in xml:
applyToSources
The uncertainty should be applied only to specific sources.
This filter is required for absolute uncertainties (also
only one source can be used for those). Value should be the list
of source ids. Can be used only in source model logic tree.
applyToSourceType
Can be used in the source model logic tree definition. Allows
to specify to which source type (area, point, simple fault,
complex fault) the uncertainty applies to.
applyToTectonicRegionType
Can be used in both the source model and GMPE logic trees. Allows
to specify to which tectonic region type (Active Shallow Crust,
Stable Shallow Crust, etc.) the uncertainty applies to. This
filter is required for all branchsets in GMPE logic tree.
"""
def __init__(self, uncertainty_type, ordinal=0, filters=None,
collapsed=False):
self.uncertainty_type = uncertainty_type
self.ordinal = ordinal
self.filters = filters or {}
self.collapsed = collapsed
self.branches = []
def sample(self, probabilities, sampling_method):
"""
:param num_samples: the number of samples
:param probabilities: (Ns, Nb) random numbers in the range 0..1
:param sampling_method: the sampling method used
:returns: a list of num_samples lists of branches
"""
out = []
for probs in probabilities: # probs has a value for each branchset
branchset = self
branches = []
while branchset is not None:
if branchset.collapsed:
branch = branchset.branches[0]
else:
x = probs[branchset.ordinal]
[branch] = sample(branchset.branches, [x], sampling_method)
branches.append(branch)
branchset = branch.bset
out.append(branches)
return out
def enumerate_paths(self):
"""
Generate all possible paths starting from this branch set.
:returns:
Generator of two-item tuples. Each tuple contains weight
of the path (calculated as a product of the weights of all path's
branches) and list of path's :class:`Branch` objects. Total sum
of all paths' weights is 1.0
"""
for path in self._enumerate_paths([]):
flat_path = []
weight = 1.0
while path:
path, branch = path
weight *= branch.weight
flat_path.append(branch)
yield weight, flat_path[::-1]
def _enumerate_paths(self, prefix_path):
"""
Recursive (private) part of :func:`enumerate_paths`. Returns generator
of recursive lists of two items, where second item is the branch object
and first one is itself list of two items.
"""
if self.collapsed:
b0 = copy.copy(self.branches[0])
b0.weight = 1.0
branches = [b0]
else:
branches = self.branches
for branch in branches:
path = [prefix_path, branch]
if branch.bset is not None:
yield from branch.bset._enumerate_paths(path)
else:
yield path
def __getitem__(self, branch_id):
"""
Return :class:`Branch` object belonging to this branch set with id
equal to ``branch_id``.
"""
for branch in self.branches:
if branch.branch_id == branch_id:
return branch
raise KeyError(branch_id)
def filter_source(self, source):
# pylint: disable=R0911,R0912
"""
Apply filters to ``source`` and return ``True`` if uncertainty should
be applied to it.
"""
for key, value in self.filters.items():
if key == 'applyToTectonicRegionType':
if value != source.tectonic_region_type:
return False
elif key == 'applyToSourceType':
if value == 'area':
if not isinstance(source, ohs.AreaSource):
return False
elif value == 'point':
# area source extends point source
if (not isinstance(source, ohs.PointSource)
or isinstance(source, ohs.AreaSource)):
return False
elif value == 'simpleFault':
if not isinstance(source, ohs.SimpleFaultSource):
return False
elif value == 'complexFault':
if not isinstance(source, ohs.ComplexFaultSource):
return False
elif value == 'characteristicFault':
if not isinstance(source, ohs.CharacteristicFaultSource):
return False
else:
raise AssertionError("unknown source type '%s'" % value)
elif key == 'applyToSources':
if source and source.source_id not in value:
return False
else:
raise AssertionError("unknown filter '%s'" % key)
# All filters pass, return True.
return True
def get_bset_values(self, ltpath):
"""
:param ltpath:
List of branch IDs
:returns:
A list of pairs [(bset, value), ...]
"""
pairs = []
bset = self
while ltpath:
brid, ltpath = ltpath[0], ltpath[1:]
pairs.append((bset, bset[brid].value))
bset = bset[brid].bset
if bset is None:
break
return pairs
def __str__(self):
return repr(self.branches)
def __repr__(self):
return '<%s>' % ' '.join(br.branch_id for br in self.branches)
| agpl-3.0 |
jpeterbaker/maxfield | lib/geometry.py | 1 | 12050 | '''
This file is part of Maxfield.
Maxfield is a planning tool for helping Ingress players to determine
an efficient plan to create many in-game fields.
Copyright (C) 2015 by Jonathan Baker: babamots@gmail.com
Maxfield is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Maxfield is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Maxfield. If not, see <http://www.gnu.org/licenses/>.
'''
# Portals, triangles and the like
import numpy as np
from itertools import combinations
def LLtoRads(pts):
pts = pts.astype(float)
pts *= np.pi / 180
return pts
def radstoxyz(pts,R=1):
# Converts latitude/longitude (in radians) to xyz coords
# Returns corresponding n x 3 array
pts = pts.reshape([-1,2])
lat = pts[:,0]
lng = pts[:,1]
# The radius of the latitude line
r = np.cos(lat)
x = np.cos(lng)*r
y = np.sin(lng)*r
z = np.sin(lat)
xyz = np.column_stack([x,y,z])
xyz *= R
return xyz
def xyztorads(pts,R=1):
pts = pts.reshape([-1,3])
pts = pts/R
x = pts[:,0]
y = pts[:,1]
z = pts[:,2]
lat = np.arcsin(z)
lng = np.arctan2(y,x)
return np.column_stack([lat,lng])
def greatArcAng(x,y):
'''
x,y should be nx2 arrays expressing latitude,longitude (in radians)
Great arc angle between x and y (in radians)
'''
# If either is a single point (not in a list) return a 1-d array
flatten = y.ndim==1 or x.ndim==1
# Formula taken from Wikipedia, accurate for distances great and small
x = x.reshape([-1,2])
y = y.reshape([-1,2])
nx = x.shape[0]
ny = y.shape[0]
# After reshaping, arithmetic operators produce distance-style matrices
latx = np.tile(x[:,0],[ny,1])
lngx = np.tile(x[:,1],[ny,1])
laty = np.tile(y[:,0],[nx,1]).T
lngy = np.tile(y[:,1],[nx,1]).T
dlng = np.abs(lngx-lngy)
sinx = np.sin(latx)
cosx = np.cos(latx)
siny = np.sin(laty)
cosy = np.cos(laty)
sind = np.sin(dlng)
cosd = np.cos(dlng)
numer = np.sqrt( (cosx*sind)**2 + (cosy*sinx-siny*cosx*cosd)**2 )
denom = siny*sinx + cosy*cosx*cosd
# great arc angle containing x and y
angles = np.arctan2(numer,denom)
if flatten:
angles.shape = -1
return angles
def sphereDist(x,y,R=6371000):
'''
x,y are n x 2 arrays with lattitude, longitude in radians
'''
sigma = greatArcAng(x,y)
return R*sigma
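# Illustrative sketch (not part of the original file): the North Pole and a
# point on the equator are a quarter of a great circle apart, so the result
# is R * pi / 2 (a single value of about 1.0e7 with the default Earth radius).
def _demo_sphere_dist():
    pole = np.array([np.pi / 2, 0.0])      # lat, lng in radians
    equator = np.array([0.0, 0.0])
    return sphereDist(pole, equator)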
def sphereTriContains(pts,x):
'''
pts is a 3 x 3 array representing vertices of a triangle
pts[i] contains the x,y,z coords of vertex i
x is a 2-array representing the test point
points should be represented in xyz format
returns True iff x is inside the triangle
yes, three points make two triangles, but we assume the small one
behavior in border cases not guaranteed
'''
x = x.reshape([-1,3])
# Find vectors orthogonal to the planes through origin and triangle sides
crosses = np.cross( pts[[1,2,0]] , pts[[2,0,1]] )
xsign = np.dot( crosses,x.T )
psign = np.sum(crosses*pts,1).reshape([3,1])
# Check whether opposite vertex is always on same side of plane as x
return np.all( xsign*psign > 0,0)
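# Illustrative sketch (not part of the original file): the spherical triangle
# with vertices on the x, y and z axes contains the middle of the positive
# octant but not its antipode.
def _demo_sphere_tri_contains():
    tri = np.eye(3)                               # vertices in xyz format
    inside = np.array([1., 1., 1.]) / np.sqrt(3)
    # -> (array([ True]), array([False]))
    return sphereTriContains(tri, inside), sphereTriContains(tri, -inside)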
def planeDist(x,y=None):
x = x.reshape([-1,2])
if y is None:
y = x
else:
y = y.reshape([-1,2])
#TODO this is not a clever way of making the matrix
return np.sqrt(np.array([ [sum( (a-b)**2 ) for a in y] for b in x ]))
def makeLace(n):
# sequence of perimeter nodes to hit for a lacing-style triangulation
# begins with the edge 1,-1
lace = np.arange(1,n//2)
lace = np.vstack([lace,(-lace)%n])
lace = lace.T.reshape(-1)
lace = list(lace)
if n%2==1:
lace.append(n//2)
return lace
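# Illustrative sketch (not part of the original file): the lacing order
# zig-zags between the two ends of the perimeter, so a hexagon is laced as
# 1, 5, 2, 4 and a pentagon as 1, 4, 2 (the edge 1,-1 comes first).
def _demo_make_lace():
    return makeLace(6), makeLace(5)   # ([1, 5, 2, 4], [1, 4, 2])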
def rotate(x):
# rotate the vector(s) in x by one quarter turn counter-clockwise
if x.ndim == 1:
x[[0,1]] = [-x[1],x[0]]
else:
x[:,[0,1]] = x[:,[1,0]]
x[:,0] *= -1
def norms(x):
'Norm per row of x'
return np.sqrt(np.sum(x**2,1))
def gnomonicProj(pts,ptsxyz=None):
'''
pts should be in lat/lng
Uses the centroid of pts as the center, North Pole as positive y-direction
This is only guaranteed to work if no two points are more than 90 degrees apart (great arcwise)
This is about 10,000 km across the surface of Earth
'''
if ptsxyz is None:
ptsxyz = radstoxyz(pts)
# We'll project onto the plane tangent at base
basexyz = ptsxyz.mean(0)
basexyz /= np.linalg.norm(basexyz)
base = xyztorads(basexyz).reshape(-1)
# We'll use the triangle base - portal - North Pole
# The angles at these vertices are, respectively A - B - C
# The corresponding lowercase letter is arc-angle of the opposite edge
a = np.pi/2-pts[:,0]
b = np.pi/2-base[0]
c = greatArcAng(base,pts)
C = base[1] - pts[:,1]
# http://en.wikipedia.org/wiki/Spherical_trigonometry#Identities
# A = arcsin[ sin(a)*sin(C) / sin(c) ]
# A = arccos[ { cos(a)-cos(c)*cos(b) } / { sin(c)*sin(b) } ]
sinA = np.sin(a)*np.sin(C) / np.sin(c)
cosA= (np.cos(a)-np.cos(c)*np.cos(b))/(np.sin(c)*np.sin(b))
# arcsin can only fall in [-pi/2,pi/2]
# we can find obtuse angles this way
theta = np.arctan2(sinA,cosA)
# Distance from base
r = np.tan(c).reshape([-1,1])
# theta measures counter-clockwise from north
xy = np.column_stack([ -np.sin(theta) , np.cos(theta) ])*r
return xy
def between(a,b,pts):
# For use with gerperim
# Returns the index of a point in pts "left" of the ray a-b
# diff will be orthogonal to the line through a,b
diff = pts[a]-pts[b]
rotate(diff)
# maximum inner product with diff
c = np.argmax(np.dot(pts,diff))
if c == a or c == b:
return None
else:
return c
def getPerim(pts):
'''
Returns a list of indices of the points on the "outside" (in the boundary of the convex hull)
This is for planar points (spherical points should be run through the gnomonic projection first)
'''
# Point with the greatest x-coordinate is certainly outside
hix = np.argmax(pts[:,0])
# Same goes for the point with the least x-coordinate
lox = np.argmin(pts[:,0])
perim = {hix:lox , lox:hix}
perimlist = []
a = hix
b = lox
aNeverChanged = True
while a != hix or aNeverChanged:
c = between(a,b,pts)
if c is None:
# there is no perimeter point between a and b
# proceed to the next adjacent pair
perimlist.append(a)
a = b
b = perim[b]
aNeverChanged = False
else:
# c is on the perimeter after a
# we will next look for another point between a,c
perim[a] = c
perim[c] = b
b = c
return perimlist
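def _exampleGetPerim():
    # Minimal illustrative sketch with hypothetical points: for a unit square
    # plus one interior point, only the four corners are reported as perimeter.
    pts = np.array([[0.,0.],[1.,0.],[1.,1.],[0.,1.],[0.5,0.5]])
    assert sorted(getPerim(pts)) == [0, 1, 2, 3]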
def arc(a,b,c):
'''
Finds the arc through three points in a plane
returns z,r,ta,tb,tc
z = [x,y] is the center of the arc
r is the radius of the arc
a = z+r*[cos(ta),sin(ta)]
b = z+r*[cos(tb),sin(tb)]
c = z+r*[cos(tc),sin(tc)]
'''
# center points on b
ab = a-b
cb = c-b
ac = a-c
# squared lengths
slab = ab[0]**2+ab[1]**2
slcb = cb[0]**2+cb[1]**2
# length
lac = (ac[0]**2+ac[1]**2)**.5
# this is from wikipedia http://en.wikipedia.org/wiki/Circumscribed_circle
D = 2*(ab[0]*cb[1]-ab[1]*cb[0])
z = np.array([ cb[1]*slab - ab[1]*slcb ,\
ab[0]*slcb - cb[0]*slab ])/D + b
# the angle a,b,c
t = np.abs( np.arctan(ab[1]/ab[0]) - np.arctan(cb[1]/cb[0]) )
# the angle a,z,c is 2*t
# the angles a,c,z and c,a,z are equal (isosceles triangle)
# a,c,z + c,a,z + a,z,c = 180
acz = np.pi/2-t
# d is the midpoint of ac
lad = lac/2 # the length of ad
# a,d,z is a right triangle with hypotenuse az
# and the angle d,a,z equals a,c,z (d lies on the segment ac)
r = lad/np.cos(acz)
az = a-z
bz = b-z
cz = c-z
ta = np.arctan2(az[1],az[0])
tb = np.arctan2(bz[1],bz[0])
tc = np.arctan2(cz[1],cz[0])
return z,r,ta,tb,tc
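def _exampleArc():
    # Minimal worked example with hypothetical points: three points on the unit
    # circle recover center (0,0) and radius 1.
    a = np.array([1., 0.])
    b = np.array([0., 1.])
    c = np.array([-1., 0.])
    z, r, ta, tb, tc = arc(a, b, c)
    assert np.allclose(z, [0., 0.]) and np.allclose(r, 1.)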
def orthplane(xyz):
'''
xyz should be a 3 x 3 numpy array
returns the vector orthogonal to the plane passing through the rows of xyz such that all( np.dot(xyz,p) > 0 )
'''
# treat xyz[0] as origin
a,b,c = tuple(xyz[1]-xyz[0])
d,e,f = tuple(xyz[2]-xyz[0])
# cross product of other shifted vectors
p = np.array( [b*f-c*e,
c*d-a*f,
a*e-b*d])
return p/np.linalg.norm(p)
def commonHemisphere(xyz,getDisproof=False):
'''
xyz should be an n x 3 numpy array with point coordinates
if it exists, returns (p,None)
p is a 3-vector such that all( np.dot(xyz,p) > 0 )
p is orthogonal to the plane through three of the points
otherwise, returns (None,inds)
inds are the indices of 4 non-co-hemispherical points
inds are None if getDisproof is False (since these take extra time to compute with this implementation)
the plane through the origin and orthogonal to p has all points of xyz on the same side
this defines a hemisphere appropriate for gnomonic projection
'''
n = xyz.shape[0]
if n < 4:
if n == 0:
return (np.array([1,0,0]),None)
if n == 1:
return (xyz,None)
if n == 2:
return (np.mean(xyz,0),None)
if n == 3:
return (orthplane(xyz),None)
for tri in combinations(xyz,3):
p = orthplane(tri)
if np.all(np.dot(xyz,p) > 0):
print np.dot(xyz,p)
return (p,None)
if not getDisproof:
return (None,None)
range1_4 = range(1,4)
range4 = range(4)
for quad in combinations(range(n),4):
for j in range4:
noj = [ quad[j-i] for i in range1_4 ]
p = orthplane(xyz[noj])
if np.dot(xyz[quad[j]],p) > 0:
# The convex hull of these four doesn't contain the origin
break
else:
# The loop exited normally
# The current quad is a counter example
return (None,quad)
print xyz
print "We shouldn't be here"
if __name__ == '__main__':
# Test common hemisphere finder
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
# xyz = np.random.randn(7,3)
# xyz = (xyz.T/norms(xyz)).T
xyz = np.array([[-0.30581918,-0.46686818,-0.82976426],
[ 0.59465481, 0.19030562, 0.78113342],
[ 0.8265863 ,-0.56278406,-0.00540285],
[-0.50141151, 0.78501969, 0.36377271],
[ 0.23231895, 0.90232697,-0.36308944],
[-0.33705904,-0.56767828, 0.75108759],
[-0.32538217, 0.94383169, 0.05751689]])
# p,pts = commonHemisphere(xyz,True)
ax.plot(xyz[:,0],xyz[:,1],xyz[:,2],'bo')
# if p is None:
# print 'disproof found'
# ax.plot([0,xyz[pts[0],0]],[0,xyz[pts[0],1]],[0,xyz[pts[0],2]],'bo-')
# ax.plot([0,xyz[pts[1],0]],[0,xyz[pts[1],1]],[0,xyz[pts[1],2]],'ko-')
# ax.plot([0,xyz[pts[2],0]],[0,xyz[pts[2],1]],[0,xyz[pts[2],2]],'go-')
# ax.plot([0,xyz[pts[3],0]],[0,xyz[pts[3],1]],[0,xyz[pts[3],2]],'ro-')
# else:
# print 'plane found'
# print np.dot(xyz,p)
# ax.plot([0,p[0]],[0,p[1]],[0,p[2]],'ko-')
# ax.plot([0],[0],[0],'r*')
# ax.plot(xyz[pts,0],xyz[pts,1],xyz[pts,2],'r*')
plt.show()
| gpl-3.0 |
pdellaert/ansible | hacking/aws_config/build_iam_policy_framework.py | 25 | 11861 | # Requires pandas, bs4, html5lib, and lxml
#
# Call script with the output from aws_resource_actions callback, e.g.
# python build_iam_policy_framework.py ['ec2:AuthorizeSecurityGroupEgress', 'ec2:AuthorizeSecurityGroupIngress', 'sts:GetCallerIdentity']
#
# The sample output:
# {
# "Version": "2012-10-17",
# "Statement": [
# {
# "Sid": "AnsibleEditor0",
# "Effect": "Allow",
# "Action": [
# "ec2:AuthorizeSecurityGroupEgress",
# "ec2:AuthorizeSecurityGroupIngress"
# ],
# "Resource": "arn:aws:ec2:${Region}:${Account}:security-group/${SecurityGroupId}"
# },
# {
# "Sid": "AnsibleEditor1",
# "Effect": "Allow",
# "Action": [
# "sts:GetCallerIdentity"
# ],
# "Resource": "*"
# }
# ]
# }
#
# Policy troubleshooting:
# - If there are more actions in the policy than you provided, AWS has documented dependencies for some of your actions and
# those have been added to the policy.
# - If there are fewer actions in the policy than you provided, some of your actions are not in the IAM table of actions for
# that service. For example, the API call s3:DeleteObjects does not actually correlate to the permission needed in a policy.
# In this case s3:DeleteObject is the permission required to allow both the s3:DeleteObjects action and the s3:DeleteObject action.
# - The policies output are only as accurate as the AWS documentation. If the policy does not permit the
# necessary actions, look for undocumented dependencies. For example, redshift:CreateCluster requires ec2:DescribeVpcs,
# ec2:DescribeSubnets, ec2:DescribeSecurityGroups, and ec2:DescribeInternetGateways, but AWS does not document this.
#
import json
import requests
import sys
missing_dependencies = []
try:
import pandas as pd
except ImportError:
missing_dependencies.append('pandas')
try:
import bs4
except ImportError:
missing_dependencies.append('bs4')
try:
import html5lib
except ImportError:
missing_dependencies.append('html5lib')
try:
import lxml
except ImportError:
missing_dependencies.append('lxml')
irregular_service_names = {
'a4b': 'alexaforbusiness',
'appstream': 'appstream2.0',
'acm': 'certificatemanager',
'acm-pca': 'certificatemanagerprivatecertificateauthority',
'aws-marketplace-management': 'marketplacemanagementportal',
'ce': 'costexplorerservice',
'cognito-identity': 'cognitoidentity',
'cognito-sync': 'cognitosync',
'cognito-idp': 'cognitouserpools',
'cur': 'costandusagereport',
'dax': 'dynamodbacceleratordax',
'dlm': 'datalifecyclemanager',
'dms': 'databasemigrationservice',
'ds': 'directoryservice',
'ec2messages': 'messagedeliveryservice',
'ecr': 'ec2containerregistry',
'ecs': 'elasticcontainerservice',
'eks': 'elasticcontainerserviceforkubernetes',
'efs': 'elasticfilesystem',
'es': 'elasticsearchservice',
'events': 'cloudwatchevents',
'firehose': 'kinesisfirehose',
'fms': 'firewallmanager',
'health': 'healthapisandnotifications',
'importexport': 'importexportdiskservice',
'iot1click': 'iot1-click',
'kafka': 'managedstreamingforkafka',
'kinesisvideo': 'kinesisvideostreams',
'kms': 'keymanagementservice',
'license-manager': 'licensemanager',
'logs': 'cloudwatchlogs',
'opsworks-cm': 'opsworksconfigurationmanagement',
'mediaconnect': 'elementalmediaconnect',
'mediaconvert': 'elementalmediaconvert',
'medialive': 'elementalmedialive',
'mediapackage': 'elementalmediapackage',
'mediastore': 'elementalmediastore',
'mgh': 'migrationhub',
'mobiletargeting': 'pinpoint',
'pi': 'performanceinsights',
'pricing': 'pricelist',
'ram': 'resourceaccessmanager',
'resource-groups': 'resourcegroups',
'sdb': 'simpledb',
'servicediscovery': 'cloudmap',
'serverlessrepo': 'serverlessapplicationrepository',
'sms': 'servermigrationservice',
'sms-voice': 'pinpointsmsandvoiceservice',
'sso-directory': 'ssodirectory',
'ssm': 'systemsmanager',
'ssmmessages': 'sessionmanagermessagegatewayservice',
'states': 'stepfunctions',
'sts': 'securitytokenservice',
'swf': 'simpleworkflowservice',
'tag': 'resourcegrouptaggingapi',
'transfer': 'transferforsftp',
'waf-regional': 'wafregional',
'wam': 'workspacesapplicationmanager',
'xray': 'x-ray'
}
irregular_service_links = {
'apigateway': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_manageamazonapigateway.html'
],
'aws-marketplace': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsmarketplace.html',
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsmarketplacemeteringservice.html',
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsprivatemarketplace.html'
],
'discovery': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_applicationdiscovery.html'
],
'elasticloadbalancing': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_elasticloadbalancing.html',
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_elasticloadbalancingv2.html'
],
'globalaccelerator': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_globalaccelerator.html'
]
}
def get_docs_by_prefix(prefix):
amazon_link_form = 'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazon{0}.html'
aws_link_form = 'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_aws{0}.html'
if prefix in irregular_service_links:
links = irregular_service_links[prefix]
else:
if prefix in irregular_service_names:
prefix = irregular_service_names[prefix]
links = [amazon_link_form.format(prefix), aws_link_form.format(prefix)]
return links
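def _example_get_docs_by_prefix():
    # Minimal illustrative sketch: 'sts' is an irregular prefix, so it resolves to
    # the securitytokenservice documentation pages built from the two link forms.
    links = get_docs_by_prefix('sts')
    assert links == [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazonsecuritytokenservice.html',
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awssecuritytokenservice.html'
    ]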
def get_html(links):
html_list = []
for link in links:
html = requests.get(link).content
try:
parsed_html = pd.read_html(html)
html_list.append(parsed_html)
except ValueError as e:
if 'No tables found' in str(e):
pass
else:
raise e
return html_list
def get_tables(service):
links = get_docs_by_prefix(service)
html_list = get_html(links)
action_tables = []
arn_tables = []
for df_list in html_list:
for df in df_list:
table = json.loads(df.to_json(orient='split'))
table_data = table['data'][0]
if 'Actions' in table_data and 'Resource Types (*required)' in table_data:
action_tables.append(table['data'][1::])
elif 'Resource Types' in table_data and 'ARN' in table_data:
arn_tables.append(table['data'][1::])
# Action table indices:
# 0: Action, 1: Description, 2: Access level, 3: Resource type, 4: Condition keys, 5: Dependent actions
# ARN tables indices:
# 0: Resource type, 1: ARN template, 2: Condition keys
return action_tables, arn_tables
def add_dependent_action(resources, dependency):
resource, action = dependency.split(':')
if resource in resources:
resources[resource].append(action)
else:
resources[resource] = [action]
return resources
def get_dependent_actions(resources):
for service in dict(resources):
action_tables, arn_tables = get_tables(service)
for found_action_table in action_tables:
for action_stuff in found_action_table:
if action_stuff is None:
continue
if action_stuff[0] in resources[service] and action_stuff[5]:
dependencies = action_stuff[5].split()
if isinstance(dependencies, list):
for dependency in dependencies:
resources = add_dependent_action(resources, dependency)
else:
resources = add_dependent_action(resources, dependencies)
return resources
def get_actions_by_service(resources):
service_action_dict = {}
dependencies = {}
for service in resources:
action_tables, arn_tables = get_tables(service)
# Create dict of the resource type to the corresponding ARN
arn_dict = {}
for found_arn_table in arn_tables:
for arn_stuff in found_arn_table:
arn_dict["{0}*".format(arn_stuff[0])] = arn_stuff[1]
# Create dict of the action to the corresponding ARN
action_dict = {}
for found_action_table in action_tables:
for action_stuff in found_action_table:
if action_stuff[0] is None:
continue
if arn_dict.get(action_stuff[3]):
action_dict[action_stuff[0]] = arn_dict[action_stuff[3]]
else:
action_dict[action_stuff[0]] = None
service_action_dict[service] = action_dict
return service_action_dict
def get_resource_arns(aws_actions, action_dict):
resource_arns = {}
for resource_action in aws_actions:
resource, action = resource_action.split(':')
if action not in action_dict:
continue
if action_dict[action] is None:
resource = "*"
else:
resource = action_dict[action].replace("${Partition}", "aws")
if resource not in resource_arns:
resource_arns[resource] = []
resource_arns[resource].append(resource_action)
return resource_arns
def get_resources(actions):
resources = {}
for action in actions:
resource, action = action.split(':')
if resource not in resources:
resources[resource] = []
resources[resource].append(action)
return resources
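def _example_get_resources():
    # Minimal illustrative sketch with hypothetical actions: actions are grouped
    # by their service prefix before the documentation tables are scraped.
    actions = ['ec2:AuthorizeSecurityGroupEgress', 'ec2:AuthorizeSecurityGroupIngress',
               'sts:GetCallerIdentity']
    expected = {'ec2': ['AuthorizeSecurityGroupEgress', 'AuthorizeSecurityGroupIngress'],
                'sts': ['GetCallerIdentity']}
    assert get_resources(actions) == expected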
def combine_arn_actions(resources, service_action_arn_dict):
arn_actions = {}
for service in service_action_arn_dict:
service_arn_actions = get_resource_arns(aws_actions, service_action_arn_dict[service])
for resource in service_arn_actions:
if resource in arn_actions:
arn_actions[resource].extend(service_arn_actions[resource])
else:
arn_actions[resource] = service_arn_actions[resource]
return arn_actions
def combine_actions_and_dependent_actions(resources):
aws_actions = []
for resource in resources:
for action in resources[resource]:
aws_actions.append('{0}:{1}'.format(resource, action))
return set(aws_actions)
def get_actions_restricted_by_arn(aws_actions):
resources = get_resources(aws_actions)
resources = get_dependent_actions(resources)
service_action_arn_dict = get_actions_by_service(resources)
aws_actions = combine_actions_and_dependent_actions(resources)
return combine_arn_actions(aws_actions, service_action_arn_dict)
def main(aws_actions):
arn_actions = get_actions_restricted_by_arn(aws_actions)
statement = []
for resource_restriction in arn_actions:
statement.append({
"Sid": "AnsibleEditor{0}".format(len(statement)),
"Effect": "Allow",
"Action": arn_actions[resource_restriction],
"Resource": resource_restriction
})
policy = {"Version": "2012-10-17", "Statement": statement}
print(json.dumps(policy, indent=4))
if __name__ == '__main__':
if missing_dependencies:
sys.exit('Missing Python libraries: {0}'.format(', '.join(missing_dependencies)))
actions = sys.argv[1:]
if len(actions) == 1:
actions = sys.argv[1].split(',')
aws_actions = [action.strip('[], "\'') for action in actions]
main(aws_actions)
| gpl-3.0 |
timmie/cartopy | lib/cartopy/examples/tick_labels.py | 6 | 1724 | __tags__ = ['Miscellanea']
"""
This example demonstrates adding tick labels to maps on rectangular
projections using special tick formatters.
"""
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import matplotlib.pyplot as plt
def main():
plt.figure(figsize=(8, 10))
# Label axes of a Plate Carree projection with a central longitude of 180:
ax1 = plt.subplot(211, projection=ccrs.PlateCarree(central_longitude=180))
ax1.set_global()
ax1.coastlines()
ax1.set_xticks([0, 60, 120, 180, 240, 300, 360], crs=ccrs.PlateCarree())
ax1.set_yticks([-90, -60, -30, 0, 30, 60, 90], crs=ccrs.PlateCarree())
lon_formatter = LongitudeFormatter(zero_direction_label=True)
lat_formatter = LatitudeFormatter()
ax1.xaxis.set_major_formatter(lon_formatter)
ax1.yaxis.set_major_formatter(lat_formatter)
# Label axes of a Mercator projection without degree symbols in the labels
# and formatting labels to include 1 decimal place:
ax2 = plt.subplot(212, projection=ccrs.Mercator())
ax2.set_global()
ax2.coastlines()
ax2.set_xticks([-180, -120, -60, 0, 60, 120, 180], crs=ccrs.PlateCarree())
ax2.set_yticks([-78.5, -60, -25.5, 25.5, 60, 80], crs=ccrs.PlateCarree())
lon_formatter = LongitudeFormatter(number_format='.1f',
degree_symbol='',
dateline_direction_label=True)
lat_formatter = LatitudeFormatter(number_format='.1f',
degree_symbol='')
ax2.xaxis.set_major_formatter(lon_formatter)
ax2.yaxis.set_major_formatter(lat_formatter)
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
trachelr/mne-python | examples/inverse/plot_label_from_stc.py | 31 | 3963 | """
=================================================
Generate a functional label from source estimates
=================================================
Threshold source estimates and produce a functional label. The label
is typically the region of interest that contains high values.
Here we compare the average time course in the anatomical label obtained
by FreeSurfer segmentation and the average time course from the
functional label. As expected the time course in the functional
label yields higher values.
"""
# Author: Luke Bloy <luke.bloy@gmail.com>
# Alex Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
subject = 'sample'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Compute a label/ROI based on the peak power between 80 and 120 ms.
# The label bankssts-lh is used for the comparison.
aparc_label_name = 'bankssts-lh'
tmin, tmax = 0.080, 0.120
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src'] # get the source space
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori='normal')
# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()
# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.
func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True,
subjects_dir=subjects_dir, connected=True)
# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]
# load the anatomical ROI for comparison
anat_label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
# extract the anatomical time course for each label
stc_anat_label = stc.in_label(anat_label)
pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0]
stc_func_label = stc.in_label(func_label)
pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]
# flip the pca so that the max power between tmin and tmax is positive
pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))])
pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_anat))])
###############################################################################
# plot the time courses....
plt.figure()
plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k',
label='Anatomical %s' % aparc_label_name)
plt.plot(1e3 * stc_func_label.times, pca_func, 'b',
label='Functional %s' % aparc_label_name)
plt.legend()
plt.show()
###############################################################################
# plot brain in 3D with PySurfer if available
brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir)
brain.show_view('lateral')
# show both labels
brain.add_label(anat_label, borders=True, color='k')
brain.add_label(func_label, borders=True, color='b')
| bsd-3-clause |
srodney/hstsntools | filters.py | 1 | 9454 | # 2014.04.29
# S.Rodney
# HST Filter transmission curves: plotting and such
import numpy as np
from matplotlib import pylab as pl
import os
topdir = os.path.abspath( '.' )
try :
sndataroot = os.environ['SNDATA_ROOT']
os.chdir( sndataroot+'/filters/HST_CANDELS')
w435, f435 = np.loadtxt( 'ACS_WFC_F435W.dat', unpack=True )
w606, f606 = np.loadtxt( 'ACS_WFC_F606W.dat', unpack=True )
w625, f625 = np.loadtxt( 'ACS_WFC_F625W.dat', unpack=True )
w814, f814 = np.loadtxt( 'ACS_WFC_F814W.dat', unpack=True )
w350, f350 = np.loadtxt( 'WFC3_UVIS_F350LP.dat', unpack=True )
w606u, f606u = np.loadtxt( 'WFC3_UVIS_F606W.dat', unpack=True )
w763u, f763u = np.loadtxt( 'WFC3_UVIS_F763M.dat', unpack=True )
w845u, f845u = np.loadtxt( 'WFC3_UVIS_F845M.dat', unpack=True )
w127, f127 = np.loadtxt( 'WFC3_IR_F127M.dat', unpack=True )
w125, f125 = np.loadtxt( 'WFC3_IR_F125W.dat', unpack=True )
w160, f160 = np.loadtxt( 'WFC3_IR_F160W.dat', unpack=True )
w153, f153 = np.loadtxt( 'WFC3_IR_F153M.dat', unpack=True )
w139, f139 = np.loadtxt( 'WFC3_IR_F139M.dat', unpack=True )
w140, f140 = np.loadtxt( 'WFC3_IR_F140W.dat', unpack=True )
os.chdir( sndataroot+'/filters/Bessell90')
wB, fB = np.loadtxt( 'Bessell90_B.dat', unpack=True )
wV, fV = np.loadtxt( 'Bessell90_V.dat', unpack=True )
wR, fR = np.loadtxt( 'Bessell90_R.dat', unpack=True )
wI, fI = np.loadtxt( 'Bessell90_I.dat', unpack=True )
except KeyError :
pass
finally :
os.chdir(topdir)
def filtername2datfile( filtername, camera=None):
""" Given an abbreviated filter name, returns the name of the .dat file
containing the transmission curve.
"""
fname = filtername.upper()
if fname.startswith('F1') : return( 'WFC3_IR_%s.dat'%fname )
elif 'UV' in camera.upper():
return( 'WFC3_UVIS_%s.dat'%fname )
elif 'ACS' in camera.upper():
return( 'ACS_WFC_%s.dat'%fname )
elif fname=='F350LP' :
return( 'WFC3_UVIS_%s.dat'%fname )
else :
print("Must specify a camera for filter %s."%fname)
return(None)
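def _example_filtername2datfile():
    # Minimal illustrative sketch: IR filters are resolved from the F1xx prefix
    # alone, while other filters need the camera argument.
    assert filtername2datfile('F160W') == 'WFC3_IR_F160W.dat'
    assert filtername2datfile('F814W', camera='ACS') == 'ACS_WFC_F814W.dat'
    assert filtername2datfile('F606W', camera='UVIS') == 'WFC3_UVIS_F606W.dat'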
def computeScaling( filt1, filt2, camera1=None, camera2=None ) :
"""determine the flux scaling factor that should be multiplied to
filt1 to match the throughput of filt2. This returns just a
single number, effectively assuming the source SED is flat across
the bandpass, so that we just need to correct for total
throughput, not for the shape of the filter.
"""
from scipy import integrate as scint
if filt1.lower().startswith('f') :
filt1 = filtername2datfile( filt1, camera=camera1 )
if filt2.lower().startswith('f') :
filt2 = filtername2datfile( filt2, camera=camera2 )
if not filt1.endswith('.dat') or not filt2.endswith('.dat') :
print("Must specify a filter name (e.g. F160W) or a .dat file.")
return( None )
# read in the transmission curves for filters 1 and 2
topdir = os.path.abspath( '.' )
sndataroot = os.environ['SNDATA_ROOT']
os.chdir( sndataroot+'/filters/HST')
w1, f1 = np.loadtxt( filt1, unpack=True )
w2, f2 = np.loadtxt( filt2, unpack=True )
os.chdir( topdir )
# integrate
int1 = scint.simps( f1, w1 )
int2 = scint.simps( f2, w2 )
# divide
return( int2 / int1 )
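# Hypothetical usage sketch (assumes the filter .dat files are present under
# $SNDATA_ROOT/filters/HST and that flux_f125w is a measured flux):
#   scale = computeScaling('F125W', 'F127M')
#   flux_f127m_estimate = flux_f125w * scale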
def computeScaling2to1( filt1, filt2, filt3,
camera1=None, camera2=None, camera3=None) :
"""Determine the flux scaling factor for matching the sum of filt1+filt2
to filt3. This returns the value that should be multiplied to
(filt1+filt2) to match the throughput of filt3. This returns just a
single number, effectively assuming the source SED is flat across
the bandpass, so that we just need to correct for total
throughput, not for the shape of the filter.
"""
from scipy import integrate as scint
if filt1.lower().startswith('f') :
filt1 = filtername2datfile( filt1, camera=camera1 )
if filt2.lower().startswith('f') :
filt2 = filtername2datfile( filt2, camera=camera2 )
if filt3.lower().startswith('f') :
filt3 = filtername2datfile( filt3, camera=camera3 )
if not (filt1.endswith('.dat') and filt2.endswith('.dat')
and filt3.endswith('.dat') ):
print("Must specify a filter name (e.g. F160W) or a .dat file.")
return( None )
# read in the transmission curves for filters
topdir = os.path.abspath( '.' )
sndataroot = os.environ['SNDATA_ROOT']
os.chdir( sndataroot+'/filters/HST')
w1, f1 = np.loadtxt( filt1, unpack=True )
w2, f2 = np.loadtxt( filt2, unpack=True )
w3, f3 = np.loadtxt( filt3, unpack=True )
os.chdir( topdir )
# integrate
int1 = scint.simps( f1, w1 )
int2 = scint.simps( f2, w2 )
int3 = scint.simps( f3, w3 )
# sum and divide
return( int3 / (int1+int2) )
def plotmedbands( z = 2, day=5 ):
from hstsntools import snana
w1a, f1a = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/snsed/Hsiao07.dat', day=day )
w1az = w1a * (1+z)
f1az = f1a / f1a.max() / 2.
clf()
ax1 = subplot(3,1,1)
plot(w125, f125, 'b--', label='F125W')
plot(w127, f127, 'b-', label='F127M')
plot(w1az, f1az, 'r-', label='_nolegend_')
ax1.legend( loc='upper right', frameon=False, numpoints=2, handlelen=0.2, labelspacing=0.1 )
ax1.set_xlim( 9000, 20000 )
ax1.text(9500,0.2, 'SNIa\nz=%.1f\nt=%i'%(z,day), color='r',ha='left',va='bottom')
setp(ax1.get_xticklabels(), visible=False)
setp(ax1.get_yticklabels(), visible=False)
ax2 = subplot(3,1,2, sharex=ax1, sharey=ax1)
plot(w140, f140, 'g--',label='F140W')
plot(w139, f139, 'g-',label='F139M')
plot(w1az, f1az, 'r-', label='_nolegend_')
ax2.legend( loc='upper right', frameon=False, numpoints=2, handlelen=0.2, labelspacing=0.1 )
ax2.set_xlim( 9000, 20000 )
setp(ax2.get_xticklabels(), visible=False)
setp(ax2.get_yticklabels(), visible=False)
ax2.set_ylabel('Flux / Transmission (arbitrary units)')
ax3= subplot(3,1,3, sharex=ax1, sharey=ax1)
plot(w160, f160, 'm--',label='F160W')
plot(w153, f153, 'm-',label='F153M')
plot(w1az, f1az, 'r-',label='_nolegend_')
ax3.legend( loc='upper right', frameon=False, numpoints=2, handlelen=0.2, labelspacing=0.1 )
setp(ax3.get_yticklabels(), visible=False)
ax1.set_xlim( 9000, 20000 )
ax1.set_xlabel('observed wavelength (Angstroms)')
fig = gcf()
fig.subplots_adjust( wspace=0, hspace=0, left=0.05, bottom=0.12, right=0.95, top=0.95)
def plotbroadbandz( zvals=[1,1.5,2.0], day=0 ):
""" show how broad bands cover the SED at high z"""
from hstsnpipe import tools
from tools import snana
w1a, f1a = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/snsed/Hsiao07.extrap.dat', day=day )
print("SALT2")
# w1a, f1a = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/models/SALT2/SALT2.Guy10_UV2IR/salt2_template_0.dat', day=day )
#w1a, f1a = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/models/SALT2/SALT2.Guy10_UV2IR/salt2_template_1.dat', day=day )
#wII, fII = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/snsed/non1a/SDSS-000018.DAT', day=0 )
#wIb, fIb = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/snsed/non1a/SDSS-000020.DAT', day=0 )
clf()
i = 0
for z in zvals:
i+=1
w1az = w1a * (1+z)
f1az = f1a / f1a.max() / 2.
#wII = wII * (1+z)
#fII = fII / fII.max() / 2.
#wIb = wIb * (1+z)
#fIb = fIb / fIb.max() / 2.
ax = subplot(3,1,i)
plot(w350, f350, 'b--', label='F350LP(W)')
plot(w125, f125, 'g--', label='F125W(J)')
plot(w160, f160, 'r--', label='F160W(H)')
plot(w1az, f1az, 'k-', label='_nolegend_')
#ax.legend( loc='upper right', frameon=False, numpoints=2, handlelen=0.2, labelspacing=0.1 )
ax.set_xlim( 3000, 20000 )
ax.text(0.98,0.95, 'z=%.1f'%(z), color='k',ha='right',va='top',transform=ax.transAxes)
setp(ax.get_yticklabels(), visible=False)
if i==1 :
top = ax.get_ylim()[1]
ax.text(16000,top, 'F160W(H)', color='r',ha='center',va='bottom')
ax.text(12500,top, 'F125W(J)', color='g',ha='center',va='bottom')
ax.text(3500,top, 'F350LP(W)', color='b',ha='left',va='bottom')
if i<3 :
setp(ax.get_xticklabels(), visible=False)
if i==2 :
ax.set_ylabel('Flux or Transmission (arbitrary units)')
if i==3 :
ax.set_xlabel('observed wavelength (Angstroms)')
fig = gcf()
fig.subplots_adjust( wspace=0, hspace=0, left=0.05, bottom=0.12, right=0.95, top=0.95)
def plotBVRI( ):
""" show how broad ACS bands cover the SN SED """
from hstsnpipe import tools
from tools import snana
w1a, f1a = snana.snsed.getsed( sedfile='/usr/local/SNDATA_ROOT/snsed/Hsiao07.extrap.dat', day=0 )
clf()
f1a = f1a / f1a.max()
plot(wB, fB, 'b--', label='B')
plot(wV, fV, 'g--', label='V')
plot(wR, fR, 'r--', label='R')
plot(wI, fI, 'k--', label='I')
plot(w435, f435, 'b-', label='F435W')
plot(w606, f606, 'g-', label='F606W')
plot(w625, f625, 'r-', label='F625W')
plot(w814, f814, 'k-', label='F814W')
plot(w1a, f1a, 'k-', label='_nolegend_')
ax = gca()
ax.set_xlim( 3000, 10000 )
#setp(ax.get_yticklabels(), visible=False)
| mit |
datapythonista/pandas | pandas/tests/series/methods/test_describe.py | 2 | 4855 | import numpy as np
from pandas import (
Period,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestSeriesDescribe:
def test_describe_ints(self):
ser = Series([0, 1, 2, 3, 4], name="int_data")
result = ser.describe()
expected = Series(
[5, 2, ser.std(), 0, 1, 2, 3, 4],
name="int_data",
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_series_equal(result, expected)
def test_describe_bools(self):
ser = Series([True, True, False, False, False], name="bool_data")
result = ser.describe()
expected = Series(
[5, 2, False, 3], name="bool_data", index=["count", "unique", "top", "freq"]
)
tm.assert_series_equal(result, expected)
def test_describe_strs(self):
ser = Series(["a", "a", "b", "c", "d"], name="str_data")
result = ser.describe()
expected = Series(
[5, 4, "a", 2], name="str_data", index=["count", "unique", "top", "freq"]
)
tm.assert_series_equal(result, expected)
def test_describe_timedelta64(self):
ser = Series(
[
Timedelta("1 days"),
Timedelta("2 days"),
Timedelta("3 days"),
Timedelta("4 days"),
Timedelta("5 days"),
],
name="timedelta_data",
)
result = ser.describe()
expected = Series(
[5, ser[2], ser.std(), ser[0], ser[1], ser[2], ser[3], ser[4]],
name="timedelta_data",
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_series_equal(result, expected)
def test_describe_period(self):
ser = Series(
[Period("2020-01", "M"), Period("2020-01", "M"), Period("2019-12", "M")],
name="period_data",
)
result = ser.describe()
expected = Series(
[3, 2, ser[0], 2],
name="period_data",
index=["count", "unique", "top", "freq"],
)
tm.assert_series_equal(result, expected)
def test_describe_empty_object(self):
# https://github.com/pandas-dev/pandas/issues/27183
s = Series([None, None], dtype=object)
result = s.describe()
expected = Series(
[0, 0, np.nan, np.nan],
dtype=object,
index=["count", "unique", "top", "freq"],
)
tm.assert_series_equal(result, expected)
result = s[:0].describe()
tm.assert_series_equal(result, expected)
# ensure NaN, not None
assert np.isnan(result.iloc[2])
assert np.isnan(result.iloc[3])
def test_describe_with_tz(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
name = str(tz_naive_fixture)
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s = Series(date_range(start, end, tz=tz), name=name)
result = s.describe(datetime_is_numeric=True)
expected = Series(
[
5,
Timestamp(2018, 1, 3).tz_localize(tz),
start.tz_localize(tz),
s[1],
s[2],
s[3],
end.tz_localize(tz),
],
name=name,
index=["count", "mean", "min", "25%", "50%", "75%", "max"],
)
tm.assert_series_equal(result, expected)
def test_describe_with_tz_warns(self):
name = tz = "CET"
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s = Series(date_range(start, end, tz=tz), name=name)
with tm.assert_produces_warning(FutureWarning):
result = s.describe()
expected = Series(
[
5,
5,
s.value_counts().index[0],
1,
start.tz_localize(tz),
end.tz_localize(tz),
],
name=name,
index=["count", "unique", "top", "freq", "first", "last"],
)
tm.assert_series_equal(result, expected)
def test_datetime_is_numeric_includes_datetime(self):
s = Series(date_range("2012", periods=3))
result = s.describe(datetime_is_numeric=True)
expected = Series(
[
3,
Timestamp("2012-01-02"),
Timestamp("2012-01-01"),
Timestamp("2012-01-01T12:00:00"),
Timestamp("2012-01-02"),
Timestamp("2012-01-02T12:00:00"),
Timestamp("2012-01-03"),
],
index=["count", "mean", "min", "25%", "50%", "75%", "max"],
)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
lucapinello/CRISPResso | CRISPResso/CRISPRessoCORE.py | 1 | 127566 | #!/usr/bin/env python
# -*- coding: utf8 -*-
'''
CRISPResso - Luca Pinello 2015
Software pipeline for the analysis of CRISPR-Cas9 genome editing outcomes from deep sequencing data
https://github.com/lucapinello/CRISPResso
'''
__version__ = "1.0.13"
import sys
import errno
import os
import subprocess as sb
import argparse
import re
import gzip
from collections import defaultdict
import multiprocessing as mp
import cPickle as cp
import unicodedata
import traceback
import logging
logging.basicConfig(level=logging.INFO,
format='%(levelname)-5s @ %(asctime)s:\n\t %(message)s \n',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr,
filemode="w"
)
error = logging.critical
warn = logging.warning
debug = logging.debug
info = logging.info
####Support functions###
_ROOT = os.path.abspath(os.path.dirname(__file__))
def get_data(path):
return os.path.join(_ROOT, 'data', path)
def check_library(library_name):
try:
return __import__(library_name)
except:
error('You need to install %s module to use CRISPResso!' % library_name)
sys.exit(1)
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def check_program(binary_name,download_url=None):
if not which(binary_name):
error('You need to install and have the command #####%s##### in your PATH variable to use CRISPResso!\n Please read the documentation!' % binary_name)
if download_url:
error('You can download it from here:%s' % download_url)
sys.exit(1)
def check_file(filename):
try:
with open(filename): pass
except IOError:
raise Exception('I cannot open the file: '+filename)
def force_symlink(src, dst):
if os.path.exists(dst) and os.path.samefile(src,dst):
return
try:
os.symlink(src, dst)
except OSError as exc:
if exc.errno == errno.EEXIST:
os.remove(dst)
os.symlink(src, dst)
nt_complement=dict({'A':'T','C':'G','G':'C','T':'A','N':'N','_':'_','-':'-'})
def reverse_complement(seq):
return "".join([nt_complement[c] for c in seq.upper()[-1::-1]])
def find_wrong_nt(sequence):
return list(set(sequence.upper()).difference(set(['A','T','C','G','N'])))
def get_ids_reads_to_remove(fastq_filename,min_bp_quality=20,min_single_bp_quality=0):
ids_to_remove=set()
if fastq_filename.endswith('.gz'):
fastq_handle=gzip.open(fastq_filename)
else:
fastq_handle=open(fastq_filename)
for record in SeqIO.parse(fastq_handle, "fastq"):
if np.array(record.letter_annotations["phred_quality"]).mean()<min_bp_quality \
or np.array(record.letter_annotations["phred_quality"]).min()<min_single_bp_quality:
ids_to_remove.add(record.id)
return ids_to_remove
def filter_pe_fastq_by_qual(fastq_r1,fastq_r2,output_filename_r1=None,output_filename_r2=None,min_bp_quality=20,min_single_bp_quality=0):
ids_to_remove_s1=get_ids_reads_to_remove(fastq_r1,min_bp_quality=min_bp_quality,min_single_bp_quality=min_single_bp_quality)
ids_to_remove_s2=get_ids_reads_to_remove(fastq_r2,min_bp_quality=min_bp_quality,min_single_bp_quality=min_single_bp_quality)
ids_to_remove=ids_to_remove_s1.union(ids_to_remove_s2)
if fastq_r1.endswith('.gz'):
fastq_handle_r1=gzip.open(fastq_r1)
else:
fastq_handle_r1=open(fastq_r1)
if fastq_r2.endswith('.gz'):
fastq_handle_r2=gzip.open(fastq_r2)
else:
fastq_handle_r2=open(fastq_r2)
if not output_filename_r1:
output_filename_r1=fastq_r1.replace('.fastq','').replace('.gz','')+'_filtered.fastq.gz'
if not output_filename_r2:
output_filename_r2=fastq_r2.replace('.fastq','').replace('.gz','')+'_filtered.fastq.gz'
#we cannot use with on gzip with python 2.6 :(
try:
fastq_filtered_outfile_r1=gzip.open(output_filename_r1,'w+')
for record in SeqIO.parse(fastq_handle_r1, "fastq"):
if not record.id in ids_to_remove:
fastq_filtered_outfile_r1.write(record.format('fastq'))
except:
raise Exception('Error handling the fastq_filtered_outfile_r1')
try:
fastq_filtered_outfile_r2=gzip.open(output_filename_r2,'w+')
for record in SeqIO.parse(fastq_handle_r2, "fastq"):
if not record.id in ids_to_remove:
fastq_filtered_outfile_r2.write(record.format('fastq'))
except:
raise Exception('Error handling the fastq_filtered_outfile_r2')
return output_filename_r1,output_filename_r2
def filter_se_fastq_by_qual(fastq_filename,output_filename=None,min_bp_quality=20,min_single_bp_quality=0):
if fastq_filename.endswith('.gz'):
fastq_handle=gzip.open(fastq_filename)
else:
fastq_handle=open(fastq_filename)
if not output_filename:
output_filename=fastq_filename.replace('.fastq','').replace('.gz','')+'_filtered.fastq.gz'
try:
fastq_filtered_outfile=gzip.open(output_filename,'w+')
for record in SeqIO.parse(fastq_handle, "fastq"):
if np.array(record.letter_annotations["phred_quality"]).mean()>=min_bp_quality \
and np.array(record.letter_annotations["phred_quality"]).min()>=min_single_bp_quality:
fastq_filtered_outfile.write(record.format('fastq'))
except:
raise Exception('Error handling the fastq_filtered_outfile')
return output_filename
def get_avg_read_lenght_fastq(fastq_filename):
cmd=('z' if fastq_filename.endswith('.gz') else '' ) +('cat < %s' % fastq_filename)+\
r''' | awk 'BEGIN {n=0;s=0;} NR%4 == 2 {s+=length($0);n++;} END { printf("%d\n",s/n)}' '''
p = sb.Popen(cmd, shell=True,stdout=sb.PIPE)
return int(p.communicate()[0].strip())
def get_n_reads_fastq(fastq_filename):
p = sb.Popen(('z' if fastq_filename.endswith('.gz') else '' ) +"cat < %s | wc -l" % fastq_filename , shell=True,stdout=sb.PIPE)
return int(float(p.communicate()[0])/4.0)
matplotlib=check_library('matplotlib')
from matplotlib import font_manager as fm
font = {'size' : 22}
matplotlib.rc('font', **font)
matplotlib.use('Agg')
plt=check_library('pylab')
from matplotlib import font_manager as fm
from matplotlib import colors as colors_mpl
import matplotlib.gridspec as gridspec
pd=check_library('pandas')
np=check_library('numpy')
Bio=check_library('Bio')
check_program('java')
check_program('flash')
check_program('needle')
sns=check_library('seaborn')
sns.set_context('poster')
sns.set(font_scale=2.2)
sns.set_style('white')
from Bio import SeqIO,pairwise2
#########################################
###EXCEPTIONS############################
class FlashException(Exception):
pass
class TrimmomaticException(Exception):
pass
class NeedleException(Exception):
pass
class NoReadsAlignedException(Exception):
pass
class DonorSequenceException(Exception):
pass
class AmpliconEqualDonorException(Exception):
pass
class CoreDonorSequenceNotContainedException(Exception):
pass
class CoreDonorSequenceNotUniqueException(Exception):
pass
class SgRNASequenceException(Exception):
pass
class NTException(Exception):
pass
class ExonSequenceException(Exception):
pass
class DuplicateSequenceIdException(Exception):
pass
class NoReadsAfterQualityFiltering(Exception):
pass
#########################################
def process_df_chunk(df_needle_alignment_chunk):
MODIFIED_FRAMESHIFT=0
MODIFIED_NON_FRAMESHIFT=0
NON_MODIFIED_NON_FRAMESHIFT=0
SPLICING_SITES_MODIFIED=0
#INITIALIZATIONS
if args.coding_seq:
PERFORM_FRAMESHIFT_ANALYSIS=True
else:
PERFORM_FRAMESHIFT_ANALYSIS=False
effect_vector_insertion=np.zeros(len_amplicon)
effect_vector_deletion=np.zeros(len_amplicon)
effect_vector_mutation=np.zeros(len_amplicon)
effect_vector_any=np.zeros(len_amplicon)
effect_vector_insertion_mixed=np.zeros(len_amplicon)
effect_vector_deletion_mixed=np.zeros(len_amplicon)
effect_vector_mutation_mixed=np.zeros(len_amplicon)
effect_vector_insertion_hdr=np.zeros(len_amplicon)
effect_vector_deletion_hdr=np.zeros(len_amplicon)
effect_vector_mutation_hdr=np.zeros(len_amplicon)
effect_vector_insertion_noncoding=np.zeros(len_amplicon)
effect_vector_deletion_noncoding=np.zeros(len_amplicon)
effect_vector_mutation_noncoding=np.zeros(len_amplicon)
hist_inframe=defaultdict(lambda :0)
hist_frameshift=defaultdict(lambda :0)
avg_vector_del_all=np.zeros(len_amplicon)
avg_vector_ins_all=np.zeros(len_amplicon)
re_find_indels=re.compile("(-*-)")
re_find_substitutions=re.compile("(\.*\.)")
for idx_row,row in df_needle_alignment_chunk.iterrows():
#GET THE MUTATIONS POSITIONS
if row.UNMODIFIED:
continue
if PERFORM_FRAMESHIFT_ANALYSIS:
lenght_modified_positions_exons=[]
current_read_exons_modified=False
current_read_spliced_modified=False
#quantify substitution
substitution_positions=[]
if not args.ignore_substitutions:
for p in re_find_substitutions.finditer(row.align_str):
st,en=p.span()
substitution_positions.append(row.ref_positions[st:en])
if substitution_positions:
substitution_positions=list(np.hstack(substitution_positions))
#quantify deletion
deletion_positions=[]
deletion_positions_flat=[]
deletion_sizes=[]
if not args.ignore_deletions:
for p in re_find_indels.finditer(row.align_seq):
st,en=p.span()
deletion_positions.append(row.ref_positions[st:en])
deletion_sizes.append(en-st)
if deletion_positions:
deletion_positions_flat=np.hstack(deletion_positions)
#quantify insertion
insertion_positions=[]
insertion_sizes=[]
insertion_positions_flat=[]
if not args.ignore_insertions:
for p in re_find_indels.finditer(row.ref_seq):
st,en=p.span()
#ref_st=row.ref_positions[st-1] # we report the base preceding the insertion
#insertion_positions.append(ref_st)
insertion_positions.append([row['ref_positions'][max(0,st-1)],row['ref_positions'][min(len(row['ref_positions'])-1,en)]])
insertion_sizes.append(en-st)
if insertion_positions:
insertion_positions_flat=np.hstack(insertion_positions)
########CLASSIFY READ
#WE HAVE THE DONOR SEQUENCE
if args.expected_hdr_amplicon_seq:
#HDR
if (row.score_diff<0) & (row.score_repaired>=args.hdr_perfect_alignment_threshold):
df_needle_alignment_chunk.ix[idx_row,'HDR']=True
#MIXED
elif (row.score_diff<0) & (row.score_repaired<args.hdr_perfect_alignment_threshold):
df_needle_alignment_chunk.ix[idx_row,'MIXED']=True
else:
#NHEJ
if include_idxs.intersection(substitution_positions) \
or include_idxs.intersection(insertion_positions_flat) or \
include_idxs.intersection(deletion_positions_flat):
df_needle_alignment_chunk.ix[idx_row,'NHEJ']=True
#UNMODIFIED
else:
df_needle_alignment_chunk.ix[idx_row,'UNMODIFIED']=True
#NO DONOR SEQUENCE PROVIDED
else:
#NHEJ
if include_idxs.intersection(substitution_positions) \
or include_idxs.intersection(insertion_positions_flat) or \
include_idxs.intersection(deletion_positions_flat):
df_needle_alignment_chunk.ix[idx_row,'NHEJ']=True
#UNMODIFIED
else:
df_needle_alignment_chunk.ix[idx_row,'UNMODIFIED']=True
###CREATE AVERAGE SIGNALS, HERE WE SHOW EVERYTHING...
if df_needle_alignment_chunk.ix[idx_row,'MIXED']:
effect_vector_mutation_mixed[substitution_positions]+=1
effect_vector_deletion_mixed[deletion_positions_flat]+=1
effect_vector_insertion_mixed[insertion_positions_flat]+=1
elif df_needle_alignment_chunk.ix[idx_row,'HDR']:
effect_vector_mutation_hdr[substitution_positions]+=1
effect_vector_deletion_hdr[deletion_positions_flat]+=1
effect_vector_insertion_hdr[insertion_positions_flat]+=1
elif df_needle_alignment_chunk.ix[idx_row,'NHEJ'] and not args.hide_mutations_outside_window_NHEJ:
effect_vector_mutation[substitution_positions]+=1
effect_vector_deletion[deletion_positions_flat]+=1
effect_vector_insertion[insertion_positions_flat]+=1
any_positions=np.unique(np.hstack([deletion_positions_flat,insertion_positions_flat,substitution_positions])).astype(int)
effect_vector_any[any_positions]+=1
#For NHEJ we count only the events that overlap the window specified around
#the cut site (1bp by default)...
if df_needle_alignment_chunk.ix[idx_row,'NHEJ'] and args.window_around_sgrna:
substitution_positions=list(include_idxs.intersection(substitution_positions))
insertion_positions_window=[]
insertion_sizes_window=[]
#count insertions overlapping
for idx_ins,ins_pos_set in enumerate(insertion_positions):
#print ref_st, insertion_positions
if include_idxs.intersection(ins_pos_set):
insertion_positions_window.append(ins_pos_set)
insertion_sizes_window.append(insertion_sizes[idx_ins])
insertion_positions=insertion_positions_window
insertion_sizes=insertion_sizes_window
deletion_positions_window=[]
deletion_sizes_window=[]
for idx_del,del_pos_set in enumerate(deletion_positions):
if include_idxs.intersection(del_pos_set):
deletion_positions_window.append(del_pos_set)
deletion_sizes_window.append(deletion_sizes[idx_del])
deletion_positions=deletion_positions_window
deletion_sizes=deletion_sizes_window
if deletion_positions:
deletion_positions_flat=np.hstack(deletion_positions)
if df_needle_alignment_chunk.ix[idx_row,'NHEJ'] and args.hide_mutations_outside_window_NHEJ:
effect_vector_mutation[substitution_positions]+=1
effect_vector_deletion[deletion_positions_flat]+=1
effect_vector_insertion[insertion_positions_flat]+=1
####QUANTIFICATION AND FRAMESHIFT ANALYSIS
if not df_needle_alignment_chunk.ix[idx_row,'UNMODIFIED']:
df_needle_alignment_chunk.ix[idx_row,'n_mutated']=len(substitution_positions)
df_needle_alignment_chunk.ix[idx_row,'n_inserted']=np.sum(insertion_sizes)
df_needle_alignment_chunk.ix[idx_row,'n_deleted']=np.sum(deletion_sizes)
for idx_ins,ins_pos_set in enumerate(insertion_positions):
avg_vector_ins_all[ins_pos_set]+=insertion_sizes[idx_ins]
if PERFORM_FRAMESHIFT_ANALYSIS:
if set(exon_positions).intersection(ins_pos_set): # check that we are inserting in one exon
lenght_modified_positions_exons.append(insertion_sizes[idx_ins])
current_read_exons_modified=True
for idx_del,del_pos_set in enumerate(deletion_positions):
avg_vector_del_all[del_pos_set]+=deletion_sizes[idx_del]
if PERFORM_FRAMESHIFT_ANALYSIS:
del_positions_to_append=sorted(set(exon_positions).intersection(set(deletion_positions_flat)))
if del_positions_to_append:
#Always use the low include upper not
current_read_exons_modified=True
lenght_modified_positions_exons.append(-len(del_positions_to_append))
if set(exon_positions).intersection(substitution_positions):
current_read_exons_modified=True
if set(splicing_positions).intersection(substitution_positions):
current_read_spliced_modified=True
if set(splicing_positions).intersection(deletion_positions_flat):
current_read_spliced_modified=True
if set(splicing_positions).intersection(insertion_positions_flat):
current_read_spliced_modified=True
if current_read_spliced_modified:
SPLICING_SITES_MODIFIED+=1
#if modified check if frameshift
if current_read_exons_modified:
if not lenght_modified_positions_exons:
#there are no indels
MODIFIED_NON_FRAMESHIFT+=1
hist_inframe[0]+=1
else:
effetive_length=sum(lenght_modified_positions_exons)
if (effetive_length % 3 )==0:
MODIFIED_NON_FRAMESHIFT+=1
hist_inframe[effetive_length]+=1
else:
MODIFIED_FRAMESHIFT+=1
hist_frameshift[effetive_length]+=1
#the indels and substitutions are outside the exon/s so we don't care!
else:
NON_MODIFIED_NON_FRAMESHIFT+=1
effect_vector_insertion_noncoding[insertion_positions_flat]+=1
effect_vector_deletion_noncoding[deletion_positions_flat]+=1
effect_vector_mutation_noncoding[substitution_positions]+=1
hist_inframe=dict(hist_inframe)
hist_frameshift=dict(hist_frameshift)
return df_needle_alignment_chunk, effect_vector_insertion,effect_vector_deletion,\
effect_vector_mutation,effect_vector_any,effect_vector_insertion_mixed,effect_vector_deletion_mixed,\
effect_vector_mutation_mixed,effect_vector_insertion_hdr,effect_vector_deletion_hdr,effect_vector_mutation_hdr,\
effect_vector_insertion_noncoding,effect_vector_deletion_noncoding,effect_vector_mutation_noncoding,hist_inframe,\
hist_frameshift,avg_vector_del_all,avg_vector_ins_all,MODIFIED_FRAMESHIFT,MODIFIED_NON_FRAMESHIFT,NON_MODIFIED_NON_FRAMESHIFT,\
SPLICING_SITES_MODIFIED
def add_hist(hist_to_add,hist_global):
for key,value in hist_to_add.iteritems():
hist_global[key]+=value
return hist_global
def slugify(value): #adapted from the Django project
value = unicodedata.normalize('NFKD', unicode(value)).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '_', value).strip())
value = unicode(re.sub('[-\s]+', '-', value))
return str(value)
def split_paired_end_reads_single_file(fastq_filename,output_filename_r1,output_filename_r2):
if fastq_filename.endswith('.gz'):
fastq_handle=gzip.open(fastq_filename)
else:
fastq_handle=open(fastq_filename)
#we cannot use with on gzip with python 2.6 :(
try:
fastq_splitted_outfile_r1=gzip.open(output_filename_r1,'w+')
fastq_splitted_outfile_r2=gzip.open(output_filename_r2,'w+')
[fastq_splitted_outfile_r1.write(line) if (i % 8 < 4) else fastq_splitted_outfile_r2.write(line) for i, line in enumerate(fastq_handle)]
except:
raise Exception('Error handling the splitting operation')
return output_filename_r1,output_filename_r2
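# Illustration of the interleaving assumed above: fastq records are 4 lines each,
# so the test (i % 8 < 4) sends lines 0-3 (read 1 of pair 1) to R1, lines 4-7
# (read 2 of pair 1) to R2, lines 8-11 to R1, lines 12-15 to R2, and so on.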
def get_row_around_cut(row,cut_point,offset):
cut_idx=row['ref_positions'].index(cut_point)
return row['Aligned_Sequence'][cut_idx-offset+1:cut_idx+offset+1],row['Reference_Sequence'][cut_idx-offset+1:cut_idx+offset+1],row['UNMODIFIED'],row['%Reads'], row['#Reads']
def get_dataframe_around_cut(df_alleles, cut_point,offset):
df_alleles_around_cut=pd.DataFrame(list(df_alleles.apply(lambda row: get_row_around_cut(row,cut_point,offset),axis=1).values),
columns=['Aligned_Sequence','Reference_Sequence','Unedited','%Reads','#Reads'])
df_alleles_around_cut=df_alleles_around_cut.groupby(['Aligned_Sequence','Reference_Sequence']).sum().reset_index().set_index('Aligned_Sequence')
df_alleles_around_cut.sort_values(by='%Reads',inplace=True,ascending=False)
df_alleles_around_cut['Unedited']=df_alleles_around_cut['Unedited']>0
return df_alleles_around_cut
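# Worked sketch of the windowing above (hypothetical numbers): with offset=2 and a
# row whose ref_positions are [0,1,2,3,4,5], cut_point=2 gives cut_idx=2 and the
# slice [cut_idx-offset+1:cut_idx+offset+1] = [1:5], i.e. a 4 bp window around the
# predicted cleavage position; identical windows are then grouped and their read
# counts summed.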
#We need to customize the seaborn heatmap class and function
class Custom_HeatMapper(sns.matrix._HeatMapper):
def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws,per_element_annot_kws,cbar, cbar_kws,
xticklabels=True, yticklabels=True, mask=None):
super(Custom_HeatMapper, self).__init__(data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, cbar, cbar_kws,
xticklabels, yticklabels, mask)
if annot is not None:
if per_element_annot_kws is None:
self.per_element_annot_kws=np.empty_like(annot,dtype=np.object)
self.per_element_annot_kws[:]=dict()
else:
self.per_element_annot_kws=per_element_annot_kws
#add per-element dict to style the annotation
def _annotate_heatmap(self, ax, mesh):
"""Add textual labels with the value in each cell."""
mesh.update_scalarmappable()
xpos, ypos = np.meshgrid(ax.get_xticks(), ax.get_yticks())
for x, y, m, color, val,per_element_dict in zip(xpos.flat, ypos.flat,
mesh.get_array(), mesh.get_facecolors(),
self.annot_data.flat,self.per_element_annot_kws.flat):
#print per_element_dict
if m is not np.ma.masked:
l = sns.utils.relative_luminance(color)
text_color = ".15" if l > .408 else "w"
annotation = ("{:" + self.fmt + "}").format(val)
text_kwargs = dict(color=text_color, ha="center", va="center")
text_kwargs.update(self.annot_kws)
text_kwargs.update(per_element_dict)
ax.text(x, y, annotation, **text_kwargs)
#removed the colorbar
def plot(self, ax, cax, kws):
"""Draw the heatmap on the provided Axes."""
# Remove all the Axes spines
sns.utils.despine(ax=ax, left=True, bottom=True)
# Draw the heatmap
mesh = ax.pcolormesh(self.plot_data, vmin=self.vmin, vmax=self.vmax,
cmap=self.cmap, **kws)
# Set the axis limits
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
# Add row and column labels
ax.set(xticks=self.xticks, yticks=self.yticks)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation="vertical")
# Possibly rotate them if they overlap
plt.draw()
if sns.utils.axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
if sns.utils.axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
# Add the axis labels
ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
# Annotate the cells with the formatted values
if self.annot:
self._annotate_heatmap(ax, mesh)
def custom_heatmap(data, vmin=None, vmax=None, cmap=None, center=None, robust=False,
annot=None, fmt=".2g", annot_kws=None,per_element_annot_kws=None,
linewidths=0, linecolor="white",
cbar=True, cbar_kws=None, cbar_ax=None,
square=False, ax=None, xticklabels=True, yticklabels=True,
mask=None,
**kwargs):
# Initialize the plotter object
plotter = Custom_HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,
annot_kws, per_element_annot_kws,cbar, cbar_kws, xticklabels,
yticklabels, mask)
# Add the pcolormesh kwargs here
kwargs["linewidths"] = linewidths
kwargs["edgecolor"] = linecolor
# Draw the plot and return the Axes
if ax is None:
ax = plt.gca()
if square:
ax.set_aspect("equal")
plotter.plot(ax, cbar_ax, kwargs)
return ax
def plot_alleles_table(reference_seq,cut_point,df_alleles,sgRNA_name,OUTPUT_DIRECTORY,MIN_FREQUENCY=0.5,MAX_N_ROWS=100):
#bp we are plotting on each side
offset_around_cut_to_plot=len(df_alleles.index[0])/2
# make a color map of fixed colors
alpha=0.5
get_color=lambda x,y,z: (x/255.0,y/255.0,z/255.0,alpha)
A_color=get_color(127,201,127)
T_color=get_color(190,174,212)
C_color=get_color(253,192,134)
G_color=get_color(255,255,153)
N_color=get_color(255,255,255)
INDEL_color=get_color(230,230,230)
cmap = colors_mpl.ListedColormap([INDEL_color, A_color,T_color,C_color,G_color,N_color])
dna_to_numbers={'-':0,'A':1,'T':2,'C':3,'G':4,'N':5}
seq_to_numbers= lambda seq: [dna_to_numbers[x] for x in seq]
X=[]
annot=[]
y_labels=[]
lines=defaultdict(list)
re_find_indels=re.compile("(-*-)")
per_element_annot_kws=[]
idx_row=0
for idx,row in df_alleles.ix[df_alleles['%Reads']>=MIN_FREQUENCY][:MAX_N_ROWS].iterrows():
X.append(seq_to_numbers(str.upper(idx)))
annot.append(list(idx))
y_labels.append('%.2f%% (%d reads)' % (row['%Reads'],row['#Reads']))
for p in re_find_indels.finditer(row['Reference_Sequence']):
lines[idx_row].append((p.start(),p.end()))
idx_row+=1
idxs_sub= [i_sub for i_sub in range(len(idx)) if \
(row['Reference_Sequence'][i_sub]!=idx[i_sub]) and \
(row['Reference_Sequence'][i_sub]!='-') and\
(idx[i_sub]!='-')]
to_append=np.array([{}]*len(idx),dtype=np.object)
to_append[ idxs_sub]={'weight':'bold', 'color':'black','size':16}
per_element_annot_kws.append(to_append)
ref_seq_around_cut=reference_seq[cut_point-offset_around_cut_to_plot+1:cut_point+offset_around_cut_to_plot+1]
per_element_annot_kws=np.vstack(per_element_annot_kws[::-1])
ref_seq_hm=np.expand_dims(seq_to_numbers(ref_seq_around_cut),1).T
ref_seq_annot_hm=np.expand_dims(list(ref_seq_around_cut),1).T
NEW_SEABORN=np.sum(np.array(map(int,sns.__version__.split('.')))*(100,10,1))>= 80
if NEW_SEABORN:
annot=annot[::-1]
X=X[::-1]
sns.set_context('poster')
N_ROWS=len(X)
N_COLUMNS=offset_around_cut_to_plot*2
fig=plt.figure(figsize=(offset_around_cut_to_plot*0.6,(N_ROWS+1)*0.6))
gs1 = gridspec.GridSpec(N_ROWS+1,N_COLUMNS)
gs2 = gridspec.GridSpec(N_ROWS+1,N_COLUMNS)
ax_hm_ref=plt.subplot(gs1[0, :])
ax_hm=plt.subplot(gs2[1:, :])
custom_heatmap(ref_seq_hm,annot=ref_seq_annot_hm,annot_kws={'size':16},cmap=cmap,fmt='s',ax=ax_hm_ref,vmin=0,vmax=5,square=True)
custom_heatmap(X,annot=np.array(annot),annot_kws={'size':16},cmap=cmap,fmt='s',ax=ax_hm,square=True, vmin=0, vmax=5, per_element_annot_kws=per_element_annot_kws)
ax_hm.yaxis.tick_right()
ax_hm.yaxis.set_ticklabels(y_labels[::-1],rotation=True),
ax_hm.xaxis.set_ticks([])
#print lines
#cut point vertical line
ax_hm.vlines([offset_around_cut_to_plot],*ax_hm.get_ylim(),linestyles='dashed')
#create boxes for ins
for idx,lss in lines.iteritems():
for ls in lss:
for l in ls:
ax_hm.vlines([l],N_ROWS-idx-1,N_ROWS-idx,color='red',lw=3)
ax_hm.hlines(N_ROWS-idx-1,ls[0],ls[1],color='red',lw=3)
ax_hm.hlines(N_ROWS-idx,ls[0],ls[1],color='red',lw=3)
ax_hm_ref.yaxis.tick_right()
ax_hm_ref.xaxis.set_ticks([])
ax_hm_ref.yaxis.set_ticklabels(['Reference'],rotation=True)
gs2.update(left=0,right=1, hspace=0.05,wspace=0,top=1*(((N_ROWS)*1.13))/(N_ROWS))
gs1.update(left=0,right=1, hspace=0.05,wspace=0,)
sns.set_context(rc={'lines.markeredgewidth': 1,'mathtext.fontset' : 'stix','text.usetex':True,'text.latex.unicode':True} )
proxies = [matplotlib.lines.Line2D([0], [0], linestyle='none', mfc='black',
mec='none', marker=r'$\mathbf{{{}}}$'.format('bold'),ms=18),
matplotlib.lines.Line2D([0], [0], linestyle='none', mfc='none',
mec='red', marker='s',ms=8,markeredgewidth=2.5),
matplotlib.lines.Line2D([0], [0], linestyle='none', mfc='none',
mec='black', marker='_',ms=2,),
matplotlib.lines.Line2D([0], [1], linestyle='--',c='black',ms=6)] #
descriptions=['Substitutions','Insertions','Deletions','Predicted cleavage position']
ax_hm_ref.legend(proxies, descriptions, numpoints=1, markerscale=2, loc='center', bbox_to_anchor=(0.5, 4),ncol=1)
_jp=lambda filename: os.path.join(OUTPUT_DIRECTORY,filename)
plt.savefig(_jp('9.Alleles_around_cut_site_for_%s.pdf' % sgRNA_name),bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('9.Alleles_around_cut_site_for_%s.png' % sgRNA_name),bbox_inches='tight',pad=1)
def main():
try:
print ' \n~~~CRISPResso~~~'
print '-Analysis of CRISPR/Cas9 outcomes from deep sequencing data-'
print'''
)
(
__)__
C\| |
\ /
\___/
'''
print'\n[Luca Pinello 2015, send bugs, suggestions or *green coffee* to lucapinello AT gmail DOT com]\n\n',
print 'Version %s\n' % __version__
def print_stacktrace_if_debug():
debug_flag = False
if 'args' in globals() and 'debug' in args:
debug_flag = args.debug
if debug_flag:
traceback.print_exc(file=sys.stdout)
#global variables for the multiprocessing
global args
global include_idxs
global len_amplicon
global exon_positions
global splicing_positions
parser = argparse.ArgumentParser(description='CRISPResso Parameters',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-r1','--fastq_r1', type=str, help='First fastq file', required=True,default='Fastq filename' )
parser.add_argument('-r2','--fastq_r2', type=str, help='Second fastq file for paired end reads',default='')
parser.add_argument('-a','--amplicon_seq', type=str, help='Amplicon Sequence', required=True)
#optional
parser.add_argument('-g','--guide_seq', help="sgRNA sequence, if more than one, please separate by comma/s. Note that the sgRNA needs to be input as the guide RNA sequence (usually 20 nt) immediately adjacent to but not including the PAM sequence (5' of NGG for SpCas9). If the PAM is found on the opposite strand with respect to the Amplicon Sequence, ensure the sgRNA sequence is also found on the opposite strand. The CRISPResso convention is to depict the expected cleavage position using the value of the parameter cleavage_offset nt 3' from the end of the guide. In addition, the use of alternate nucleases to SpCas9 is supported. For example, if using the Cpf1 system, enter the sequence (usually 20 nt) immediately 3' of the PAM sequence and explicitly set the cleavage_offset parameter to 1, since the default setting of -3 is suitable only for SpCas9.", default='')
parser.add_argument('-e','--expected_hdr_amplicon_seq', help='Amplicon sequence expected after HDR', default='')
parser.add_argument('-d','--donor_seq', help='Donor Sequence. This optional input comprises a subsequence of the expected HDR amplicon to be highlighted in plots.', default='')
parser.add_argument('-c','--coding_seq',  help='Subsequence/s of the amplicon sequence covering one or more coding sequences for the frameshift analysis. If more than one (for example, split by intron/s), please separate by comma.', default='')
parser.add_argument('-q','--min_average_read_quality', type=int, help='Minimum average quality score (phred33) to keep a read', default=0)
parser.add_argument('-s','--min_single_bp_quality', type=int, help='Minimum single bp score (phred33) to keep a read', default=0)
parser.add_argument('--min_identity_score', type=float, help='Minimum identity score for the alignment', default=60.0)
parser.add_argument('-n','--name', help='Output name', default='')
parser.add_argument('-o','--output_folder', help='', default='')
parser.add_argument('--split_paired_end',help='Splits a single fastq file containing paired end reads into two files before running CRISPResso',action='store_true')
parser.add_argument('--trim_sequences',help='Enable the trimming of Illumina adapters with Trimmomatic',action='store_true')
parser.add_argument('--trimmomatic_options_string', type=str, help='Override options for Trimmomatic',default=' ILLUMINACLIP:%s:0:90:10:0:true MINLEN:40' % get_data('NexteraPE-PE.fa'))
parser.add_argument('--min_paired_end_reads_overlap', type=int, help='Parameter for the FLASH read merging step. Minimum required overlap length between two reads to provide a confident overlap. ', default=4)
parser.add_argument('--max_paired_end_reads_overlap', type=int, help='Parameter for the FLASH merging step. Maximum overlap length expected in approximately 90%% of read pairs. Please see the FLASH manual for more information.', default=100)
parser.add_argument('--hide_mutations_outside_window_NHEJ',help='This parameter allows visualizing only the mutations overlapping the cleavage site and used to classify a read as NHEJ. This parameter has no effect on the quantification of the NHEJ. It may be helpful to mask pre-existing and known mutations or sequencing errors outside the window used for quantification of NHEJ events.',action='store_true')
parser.add_argument('-w','--window_around_sgrna', type=int, help='Window(s) in bp around the cleavage position (half on each side) as determined by the provided guide RNA sequence to quantify the indels. Any indels outside this window are excluded. A value of 0 disables this filter.', default=1)
parser.add_argument('--cleavage_offset', type=int, help="Cleavage offset to use with respect to the 3' end of the provided sgRNA sequence. Remember that the sgRNA sequence must be entered without the PAM. The default is -3 and is suitable for the SpCas9 system. For alternate nucleases, other cleavage offsets may be appropriate, for example, if using Cpf1 this parameter would be set to 1.", default=-3)
parser.add_argument('--exclude_bp_from_left', type=int, help='Exclude bp from the left side of the amplicon sequence for the quantification of the indels', default=15)
parser.add_argument('--exclude_bp_from_right', type=int, help='Exclude bp from the right side of the amplicon sequence for the quantification of the indels', default=15)
parser.add_argument('--hdr_perfect_alignment_threshold', type=float, help='Sequence homology %% for an HDR occurrence', default=98.0)
parser.add_argument('--ignore_substitutions',help='Ignore substitution events for the quantification and visualization',action='store_true')
parser.add_argument('--ignore_insertions',help='Ignore insertion events for the quantification and visualization',action='store_true')
parser.add_argument('--ignore_deletions',help='Ignore deletion events for the quantification and visualization',action='store_true')
parser.add_argument('--needle_options_string',type=str,help='Override options for the Needle aligner',default='-gapopen=10 -gapextend=0.5 -awidth3=5000')
parser.add_argument('--keep_intermediate',help='Keep all the intermediate files',action='store_true')
parser.add_argument('--dump',help='Dump numpy arrays and pandas dataframes to file for debugging purposes',action='store_true')
parser.add_argument('--save_also_png',help='Also save .png images in addition to the .pdf files',action='store_true')
parser.add_argument('-p','--n_processes',type=int, help='Specify the number of processes to use for the quantification.\
Please use with caution since increasing this parameter will significantly increase the memory required to run CRISPResso.',default=1)
parser.add_argument('--offset_around_cut_to_plot', type=int, help='Offset to use to summarize alleles around the cut site in the alleles table plot.', default=20)
parser.add_argument('--min_frequency_alleles_around_cut_to_plot', type=float, help='Minimum %% reads required to report an allele in the alleles table plot.', default=0.2)
parser.add_argument('--max_rows_alleles_around_cut_to_plot', type=int, help='Maximum number of rows to report in the alleles table plot. ', default=50)
parser.add_argument('--debug', action='store_true', help='Print stack trace on error.')
args = parser.parse_args()
#check files
check_file(args.fastq_r1)
if args.fastq_r2:
check_file(args.fastq_r2)
#normalize the name and remove disallowed characters
if args.name:
clean_name=slugify(args.name)
if args.name!= clean_name:
warn('The specified name %s contained disallowed characters and was changed to: %s' % (args.name,clean_name))
args.name=clean_name
#amplicon sequence check
#make everything uppercase!
args.amplicon_seq=args.amplicon_seq.strip().upper()
wrong_nt=find_wrong_nt(args.amplicon_seq)
if wrong_nt:
raise NTException('The amplicon sequence contains wrong characters:%s' % ' '.join(wrong_nt))
len_amplicon=len(args.amplicon_seq)
if args.guide_seq:
cut_points=[]
sgRNA_intervals=[]
offset_plots=[]
sgRNA_sequences=[]
args.guide_seq=args.guide_seq.strip().upper()
for current_guide_seq in args.guide_seq.split(','):
if current_guide_seq in args.amplicon_seq:
offset_plots.append(1)
else:
offset_plots.append(0)
wrong_nt=find_wrong_nt(current_guide_seq)
if wrong_nt:
raise NTException('The sgRNA sequence contains wrong characters:%s' % ' '.join(wrong_nt))
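#offset_fw/offset_rc locate the predicted cleavage position: cleavage_offset nt from the 3' end of the guide
#when the guide matches the forward strand, and the mirrored offset when it matches the reverse complement.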
offset_fw=args.cleavage_offset+len(current_guide_seq)-1
offset_rc=(-args.cleavage_offset)-1
cut_points+=[m.start() + offset_fw for m in re.finditer(current_guide_seq, args.amplicon_seq)]+[m.start() + offset_rc for m in re.finditer(reverse_complement(current_guide_seq), args.amplicon_seq)]
sgRNA_intervals+=[(m.start(),m.start()+len(current_guide_seq)-1) for m in re.finditer(current_guide_seq, args.amplicon_seq)]+[(m.start(),m.start()+len(current_guide_seq)-1) for m in re.finditer(reverse_complement(current_guide_seq), args.amplicon_seq)]
sgRNA_sequences.append(current_guide_seq)
offset_plots=np.array(offset_plots)
if not cut_points:
raise SgRNASequenceException('The guide sequence/s provided is(are) not present in the amplicon sequence! \n\nPlease check your input!')
else:
info('Cut Points from guide seq:%s' % cut_points)
else:
cut_points=[]
sgRNA_intervals=[]
offset_plots=np.array([])
sgRNA_sequences=[]
if args.expected_hdr_amplicon_seq:
args.expected_hdr_amplicon_seq=args.expected_hdr_amplicon_seq.strip().upper()
if args.expected_hdr_amplicon_seq == args.amplicon_seq:
raise AmpliconEqualDonorException('The amplicon sequence expected after an HDR and the reference amplicon cannot be the same! \n\nPlease check your input!')
wrong_nt=find_wrong_nt(args.expected_hdr_amplicon_seq)
if wrong_nt:
raise NTException('The amplicon sequence expected after an HDR contains wrong characters:%s' % ' '.join(wrong_nt))
#if len(args.expected_hdr_amplicon_seq)!=len(args.amplicon_seq):
aligned_ref,aligned_exp=pairwise2.align.globalxx (args.amplicon_seq,args.expected_hdr_amplicon_seq)[0][:2]
identity_ref_rep=sum([1.0 for a,b in zip(aligned_ref,aligned_exp) if a==b ])/len(aligned_ref)*100
if identity_ref_rep < args.min_identity_score:
raise DonorSequenceException('The amplicon sequence expected after an HDR should be provided as the reference amplicon sequence with the relevant part of the donor sequence replaced, and not just as the donor sequence. \n\nPlease check your input!')
if args.donor_seq:
args.donor_seq=args.donor_seq.strip().upper()
wrong_nt=find_wrong_nt(args.donor_seq)
if wrong_nt:
raise NTException('The donor sequence contains wrong characters:%s' % ' '.join(wrong_nt))
if args.donor_seq not in args.expected_hdr_amplicon_seq:
raise CoreDonorSequenceNotContainedException('The donor sequence provided is not present in the expected HDR amplicon sequence, or the expected HDR amplicon sequence parameter (-e) is not defined. \n\nPlease check your input!')
positions_core_donor_seq=[(m.start(),m.start()+len(args.donor_seq)) for m in re.finditer('(?=%s)' % args.donor_seq, args.expected_hdr_amplicon_seq)]
if len(positions_core_donor_seq)>1:
raise CoreDonorSequenceNotUniqueException('The donor sequence provided is not unique in the expected HDR amplicon sequence. \n\nPlease check your input!')
core_donor_seq_st_en=positions_core_donor_seq[0]
###FRAMESHIFT SUPPORT###
if args.coding_seq:
PERFORM_FRAMESHIFT_ANALYSIS=True
exon_positions=set()
exon_intervals=[]
splicing_positions=[]
for exon_seq in args.coding_seq.strip().upper().split(','):
#check for wrong NT
wrong_nt=find_wrong_nt(exon_seq)
if wrong_nt:
raise NTException('The coding sequence contains wrong characters:%s' % ' '.join(wrong_nt))
st_exon=args.amplicon_seq.find(exon_seq )
if st_exon<0:
raise ExonSequenceException('The coding subsequence/s provided:%s is(are) not contained in the amplicon sequence.' % exon_seq)
en_exon=st_exon+len(exon_seq ) #this does not include the upper bound, as usual in Python
exon_intervals.append((st_exon,en_exon))
exon_positions=exon_positions.union(set(range(st_exon,en_exon)))
#consider 2 base pairs before and after each exon
splicing_positions+=[max(0,st_exon-2),max(0,st_exon-1),min(len_amplicon-1, en_exon),min(len_amplicon-1, en_exon+1)]
exon_positions=sorted(exon_positions)
#protect against incorrect splitting of exons by the user, to avoid false splicing sites
splicing_positions=set(splicing_positions).difference(exon_positions)
else:
PERFORM_FRAMESHIFT_ANALYSIS=False
#we have insertions/deletions that change the concatenated exon sequence length and the difference between the final sequence
#and the original sequence length is not a multiple of 3
MODIFIED_FRAMESHIFT=0
#we have insertions/deletions that change the concatenated exon sequence length and the difference between the final sequence
#and the original sequence length is a multiple of 3. This case also applies when no indels are present but there are
#substitutions
MODIFIED_NON_FRAMESHIFT=0
#the exons are not touched at all; the read can still be modified elsewhere though
NON_MODIFIED_NON_FRAMESHIFT=0
SPLICING_SITES_MODIFIED=0
################
get_name_from_fasta=lambda x: os.path.basename(x).replace('.fastq','').replace('.gz','')
if not args.name:
if args.fastq_r2!='':
database_id='%s_%s' % (get_name_from_fasta(args.fastq_r1),get_name_from_fasta(args.fastq_r2))
else:
database_id='%s' % get_name_from_fasta(args.fastq_r1)
else:
database_id=args.name
OUTPUT_DIRECTORY='CRISPResso_on_%s' % database_id
if args.output_folder:
OUTPUT_DIRECTORY=os.path.join(os.path.abspath(args.output_folder),OUTPUT_DIRECTORY)
_jp=lambda filename: os.path.join(OUTPUT_DIRECTORY,filename) #handy function to put a file in the output directory
log_filename=_jp('CRISPResso_RUNNING_LOG.txt')
try:
os.makedirs(OUTPUT_DIRECTORY)
info('Creating Folder %s' % OUTPUT_DIRECTORY)
info('Done!')
except:
warn('Folder %s already exists.' % OUTPUT_DIRECTORY)
finally:
logging.getLogger().addHandler(logging.FileHandler(log_filename))
with open(log_filename,'w+') as outfile:
outfile.write('[Command used]:\nCRISPResso %s\n\n[Execution log]:\n' % ' '.join(sys.argv))
if args.split_paired_end:
if args.fastq_r2!='':
raise Exception('The option --split_paired_end is available only when a single fastq file is specified!')
else:
info('Splitting paired end single fastq file in two files...')
args.fastq_r1,args.fastq_r2=split_paired_end_reads_single_file(args.fastq_r1,
output_filename_r1=_jp(os.path.basename(args.fastq_r1.replace('.fastq','')).replace('.gz','')+'_splitted_r1.fastq.gz'),
output_filename_r2=_jp(os.path.basename(args.fastq_r1.replace('.fastq','')).replace('.gz','')+'_splitted_r2.fastq.gz'),)
splitted_files_to_remove=[args.fastq_r1,args.fastq_r2]
info('Done!')
if args.min_average_read_quality>0 or args.min_single_bp_quality>0:
info('Filtering reads with average bp quality < %d and single bp quality < %d ...' % (args.min_average_read_quality,args.min_single_bp_quality))
if args.fastq_r2!='':
args.fastq_r1,args.fastq_r2=filter_pe_fastq_by_qual(args.fastq_r1,
args.fastq_r2,
output_filename_r1=_jp(os.path.basename(args.fastq_r1.replace('.fastq','')).replace('.gz','')+'_filtered.fastq.gz'),
output_filename_r2=_jp(os.path.basename(args.fastq_r2.replace('.fastq','')).replace('.gz','')+'_filtered.fastq.gz'),
min_bp_quality=args.min_average_read_quality,
min_single_bp_quality=args.min_single_bp_quality,
)
else:
args.fastq_r1=filter_se_fastq_by_qual(args.fastq_r1,
output_filename=_jp(os.path.basename(args.fastq_r1).replace('.fastq','').replace('.gz','')+'_filtered.fastq.gz'),
min_bp_quality=args.min_average_read_quality,
min_single_bp_quality=args.min_single_bp_quality,
)
if args.fastq_r2=='': #single end reads
#check if we need to trim
if not args.trim_sequences:
#create a symbolic link
symlink_filename=_jp(os.path.basename(args.fastq_r1))
force_symlink(os.path.abspath(args.fastq_r1),symlink_filename)
output_forward_filename=symlink_filename
else:
output_forward_filename=_jp('reads.trimmed.fq.gz')
#Trimming with trimmomatic
cmd='java -jar %s SE -phred33 %s %s %s >>%s 2>&1'\
% (get_data('trimmomatic-0.33.jar'),args.fastq_r1,
output_forward_filename,
args.trimmomatic_options_string.replace('NexteraPE-PE.fa','TruSeq3-SE.fa'),
log_filename)
#print cmd
TRIMMOMATIC_STATUS=sb.call(cmd,shell=True)
if TRIMMOMATIC_STATUS:
raise TrimmomaticException('TRIMMOMATIC failed to run, please check the log file.')
processed_output_filename=output_forward_filename
else:#paired end reads case
if not args.trim_sequences:
output_forward_paired_filename=args.fastq_r1
output_reverse_paired_filename=args.fastq_r2
else:
info('Trimming sequences with Trimmomatic...')
output_forward_paired_filename=_jp('output_forward_paired.fq.gz')
output_forward_unpaired_filename=_jp('output_forward_unpaired.fq.gz')
output_reverse_paired_filename=_jp('output_reverse_paired.fq.gz')
output_reverse_unpaired_filename=_jp('output_reverse_unpaired.fq.gz')
#Trimming with trimmomatic
cmd='java -jar %s PE -phred33 %s %s %s %s %s %s %s >>%s 2>&1'\
% (get_data('trimmomatic-0.33.jar'),
args.fastq_r1,args.fastq_r2,output_forward_paired_filename,
output_forward_unpaired_filename,output_reverse_paired_filename,
output_reverse_unpaired_filename,args.trimmomatic_options_string,log_filename)
#print cmd
TRIMMOMATIC_STATUS=sb.call(cmd,shell=True)
if TRIMMOMATIC_STATUS:
raise TrimmomaticException('TRIMMOMATIC failed to run, please check the log file.')
info('Done!')
info('Estimating average read length...')
if get_n_reads_fastq(output_forward_paired_filename):
avg_read_length=get_avg_read_lenght_fastq(output_forward_paired_filename)
std_fragment_length=int(len_amplicon*0.1)
else:
raise NoReadsAfterQualityFiltering('No reads survived the average or single bp quality filtering.')
#Merging with Flash
info('Merging paired sequences with Flash...')
cmd='flash %s %s --allow-outies --max-overlap %d --min-overlap %d -f %d -r %d -s %d -z -d %s >>%s 2>&1' %\
(output_forward_paired_filename,
output_reverse_paired_filename,
args.max_paired_end_reads_overlap,
args.min_paired_end_reads_overlap,
len_amplicon,avg_read_length,
std_fragment_length,
OUTPUT_DIRECTORY,log_filename)
FLASH_STATUS=sb.call(cmd,shell=True)
if FLASH_STATUS:
raise FlashException('Flash failed to run, please check the log file.')
info('Done!')
flash_hist_filename=_jp('out.hist')
flash_histogram_filename=_jp('out.histogram')
flash_not_combined_1_filename=_jp('out.notCombined_1.fastq.gz')
flash_not_combined_2_filename=_jp('out.notCombined_2.fastq.gz')
processed_output_filename=_jp('out.extendedFrags.fastq.gz')
#count reads
N_READS_INPUT=get_n_reads_fastq(args.fastq_r1)
N_READS_AFTER_PREPROCESSING=get_n_reads_fastq(processed_output_filename)
if N_READS_AFTER_PREPROCESSING == 0:
raise NoReadsAfterQualityFiltering('No reads in input or no reads survived the average or single bp quality filtering.')
info('Preparing files for the alignment...')
#parsing flash output and prepare the files for alignment
database_fasta_filename=_jp('%s_database.fa' % database_id)
needle_output_filename=_jp('needle_output_%s.txt.gz' % database_id)
#write the .fa file only for the amplicon; the rest we pipe through awk on the fly!
with open(database_fasta_filename,'w+') as outfile:
outfile.write('>%s\n%s\n' % (database_id,args.amplicon_seq))
if args.expected_hdr_amplicon_seq:
database_repair_fasta_filename=_jp('%s_database_repair.fa' % database_id)
needle_output_repair_filename=_jp('needle_output_repair_%s.txt.gz' % database_id)
with open(database_repair_fasta_filename,'w+') as outfile:
outfile.write('>%s\n%s\n' % (database_id,args.expected_hdr_amplicon_seq))
info('Done!')
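#parse_needle_output scans the (gzipped) EMBOSS needle report: for every '# Aligned_sequences' block it extracts
#the read ID and the percent identity and, unless just_score is set, also the aligned reference, the match string
#and the aligned read (with its length), returning everything as a DataFrame indexed by read ID.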
def parse_needle_output(needle_filename,name='seq',just_score=False):
needle_data=[]
try:
needle_infile=gzip.open(needle_filename)
line=needle_infile.readline()
while line:
while line and ('# Aligned_sequences' not in line):
line=needle_infile.readline()
if line:
#print line
needle_infile.readline() #skip another line
line=needle_infile.readline()
id_seq=line.split()[-1].replace('_',':')
for _ in range(5):
needle_infile.readline()
line=needle_infile.readline()
identity_seq=eval(line.strip().split(' ')[-1].replace('%','').replace(')','').replace('(',''))
if just_score:
needle_data.append([id_seq,identity_seq])
else:
for _ in range(7):
needle_infile.readline()
line=needle_infile.readline()
aln_ref_seq=line.split()[2]
aln_str=needle_infile.readline()[21:].rstrip('\n')
line=needle_infile.readline()
aln_query_seq=line.split()[2]
aln_query_len=line.split()[3]
needle_data.append([id_seq,identity_seq,aln_query_len,aln_ref_seq,aln_str,aln_query_seq])
if just_score:
needle_infile.close()
return pd.DataFrame(needle_data,columns=['ID','score_'+name]).set_index('ID')
else:
needle_infile.close()
return pd.DataFrame(needle_data,columns=['ID','score_'+name,'length','ref_seq','align_str','align_seq']).set_index('ID')
except:
raise NeedleException('Failed to parse the output of needle!')
info('Aligning sequences...')
#Alignment here
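#the awk one-liner converts the fastq stream to fasta (keeping the header and sequence of each 4-line record),
#sed replaces ':' with '_' in the read IDs (reverted when parsing), and needle reads the fasta from stdin,
#writing the gzipped alignment report.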
cmd=(('cat %s |'% processed_output_filename )+\
(' gunzip |' if processed_output_filename.endswith('.gz') else ' '))+\
r''' awk 'NR % 4 == 1 {print ">" $0} NR % 4 ==2 {print $0}' '''+\
" | sed 's/:/_/g' | needle -asequence=%s -bsequence=/dev/stdin -outfile=/dev/stdout %s 2>> %s | gzip >%s"\
%(database_fasta_filename,args.needle_options_string,log_filename,needle_output_filename)
NEEDLE_OUTPUT=sb.call(cmd,shell=True)
if NEEDLE_OUTPUT:
raise NeedleException('Needle failed to run, please check the log file.')
#If we have a donor sequence we just compare the fq in the two cases and see which one aligns better
if args.expected_hdr_amplicon_seq:
cmd_repair=(('cat %s |'% processed_output_filename )+\
(' gunzip |' if processed_output_filename.endswith('.gz') else ' '))+\
r''' awk 'NR % 4 == 1 {print ">" $0} NR % 4 ==2 {print $0}' '''+\
" | sed 's/:/_/g' | needle -asequence=%s -bsequence=/dev/stdin -outfile=/dev/stdout %s 2>> %s | gzip >%s"\
%(database_repair_fasta_filename,args.needle_options_string,log_filename,needle_output_repair_filename)
NEEDLE_OUTPUT=sb.call(cmd_repair,shell=True)
if NEEDLE_OUTPUT:
raise NeedleException('Needle failed to run, please check the log file.')
info('Done!')
#merge the flow
if args.expected_hdr_amplicon_seq:
df_database=parse_needle_output(needle_output_filename,'ref')
df_database_repair=parse_needle_output(needle_output_repair_filename,'repaired',just_score=True)
df_database_and_repair=df_database.join(df_database_repair)
del df_database
del df_database_repair
#filter bad alignments
N_TOTAL_ALSO_UNALIGNED=df_database_and_repair.shape[0]*1.0
#find reads that failed to align and try on the reverse complement
sr_not_aligned=df_database_and_repair.ix[(df_database_and_repair.score_ref <args.min_identity_score)\
& (df_database_and_repair.score_repaired< args.min_identity_score)]\
.align_seq.apply(lambda x: x.replace('_',''))
#filter out not aligned reads
df_database_and_repair=\
df_database_and_repair.ix[\
(df_database_and_repair.score_ref>args.min_identity_score)\
|(df_database_and_repair.score_repaired>args.min_identity_score)]
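#score_diff > 0 means the read aligns better to the reference amplicon, < 0 better to the expected HDR amplicon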
df_database_and_repair['score_diff']=df_database_and_repair.score_ref-df_database_and_repair.score_repaired
df_needle_alignment=df_database_and_repair
del df_database_and_repair
else:
df_needle_alignment=parse_needle_output(needle_output_filename,'ref')
N_TOTAL_ALSO_UNALIGNED=df_needle_alignment.shape[0]*1.0
sr_not_aligned=df_needle_alignment.ix[(df_needle_alignment.score_ref <args.min_identity_score)]\
.align_seq.apply(lambda x: x.replace('_',''))
#filter out not aligned reads
df_needle_alignment=df_needle_alignment.ix[df_needle_alignment.score_ref>args.min_identity_score]
#check if the not aligned reads are in the reverse complement
if sr_not_aligned.count():
#write fastq_not_aligned
fasta_not_aligned_filename=_jp('not_aligned_amplicon_forward.fa.gz')
outfile=gzip.open(fasta_not_aligned_filename,'w+')
for x in sr_not_aligned.iteritems():
outfile.write('>%s\n%s\n' % (x[0],x[1]))
#write reverse complement of ampl and expected amplicon
database_rc_fasta_filename=_jp('%s_database_rc.fa' % database_id)
needle_output_rc_filename=_jp('needle_output_rc_%s.txt.gz' % database_id)
info('Align sequences to reverse complement of the amplicon...')
with open(database_rc_fasta_filename,'w+') as outfile:
outfile.write('>%s\n%s\n' % (database_id,reverse_complement(args.amplicon_seq)))
if args.expected_hdr_amplicon_seq:
database_repair_rc_fasta_filename=_jp('%s_database_repair_rc.fa' % database_id)
needle_output_repair_rc_filename=_jp('needle_output_repair_rc_%s.txt.gz' % database_id)
with open(database_repair_rc_fasta_filename,'w+') as outfile:
outfile.write('>%s\n%s\n' % (database_id,reverse_complement(args.expected_hdr_amplicon_seq)))
info('Done!')
#Now we do the alignment
cmd="zcat < %s | sed 's/:/_/g' | needle -asequence=%s -bsequence=/dev/stdin -outfile=/dev/stdout %s 2>> %s | gzip >%s"\
%(fasta_not_aligned_filename,database_rc_fasta_filename,args.needle_options_string,log_filename,needle_output_rc_filename)
NEEDLE_OUTPUT=sb.call(cmd,shell=True)
if NEEDLE_OUTPUT:
raise NeedleException('Needle failed to run, please check the log file.')
if args.expected_hdr_amplicon_seq:
cmd="zcat < %s | sed 's/:/_/g' | needle -asequence=%s -bsequence=/dev/stdin -outfile=/dev/stdout %s 2>> %s | gzip >%s"\
%(fasta_not_aligned_filename,database_repair_rc_fasta_filename,args.needle_options_string,log_filename,needle_output_repair_rc_filename)
NEEDLE_OUTPUT=sb.call(cmd,shell=True)
if NEEDLE_OUTPUT:
raise NeedleException('Needle failed to run, please check the log file.')
#merge the flow rev
if args.expected_hdr_amplicon_seq:
df_database_rc=parse_needle_output(needle_output_rc_filename,'ref')
df_database_repair_rc=parse_needle_output(needle_output_repair_rc_filename,'repaired',just_score=True)
df_database_and_repair_rc=df_database_rc.join(df_database_repair_rc)
del df_database_rc
del df_database_repair_rc
#filter bad alignments also to rc
df_database_and_repair_rc=\
df_database_and_repair_rc.ix[\
(df_database_and_repair_rc.score_ref>args.min_identity_score)\
|(df_database_and_repair_rc.score_repaired>args.min_identity_score)]
df_database_and_repair_rc['score_diff']=df_database_and_repair_rc.score_ref-df_database_and_repair_rc.score_repaired
df_needle_alignment_rc=df_database_and_repair_rc
del df_database_and_repair_rc
else:
df_needle_alignment_rc=parse_needle_output(needle_output_rc_filename,'ref')
#filter out not aligned reads
df_needle_alignment_rc=df_needle_alignment_rc.ix[df_needle_alignment_rc.score_ref>args.min_identity_score]
#reverse complement and invert the align string so we have everything in the positive strand
df_needle_alignment_rc['ref_seq']=df_needle_alignment_rc['ref_seq'].apply(reverse_complement)
df_needle_alignment_rc['align_seq']=df_needle_alignment_rc['align_seq'].apply(reverse_complement)
df_needle_alignment_rc['align_str']=df_needle_alignment_rc['align_str'].apply(lambda x: x[::-1])
#avoid duplicate IDs when appending the reverse-complement alignments
df_needle_alignment_rc.index=map(lambda x:'_'.join([x,'RC']),df_needle_alignment_rc.index)
#append the RC reads to the aligned reads in the original orientation
df_needle_alignment=df_needle_alignment.append(df_needle_alignment_rc)
del df_needle_alignment_rc
#check for duplicates
try:
assert df_needle_alignment.shape[0]== df_needle_alignment.index.unique().shape[0]
except:
raise DuplicateSequenceIdException('The .fastq file/s contain/s duplicate sequence IDs')
#Initializations
info('Quantifying indels/substitutions...')
df_needle_alignment['UNMODIFIED']=(df_needle_alignment.score_ref==100)
#the rest we have to inspect one by one, potentially excluding regions
df_needle_alignment['MIXED']=False
df_needle_alignment['HDR']=False
df_needle_alignment['NHEJ']=False
df_needle_alignment['n_mutated']=0
df_needle_alignment['n_inserted']=0
df_needle_alignment['n_deleted']=0
N_TOTAL=df_needle_alignment.shape[0]*1.0
if N_TOTAL==0:
error('Zero sequences aligned')
raise NoReadsAlignedException('Zero sequences aligned, please check your amplicon sequence')
#remove the mutations in bp equal to 'N'
if 'N' in args.amplicon_seq:
info('Your amplicon sequence contains one or more N, excluding these bp for the indel quantification...')
def ignore_N_in_alignment(row):
row['align_str']=''.join([('|' if (row['ref_seq'][idx]=='N') else c ) for idx,c in enumerate(row['align_str'])])
if len(set(row['align_str']))==1:
row['UNMODIFIED']=True
return row
df_needle_alignment=df_needle_alignment.apply(ignore_N_in_alignment,axis=1)
#####QUANTIFICATION START
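#compute_ref_positions maps every column of the aligned reference back to a coordinate on the ungapped amplicon:
#bases keep their 0-based position, while gap columns (insertions in the read) get the negative of the position
#of the next reference base (-1 for a leading gap), so indels can be located on the reference later on.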
def compute_ref_positions(ref_seq):
pos_idxs=[]
idx=0
for c in ref_seq:
if c in set(['A','T','C','G','N']):
pos_idxs.append(idx)
idx+=1
else:
if idx==0:
pos_idxs.append(-1)
else:
pos_idxs.append(-idx)
return np.array(pos_idxs)
#compute positions relative to the alignment
df_needle_alignment['ref_positions']=df_needle_alignment['ref_seq'].apply(compute_ref_positions)
#INITIALIZATIONS
re_find_indels=re.compile("(-*-)")
re_find_substitutions=re.compile("(\.*\.)")
effect_vector_insertion=np.zeros(len_amplicon)
effect_vector_deletion=np.zeros(len_amplicon)
effect_vector_mutation=np.zeros(len_amplicon)
effect_vector_any=np.zeros(len_amplicon)
effect_vector_insertion_mixed=np.zeros(len_amplicon)
effect_vector_deletion_mixed=np.zeros(len_amplicon)
effect_vector_mutation_mixed=np.zeros(len_amplicon)
effect_vector_insertion_hdr=np.zeros(len_amplicon)
effect_vector_deletion_hdr=np.zeros(len_amplicon)
effect_vector_mutation_hdr=np.zeros(len_amplicon)
effect_vector_insertion_noncoding=np.zeros(len_amplicon)
effect_vector_deletion_noncoding=np.zeros(len_amplicon)
effect_vector_mutation_noncoding=np.zeros(len_amplicon)
hist_inframe=defaultdict(lambda :0)
hist_frameshift=defaultdict(lambda :0)
avg_vector_del_all=np.zeros(len_amplicon)
avg_vector_ins_all=np.zeros(len_amplicon)
#look around the sgRNA(s) only?
if cut_points and args.window_around_sgrna>0:
include_idxs=[]
half_window=max(1,args.window_around_sgrna/2)
for cut_p in cut_points:
st=max(0,cut_p-half_window+1)
en=min(len(args.amplicon_seq)-1,cut_p+half_window+1)
include_idxs.append(range(st,en))
else:
include_idxs=range(len(args.amplicon_seq))
exclude_idxs=[]
if args.exclude_bp_from_left:
exclude_idxs+=range(args.exclude_bp_from_left)
if args.exclude_bp_from_right:
exclude_idxs+=range(len_amplicon)[-args.exclude_bp_from_right:]
#flatten the arrays to avoid errors with old numpy library
include_idxs=np.ravel(include_idxs)
exclude_idxs=np.ravel(exclude_idxs)
include_idxs=set(np.setdiff1d(include_idxs,exclude_idxs))
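#include_idxs now holds the reference positions used for quantification: the window(s) around each predicted
#cut site (or the whole amplicon when no window is set) minus the excluded left/right flanking bases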
#handy generator to split the dataframe into chunks; np.array_split is slow!
def get_chunk(df_needle_alignment,n_processes=args.n_processes):
for g,df in df_needle_alignment.groupby(np.arange(len(df_needle_alignment)) // (len(df_needle_alignment)/(args.n_processes-1))):
yield df
#Use a Pool of processes, or just a single process
if args.n_processes > 1:
info('[CRISPResso quantification is running in parallel mode with %d processes]' % min(df_needle_alignment.shape[0],args.n_processes) )
pool = mp.Pool(processes=min(df_needle_alignment.shape[0],args.n_processes))
chunks_computed=[]
for result in pool.imap(process_df_chunk,get_chunk(df_needle_alignment)):
df_needle_alignment_chunk, effect_vector_insertion_chunk,effect_vector_deletion_chunk,\
effect_vector_mutation_chunk,effect_vector_any_chunk,effect_vector_insertion_mixed_chunk,effect_vector_deletion_mixed_chunk,\
effect_vector_mutation_mixed_chunk,effect_vector_insertion_hdr_chunk,effect_vector_deletion_hdr_chunk,effect_vector_mutation_hdr_chunk,\
effect_vector_insertion_noncoding_chunk,effect_vector_deletion_noncoding_chunk,effect_vector_mutation_noncoding_chunk,hist_inframe_chunk,\
hist_frameshift_chunk,avg_vector_del_all_chunk,avg_vector_ins_all_chunk,MODIFIED_FRAMESHIFT_chunk,MODIFIED_NON_FRAMESHIFT_chunk,NON_MODIFIED_NON_FRAMESHIFT_chunk,\
SPLICING_SITES_MODIFIED_chunk=result
chunks_computed.append(df_needle_alignment_chunk)
effect_vector_insertion+=effect_vector_insertion_chunk
effect_vector_deletion+=effect_vector_deletion_chunk
effect_vector_mutation+=effect_vector_mutation_chunk
effect_vector_any+=effect_vector_any_chunk
effect_vector_insertion_mixed+=effect_vector_insertion_mixed_chunk
effect_vector_deletion_mixed+=effect_vector_deletion_mixed_chunk
effect_vector_mutation_mixed+=effect_vector_mutation_mixed_chunk
effect_vector_insertion_hdr+=effect_vector_insertion_hdr_chunk
effect_vector_deletion_hdr+=effect_vector_deletion_hdr_chunk
effect_vector_mutation_hdr+=effect_vector_mutation_hdr_chunk
effect_vector_insertion_noncoding+=effect_vector_insertion_noncoding_chunk
effect_vector_deletion_noncoding+=effect_vector_deletion_noncoding_chunk
effect_vector_mutation_noncoding+=effect_vector_mutation_noncoding_chunk
add_hist(hist_inframe_chunk,hist_inframe)
add_hist(hist_frameshift_chunk,hist_frameshift)
avg_vector_del_all+=avg_vector_del_all_chunk
avg_vector_ins_all+=avg_vector_ins_all_chunk
MODIFIED_FRAMESHIFT+=MODIFIED_FRAMESHIFT_chunk
MODIFIED_NON_FRAMESHIFT+=MODIFIED_NON_FRAMESHIFT_chunk
NON_MODIFIED_NON_FRAMESHIFT+=NON_MODIFIED_NON_FRAMESHIFT_chunk
SPLICING_SITES_MODIFIED+=SPLICING_SITES_MODIFIED_chunk
pool.close()
pool.join()
df_needle_alignment=pd.concat(chunks_computed)
del chunks_computed
else:
df_needle_alignment, effect_vector_insertion,\
effect_vector_deletion,effect_vector_mutation,\
effect_vector_any,effect_vector_insertion_mixed,\
effect_vector_deletion_mixed,effect_vector_mutation_mixed,\
effect_vector_insertion_hdr,effect_vector_deletion_hdr,\
effect_vector_mutation_hdr,effect_vector_insertion_noncoding,\
effect_vector_deletion_noncoding,effect_vector_mutation_noncoding,\
hist_inframe,hist_frameshift,avg_vector_del_all,\
avg_vector_ins_all,MODIFIED_FRAMESHIFT,MODIFIED_NON_FRAMESHIFT,\
NON_MODIFIED_NON_FRAMESHIFT,SPLICING_SITES_MODIFIED= process_df_chunk(df_needle_alignment)
N_MODIFIED=df_needle_alignment['NHEJ'].sum()
N_UNMODIFIED=df_needle_alignment['UNMODIFIED'].sum()
N_MIXED_HDR_NHEJ=df_needle_alignment['MIXED'].sum()
N_REPAIRED=df_needle_alignment['HDR'].sum()
#disable known division warning
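#the avg_vector_* arrays appear to accumulate the total inserted/deleted length per position; dividing by the
#per-position event counts yields the average indel size, and positions without events (nan/inf) are reset to 0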
with np.errstate(divide='ignore',invalid='ignore'):
effect_vector_combined=100*effect_vector_any/float(N_TOTAL)
avg_vector_ins_all/=(effect_vector_insertion+effect_vector_insertion_hdr+effect_vector_insertion_mixed)
avg_vector_del_all/=(effect_vector_deletion+effect_vector_deletion_hdr+effect_vector_deletion_mixed)
avg_vector_ins_all[np.isnan(avg_vector_ins_all)]=0
avg_vector_del_all[np.isnan(avg_vector_del_all)]=0
avg_vector_ins_all[np.isinf(avg_vector_ins_all)]=0
avg_vector_del_all[np.isinf(avg_vector_del_all)]=0
if PERFORM_FRAMESHIFT_ANALYSIS:
if not dict(hist_inframe):
hist_inframe={0:0}
if not dict(hist_frameshift):
hist_frameshift={0:0}
info('Done!')
info('Calculating indel distribution based on the length of the reads...')
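#effective read length = reference amplicon length + bases inserted - bases deleted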
df_needle_alignment['effective_len']=df_needle_alignment.apply(lambda row: len_amplicon+row.n_inserted-row.n_deleted,axis=1)
info('Done!')
#write alleles table
info('Calculating alleles frequencies...')
def get_ref_positions(row,df_alignment):
#return list(df_alignment.ix[(row.Aligned_Sequence ,row.Reference_Sequence),'ref_positions'][0])
return list(df_alignment.loc[[(row.Aligned_Sequence,row.Reference_Sequence)]].iloc[0,].loc['ref_positions'])
df_alleles=df_needle_alignment.groupby(['align_seq','ref_seq','NHEJ','UNMODIFIED','HDR','n_deleted','n_inserted','n_mutated',]).size()
df_alleles=df_alleles.reset_index()
df_alleles.rename(columns={0:'#Reads','align_seq':'Aligned_Sequence','ref_seq':'Reference_Sequence'},inplace=True)
#df_alleles.set_index('Aligned_Sequence',inplace=True)
df_alleles['%Reads']=df_alleles['#Reads']/df_alleles['#Reads'].sum()*100
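#same packed-version trick as for seaborn above: DataFrame.sort was replaced by sort_values in pandas 0.17,
#so pick the API that matches the installed version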
if np.sum(np.array(map(int,pd.__version__.split('.')))*(100,10,1))< 170:
df_alleles.sort('#Reads',ascending=False,inplace=True)
else:
df_alleles.sort_values(by='#Reads',ascending=False,inplace=True)
#add ref positions for the plot around the cut sites
df_needle_alignment.set_index(['align_seq','ref_seq'],inplace=True)
df_needle_alignment.sort_index(inplace=True)
df_alleles['ref_positions']=df_alleles.apply(lambda x: get_ref_positions(x,df_needle_alignment),axis=1).values
info('Done!')
info('Making Plots...')
#plot effective length
if args.guide_seq:
min_cut=min(cut_points)
max_cut=max(cut_points)
xmin,xmax=-min_cut,len_amplicon-max_cut
else:
min_cut=len_amplicon/2
max_cut=len_amplicon/2
xmin,xmax=-min_cut,+max_cut
hdensity,hlengths=np.histogram(df_needle_alignment.effective_len-len_amplicon,np.arange(xmin,xmax))
hlengths=hlengths[:-1]
center_index=np.nonzero(hlengths==0)[0][0]
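#hlengths are indel sizes relative to the reference (negative = net deletion, positive = net insertion);
#center_index marks the zero-length bin, plotted separately in red as 'No indel'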
fig=plt.figure(figsize=(8.3,8))
plt.bar(0,hdensity[center_index],color='red',linewidth=0)
#plt.hold(True)
barlist=plt.bar(hlengths,hdensity,align='center',linewidth=0)
barlist[center_index].set_color('r')
plt.xlim([xmin,xmax])
plt.ylabel('Sequences (no.)')
plt.xlabel('Indel size (bp)')
plt.ylim([0,hdensity.max()*1.2])
plt.title('Indel size distribution')
lgd=plt.legend(['No indel','Indel'],loc='center', bbox_to_anchor=(0.5, -0.22),ncol=1, fancybox=True, shadow=True)
#lgd=plt.legend(loc='center', bbox_to_anchor=(0.5, -0.28),ncol=1, fancybox=True, shadow=True)
lgd.legendHandles[0].set_height(3)
lgd.legendHandles[1].set_height(3)
plt.savefig(_jp('1a.Indel_size_distribution_n_sequences.pdf'),bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('1a.Indel_size_distribution_n_sequences.png'),bbox_inches='tight')
plt.figure(figsize=(8.3,8))
plt.bar(0,hdensity[center_index]/(float(hdensity.sum()))*100.0,color='red',linewidth=0)
#plt.hold(True)
barlist=plt.bar(hlengths,hdensity/(float(hdensity.sum()))*100.0,align='center',linewidth=0)
barlist[center_index].set_color('r')
plt.xlim([xmin,xmax])
plt.title('Indel size distribution')
plt.ylabel('Sequences (%)')
plt.xlabel('Indel size (bp)')
#lgd=plt.legend(['No indel','Indel'])
lgd=plt.legend(['No indel','Indel'],loc='center', bbox_to_anchor=(0.5, -0.22),ncol=1, fancybox=True, shadow=True)
lgd.legendHandles[0].set_height(3)
lgd.legendHandles[1].set_height(3)
plt.savefig(_jp('1b.Indel_size_distribution_percentage.pdf'),bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('1b.Indel_size_distribution_percentage.png'),bbox_inches='tight')
####PIE CHARTS FOR HDR/NHEJ/MIXED/EVENTS###
if args.expected_hdr_amplicon_seq:
fig=plt.figure(figsize=(12*1.5,14.5*1.5))
ax1 = plt.subplot2grid((6,3), (0, 0), colspan=3, rowspan=5)
patches, texts, autotexts =ax1.pie([N_UNMODIFIED,N_MIXED_HDR_NHEJ,N_MODIFIED,N_REPAIRED],\
labels=['Unmodified\n(%d reads)' %N_UNMODIFIED,\
'Mixed HDR-NHEJ\n(%d reads)' %N_MIXED_HDR_NHEJ,
'NHEJ\n(%d reads)' % N_MODIFIED, \
'HDR\n(%d reads)' %N_REPAIRED,
],\
explode=(0,0,0,0),\
colors=[(1,0,0,0.2),(0,1,1,0.2),(0,0,1,0.2),(0,1,0,0.2)],autopct='%1.1f%%')
if cut_points or args.donor_seq:
ax2 = plt.subplot2grid((6,3), (5, 0), colspan=3, rowspan=1)
ax2.plot([0,len_amplicon],[0,0],'-k',lw=2,label='Amplicon sequence')
#plt.hold(True)
if args.donor_seq:
ax2.plot(core_donor_seq_st_en,[0,0],'-',lw=10,c=(0,1,0,0.5),label='Donor Sequence')
if cut_points:
ax2.plot(cut_points+offset_plots,np.zeros(len(cut_points)),'vr', ms=24,label='Predicted Cas9 cleavage site/s')
for idx,sgRNA_int in enumerate(sgRNA_intervals):
if idx==0:
ax2.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='sgRNA')
else:
ax2.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='_nolegend_')
plt.legend(bbox_to_anchor=(0, 0, 1., 0), ncol=1, mode="expand", borderaxespad=0.,numpoints=1)
plt.xlim(0,len_amplicon)
plt.axis('off')
proptease = fm.FontProperties()
proptease.set_size('xx-large')
plt.setp(autotexts, fontproperties=proptease)
plt.setp(texts, fontproperties=proptease)
plt.savefig(_jp('2.Unmodified_NHEJ_HDR_pie_chart.pdf'),pad_inches=1,bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('2.Unmodified_NHEJ_HDR_pie_chart.png'),pad_inches=1,bbox_inches='tight')
else:
fig=plt.figure(figsize=(12*1.5,14.5*1.5))
ax1 = plt.subplot2grid((6,3), (0, 0), colspan=3, rowspan=5)
patches, texts, autotexts =ax1.pie([N_UNMODIFIED/N_TOTAL*100,N_MODIFIED/N_TOTAL*100],\
labels=['Unmodified\n(%d reads)' %N_UNMODIFIED,\
'NHEJ\n(%d reads)' % N_MODIFIED],\
explode=(0,0),colors=[(1,0,0,0.2),(0,0,1,0.2)],autopct='%1.1f%%')
if cut_points:
ax2 = plt.subplot2grid((6,3), (5, 0), colspan=3, rowspan=1)
ax2.plot([0,len_amplicon],[0,0],'-k',lw=2,label='Amplicon sequence')
#plt.hold(True)
for idx,sgRNA_int in enumerate(sgRNA_intervals):
if idx==0:
ax2.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='sgRNA',solid_capstyle='butt')
else:
ax2.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='_nolegend_',solid_capstyle='butt')
ax2.plot(cut_points+offset_plots,np.zeros(len(cut_points)),'vr', ms=12,label='Predicted Cas9 cleavage site/s')
plt.legend(bbox_to_anchor=(0, 0, 1., 0), ncol=1, mode="expand", borderaxespad=0.,numpoints=1,prop={'size':'large'})
plt.xlim(0,len_amplicon)
plt.axis('off')
proptease = fm.FontProperties()
proptease.set_size('xx-large')
plt.setp(autotexts, fontproperties=proptease)
plt.setp(texts, fontproperties=proptease)
plt.savefig(_jp('2.Unmodified_NHEJ_pie_chart.pdf'),pad_inches=1,bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('2.Unmodified_NHEJ_pie_chart.png'),pad_inches=1,bbox_inches='tight')
###############################################################################################################################################
###############################################################################################################################################
#(3) a graph of frequency of deletions and insertions of various sizes (deletions could be considered as negative numbers and insertions as positive);
def calculate_range(df,column_name):
df_not_zero=df.ix[df[column_name]>0,column_name]
try:
r=max(15,int(np.round(np.percentile(df_not_zero,99))))
except:
r=15
return r
range_mut=calculate_range(df_needle_alignment,'n_mutated')
range_ins=calculate_range(df_needle_alignment,'n_inserted')
range_del=calculate_range(df_needle_alignment,'n_deleted')
y_values_mut,x_bins_mut=np.histogram(df_needle_alignment['n_mutated'],bins=range(0,range_mut))
y_values_ins,x_bins_ins=np.histogram(df_needle_alignment['n_inserted'],bins=range(0,range_ins))
y_values_del,x_bins_del=np.histogram(df_needle_alignment['n_deleted'],bins=range(0,range_del))
fig=plt.figure(figsize=(26,6.5))
ax=fig.add_subplot(1,3,1)
ax.bar(x_bins_ins[:-1],y_values_ins,align='center',linewidth=0,color=(0,0,1))
barlist=ax.bar(x_bins_ins[:-1],y_values_ins,align='center',linewidth=0,color=(0,0,1))
barlist[0].set_color('r')
plt.title('Insertions')
plt.xlabel('Size (bp)')
plt.ylabel('Sequences % (no.)')
lgd=plt.legend(['Non-insertion','Insertion'][::-1], bbox_to_anchor=(.82, -0.22),ncol=1, fancybox=True, shadow=True)
lgd.legendHandles[0].set_height(6)
lgd.legendHandles[1].set_height(6)
plt.xlim(xmin=-1)
y_label_values= np.round(np.linspace(0, min(N_TOTAL,max(ax.get_yticks())),6))# np.arange(0,y_max,y_max/6.0)
plt.yticks(y_label_values,['%.1f%% (%d)' % (n_reads/N_TOTAL*100,n_reads) for n_reads in y_label_values])
ax=fig.add_subplot(1,3,2)
ax.bar(-x_bins_del[:-1],y_values_del,align='center',linewidth=0,color=(0,0,1))
barlist=ax.bar(-x_bins_del[:-1],y_values_del,align='center',linewidth=0,color=(0,0,1))
barlist[0].set_color('r')
plt.title('Deletions')
plt.xlabel('Size (bp)')
plt.ylabel('Sequences % (no.)')
lgd=plt.legend(['Non-deletion','Deletion'][::-1], bbox_to_anchor=(.82, -0.22),ncol=1, fancybox=True, shadow=True)
lgd.legendHandles[0].set_height(6)
lgd.legendHandles[1].set_height(6)
plt.xlim(xmax=1)
y_label_values= np.round(np.linspace(0, min(N_TOTAL,max(ax.get_yticks())),6))# np.arange(0,y_max,y_max/6.0)
plt.yticks(y_label_values,['%.1f%% (%d)' % (n_reads/N_TOTAL*100,n_reads) for n_reads in y_label_values])
ax=fig.add_subplot(1,3,3)
ax.bar(x_bins_mut[:-1],y_values_mut,align='center',linewidth=0,color=(0,0,1))
barlist=ax.bar(x_bins_mut[:-1],y_values_mut,align='center',linewidth=0,color=(0,0,1))
barlist[0].set_color('r')
plt.title('Substitutions')
plt.xlabel('Positions substituted (number)')
plt.ylabel('Sequences % (no.)')
lgd=plt.legend(['Non-substitution','Substitution'][::-1] ,bbox_to_anchor=(.82, -0.22),ncol=1, fancybox=True, shadow=True)
lgd.legendHandles[0].set_height(6)
lgd.legendHandles[1].set_height(6)
plt.xlim(xmin=-1)
y_label_values= np.round(np.linspace(0, min(N_TOTAL,max(ax.get_yticks())),6))# np.arange(0,y_max,y_max/6.0)
plt.yticks(y_label_values,['%.1f%% (%d)' % (n_reads/N_TOTAL*100,n_reads) for n_reads in y_label_values])
plt.tight_layout()
plt.savefig(_jp('3.Insertion_Deletion_Substitutions_size_hist.pdf'),bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('3.Insertion_Deletion_Substitutions_size_hist.png'),bbox_inches='tight')
#(4) another graph with the frequency that each nucleotide within the amplicon was modified in any way (perhaps considering an insertion as a modification of the flanking nucleotides);
#Indels location Plots
plt.figure(figsize=(10,10))
y_max=max(effect_vector_any)*1.2
plt.plot(effect_vector_any,'r',lw=3,label='Combined Insertions/Deletions/Substitutions')
#plt.hold(True)
if cut_points:
for idx,cut_point in enumerate(cut_points):
if idx==0:
plt.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='Predicted cleavage position')
else:
plt.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='_nolegend_')
for idx,sgRNA_int in enumerate(sgRNA_intervals):
if idx==0:
plt.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='sgRNA',solid_capstyle='butt')
else:
plt.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='_nolegend_',solid_capstyle='butt')
lgd=plt.legend(loc='center', bbox_to_anchor=(0.5, -0.23),ncol=1, fancybox=True, shadow=True)
ylabel_values = np.arange(0,1,1.0/6.0)
if y_max > 0:
y_label_values=np.arange(0,y_max,y_max/6.0)
plt.yticks(y_label_values,['%.1f%% (%d)' % (n_reads/float(N_TOTAL)*100, n_reads) for n_reads in y_label_values])
plt.xticks(np.arange(0,len_amplicon,max(3,(len_amplicon/6) - (len_amplicon/6)%5)).astype(int) )
plt.title('Mutation position distribution')
plt.xlabel('Reference amplicon position (bp)')
plt.ylabel('Sequences % (no.)')
plt.ylim(0,max(1,y_max))
plt.xlim(xmax=len(args.amplicon_seq)-1)
plt.savefig(_jp('4a.Combined_Insertion_Deletion_Substitution_Locations.pdf'),bbox_extra_artists=(lgd,), bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('4a.Combined_Insertion_Deletion_Substitution_Locations.png'),bbox_extra_artists=(lgd,), bbox_inches='tight',pad=1)
#NHEJ
plt.figure(figsize=(10,10))
plt.plot(effect_vector_insertion,'r',lw=3,label='Insertions')
#plt.hold(True)
plt.plot(effect_vector_deletion,'m',lw=3,label='Deletions')
plt.plot(effect_vector_mutation,'g',lw=3,label='Substitutions')
y_max=max(max(effect_vector_insertion),max(effect_vector_deletion),max(effect_vector_mutation))*1.2
if cut_points:
for idx,cut_point in enumerate(cut_points):
if idx==0:
plt.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='Predicted cleavage position')
else:
plt.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='_nolegend_')
for idx,sgRNA_int in enumerate(sgRNA_intervals):
if idx==0:
plt.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='sgRNA',solid_capstyle='butt')
else:
plt.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='_nolegend_',solid_capstyle='butt')
lgd=plt.legend(loc='center', bbox_to_anchor=(0.5, -0.28),ncol=1, fancybox=True, shadow=True)
ylabel_values = np.arange(0,1,1.0/6.0)
if y_max > 0:
y_label_values=np.arange(0,y_max,y_max/6.0)
plt.yticks(y_label_values,['%.1f%% (%.1f%% , %d)' % (n_reads/float(N_TOTAL)*100,n_reads/float(N_MODIFIED)*100, n_reads) for n_reads in y_label_values])
plt.xticks(np.arange(0,len_amplicon,max(3,(len_amplicon/6) - (len_amplicon/6)%5)).astype(int) )
plt.xlabel('Reference amplicon position (bp)')
plt.ylabel('Sequences: % Total ( % NHEJ, no. )')
plt.ylim(0,max(1,y_max))
plt.xlim(xmax=len(args.amplicon_seq)-1)
plt.title('Mutation position distribution of NHEJ')
plt.savefig(_jp('4b.Insertion_Deletion_Substitution_Locations_NHEJ.pdf'),bbox_extra_artists=(lgd,), bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('4b.Insertion_Deletion_Substitution_Locations_NHEJ.png'),bbox_extra_artists=(lgd,), bbox_inches='tight',pad=1)
if args.expected_hdr_amplicon_seq:
#HDR
plt.figure(figsize=(10,10))
plt.plot(effect_vector_insertion_hdr,'r',lw=3,label='Insertions')
#plt.hold(True)
plt.plot(effect_vector_deletion_hdr,'m',lw=3,label='Deletions')
plt.plot(effect_vector_mutation_hdr,'g',lw=3,label='Substitutions')
y_max=max(max(effect_vector_insertion_hdr),max(effect_vector_deletion_hdr),max(effect_vector_mutation_hdr))*1.2
if cut_points:
for idx,cut_point in enumerate(cut_points):
if idx==0:
plt.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='Predicted cleavage position')
else:
plt.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='_nolegend_')
for idx,sgRNA_int in enumerate(sgRNA_intervals):
if idx==0:
plt.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='sgRNA',solid_capstyle='butt')
else:
plt.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='_nolegend_',solid_capstyle='butt')
lgd=plt.legend(loc='center', bbox_to_anchor=(0.5, -0.28),ncol=1, fancybox=True, shadow=True)
ylabel_values = np.arange(0,1,1.0/6.0)
if y_max > 0:
y_label_values=np.arange(0,y_max,y_max/6).astype(int)
plt.yticks(y_label_values,['%.1f%% (%.1f%% , %d)' % (n_reads/float(N_TOTAL)*100,n_reads/float(N_REPAIRED)*100, n_reads) for n_reads in y_label_values])
plt.xticks(np.arange(0,len_amplicon,max(3,(len_amplicon/6) - (len_amplicon/6)%5)).astype(int) )
plt.xlabel('Reference amplicon position (bp)')
plt.ylabel('Sequences: % Total ( % HDR, no. )')
plt.ylim(0,max(1,y_max))
plt.xlim(xmax=len(args.amplicon_seq)-1)
plt.title('Mutation position distribution of HDR')
plt.savefig(_jp('4c.Insertion_Deletion_Substitution_Locations_HDR.pdf'),bbox_extra_artists=(lgd,), bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('4c.Insertion_Deletion_Substitution_Locations_HDR.png'),bbox_extra_artists=(lgd,), bbox_inches='tight',pad=1)
#MIXED
plt.figure(figsize=(10,10))
plt.plot(effect_vector_insertion_mixed,'r',lw=3,label='Insertions')
#plt.hold(True)
plt.plot(effect_vector_deletion_mixed,'m',lw=3,label='Deletions')
plt.plot(effect_vector_mutation_mixed,'g',lw=3,label='Substitutions')
y_max=max(max(effect_vector_insertion_mixed),max(effect_vector_deletion_mixed),max(effect_vector_mutation_mixed))*1.2
if cut_points:
for idx,cut_point in enumerate(cut_points):
if idx==0:
plt.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='Predicted cleavage position')
else:
plt.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='_nolegend_')
for idx,sgRNA_int in enumerate(sgRNA_intervals):
if idx==0:
plt.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='sgRNA',solid_capstyle='butt')
else:
plt.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='_nolegend_',solid_capstyle='butt')
lgd=plt.legend(loc='center', bbox_to_anchor=(0.5, -0.28),ncol=1, fancybox=True, shadow=True)
ylabel_values = np.arange(0,1,1.0/6.0)
if y_max > 0:
y_label_values=np.arange(0,y_max,y_max/6).astype(int)
plt.yticks(y_label_values,['%.1f%% (%.1f%% , %d)' % (n_reads/float(N_TOTAL)*100,n_reads/float(N_MIXED_HDR_NHEJ)*100, n_reads) for n_reads in y_label_values])
plt.xticks(np.arange(0,len_amplicon,max(3,(len_amplicon/6) - (len_amplicon/6)%5)).astype(int) )
plt.xlabel('Reference amplicon position (bp)')
plt.ylabel('Sequences: % Total ( % mixed HDR-NHEJ, no. )')
plt.ylim(0,max(1,y_max))
plt.xlim(xmax=len(args.amplicon_seq)-1)
plt.title('Mutation position distribution of mixed HDR-NHEJ')
plt.savefig(_jp('4d.Insertion_Deletion_Substitution_Locations_Mixed_HDR_NHEJ.pdf'),bbox_extra_artists=(lgd,), bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('4d.Insertion_Deletion_Substitution_Locations_Mixed_HDR_NHEJ.png'),bbox_extra_artists=(lgd,), bbox_inches='tight',pad=1)
#Position dependent indels plot
fig=plt.figure(figsize=(24,10))
ax1=fig.add_subplot(1,2,1)
markerline, stemlines, baseline=ax1.stem(avg_vector_ins_all,'r',lw=3,markerfmt="s",markerline=None,s=50)
plt.setp(markerline, 'markerfacecolor', 'r', 'markersize', 8)
plt.setp(baseline, 'linewidth', 0)
plt.setp(stemlines, 'color', 'r','linewidth',3)
#plt.hold(True)
y_max=max(avg_vector_ins_all)*1.2
if cut_points:
for idx,cut_point in enumerate(cut_points):
if idx==0:
ax1.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='Predicted cleavage position')
else:
ax1.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='_nolegend_')
plt.xticks(np.arange(0,len_amplicon,max(3,(len_amplicon/6) - (len_amplicon/6)%5)).astype(int) )
plt.xlabel('Reference amplicon position (bp)')
plt.ylabel('Average insertion length')
plt.ylim(0,max(1,y_max))
plt.xlim(xmax=len_amplicon-1)
ax1.set_title('Position dependent insertion size')
plt.tight_layout()
ax2=fig.add_subplot(1,2,2)
markerline, stemlines, baseline=ax2.stem(avg_vector_del_all,'r',lw=3,markerfmt="s",markerline=None,s=50)
plt.setp(markerline, 'markerfacecolor', 'm', 'markersize', 8)
plt.setp(baseline, 'linewidth', 0)
plt.setp(stemlines, 'color', 'm','linewidth',3)
#plt.hold(True)
y_max=max(avg_vector_del_all)*1.2
if cut_points:
for idx,cut_point in enumerate(cut_points):
if idx==0:
ax2.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='Predicted cleavage position')
else:
ax2.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='_nolegend_')
plt.xticks(np.arange(0,len_amplicon,max(3,(len_amplicon/6) - (len_amplicon/6)%5)).astype(int) )
plt.xlabel('Reference amplicon position (bp)')
plt.ylabel('Average deletion length')
plt.ylim(ymin=0,ymax=max(1,y_max))
plt.xlim(xmax=len_amplicon-1)
ax2.set_title('Position dependent deletion size')
plt.tight_layout()
plt.savefig(_jp('4e.Position_dependent_average_indel_size.pdf'),bbox_extra_artists=(lgd,), bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('4e.Position_dependent_average_indel_size.png'),bbox_extra_artists=(lgd,), bbox_inches='tight')
if PERFORM_FRAMESHIFT_ANALYSIS:
#make frameshift plots
fig=plt.figure(figsize=(12*1.5,14.5*1.5))
ax1 = plt.subplot2grid((6,3), (0, 0), colspan=3, rowspan=5)
patches, texts, autotexts =ax1.pie([MODIFIED_FRAMESHIFT,\
MODIFIED_NON_FRAMESHIFT,\
NON_MODIFIED_NON_FRAMESHIFT],\
labels=['Frameshift mutation\n(%d reads)' %MODIFIED_FRAMESHIFT,\
'In-frame mutation\n(%d reads)' % MODIFIED_NON_FRAMESHIFT,\
'Noncoding mutation\n(%d reads)' %NON_MODIFIED_NON_FRAMESHIFT],\
explode=(0.0,0.0,0.0),\
colors=[(0.89019608, 0.29019608, 0.2, 0.8),(0.99215686, 0.73333333, 0.51764706,0.8),(0.99607843, 0.90980392, 0.78431373,0.8)],\
autopct='%1.1f%%')
ax2 = plt.subplot2grid((6,3), (5, 0), colspan=3, rowspan=1)
ax2.plot([0,len_amplicon],[0,0],'-k',lw=2,label='Amplicon sequence')
#plt.hold(True)
for idx,exon_interval in enumerate(exon_intervals):
if idx==0:
ax2.plot(exon_interval,[0,0],'-',lw=10,c=(0,0,1,0.5),label='Coding sequence/s',solid_capstyle='butt')
else:
ax2.plot(exon_interval,[0,0],'-',lw=10,c=(0,0,1,0.5),label='_nolegend_',solid_capstyle='butt')
if cut_points:
ax2.plot(cut_points+offset_plots,np.zeros(len(cut_points)),'vr', ms=25,label='Predicted Cas9 cleavage site/s')
plt.legend(bbox_to_anchor=(0, 0, 1., 0), ncol=1, mode="expand", borderaxespad=0.,numpoints=1)
plt.xlim(0,len_amplicon)
plt.axis('off')
proptease = fm.FontProperties()
proptease.set_size('xx-large')
plt.setp(autotexts, fontproperties=proptease)
plt.setp(texts, fontproperties=proptease)
plt.savefig(_jp('5.Frameshift_In-frame_mutations_pie_chart.pdf'),pad_inches=1,bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('5.Frameshift_In-frame_mutations_pie_chart.png'),pad_inches=1,bbox_inches='tight')
#profiles-----------------------------------------------------------------------------------
fig=plt.figure(figsize=(22,10))
ax1=fig.add_subplot(2,1,1)
x,y=map(np.array,zip(*[a for a in hist_frameshift.iteritems()]))
y=y/float(sum(hist_frameshift.values()))*100
ax1.bar(x-0.5,y)
ax1.set_xlim(-30.5,30.5)
ax1.set_frame_on(False)
ax1.set_xticks([idx for idx in range(-30,31) if idx % 3])
ax1.tick_params(which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='on') # labels along the bottom edge are on
ax1.yaxis.tick_left()
xmin, xmax = ax1.get_xaxis().get_view_interval()
ymin, ymax = ax1.get_yaxis().get_view_interval()
ax1.set_xticklabels([str(idx) for idx in [idx for idx in range(-30,31) if idx % 3]],rotation='vertical')
plt.title('Frameshift profile')
ax1.tick_params(axis='both', which='major', labelsize=32)
ax1.tick_params(axis='both', which='minor', labelsize=32)
plt.tight_layout()
plt.ylabel('%')
ax2=fig.add_subplot(2,1,2)
x,y=map(np.array,zip(*[a for a in hist_inframe.iteritems()]))
y=y/float(sum(hist_inframe.values()))*100
ax2.bar(x-0.5,y,color=(0,1,1,0.2))
ax2.set_xlim(-30.5,30.5)
ax2.set_frame_on(False)
ax2.set_xticks([idx for idx in range(-30,31) if (idx % 3 ==0) ])
ax2.tick_params(which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='on') # labels along the bottom edge are on
ax2.yaxis.tick_left()
xmin, xmax = ax2.xaxis.get_view_interval()
ymin, ymax = ax2.yaxis.get_view_interval()
ax2.set_xticklabels([str(idx) for idx in [idx for idx in range(-30,31) if (idx % 3==0)]],rotation='vertical')
plt.title('In-frame profile')
plt.tight_layout()
plt.ylabel('%')
ax2.tick_params(axis='both', which='major', labelsize=32)
ax2.tick_params(axis='both', which='minor', labelsize=32)
plt.tight_layout()
plt.savefig(_jp('6.Frameshift_In-frame_mutation_profiles.pdf'),pad_inches=1,bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('6.Frameshift_In-frame_mutation_profiles.png'),pad_inches=1,bbox_inches='tight')
#-----------------------------------------------------------------------------------------------------------
fig=plt.figure(figsize=(12*1.5,12*1.5))
ax=fig.add_subplot(1,1,1)
patches, texts, autotexts =ax.pie([SPLICING_SITES_MODIFIED,\
(df_needle_alignment.shape[0] - SPLICING_SITES_MODIFIED)],\
labels=['Potential splice sites modified\n(%d reads)' %SPLICING_SITES_MODIFIED,\
'Unmodified\n(%d reads)' % (df_needle_alignment.shape[0]- SPLICING_SITES_MODIFIED)],\
explode=(0.0,0),\
colors=[(0.89019608, 0.29019608, 0.2, 0.8),(0.99607843, 0.90980392, 0.78431373,0.8)],\
autopct='%1.1f%%')
proptease = fm.FontProperties()
proptease.set_size('xx-large')
plt.setp(autotexts, fontproperties=proptease)
plt.setp(texts, fontproperties=proptease)
plt.savefig(_jp('8.Potential_Splice_Sites_pie_chart.pdf'),pad_inches=1,bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('8.Potential_Splice_Sites_pie_chart.png'),pad_inches=1,bbox_inches='tight')
#non coding
plt.figure(figsize=(10,10))
plt.plot(effect_vector_insertion_noncoding,'r',lw=3,label='Insertions')
#plt.hold(True)
plt.plot(effect_vector_deletion_noncoding,'m',lw=3,label='Deletions')
plt.plot(effect_vector_mutation_noncoding,'g',lw=3,label='Substitutions')
y_max=max(max(effect_vector_insertion_noncoding),max(effect_vector_deletion_noncoding),max(effect_vector_mutation_noncoding))*1.2
if cut_points:
for idx,cut_point in enumerate(cut_points):
if idx==0:
plt.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='Predicted cleavage position')
else:
plt.plot([cut_point+offset_plots[idx],cut_point+offset_plots[idx]],[0,y_max],'--k',lw=2,label='_nolegend_')
for idx,sgRNA_int in enumerate(sgRNA_intervals):
if idx==0:
plt.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='sgRNA',solid_capstyle='butt')
else:
plt.plot([sgRNA_int[0],sgRNA_int[1]],[0,0],lw=10,c=(0,0,0,0.15),label='_nolegend_',solid_capstyle='butt')
lgd=plt.legend(loc='center', bbox_to_anchor=(0.5, -0.28),ncol=1, fancybox=True, shadow=True)
plt.xticks(np.arange(0,len_amplicon,max(3,(len_amplicon/6) - (len_amplicon/6)%5)).astype(int) )
plt.xlabel('Reference amplicon position (bp)')
plt.ylabel('Sequences (no.)')
plt.ylim(0,max(1,y_max))
plt.xlim(xmax=len(args.amplicon_seq)-1)
plt.title('Noncoding mutation position distribution')
plt.savefig(_jp('7.Insertion_Deletion_Substitution_Locations_Noncoding.pdf'),bbox_extra_artists=(lgd,), bbox_inches='tight')
if args.save_also_png:
plt.savefig(_jp('7.Insertion_Deletion_Substitution_Locations_Noncoding.png'),bbox_extra_artists=(lgd,), bbox_inches='tight')
##new plots alleles around cut_sites
for sgRNA,cut_point in zip(sgRNA_sequences,cut_points):
#print sgRNA,cut_point
df_allele_around_cut=get_dataframe_around_cut(df_alleles, cut_point,args.offset_around_cut_to_plot)
#write alleles table to file
df_allele_around_cut.to_csv(_jp('Alleles_frequency_table_around_cut_site_for_%s.txt' % sgRNA),sep='\t',header=True)
plot_alleles_table(args.amplicon_seq,cut_point, df_allele_around_cut,sgRNA,OUTPUT_DIRECTORY,MIN_FREQUENCY=args.min_frequency_alleles_around_cut_to_plot,MAX_N_ROWS=args.max_rows_alleles_around_cut_to_plot)
info('Done!')
if not args.keep_intermediate:
info('Removing Intermediate files...')
if args.fastq_r2!='':
files_to_remove=[processed_output_filename,flash_hist_filename,flash_histogram_filename,\
flash_not_combined_1_filename,flash_not_combined_2_filename,\
database_fasta_filename]
else:
files_to_remove=[processed_output_filename,database_fasta_filename]
if args.trim_sequences and args.fastq_r2!='':
files_to_remove+=[output_forward_paired_filename,output_reverse_paired_filename,\
output_forward_unpaired_filename,output_reverse_unpaired_filename]
if not args.dump:
files_to_remove+=[needle_output_filename]
if args.expected_hdr_amplicon_seq:
files_to_remove+=[needle_output_repair_filename]
if args.expected_hdr_amplicon_seq:
files_to_remove+=[database_repair_fasta_filename,]
if args.split_paired_end:
files_to_remove+=splitted_files_to_remove
if args.min_average_read_quality>0 or args.min_single_bp_quality>0:
if args.fastq_r2!='':
files_to_remove+=[args.fastq_r1,args.fastq_r2]
else:
files_to_remove+=[args.fastq_r1]
if sr_not_aligned.count():
files_to_remove+=[fasta_not_aligned_filename,database_rc_fasta_filename,needle_output_rc_filename]
if args.expected_hdr_amplicon_seq:
files_to_remove+=[database_repair_rc_fasta_filename,needle_output_repair_rc_filename]
for file_to_remove in files_to_remove:
try:
if os.path.islink(file_to_remove):
os.unlink(file_to_remove)
else:
os.remove(file_to_remove)
except:
warn('Skipping:%s' %file_to_remove)
#write effect vectors as plain text files
info('Saving processed data...')
def save_vector_to_file(vector,name):
np.savetxt(_jp('%s.txt' %name), np.vstack([(np.arange(len(vector))+1),vector]).T, fmt=['%d','%.18e'],delimiter='\t', newline='\n', header='amplicon position\teffect',footer='', comments='# ')
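#Illustrative sketch of the output format (hypothetical values, not from a real run):
#each effect vector is written as a tab-separated file with positions starting at 1, e.g.
#   # amplicon position    effect
#   1    0.000000000000000000e+00
#   2    2.500000000000000000e+00
#   ...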
nhej_inserted = np.sum(df_needle_alignment.ix[df_needle_alignment.NHEJ,'n_inserted']>0)
if np.isnan(nhej_inserted): nhej_inserted = 0
nhej_deleted = np.sum(df_needle_alignment.ix[df_needle_alignment.NHEJ,'n_deleted']>0)
if np.isnan(nhej_deleted): nhej_deleted = 0
nhej_mutated = np.sum(df_needle_alignment.ix[df_needle_alignment.NHEJ,'n_mutated']>0)
if np.isnan(nhej_mutated): nhej_mutated = 0
hdr_inserted = np.sum(df_needle_alignment.ix[df_needle_alignment.HDR,'n_inserted']>0)
if np.isnan(hdr_inserted): hdr_inserted = 0
hdr_deleted = np.sum(df_needle_alignment.ix[df_needle_alignment.HDR,'n_deleted']>0)
if np.isnan(hdr_deleted): hdr_deleted = 0
hdr_mutated = np.sum(df_needle_alignment.ix[df_needle_alignment.HDR,'n_mutated']>0)
if np.isnan(hdr_mutated): hdr_mutated = 0
mixed_inserted = np.sum(df_needle_alignment.ix[df_needle_alignment.MIXED,'n_inserted']>0)
if np.isnan(mixed_inserted): mixed_inserted = 0
mixed_deleted = np.sum(df_needle_alignment.ix[df_needle_alignment.MIXED,'n_deleted']>0)
if np.isnan(mixed_deleted): mixed_deleted = 0
mixed_mutated = np.sum(df_needle_alignment.ix[df_needle_alignment.MIXED,'n_mutated']>0)
if np.isnan(mixed_mutated): mixed_mutated = 0
with open(_jp('Quantification_of_editing_frequency.txt'),'w+') as outfile:
outfile.write(
('Quantification of editing frequency:\n\t- Unmodified:%d reads\n' %N_UNMODIFIED)\
+('\t- NHEJ:%d reads (%d reads with insertions, %d reads with deletions, %d reads with substitutions)\n' % (N_MODIFIED, nhej_inserted, nhej_deleted, nhej_mutated))\
+('\t- HDR:%d reads (%d reads with insertions, %d reads with deletions, %d reads with substitutions)\n' % (N_REPAIRED, hdr_inserted, hdr_deleted, hdr_mutated))\
+('\t- Mixed HDR-NHEJ:%d reads (%d reads with insertions, %d reads with deletions, %d reads with substitutions)\n\n' % (N_MIXED_HDR_NHEJ, mixed_inserted, mixed_deleted, mixed_mutated))\
+('Total Aligned:%d reads ' % N_TOTAL))
#write alleles table
df_alleles.ix[:,:'%Reads'].to_csv(_jp('Alleles_frequency_table.txt'),sep='\t',header=True,index=None)
#write statistics
with open(_jp('Mapping_statistics.txt'),'w+') as outfile:
outfile.write('READS IN INPUTS:%d\nREADS AFTER PREPROCESSING:%d\nREADS ALIGNED:%d' % (N_READS_INPUT,N_READS_AFTER_PREPROCESSING,N_TOTAL))
if PERFORM_FRAMESHIFT_ANALYSIS:
with open(_jp('Frameshift_analysis.txt'),'w+') as outfile:
outfile.write('Frameshift analysis:\n\tNoncoding mutation:%d reads\n\tIn-frame mutation:%d reads\n\tFrameshift mutation:%d reads\n' %(NON_MODIFIED_NON_FRAMESHIFT, MODIFIED_NON_FRAMESHIFT ,MODIFIED_FRAMESHIFT))
with open(_jp('Splice_sites_analysis.txt'),'w+') as outfile:
outfile.write('Splice sites analysis:\n\tUnmodified:%d reads\n\tPotential splice sites modified:%d reads\n' %(df_needle_alignment.shape[0]- SPLICING_SITES_MODIFIED, SPLICING_SITES_MODIFIED))
save_vector_to_file(effect_vector_insertion_noncoding,'effect_vector_insertion_noncoding')
save_vector_to_file(effect_vector_deletion_noncoding,'effect_vector_deletion_noncoding')
save_vector_to_file(effect_vector_mutation_noncoding,'effect_vector_substitution_noncoding')
save_vector_to_file(effect_vector_insertion,'effect_vector_insertion_NHEJ')
save_vector_to_file(effect_vector_deletion,'effect_vector_deletion_NHEJ')
save_vector_to_file(effect_vector_mutation,'effect_vector_substitution_NHEJ')
save_vector_to_file(effect_vector_combined,'effect_vector_combined')
save_vector_to_file(avg_vector_ins_all,'position_dependent_vector_avg_insertion_size')
save_vector_to_file(avg_vector_del_all,'position_dependent_vector_avg_deletion_size')
pd.DataFrame(np.vstack([hlengths,hdensity]).T,columns=['indel_size','fq']).to_csv(_jp('indel_histogram.txt'),index=None,sep='\t')
pd.DataFrame(np.vstack([x_bins_ins[:-1],y_values_ins]).T,columns=['ins_size','fq']).to_csv(_jp('insertion_histogram.txt'),index=None,sep='\t')
pd.DataFrame(np.vstack([-x_bins_del[:-1],y_values_del]).T,columns=['del_size','fq']).to_csv(_jp('deletion_histogram.txt'),index=None,sep='\t')
pd.DataFrame(np.vstack([x_bins_mut[:-1],y_values_mut]).T,columns=['sub_size','fq']).to_csv(_jp('substitution_histogram.txt'),index=None,sep='\t')
if args.expected_hdr_amplicon_seq:
save_vector_to_file(effect_vector_insertion_mixed,'effect_vector_insertion_mixed_HDR_NHEJ')
save_vector_to_file(effect_vector_deletion_mixed,'effect_vector_deletion_mixed_HDR_NHEJ')
save_vector_to_file(effect_vector_mutation_mixed,'effect_vector_substitution_mixed_HDR_NHEJ')
save_vector_to_file(effect_vector_insertion_hdr,'effect_vector_insertion_HDR')
save_vector_to_file(effect_vector_deletion_hdr,'effect_vector_deletion_HDR')
save_vector_to_file(effect_vector_mutation_hdr,'effect_vector_substitution_HDR')
if cut_points:
cp.dump(sgRNA_intervals, open( _jp('sgRNA_intervals.pickle'), 'wb' ) )
if sgRNA_intervals:
cp.dump( cut_points, open( _jp('cut_points.pickle'), 'wb' ) )
if offset_plots.any():
cp.dump(offset_plots,open( _jp('offset_plots.pickle'), 'wb' ) )
if args.dump:
info('Dumping all the processed data...')
np.savez(_jp('effect_vector_insertion_NHEJ'),effect_vector_insertion)
np.savez(_jp('effect_vector_deletion_NHEJ'),effect_vector_deletion)
np.savez(_jp('effect_vector_substitution_NHEJ'),effect_vector_mutation)
np.savez(_jp('effect_vector_combined'),effect_vector_combined)
np.savez(_jp('position_dependent_vector_avg_insertion_size'),avg_vector_ins_all)
np.savez(_jp('position_dependent_vector_avg_deletion_size'),avg_vector_del_all)
df_needle_alignment.to_pickle(_jp('processed_reads_dataframe.pickle'))
if args.expected_hdr_amplicon_seq:
np.savez(_jp('effect_vector_insertion_mixed_HDR_NHEJ'),effect_vector_insertion_mixed)
np.savez(_jp('effect_vector_deletion_mixed_HDR_NHEJ'),effect_vector_deletion_mixed)
np.savez(_jp('effect_vector_substitution_mixed_HDR_NHEJ'),effect_vector_mutation_mixed)
np.savez(_jp('effect_vector_insertion_HDR'),effect_vector_insertion_hdr)
np.savez(_jp('effect_vector_deletion_HDR'),effect_vector_deletion_hdr)
np.savez(_jp('effect_vector_substitution_HDR'),effect_vector_mutation_hdr)
info('All Done!')
print '''
)
(
__)__
C\| |
\ /
\___/
'''
sys.exit(0)
except NTException as e:
print_stacktrace_if_debug()
error('Alphabet error, please check your input.\n\nERROR: %s' % e)
sys.exit(1)
except SgRNASequenceException as e:
print_stacktrace_if_debug()
error('sgRNA error, please check your input.\n\nERROR: %s' % e)
sys.exit(2)
except DonorSequenceException as e:
print_stacktrace_if_debug()
error('Problem with the expected hdr amplicon sequence parameter, please check your input.\n\nERROR: %s' % e)
sys.exit(3)
except TrimmomaticException as e:
print_stacktrace_if_debug()
error('Trimming error, please check your input.\n\nERROR: %s' % e)
sys.exit(4)
except FlashException as e:
print_stacktrace_if_debug()
error('Merging error, please check your input.\n\nERROR: %s' % e)
sys.exit(5)
except NeedleException as e:
print_stacktrace_if_debug()
error('Alignment error, please check your input.\n\nERROR: %s' % e)
sys.exit(6)
except NoReadsAlignedException as e:
print_stacktrace_if_debug()
error('Alignment error, please check your input.\n\nERROR: %s' % e)
sys.exit(7)
except AmpliconEqualDonorException as e:
print_stacktrace_if_debug()
error('Problem with the expected hdr amplicon sequence parameter, please check your input.\n\nERROR: %s' % e)
sys.exit(8)
except CoreDonorSequenceNotContainedException as e:
print_stacktrace_if_debug()
error('Donor sequence error, please check your input.\n\nERROR: %s' % e)
sys.exit(9)
except CoreDonorSequenceNotUniqueException as e:
print_stacktrace_if_debug()
error('Donor sequence error, please check your input.\n\nERROR: %s' % e)
sys.exit(10)
except ExonSequenceException as e:
print_stacktrace_if_debug()
error('Coding sequence error, please check your input.\n\nERROR: %s' % e)
sys.exit(11)
except DuplicateSequenceIdException as e:
print_stacktrace_if_debug()
error('Fastq file error, please check your input.\n\nERROR: %s' % e)
sys.exit(12)
except NoReadsAfterQualityFiltering as e:
print_stacktrace_if_debug()
error('Filtering error, please check your input.\n\nERROR: %s' % e)
sys.exit(13)
except Exception as e:
print_stacktrace_if_debug()
error('Unexpected error, please check your input.\n\nERROR: %s' % e)
sys.exit(-1)
| agpl-3.0 |
usaskulc/population_matching | resample.py | 1 | 11886 | # sudo pip install hungarian
# sudo easy_install statsmodels
#sudo apt-get install python-pandas
import pandas as pd
import numpy as np
import hungarian
import math
import argparse
from multiprocessing import Pool
from scipy.stats import ks_2samp
from scipy.stats import ttest_ind
from scipy.stats import chisquare
from sklearn import linear_model
import sys
df_full = None
df_columns = None
column_parameters = {}
column_funs = {}
column_weights = {}
def print_report():
#generate an output report
print "Column weights used:"
for attribute in sorted(column_weights.keys()):
print "\t" + attribute.ljust(30) + "\t" + str(column_weights[attribute])
print""
print "Two-sample Kolmogorov-Smirnov:"
ks_vals = ks(df_sample_condition, df_matches)
for attribute in sorted(ks_vals.keys()):
if ks_vals[attribute][1] < 0.1:
print "*",
print "\t" + attribute.ljust(30) + "\tD={:.4f}\tp={:.4f}".format(*ks_vals[attribute])
print ""
print "Independent two-sample t-test:"
for attribute in sorted(df_columns.columns):
if df_columns.ix[0][attribute] == "ignore":
try:
t, p = ttest_ind(df_sample_condition[attribute], df_matches[attribute])
print "\t" + attribute.ljust(30) + "\tt={:.4f}\tp={:.4f}".format(t, p)
except:
pass #oops, must not have been an integer value!
def ks(df_sample_condition, df_match):
"""For every column that has not been set to ignore and is not the match_attribute
this function will perform a two sample Kolmogorov-Smirnov test
"""
ks_results = {}
for column in df_columns:
if df_columns.ix[0][column] != "ignore":
ks_results[column] = ks_2samp(df_sample_condition[column], df_match[column])
return ks_results
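#Note on the return value: scipy.stats.ks_2samp returns a (D statistic, p-value) pair,
#so ks_results[column][0] is the KS statistic and ks_results[column][1] is the p-value;
#print_report() above marks any attribute with p < 0.1 with an asterisk.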
def write_output(output, df_sample_condition, df_matches, side_by_side=False):
"""Writes the sample condition and matches to a single csv file. If side_by_side
then the sample condition is written to the left of the matches, with pairs matched
appropriately. Otherwise, the sample condition is written above the matches,
with the matched pairs in order.
"""
if (side_by_side):
columns_to_rename = {}
for column in df_sample_condition.columns:
columns_to_rename[column] = "matched_" + str(column)
df_output = df_matches.rename(columns=columns_to_rename)
df_output = df_output.rename(columns={"_index": "index"}) #rename the index we will join on back
df_output = df_sample_condition.join(df_output)
else:
df_output = pd.concat([df_sample_condition, df_matches])
df_output.to_csv(file_output, encoding='utf-8')
def run_hungarian(matrix, df_population, df_sample_condition):
"""Runs the hungarian linear assignment problem solver from the hungarian package.
Takes in a matrix of datavalues and dataframes for the df_population and the
df_sample_condition. Returns the matches as a new dataframe with the same
structure as the df_population.
"""
row_assigns, col_assigns = hungarian.lap(matrix)
interesting_indicies = []
for i in range(0, len(df_sample_condition)):
interesting_indicies.append(col_assigns[i])
return df_population.ix[interesting_indicies]
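#Sketch (an assumption, not part of this script): the same assignment can also be computed
#with SciPy's LAP solver on the identical square cost matrix, e.g.
#   from scipy.optimize import linear_sum_assignment
#   pop_rows, sample_cols = linear_sum_assignment(matrix) #minimizes the total matching cost
#hungarian.lap returns the row/column assignment arrays directly; as used above,
#col_assigns[i] is the population row matched to sample-condition row i.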
def discover_weightings(df_full):
"""Returns a dict of weightings for every column in file_column_definitions
which is not labeled as ignore, and is not the match_attribute. This function
requires that there are no null/None/NaN values in the columns.
TODO: Remove the requirement for no null/None/NaN values.
"""
candidates = []
for column_name in df_columns.columns:
if df_columns[column_name][0] != "ignore":
if column_name != match_attribute:
candidates.append(column_name)
clf = linear_model.LinearRegression()
r_column = df_full[match_attribute]
nr_columns = df_full[candidates]
clf.fit(nr_columns, r_column)
results = {}
for i in range(0, len(clf.coef_)):
results[candidates[i]] = clf.coef_[i]
return results
def diff_two_rows(x, y):
"""Returns difference over all columns in column_definitions between two rows
in a pandas dataframe as a tuple: (average difference, dictionary of column differences)
"""
diffs = {}
for column_name in df_columns.columns:
difference = diff(x[column_name], y[column_name], column_name)
if difference != None:
diffs[column_name] = difference
return (np.mean(diffs.values()), diffs)
def diff_ignore(one, two):
return None
def diff_nominal(one, two):
if one == two:
return 0
else:
return 1
def diff_ordinal(one, two, sorted_range):
#sorted_range=sorted(sorted_range)
pos1 = sorted_range.index(one)
pos2 = sorted_range.index(two)
diff = math.fabs(pos1 - pos2)
return diff / (len(sorted_range) - 1)
def diff_real(one, two, min, max):
top = float(max - min)
one = float(one)
two = float(two)
return math.fabs((one / top) - (two / top))
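#Worked examples for the diff_* helpers (illustrative values only):
#   diff_nominal('a', 'b') -> 1
#   diff_ordinal('low', 'high', sorted_range=['low', 'mid', 'high']) -> 2/(3-1) = 1.0
#   diff_real(2.0, 5.0, min=0.0, max=10.0) -> |2/10 - 5/10| = 0.3
#so every per-column difference is normalized to [0, 1] before it is weighted.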
def diff(one, two, column_name):
#look up the type for the column name
return column_funs[column_name](one, two, **column_parameters[column_name])
def load_data():
global df_full
global df_columns
df_full = pd.read_csv(file_input, na_values=["", " ", "NULL"])
df_columns = pd.read_csv(file_columns, na_values=["", " ", "NULL"])
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Subsamples a large population based on the characteristics of a small population.')
parser.add_argument('-i', '--input', help='Input filename as a CSV')
parser.add_argument('-o', '--output', help='Output filename for matches')
parser.add_argument('-c', '--column_definitions', help='The CSV of column definitions')
parser.add_argument('-m', '--match', help='The attribute to match on')
parser.add_argument('-w', '--weights', help='Whether weights should be automatically discovered or evenly applied')
args = parser.parse_args()
file_columns = args.column_definitions
file_input = args.input
file_output = args.output
match_attribute = args.match
weights = args.weights
#load datafiles into pandas dataframes
load_data()
#separate the general population for matching and the subsample of interest based on the match_attribute
df_population = df_full[df_full[match_attribute] == 0]
df_sample_condition = df_full[df_full[match_attribute] == 1]
df_population = df_population.reset_index(drop=True)
df_sample_condition = df_sample_condition.reset_index(drop=True)
#make sure sample condition and population and column definitions are all the same size
assert len(df_population.columns) == len(df_columns.columns) == len(
df_sample_condition.columns), "All data files must have the same number of columns"
#make sure that population file has at least one free choice in it
assert len(df_population) > len(
df_sample_condition), "The population file must have more items in it than the sample condition file"
column_weights = {}
if weights == "auto":
#run linear regression to discover the weightings for each column
column_weights = discover_weightings(df_full)
else:
#set all columns to 1 except those that will not be used
for column_name in df_columns.columns:
if df_columns[column_name][0] != "ignore":
column_weights[column_name] = 1.0
else:
column_weights[column_name] = 0.0
#go through and build shorthand variables for the columns based on the data in the definitions file
for column_name in df_columns:
if df_columns[column_name][0] == "ordinal":
items = []
items.extend(df_sample_condition[column_name].unique().tolist())
items.extend(df_population[column_name].unique().tolist())
s = set()
for item in items:
s.add(item)
#print "Setting ordinal " + str(column_name)
newlist = sorted(s)
column_parameters[column_name] = {"sorted_range": newlist}
column_funs[column_name] = diff_ordinal
elif df_columns[column_name][0] == "real":
mymin = np.min(df_sample_condition[column_name])
if np.min(df_population[column_name]) < mymin:
mymin = np.min(df_population[column_name])
mymax = np.max(df_sample_condition[column_name])
if np.max(df_population[column_name]) > mymax:
mymax = np.max(df_population[column_name])
#print "Setting real " + str(column_name)
column_parameters[column_name] = {"min": mymin, "max": mymax}
column_funs[column_name] = diff_real
elif df_columns[column_name][0] == "ignore":
#print "Setting unique " + str(column_name)
column_parameters[column_name] = {}
column_funs[column_name] = diff_ignore
elif df_columns[column_name][0] == "nominal":
#print "Setting nominal " + str(column_name)
column_parameters[column_name] = {}
column_funs[column_name] = diff_nominal
#add a zero index for anything that does not already have a weight
if column_name not in column_weights.keys():
column_weights[column_name] = 0
#create a matrix filled with ones (worst match value)
matrix = np.ones((len(df_population), len(df_population)), dtype=np.float32)
print "Building difference tables matrix of size (" + str(len(df_population)) + "x" + str(
len(df_population)) + "):",
x_i = 0
for x in df_population.iterrows():
y_i = 0
for y in df_sample_condition.iterrows():
diffs = []
for column_name in df_columns.columns:
difference = diff(x[1][column_name], y[1][column_name], column_name)
#todo: right now if a value is missing we maximize it, setting it to totally different at 1, is this reasonable?
#todo: instead should we just ignore this? or should it be a sort of special value?
if difference != None and np.isnan(difference):
difference = 1
if difference != None: #count the difference, including the worst-case value substituted for missing data
diffs.append(difference * column_weights[column_name])
matrix[x_i][y_i] = np.sum(diffs)
y_i += 1
x_i += 1
print ".",
sys.stdout.flush()
print""
#run lap solver
print "Running the LAP using the hungarian method."
df_matches = run_hungarian(matrix, df_population, df_sample_condition)
df_matches = df_matches.reset_index(drop=True)
#write output files
write_output(file_output, df_sample_condition, df_matches)
#write output report
print_report()
"""
#Alternative matches
import time
before = int(round(time.time() * 1000))
print before
matches={}
for rowiter in df_deltas.iterrows():
diff_value=np.mean(rowiter[1])
curr_sample_val=rowiter[0]
candidates=[]
for popiter in df_population.iterrows():
difference_value=diff_two_rows(df_sample_condition.ix[curr_sample_val], df_population.ix[popiter[0]])[0]
if difference_value <= diff_value:
candidates.append( (popiter[0], difference_value) )
matches[curr_sample_val]=candidates
after = int(round(time.time() * 1000))
print after
print str(after-before)
"""
#non_regression=df_john[["GENDER_CODE","BIRTH_YEAR","ABORIGINAL_ANCESTRY_IND","SELF_REPORTED_DISABILITY_IND","cu_year1","entrance_average"]]
#clf.fit(non_regression,regression_val)
#clf.coef_
| mit |
shortlab/hognose | plotting/enhancement.py | 1 | 5336 | # -*- coding: utf-8 -*-
"""
"""
import sys, os, getopt, traceback # Standard library modules
import numpy as np # Numerical python
import matplotlib.pyplot as plt # Plotting
plt.switch_backend('agg')
import pylab as py
import scipy.optimize # needed for trendline calcs
#plt.ion()
#plt.show()
Kammenzind2016T=np.array([270,310,350])
Kammenzind2016f=np.array([40,30,3.5])
MATPROT=np.array([280,340,400])
MATPROf=np.array([2.5,1.5,1])
Seibold2002T=np.array([310,335])
Seibold2002f=np.array([4,4])
Seibold2002bT=np.array([349.5,350.5])
Seibold2002bf=np.array([3.5,3.5])
Seibold2002fT=np.array([291.7,293.5,296.4,299.3,315.4,317.4,317.4,320.7,322.0])
Seibold2002ff=np.array([15.6,17.7,16.9,16.8,8.9,11.6,8.4,7.4,8.7])
Seibold2002fT2=np.array([325.1,316.0,318.0,318.0,321.1,322.5,326.1,327.7,325.6])
Seibold2002ff2=np.array([5.5,5.5,5.0,4.6,3.6,3.6,3.1,3.8,4.8])
Seibold2002fT3=np.array([329.0,319.7])
Seibold2002ff3=np.array([4.8,3.5])
fig = plt.figure(figsize = (13,8))
axe = fig.add_subplot(111)
axe.tick_params(labelsize=18)
axe.plot(Kammenzind2016T,Kammenzind2016f,'>',markersize=12,label='Kammenzind et al. 2016')
axe.plot(MATPROT,MATPROf,'<',markersize=12,label='MATPRO using '+r'$T_{clad,surface}$')
#axe.plot(Seibold2002T,Seibold2002f,'r-',linewidth=5,label='Seibold et al. 2002')
#axe.plot(Seibold2002bT,Seibold2002bf,'r-',linewidth=5)
axe.plot(Seibold2002fT,Seibold2002ff,'ro',markersize=8,label='Seibold et al. 2002')
axe.plot(Seibold2002fT2,Seibold2002ff2,'ro',markersize=8)
axe.plot(Seibold2002fT3,Seibold2002ff3,'ro',markersize=8)
#axe.plot(time4[0::1],Oxide_thickness4[0::1],'-s',label='value = 0.8')
axe.legend(loc='best') #,ncol=2)
plt.xlim(265,405)
plt.ylim(0,87.5)#41)
plt.ylabel('Reactor Corrosion Rate / Autoclave Corrosion Rate',fontsize=20)
plt.xlabel('Temperature ('+r'$^\circ$'+'C)',fontsize=20)
plt.savefig('fig-enhancement.png',bbox_inches='tight')
HOGNOSEt=np.array([270,290,310,330])
HOGNOSEf=np.array([84.9,42.1,5.5,2.1])
HOGNOSEon=1
if HOGNOSEon==True:
fig = plt.figure(figsize = (13,10))
axe = fig.add_subplot(111)
axe.tick_params(labelsize=28)
fig.subplots_adjust(left = 0.11,top=0.97,right = 0.96)
axe.plot(Kammenzind2016T,Kammenzind2016f,'>',markersize=18,label='Kammenzind et al. 2016')
axe.plot(MATPROT,MATPROf,'<',markersize=18,label='MATPRO using '+r'$T_{clad,surface}$')
axe.plot(Seibold2002fT,Seibold2002ff,'ro',markersize=18,label='Seibold et al. 2002')
axe.plot(Seibold2002fT2,Seibold2002ff2,'ro',markersize=18)
axe.plot(Seibold2002fT3,Seibold2002ff3,'ro',markersize=18)
axe.plot(HOGNOSEt,HOGNOSEf,'k*-',markersize=28,linewidth=6,label='HOGNOSE results at '+r'$\phi = 1.0 \cdot 10^{14} n/cm^2-s$')
axe.xaxis.set_tick_params(length=8,width=2)
axe.yaxis.set_tick_params(length=8,width=2)
for axis in ['top','bottom','left','right']:
axe.spines[axis].set_linewidth(2)
axe.legend(loc='best',fontsize=25) #,ncol=2)
plt.xlim(265,352)
plt.ylim(0,87.5)
plt.ylabel('Enhancement Factor',fontsize=35)
plt.xlabel('Temperature ('+r'$^\circ$'+'C)',fontsize=35)
plt.savefig('fig-HOGNOSE-enhancement.png',dpi=500,bbox_inches='tight')
aftertransition=10
if aftertransition==True:
f270phi=np.array([0.25,0.4,0.7,1.0,1.6])
f270f=np.array([8.5,31.2,81.4,84.9,84.9])
f290phi=np.array([0.25,0.5,0.7,1.0,1.6])
f290f=np.array([2.6,6.5,12.5,42.1,74.3])
f310phi=np.array([0.25,0.5,1.0,1.6])
f310f=np.array([1.4,2.3,5.5,14.4])
f330phi=np.array([0.25,0.5,1.0,1.6])
f330f=np.array([1.1,1.3,2.1,3.8])
else:
f270phi=np.array([0,0.25,0.4,0.7,1.0,1.6])
f270f=np.array([1,4.4,6.9,8.9,9.6,10.3])
f290phi=np.array([0,0.25,0.5,0.7,1.0,1.6])
f290f=np.array([1,2.1,3.7,5.0,7.0,8.2])
f310phi=np.array([0,0.25,0.5,1.0,1.6])
f310f=np.array([1,1.3,1.9,3.3,5.1])
f330phi=np.array([0,0.25,0.5,1.0,1.6])
f330f=np.array([1,1.1,1.3,1.8,2.6])
HOGNOSEonly=1
if HOGNOSEonly==True:
fig = plt.figure(figsize = (13,10))
axe = fig.add_subplot(111)
axe.tick_params(labelsize=28)
fig.subplots_adjust(left = 0.11,top=0.97,right = 0.96)
axe.plot(f270phi,f270f,'ro-',linewidth=4,markersize=18,label='270'+r'$^\circ$'+'C')
axe.plot(f290phi,f290f,'b>-',linewidth=4,markersize=18,label='290'+r'$^\circ$'+'C')
axe.plot(f310phi,f310f,'g<-',linewidth=4,markersize=18,label='310'+r'$^\circ$'+'C')
axe.plot(f330phi,f330f,'k*-',linewidth=4,markersize=28,label='330'+r'$^\circ$'+'C')
axe.legend(loc='best',fontsize=25) #,ncol=2)
axe.xaxis.set_tick_params(length=8,width=2)
axe.yaxis.set_tick_params(length=8,width=2)
for axis in ['top','bottom','left','right']:
axe.spines[axis].set_linewidth(2)
plt.xlim(0,1.75)
axe.xaxis.set_ticks(np.arange(0,1.75,0.25))
if aftertransition==True:
plt.ylim(0,87.5)
else:
plt.ylim(1,11)
axe.yaxis.set_ticks(np.arange(1,11, 1))
plt.ylabel('Enhancement Factor',fontsize=35)
plt.xlabel('Neutron Flux '+r'$\phi $'+' (in multiples of '+r'$10^{14} n/cm^2-s$)',fontsize=35)
plt.savefig('fig-overallHOGNOSE-enhancement.png',dpi=500,bbox_inches='tight')
| lgpl-2.1 |
q1ang/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
megies/numpy | numpy/core/code_generators/ufunc_docstrings.py | 4 | 86755 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
from __future__ import division, absolute_import, print_function
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
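# Illustrative usage (an assumption, not part of this module): once the add_newdoc calls
# below have executed at import time, a registered docstring can be looked up with
# get('numpy.core.umath.add'), because add_newdoc stores each entry in docdict under the
# key '<place>.<name>'.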
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent elementwise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function that
has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The complex conjugate of `x`, with same dtype as `y`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine elementwise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The quotient `x1/x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and division
by zero.
Notes
-----
Equivalent to `x1` / `x2` in terms of array-broadcasting.
Behavior on division by zero can be changed using `seterr`.
When both `x1` and `x2` are of an integer type, `divide` will return
integers and throw away the fractional part. Moreover, division by zero
always yields zero in integer arithmetic.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types:
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic, and does not
raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using `seterr`:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : {ndarray, bool}
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with magnitude
1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than the formula ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values elementwise.
This function returns the absolute values (positive magnitude) of the data
in `x`. Complex values are not handled; use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy, however, uses a definition of
`floor` such that `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
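A negative numerator illustrates the rounding toward minus infinity:
>>> np.floor_divide(-7, 3)
-3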
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function ``fmod``, and should
not be confused with the Python modulus operator ``x1 % x2`` (see `remainder`).
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Modulo operation where the quotient is `floor(x1/x2)`.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors is
bound by conventions. In `fmod`, the sign of the remainder is the sign of
the dividend. In `remainder`, the sign of the divisor does not affect the
sign of the result.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
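A plain scalar example (the 3-4-5 triangle):
>>> np.hypot(3, 4)
5.0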
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> np.invert(np.array([13], dtype=np.uint8))
array([242], dtype=uint8)
>>> np.binary_repr(13, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
>>> np.invert(np.array([13], dtype=np.uint16))
array([65522], dtype=uint16)
>>> np.binary_repr(13, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity and not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is finite; otherwise the values are False (element
is either positive infinity, negative infinity or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also, positive infinity is not equivalent to negative infinity, but
infinity is equivalent to positive infinity.
Errors result if the second argument is also supplied when `x` is a scalar
input, or if first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Return a bool-type array, the same shape as `x`, True where ``x ==
+/-inf``, False everywhere else.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or bool-type ndarray
For scalar input, the result is a new boolean with value True
if the input is positive or negative infinity; otherwise the value
is False.
For array input, the result is a boolean array with the same
shape as the input and the values are True where the
corresponding element of the input is positive or negative
infinity; elsewhere the values are False. If a second argument
was supplied the result is stored there. If the type of that array
is a numeric type the result is represented as zeros and ones, if
the type is boolean then as False and True, respectively.
The return value `y` is then a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for Not a Number (NaN), return result as a bool array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : {ndarray, bool}
For scalar input, the result is a new boolean with value True
if the input is NaN; otherwise the value is False.
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is NaN; otherwise the values are False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base `e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log10`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base-2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
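It also avoids intermediate overflow when the exponentials themselves
would not be representable:
>>> np.isfinite(np.logaddexp(1000., 1000.))
True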
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it. `log1p`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 elementwise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x elementwise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 elementwise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a nan, then that element
is returned. If both elements are nans then the first is returned. The
latter distinction is important for complex nans, which are defined as at
least one of the real or imaginary parts being a nan. The net effect is
that nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
amax :
The maximum value of an array along a given axis, propagating any NaNs.
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
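The ``np.where`` equivalence mentioned in the Notes can be checked directly:
>>> x1, x2 = np.array([2, 3, 4]), np.array([1, 5, 2])
>>> np.all(np.maximum(x1, x2) == np.where(x1 >= x2, x1, x2))
True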
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then that element
is returned. If both elements are nans then the first is returned. The
latter distinction is important for complex nans, which are defined as at
least one of the real or imaginary parts being a nan. The net effect is
that nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
amin :
The minimum value of an array along a given axis, propagating any NaNs.
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
amax :
The maximum value of an array along a given axis, propagating any NaNs.
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
amin :
The minimum value of an array along a given axis, propagating any NaNs.
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0.0)
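Integer input yields float outputs, as noted above:
>>> np.modf(3)
(0.0, 3.0)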
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Returns an array with the negative of each element of the original array.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be the numpy.ones_like, but now a
specific function for that has been written for consistency with
the other *_like functions. It is only used internally in a limited
fashion now.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division.
For integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
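The integer behaviour described in the Notes:
>>> np.reciprocal(2)
0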
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes ``x1 - floor(x1 / x2) * x2``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient ``x1/x2``, element-wise. Returns a scalar
if both `x1` and `x2` are scalars.
See Also
--------
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
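Unlike `fmod`, the result is always non-negative for a positive divisor:
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])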
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by removing `x2` bits at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : {ndarray, scalar}
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
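With the default rounding mode, halfway cases round to the nearest even value:
>>> np.rint([0.5, 1.5, 2.5])
array([ 0., 2., 2.])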
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved
and it must be of the right shape to hold the output.
See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
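The sign bit is also set for negative zero, even though ``-0.0 < 0`` is False:
>>> np.signbit(-0.)
True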
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next representable floating-point value after x1 in the direction
of x2 element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1 : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and nan is nan.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
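The spacing grows with the magnitude of the argument:
>>> np.spacing(1000.) > np.spacing(1.)
True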
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry
(the mathematical study of triangles). Consider a circle of radius
1 centered on the origin. A ray comes in from the :math:`+x` axis,
makes an angle at the origin (measured counter-clockwise from that
axis), and departs from the origin. The :math:`y` coordinate of
the outgoing ray's intersection with the unit circle is the sine
of that angle. It ranges from -1 for :math:`x=3\\pi / 2` to
+1 for :math:`\\pi / 2.` The function has zeroes where the angle is
a multiple of :math:`\\pi`. Sines of angles between :math:`\\pi` and
:math:`2\\pi` are negative. The numerous properties of the sine and
related functions are included in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.zeros(1)
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
(A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.)
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.zeros(1)
>>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or
``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.zeros(1)
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making ``//``
and ``/`` equivalent operators. The default floor division operation of
``/`` can be replaced by true division with
``from __future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
| bsd-3-clause |
martinjrobins/pdeToOffLattice | twoDimUniReactionMoving.py | 1 | 4775 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 24 12:07:07 2014
@author: robinsonm
"""
from fipy import *
from scipy.special import erf, erfc
from math import sqrt
import pyTyche as tyche
import numpy as np
import matplotlib.pyplot as plt
nx = 51
D = 1.
L = 1.
dx = L/nx
conversion_rate = 500.
lam = 10.0**6
k = 0.1
beta = sqrt(k/D)
N = lam/(2*beta*D)
threshold = 5*10**4
mol_dt = 10.0**(-4)
timeStepDuration = mol_dt*5.
steps = 200
#############
# PDE stuff #
#############
mesh = Grid2D(nx=nx,ny=nx)
phi = CellVariable(name="solution variable", mesh=mesh, value=0.)
total = CellVariable(name="total variable", mesh=mesh, value=0.)
mask = CellVariable(name="mask", mesh=mesh, value=0.)
baseEq = TransientTerm() == DiffusionTerm(coeff=D) - ImplicitSourceTerm(coeff=k)
#####################
# Off-lattice stuff #
#####################
A = tyche.new_species([D,D,0])
dummy = tyche.new_species([0,0,0])
not_dummy = tyche.new_species([0,0,0])
grid = tyche.new_structured_grid([0,0,0],[L,L,1],[dx,dx,1])
A.set_grid(grid)
dummy.set_grid(grid)
not_dummy.set_grid(grid)
sink = tyche.new_uni_reaction(conversion_rate,[[A,dummy.pde()],[A.pde()]])
source = tyche.new_zero_reaction_lattice(conversion_rate,[[A.pde(),not_dummy.pde()],[A]])
uni = tyche.new_uni_reaction(k,[[A],[]])
flux = tyche.new_zero_reaction(lam/(dx*dx),[L/2-dx/2,L/2-dx/2,0],[L/2+dx/2,L/2+dx/2,1])
diffusion = tyche.new_diffusion()
algorithm = tyche.group([diffusion,flux,uni,sink,source])
algorithm.add_species(A)
############
# Plotting #
############
def concentration_gradient(x,y,t):
r = np.sqrt((x-L/2.)**2+(y-L/2.)**2)
exact = (lam/(D*beta)) * (
np.exp(-beta*(r))
- 0.5*np.exp(-beta*(r))*erfc((2.0*beta*D*t-(r))/np.sqrt(4.0*D*t))
- 0.5*np.exp(beta*(r))*erfc((2.0*beta*D*t+(r))/np.sqrt(4.0*D*t))
)
return exact
plt.figure()
cv_indicies = range((nx*nx-1)/2-(nx-1)/2,(nx*nx-1)/2+(nx-1)/2+1)
x = np.arange(0,L,dx)+dx/2
print x
xv, yv = np.meshgrid(x, x)
t = 0
analytical_2d = concentration_gradient(xv,yv,t)
plt.subplot(3, 1, 1)
plot_pde, = plt.plot(x,phi.value[cv_indicies],linewidth=2,label='PDE')
off_lattice_concentration = A.get_concentration([0,0,0],[L,L,1],[nx,nx,1])
total.setValue(phi.value+numerix.reshape(off_lattice_concentration[:,:,0],[nx*nx]))
plot_total, = plt.plot(x,total.value[cv_indicies],linewidth=2,label='Total')
plot_off_lattice = plt.bar(x-dx/2,off_lattice_concentration[(nx-1)/2,:,0],width=dx)
plot_analytical, = plt.plot(x,analytical_2d[(nx-1)/2,:],linewidth=2,linestyle='--',label='Analytical')
plt.legend()
plt.ylim(0,N*1.5)
plt.subplot(3, 1, 2)
plot_analytical_2d = plt.imshow(analytical_2d, interpolation='nearest',
origin='bottom',
vmin=0,
vmax=1.5*N,
cmap='jet')
plt.colorbar(plot_analytical_2d)
plt.subplot(3, 1, 3)
plot_total_2d = plt.imshow(numerix.reshape(total.value,[nx,nx]), interpolation='nearest',
origin='bottom',
vmin=0,
vmax=1.5*N,
cmap='jet')
plt.colorbar(plot_total_2d)
#############
# Time Loop #
#############
for step in range(steps):
print "doing step ",step
#plot
plt.savefig("twoDimUniReactionMoving/twoDimUniReactionMoving%04d.png"%step)
mask = (total > threshold)
mask = total > 0
not_mask = mask==False
#set off-lattice generators
phiOld = (phi.value*not_mask).value + 0.0
A.set_pde(numerix.reshape(phiOld,[nx,nx,1]))
dummy.set_pde(numerix.reshape((mask*1.0).value,[nx,nx,1]))
not_dummy.set_pde(numerix.reshape((not_mask*1.0).value,[nx,nx,1]))
#integrate pde model
eq = baseEq + conversion_rate*ImplicitSourceTerm(coeff=mask==False)
eq.solve(var=phi, dt=timeStepDuration)
#integrate off-lattice model
t = algorithm.integrate_for_time(timeStepDuration,mol_dt)
#transfer sink particles to pde
phiNew = phi.value + np.reshape(A.get_pde()[:,:,0],[nx*nx]) - phiOld
phi.setValue(phiNew)
#update plotting
analytical_2d = concentration_gradient(xv,yv,t)
plot_analytical_2d.set_data(analytical_2d)
plot_analytical.set_ydata(analytical_2d[(nx-1)/2,:])
plot_pde.set_ydata(phiNew[cv_indicies])
off_lattice_concentration = A.get_concentration([0,0,0],[L,L,1],[nx,nx,1])
total.setValue(phi.value+np.reshape(off_lattice_concentration[:,:,0],[nx*nx]))
plot_total_2d.set_data(numerix.reshape(total.value,[nx,nx]))
plot_total.set_ydata(total.value[cv_indicies])
for rect, height in zip(plot_off_lattice, off_lattice_concentration[(nx-1)/2,:,0]):
rect.set_height(height)
| gpl-2.0 |
giacomov/3ML | threeML/io/plotting/light_curve_plots.py | 1 | 6500 | from __future__ import division
from builtins import zip
from builtins import range
from past.utils import old_div
import matplotlib.pyplot as plt
import numpy as np
from threeML.config.config import threeML_config
from threeML.io.plotting.step_plot import step_plot
# this file contains routines for plotting binned light curves
def binned_light_curve_plot(
time_bins, cnts, width, bkg=None, selection=None, bkg_selections=None
):
"""
:param time_bins: stacked array of time intervals
:param cnts: counts per bin
:param bkg: background of the light curve
:param width: width of the bins
:param selection: bin selection
:param bkg_selections:
:param instrument:
:return:
"""
fig, ax = plt.subplots()
top = max(old_div(cnts, width)) * 1.2
min_cnts = min(old_div(cnts[cnts > 0], width[cnts > 0])) * 0.95
bottom = min_cnts
mean_time = np.mean(time_bins, axis=1)
all_masks = []
# round
np.round(time_bins, decimals=4, out=time_bins)
light_curve_color = threeML_config["lightcurve"]["lightcurve color"]
selection_color = threeML_config["lightcurve"]["selection color"]
background_color = threeML_config["lightcurve"]["background color"]
background_selection_color = threeML_config["lightcurve"][
"background selection color"
]
# first plot the full lightcurve
step_plot(
time_bins,
old_div(cnts, width),
ax,
color=light_curve_color,
label="Light Curve",
)
if selection is not None:
# now plot the temporal selections
np.round(selection, decimals=4, out=selection)
for tmin, tmax in selection:
tmp_mask = np.logical_and(time_bins[:, 0] >= tmin, time_bins[:, 1] <= tmax)
all_masks.append(tmp_mask)
if len(all_masks) > 1:
for mask in all_masks[1:]:
step_plot(
time_bins[mask],
old_div(cnts[mask], width[mask]),
ax,
color=selection_color,
fill=True,
fill_min=min_cnts,
)
step_plot(
time_bins[all_masks[0]],
old_div(cnts[all_masks[0]], width[all_masks[0]]),
ax,
color=selection_color,
fill=True,
fill_min=min_cnts,
label="Selection",
)
# now plot the background selections
if bkg_selections is not None:
np.round(bkg_selections, decimals=4, out=bkg_selections)
all_masks = []
for tmin, tmax in bkg_selections:
tmp_mask = np.logical_and(time_bins[:, 0] >= tmin, time_bins[:, 1] <= tmax)
all_masks.append(tmp_mask)
if len(all_masks) > 1:
for mask in all_masks[1:]:
step_plot(
time_bins[mask],
old_div(cnts[mask], width[mask]),
ax,
color=background_selection_color,
fill=True,
alpha=0.4,
fill_min=min_cnts,
)
step_plot(
time_bins[all_masks[0]],
old_div(cnts[all_masks[0]], width[all_masks[0]]),
ax,
color=background_selection_color,
fill=True,
fill_min=min_cnts,
alpha=0.4,
label="Bkg. Selections",
zorder=-30,
)
if bkg is not None:
# now plot the estimated background
ax.plot(mean_time, bkg, background_color, lw=2.0, label="Background")
# ax.fill_between(selection, bottom, top, color="#fc8d62", alpha=.4)
ax.set_xlabel("Time (s)")
ax.set_ylabel("Rate (cnts/s)")
ax.set_ylim(bottom, top)
ax.set_xlim(time_bins.min(), time_bins.max())
ax.legend()
return fig
def channel_plot(ax, chan_min, chan_max, counts, **kwargs):
chans = np.vstack([chan_min, chan_max]).T
width = chan_max - chan_min
step_plot(chans, old_div(counts, width), ax, **kwargs)
ax.set_xscale("log")
ax.set_yscale("log")
return ax
def disjoint_patch_plot(ax, bin_min, bin_max, top, bottom, mask, **kwargs):
# type: (plt.Axes, np.array, np.array, float, float, np.array, dict) -> None
"""
plots patches that are disjoint given by the mask
:param ax: matplotlib Axes to plot to
:param bin_min: bin starts
:param bin_max: bin stops
:param top: top y value to plot
:param bottom: bottom y value to plot
:param mask: mask of the bins
:param kwargs: matplotlib plot keywords
:return:
"""
# Figure out the best limit
# Find the contiguous regions that are selected
non_zero = (mask).nonzero()[0]
if len(non_zero) > 0:
slices = slice_disjoint(non_zero)
for region in slices:
ax.fill_between(
[bin_min[region[0]], bin_max[region[1]]], bottom, top, **kwargs
)
ax.set_ylim(bottom, top)
def slice_disjoint(arr):
"""
Returns an array of disjoint indices from a bool array
:param arr: and array of bools
"""
slices = []
start_slice = arr[0]
counter = 0
for i in range(len(arr) - 1):
if arr[i + 1] > arr[i] + 1:
end_slice = arr[i]
slices.append([start_slice, end_slice])
start_slice = arr[i + 1]
counter += 1
if counter == 0:
return [[arr[0], arr[-1]]]
if end_slice != arr[-1]:
slices.append([start_slice, arr[-1]])
return slices
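# Example (illustrative sketch): slice_disjoint groups consecutive indices --
# typically the output of mask.nonzero()[0] -- into inclusive [start, stop] pairs:
#     slice_disjoint(np.array([1, 2, 3, 7, 8, 12]))  ->  [[1, 3], [7, 8], [12, 12]]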
def plot_tte_lightcurve(tte_file, start=-10, stop=50, dt=1):
# type: (str, float, float, float) -> plt.Figure
"""
quick plot of a TTE light curve
:param tte_file: GBM TTE file name
:param start: start of the light curve
:param stop: stop of the light curve
:param dt: width of the bins
"""
# build a quick object that will extract the data
# the local import is because GBMTTEFile is dependent
# on other files
from threeML.plugins.FermiGBMTTELike import GBMTTEFile
tte = GBMTTEFile(ttefile=tte_file)
# bin the data with np hist
bins = np.arange(start, stop, step=dt)
counts, bins = np.histogram(tte.arrival_times - tte.trigger_time, bins=bins)
width = np.diff(bins)
time_bins = np.array(list(zip(bins[:-1], bins[1:])))
# plot the light curve
return binned_light_curve_plot(time_bins=time_bins, cnts=counts, width=width)
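# Example (illustrative sketch; the TTE file name is hypothetical, and the call assumes
# the function returns the figure produced by binned_light_curve_plot, as its type
# comment states):
#     fig = plot_tte_lightcurve("glg_tte_n0_bnXXXXXXXXX_v00.fit", start=-5, stop=30, dt=0.5)
#     fig.savefig("tte_lightcurve.png")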
| bsd-3-clause |
KDD-OpenSource/geox-young-academy | day-2/exercises/PCA-SVM-exercise-solution.py | 1 | 5040 | from sklearn import svm
from sklearn import neighbors
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# Note: Functions should go on top, but to group the code based on the sub-tasks, code and functions are mixed here
def show_raw_image(img):
img2 = np.log(img[[2,1,0],:,:])
img2[0,:,:] = img2[0,:,:].copy() * 1.05303 + -6.32792
img2[1,:,:] = img2[1,:,:].copy() * 1.74001 + -10.8407
img2[2,:,:] = img2[2,:,:].copy() * 1.20697 + -6.73016
img2 = np.clip(img2 / 6 + 0.5, 0.0, 1.0)
plt.imshow(np.transpose(img2, (1, 2, 0)))
plt.show()
def compute_mean_PCs(X):
mean = np.mean(X, 0) # Second parameter is the axis along which we want to average (0 == across instances)
mean_free = X-mean
vars_per_img = X.shape[1] * X.shape[2] * X.shape[3]
num_imgs = mean_free.shape[0]
# Flatten data in to vectors
mean_free_vectorized = np.reshape(mean_free, (num_imgs, vars_per_img))
# Increase this to speed up debugging
covar_subsampling = 2
# Accumulate covar matrix
covar = np.zeros((vars_per_img, vars_per_img))
print("Image: 0")
for i in range(0, num_imgs, covar_subsampling):
print("\rImage: {}".format(i))
covar += np.outer(mean_free_vectorized[i,:], mean_free_vectorized[i,:])
covar /= num_imgs/covar_subsampling
eig_val, eig_vec = np.linalg.eig(covar)
# Sort by importance
idx = np.argsort(eig_val)[::-1]
eig_vec = eig_vec[:,idx]
eig_val = eig_val[idx]
# Reshape data back into images. Note that eig_vec is the transpose of what you might expect it to be.
principal_components = np.transpose(eig_vec, (1,0)).reshape((vars_per_img, X.shape[1], X.shape[2], X.shape[3]))
return mean, principal_components, eig_val
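# Note: up to sign and to the covar_subsampling approximation above, the same principal
# components could be obtained from an SVD of the mean-free data, e.g.
# np.linalg.svd(mean_free_vectorized, full_matrices=False), whose right singular vectors
# are the eigenvectors of the covariance matrix.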
def show_first_principal_components(pcs):
f, axarr = plt.subplots(8,8)
for i in range(0,8):
for j in range(0,8):
img2 = pcs[i*8+j,[2,1,0],:,:]
img2 = np.clip(img2 * 10 + 0.5, 0.0, 1.0)
axarr[i,j].imshow(np.transpose(img2, (1, 2, 0)))
plt.show()
def compute_features(X, mean, principal_components, count):
X_mean_free = X - mean
features = np.zeros((X.shape[0], count))
for i in range(0, X.shape[0]):
for j in range(0, count):
# Note: The [i,:,:,:] is being very explicit here. [i] would also work.
features[i,j] = X_mean_free[i,:,:,:].flatten().dot(principal_components[j,:,:,:].flatten())
return features
def reconstruct_image(feature, mean, principal_components):
reconstruction = np.copy(mean)
for i in range(0, feature.shape[0]):
reconstruction += feature[i] * principal_components[i,:,:,:]
return reconstruction
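# Example (illustrative sketch, using names defined later in this script): projecting an
# image onto the first k principal components and reconstructing gives its best rank-k
# approximation in the least-squares sense:
#     feats = compute_features(train_X, mean, principal_components, 16)
#     approx = reconstruct_image(feats[0], mean, principal_components)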
def testAccuracy(classifier, test_X, test_Y):
inferred_Y = classifier.predict(test_X)
return np.mean(test_Y == inferred_Y)
training_data = np.load('data/s2_training_data.npz')
train_X = training_data['data']
train_Y = training_data['labels']
# Take the logarithm
train_X = np.log(train_X)
### Task 1
(mean, principal_components, eig_val) = compute_mean_PCs(train_X)
train_features = compute_features(train_X, mean, principal_components, 16)
### Task 2
clf = svm.SVC(gamma=0.001, C=100.)
clf.fit(train_features, train_Y)
### Task 3
print("training accuracy = {}".format(testAccuracy(clf, train_features, train_Y)))
### Task 4
testing_data = np.load('data/s2_testing_data.npz')
test_X = testing_data['data']
test_Y = testing_data['labels']
test_X = np.log(test_X)
test_features = compute_features(test_X, mean, principal_components, 16)
print("test accuracy = {}".format(testAccuracy(clf, test_features, test_Y)))
# Lets test some descriptor sizes:
for i in range(0, 10):
size = 1 << i
print("Running with size = {}".format(size))
train_features = compute_features(train_X, mean, principal_components, size)
clf = svm.SVC(gamma=0.001, C=100.)
clf.fit(train_features, train_Y)
print("training accuracy = {}".format(testAccuracy(clf, train_features, train_Y)))
test_features = compute_features(test_X, mean, principal_components, size)
print("test accuracy = {}".format(testAccuracy(clf, test_features, test_Y)))
### Task 5
train_features = compute_features(train_X, mean, principal_components, 64)
clf = svm.SVC(gamma=0.001, C=100.)
clf.fit(train_features, train_Y)
application_data = np.load('data/s2_application_data.npz')
application_X = application_data['data']+1e-2
show_raw_image(application_X)
application_X = np.log(application_X)
application_labels = np.zeros((application_X.shape[1]-15, application_X.shape[2]-15))
for y in range(0, application_labels.shape[1]):
print(y)
for x in range(0, application_labels.shape[0]):
crop = np.zeros((1,4,15,15))
crop[0,:,:,:] = application_X[:,y:y+15,x:x+15]
features = compute_features(crop, mean, principal_components, 64)
prediction = clf.predict(features)
application_labels[y,x] = prediction[0]
plt.imshow(application_labels)
plt.show()
| mit |
springer-math/Mathematics-of-Epidemics-on-Networks | docs/examples/fig5p4.py | 1 | 2228 | import EoN
import networkx as nx
import matplotlib.pyplot as plt
import scipy
import random
def get_deg_seq(N, Pk):
while True: #run until degree sequence has even sum of N entries
deg_seq = []
for counter in range(N):
r = random.random()
for k in Pk:
if Pk[k]>r:
break
else:
r-= Pk[k]
deg_seq.append(k)
if sum(deg_seq)%2 ==0:
break
return deg_seq
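# Note: the inner loop above is inverse-transform sampling over the discrete distribution
# Pk -- the uniform random number r is walked through the probabilities (in dict order)
# until the cumulative mass exceeds it, and the matching degree k is appended.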
def sim_and_plot(G, tau, gamma, rho, tmax, tcount, ax):
t, S, I = EoN.fast_SIS(G, tau, gamma, rho = rho, tmax = tmax)
report_times = scipy.linspace(0, tmax, tcount)
I = EoN.subsample(report_times, t, I)
ax.plot(report_times, I/N, color='grey', linewidth=5, alpha=0.3)
t, S, I, = EoN.SIS_heterogeneous_meanfield_from_graph(G, tau, gamma, rho=rho,
tmax=tmax, tcount=tcount)
ax.plot(t, I/N, '--')
t, S, I = EoN.SIS_compact_pairwise_from_graph(G, tau, gamma, rho=rho,
tmax=tmax, tcount=tcount)
ax.plot(t, I/N)
t, S, I = EoN.SIS_homogeneous_pairwise_from_graph(G, tau, gamma, rho=rho,
tmax=tmax, tcount=tcount)
ax.plot(t, I/N, '-.')
N=10000
gamma = 1
rho = 0.05
tmax = 10
tcount = 1001
kmin = 1
kmax = 40
Pk = {}
for k in range(kmin, kmax+1):
Pk[k] = k**(-2.)
norm_factor = sum(Pk.values())
for k in Pk:
Pk[k] /= norm_factor
deg_seq = get_deg_seq(N, Pk)
G = nx.configuration_model(deg_seq)
kave = sum(deg_seq)/N
tau = 1.5*gamma/kave
fig = plt.figure(1)
main = plt.axes()
sim_and_plot(G, tau, gamma, rho, tmax, tcount, main)
kmin = 10
kmax = 150
Pk = {}
for k in range(kmin, kmax+1):
Pk[k] = k**(-2.)
norm_factor = sum(Pk.values())
for k in Pk:
Pk[k] /= norm_factor
deg_seq = get_deg_seq(N, Pk)
G = nx.configuration_model(deg_seq)
kave = (sum(deg_seq)/N)
tau = 1.5*gamma/kave
fig = plt.figure(1)
ax1 = plt.gca()
inset = plt.axes([0.45,0.175,0.45,0.45])
sim_and_plot(G, tau, gamma, rho, tmax, tcount, inset)
ax1.set_xlabel('$t$')
ax1.set_ylabel('Prevalence')
plt.savefig('fig5p4.png') | mit |
osigaud/ArmModelPython | 3RModel_Gao2/install.py | 4 | 1723 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author: Thomas Beucher + Olivier Sigaud
Module: install
'''
import os
import site
#------------------- install environment -----------------------------------------------------------------------------------
def checkPackages():
a = site.getsitepackages()
packageList = os.listdir(a[0])
packageNeeded = {}
listOfPackageNeeded = ['pykalman', 'cma', 'cython']
for el in listOfPackageNeeded:
packageNeeded[el] = 0
for el1 in listOfPackageNeeded:
for el2 in packageList:
if el1 in el2:
packageNeeded[el1] = 1
print(packageNeeded)
return packageNeeded
def installMissingPackage(packageList):
a = site.getsitepackages()
a = a[0]
a = a.split('/')
for el in a:
if 'python' in el:
b = el.replace('python', '')
b = int(float(b))
os.system('sudo apt-get install python-numpy python-scipy python-matplotlib ipython ipython-notebook python-pandas python-sympy python-nose')
if b == 2:
try:
os.system('sudo easy_install numpy scipy Sphinx numpydoc nose pykalman')
os.system('sudo pip install cma')
os.system('sudo easy_install cython')
os.system('sudo pip install distlib')
except:
pass
elif b == 3:
try:
os.system('sudo easy_install3 numpy scipy Sphinx numpydoc nose pykalman')
os.system('sudo pip3 install cma')
os.system('sudo easy_install3 cython')
os.system('sudo pip3 install distlib')
except:
pass
os.system('clear')
pk = checkPackages()
installMissingPackage(pk)
| gpl-2.0 |
notmatthancock/acm-computing-seminar | resources/prog/example-assignment/figures/source/plot-figs.py | 2 | 1369 | # This is a python script to create the figures
# using `matplotlib`.
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('classic')
title = {'forw': "Forward Difference",
'back': "Backward Difference",
'cent': "Central Difference"}
linestyles = ['-ok', '-^k']
# Create plots for function 1 first.
def make_plots(func):
assert func in ['f1', 'f2']
fig, ax = plt.subplots(1, 3, figsize=(16,8))
if func == 'f1':
xs = ['1.0', '2.0']
else:
xs = ['1.1', '2.2']
for i,diff_type in enumerate(['forw', 'back', 'cent']):
for j,x in enumerate(xs):
D = np.genfromtxt('../../data/%s-%s-%s-h-and-errs.txt' %
(diff_type, func, x))
ax[i].plot(D[:,0], D[:,1], linestyles[j], label="$x=%s$"%x)
if i==0:
ax[i].set_ylabel('Absolute Error')
ax[i].set_title(title[diff_type])
ax[i].set_xscale('log'); ax[i].set_yscale('log')
ax[i].set_xlabel('$h$', fontsize=20)
ax[i].grid('on')
ax[i].legend(loc=2)
ax[i].set_xlim(1e-7, 1e-1)
ax[i].set_ylim(1e-14, 1e-1)
if func == 'f1':
stitle = "$f(x) = \\sin(x)$"
else:
stitle = "$f(x) = \\exp(-0.5x^2)$"
fig.savefig('../%s.pdf'%func, bbox_inches='tight')
#plt.show()
make_plots('f1')
make_plots('f2')
| mit |
brookehus/msmbuilder | msmbuilder/io/io.py | 6 | 9756 | # Author: Matthew Harrigan <matthew.harrigan@outlook.com>
# Contributors:
# Copyright (c) 2016, Stanford University
# All rights reserved.
from __future__ import print_function, division, absolute_import
import os
import pickle
import re
import shutil
import stat
import warnings
import mdtraj as md
import numpy as np
import pandas as pd
from jinja2 import Environment, PackageLoader
__all__ = ['backup', 'preload_top', 'preload_tops', 'load_meta', 'load_generic',
'load_trajs', 'save_meta', 'render_meta', 'save_generic',
'itertrajs', 'save_trajs', 'ProjectTemplate']
class BackupWarning(UserWarning):
pass
def backup(fn):
"""If ``fn`` exists, rename it and issue a warning
This function will rename an existing filename {fn}.bak.{i} where
i is the smallest integer that gives a filename that doesn't exist.
This naively uses a while loop to find such a filename, so there
shouldn't be too many existing backups or performance will degrade.
Parameters
----------
fn : str
The filename to check.
"""
if not os.path.exists(fn):
return
backnum = 1
backfmt = "{fn}.bak.{backnum}"
trial_fn = backfmt.format(fn=fn, backnum=backnum)
while os.path.exists(trial_fn):
backnum += 1
trial_fn = backfmt.format(fn=fn, backnum=backnum)
warnings.warn("{fn} exists. Moving it to {newfn}"
.format(fn=fn, newfn=trial_fn),
BackupWarning)
shutil.move(fn, trial_fn)
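# Example (illustrative sketch):
#     backup("meta.pandas.pickl")
# If "meta.pandas.pickl" exists it is moved to "meta.pandas.pickl.bak.1" (or .bak.2, ...
# if earlier backups exist) and a BackupWarning is issued; otherwise the call is a no-op.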
def chmod_plus_x(fn):
st = os.stat(fn)
os.chmod(fn, st.st_mode | stat.S_IEXEC)
def default_key_to_path(key, dfmt="{}", ffmt="{}.npy"):
"""Turn an arbitrary python object into a filename
This uses string formatting, so make sure your keys map
to unique strings. If the key is a tuple, it will join each
element of the tuple with '/', resulting in a filesystem
hierarchy of files.
"""
if isinstance(key, tuple):
paths = [dfmt.format(k) for k in key[:-1]]
paths += [ffmt.format(key[-1])]
return os.path.join(*paths)
else:
return ffmt.format(key)
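# Example (illustrative sketch):
#     default_key_to_path(5)            ->  "5.npy"
#     default_key_to_path(("run1", 3))  ->  "run1/3.npy"   (path separator is os-dependent)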
def validate_keys(keys, key_to_path_func=None,
valid_re=r"[a-zA-Z0-9_\-\.]+(\/[a-zA-Z0-9_\-\.]+)*"):
if key_to_path_func is None:
key_to_path_func = default_key_to_path
err = "Key must match regular expression {}".format(valid_re)
for k in keys:
ks = key_to_path_func(k)
assert isinstance(ks, str), "Key must convert to a string"
assert re.match(valid_re, ks), err
def preload_tops(meta):
"""Load all topology files into memory.
This might save some performance compared to re-parsing the topology
file for each trajectory you try to load in. Typically, you have far
fewer (possibly 1) topologies than trajectories
Parameters
----------
meta : pd.DataFrame
The DataFrame of metadata with a column named 'top_fn'
Returns
-------
tops : dict
Dictionary of ``md.Topology`` objects, keyed by "top_fn"
values.
"""
top_fns = set(meta['top_fn'])
tops = {}
for tfn in top_fns:
tops[tfn] = md.load_topology(tfn)
return tops
def preload_top(meta):
"""Load one topology file into memory.
This function checks to make sure there's only one topology file
in play. When sampling frames, you have to have all the same
topology to concatenate.
Parameters
----------
meta : pd.DataFrame
The DataFrame of metadata with a column named 'top_fn'
Returns
-------
top : md.Topology
The one topology file that can be used for all trajectories.
"""
top_fns = set(meta['top_fn'])
if len(top_fns) != 1:
raise ValueError("More than one topology is used in this project!")
return md.load_topology(top_fns.pop())
def itertrajs(meta, stride=1):
"""Load one mdtraj trajectory at a time and yield it.
MDTraj does striding badly. It reads in the whole trajectory and
then performs a stride. We join(iterload) to conserve memory.
"""
tops = preload_tops(meta)
for i, row in meta.iterrows():
yield i, md.join(md.iterload(row['traj_fn'],
top=tops[row['top_fn']],
stride=stride),
discard_overlapping_frames=False,
check_topology=False)
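# Example (illustrative sketch, assuming a metadata DataFrame with 'traj_fn' and
# 'top_fn' columns):
#     meta = load_meta()
#     for key, traj in itertrajs(meta, stride=10):
#         print(key, traj.n_frames)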
def load_meta(meta_fn='meta.pandas.pickl'):
"""Load metadata associated with a project.
Parameters
----------
meta_fn : str
The filename
Returns
-------
meta : pd.DataFrame
Pandas DataFrame where each row contains metadata for a
trajectory.
"""
return pd.read_pickle(meta_fn)
def save_meta(meta, meta_fn='meta.pandas.pickl'):
"""Save metadata associated with a project.
Parameters
----------
meta : pd.DataFrame
The DataFrame of metadata
meta_fn : str
The filename
"""
backup(meta_fn)
pd.to_pickle(meta, meta_fn)
def render_meta(meta, fn="meta.pandas.html",
title="Project Metadata - MSMBuilder", pandas_kwargs=None):
"""Render a metadata dataframe as an html webpage for inspection.
Parameters
----------
meta : pd.Dataframe
The DataFrame of metadata
fn : str
Output filename (should end in html)
title : str
Page title
pandas_kwargs : dict
Arguments to be passed to pandas
"""
if pandas_kwargs is None:
pandas_kwargs = {}
kwargs_with_defaults = {
'classes': ('table', 'table-condensed', 'table-hover'),
}
kwargs_with_defaults.update(**pandas_kwargs)
env = Environment(loader=PackageLoader('msmbuilder', 'io_templates'))
templ = env.get_template("twitter-bootstrap.html")
rendered = templ.render(
title=title,
content=meta.to_html(**kwargs_with_defaults)
)
# Ugh, pandas hardcodes border="1"
rendered = re.sub(r' border="1"', '', rendered)
backup(fn)
with open(fn, 'w') as f:
f.write(rendered)
def save_generic(obj, fn):
"""Save Python objects, including msmbuilder Estimators.
This is a convenience wrapper around Python's ``pickle``
serialization scheme. This protocol is backwards-compatible
among Python versions, but may not be "forwards-compatible".
A file saved with Python 3 won't be able to be opened under Python 2.
Please read the pickle docs (specifically related to the ``protocol``
parameter) to specify broader compatibility.
If a file already exists at the given filename, it will be backed
up.
Parameters
----------
obj : object
A Python object to serialize (save to disk)
fn : str
Filename to save the object. We recommend using the '.pickl'
extension, but don't do anything to enforce that convention.
"""
backup(fn)
with open(fn, 'wb') as f:
pickle.dump(obj, f)
def load_generic(fn):
"""Load Python objects, including msmbuilder Estimators.
This is a convenience wrapper around Python's ``pickle``
serialization scheme.
Parameters
----------
fn : str
Load this file
Returns
-------
object : object
The object.
"""
with open(fn, 'rb') as f:
return pickle.load(f)
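# Example (illustrative round trip):
#     save_generic({"lag": 10, "n_states": 5}, "params.pickl")
#     params = load_generic("params.pickl")
# Any picklable object can be stored this way; an existing file is backed up, not
# overwritten.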
def save_trajs(trajs, fn, meta, key_to_path=None):
"""Save trajectory-like data
Data is stored in individual numpy binary files in the
directory given by ``fn``.
This method will automatically back up existing files named ``fn``.
Parameters
----------
trajs : dict of (key, np.ndarray)
Dictionary of trajectory-like ndarray's keyed on ``meta.index``
values.
fn : str
Where to save the data. This will be a directory containing
one file per trajectory
meta : pd.DataFrame
The DataFrame of metadata
"""
if key_to_path is None:
key_to_path = default_key_to_path
validate_keys(meta.index, key_to_path)
backup(fn)
os.mkdir(fn)
for k in meta.index:
v = trajs[k]
npy_fn = os.path.join(fn, key_to_path(k))
os.makedirs(os.path.dirname(npy_fn), exist_ok=True)
np.save(npy_fn, v)
def load_trajs(fn, meta='meta.pandas.pickl', key_to_path=None):
"""Load trajectory-like data
Data is expected to be stored as if saved by ``save_trajs``.
This method finds trajectories based on the ``meta`` dataframe.
If you remove a file (trajectory) from disk, be sure to remove
its row from the dataframe. If you remove a row from the dataframe,
be aware that that trajectory (file) will not be loaded, even if
it exists on disk.
Parameters
----------
fn : str
Where the data is saved. This should be a directory containing
one file per trajectory.
meta : pd.DataFrame or str
The DataFrame of metadata. If this is a string, it is interpreted
as a filename and the dataframe is loaded from disk.
Returns
-------
meta : pd.DataFrame
The DataFrame of metadata. If you passed in a string (filename)
to the ``meta`` input, this will be the loaded DataFrame. If
you gave a DataFrame object, this will just be a reference back
to that object
trajs : dict
Dictionary of trajectory-like np.ndarray's keyed on the values
of ``meta.index``.
"""
if key_to_path is None:
key_to_path = default_key_to_path
if isinstance(meta, str):
meta = load_meta(meta_fn=meta)
trajs = {}
for k in meta.index:
trajs[k] = np.load(os.path.join(fn, key_to_path(k)))
return meta, trajs
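# Example (illustrative round trip, assuming `ftrajs` is a dict of per-trajectory
# arrays keyed on meta.index):
#     save_trajs(ftrajs, "ftrajs", meta)
#     meta, ftrajs = load_trajs("ftrajs", meta)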
| lgpl-2.1 |
trungnt13/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
andyh616/mne-python | tutorials/plot_epochs_to_data_frame.py | 12 | 8847 | """
.. _tut_io_export_pandas:
=================================
Export epochs to Pandas DataFrame
=================================
In this example the pandas exporter will be used to produce a DataFrame
object. After exploring some basic features a split-apply-combine
work flow will be conducted to examine the latencies of the response
maxima across epochs and conditions.
Note. Equivalent methods are available for raw and evoked data objects.
Short Pandas Primer
-------------------
Pandas Data Frames
~~~~~~~~~~~~~~~~~~
A data frame can be thought of as a combination of matrix, list and dict:
It knows about linear algebra and element-wise operations but is size mutable
and allows for labeled access to its data. In addition, the pandas data frame
class provides many useful methods for restructuring, reshaping and visualizing
data. As most methods return data frame instances, operations can be chained
with ease; this allows to write efficient one-liners. Technically a DataFrame
can be seen as a high-level container for numpy arrays and hence switching
back and forth between numpy arrays and DataFrames is very easy.
Taken together, these features qualify data frames for inter operation with
databases and for interactive data exploration / analysis.
Additionally, pandas interfaces with the R statistical computing language that
covers a huge amount of statistical functionality.
Export Options
~~~~~~~~~~~~~~
The pandas exporter comes with a few options worth being commented.
Pandas DataFrame objects use a so called hierarchical index. This can be
thought of as an array of unique tuples, in our case, representing the higher
dimensional MEG data in a 2D data table. The column names are the channel names
from the epoch object. The channels can be accessed like entries of a
dictionary:
df['MEG 2333']
Epochs and time slices can be accessed with the .ix method:
epochs_df.ix[(1, 2), 'MEG 2333']
However, it is also possible to include this index as regular categorical data
columns which yields a long table format typically used for repeated measure
designs. To take control of this feature, on export, you can specify which
of the three dimensions 'condition', 'epoch' and 'time' is passed to the Pandas
index using the index parameter. Note that this decision is revertible any
time, as demonstrated below.
Similarly, for convenience, it is possible to scale the times, e.g. from
seconds to milliseconds.
Some Instance Methods
~~~~~~~~~~~~~~~~~~~~~
Most numpy methods and many ufuncs can be found as instance methods, e.g.
mean, median, var, std, mul, max, argmax, etc.
Below an incomplete listing of additional useful data frame instance methods:
apply : apply function to data.
Any kind of custom function can be applied to the data. In combination with
lambda this can be very useful.
describe : quickly generate summary stats
Very useful for exploring data.
groupby : generate subgroups and initialize a 'split-apply-combine' operation.
Creates a group object. Subsequently, methods like apply, agg, or transform
can be used to manipulate the underlying data separately but
simultaneously. Finally, reset_index can be used to combine the results
back into a data frame.
plot : wrapper around plt.plot
However it comes with some special options. For examples see below.
shape : shape attribute
gets the dimensions of the data frame.
values :
return underlying numpy array.
to_records :
export data as numpy record array.
to_dict :
export data as dict of arrays.
Reference
~~~~~~~~~
More information and additional introductory materials can be found at the
pandas doc sites: http://pandas.pydata.org/pandas-docs/stable/
"""
# Author: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import mne
import matplotlib.pyplot as plt
import numpy as np
from mne.io import Raw
from mne.datasets import sample
print(__doc__)
# turn on interactive mode
plt.ion()
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = Raw(raw_fname)
# For simplicity we will only consider the first 10 epochs
events = mne.read_events(event_fname)[:10]
# Add a bad channel
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, exclude='bads')
tmin, tmax = -0.2, 0.5
baseline = (None, 0)
reject = dict(grad=4000e-13, eog=150e-6)
event_id = dict(auditory_l=1, auditory_r=2, visual_l=3, visual_r=4)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=baseline, preload=True, reject=reject)
###############################################################################
# Export DataFrame
# The following parameters will scale the channels and times plotting
# friendly. The info columns 'epoch' and 'time' will be used as hierarchical
# index whereas the condition is treated as categorical data. Note that
# this is optional. By passing None you could also print out all nesting
# factors in a long table style commonly used for analyzing repeated measure
# designs.
index, scale_time, scalings = ['epoch', 'time'], 1e3, dict(grad=1e13)
df = epochs.to_data_frame(picks=None, scalings=scalings, scale_time=scale_time,
index=index)
# Create MEG channel selector and drop EOG channel.
meg_chs = [c for c in df.columns if 'MEG' in c]
df.pop('EOG 061') # this works just like with a list.
###############################################################################
# Explore Pandas MultiIndex
# Pandas is using a MultiIndex or hierarchical index to handle higher
# dimensionality while at the same time representing data in a flat 2d manner.
print(df.index.names, df.index.levels)
# Inspecting the index object unveils that 'epoch', 'time' are used
# for subsetting data. We can take advantage of that by using the
# .ix attribute, where in this case the first position indexes the MultiIndex
# and the second the columns, that is, channels.
# Plot some channels across the first three epochs
xticks, sel = np.arange(3, 600, 120), meg_chs[:15]
df.ix[:3, sel].plot(xticks=xticks)
mne.viz.tight_layout()
# slice the time starting at t0 in epoch 2 and ending 500ms after
# the base line in epoch 3. Note that the second part of the tuple
# represents time in milliseconds from stimulus onset.
df.ix[(1, 0):(3, 500), sel].plot(xticks=xticks)
mne.viz.tight_layout()
# Note: For convenience the index was converted from floating point values
# to integer values. To restore the original values you can e.g. say
# df['times'] = np.tile(epoch.times, len(epochs_times)
# We now reset the index of the DataFrame to expose some Pandas
# pivoting functionality. To simplify the groupby operation we
# drop the indices to treat epoch and time as categorical factors.
df = df.reset_index()
# The ensuing DataFrame then is split into subsets reflecting a crossing
# between condition and trial number. The idea is that we can broadcast
# operations into each cell simultaneously.
factors = ['condition', 'epoch']
sel = factors + ['MEG 1332', 'MEG 1342']
grouped = df[sel].groupby(factors)
# To make the plot labels more readable let's edit the values of 'condition'.
df.condition = df.condition.apply(lambda name: name + ' ')
# Now we compare the mean of two channels response across conditions.
grouped.mean().plot(kind='bar', stacked=True, title='Mean MEG Response',
color=['steelblue', 'orange'])
mne.viz.tight_layout()
# We can even accomplish more complicated tasks in a few lines calling
# apply method and passing a function. Assume we wanted to know the time
# slice of the maximum response for each condition.
max_latency = grouped[sel[2]].apply(lambda x: df.time[x.argmax()])
print(max_latency)
plt.figure()
max_latency.plot(kind='barh', title='Latency of Maximum Response',
color=['steelblue'])
mne.viz.tight_layout()
# Finally, we will again remove the index to create a proper data table that
# can be used with statistical packages like statsmodels or R.
final_df = max_latency.reset_index()
final_df.rename(columns={0: sel[2]}) # as the index is oblivious of names.
# The index is now written into regular columns so it can be used as factor.
print(final_df)
# To save as csv file, uncomment the next line.
# final_df.to_csv('my_epochs.csv')
# Note. Data Frames can be easily concatenated, e.g., across subjects.
# E.g. say:
#
# import pandas as pd
# group = pd.concat([df_1, df_2])
# group['subject'] = np.r_[np.ones(len(df_1)), np.ones(len(df_2)) + 1]
| bsd-3-clause |
dkandalov/katas | python/ml/scikit/svm_gui.py | 1 | 10729 | from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv) | unlicense |
qifeigit/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
infoxchange/lettuce | tests/integration/django/dill/leaves/features/steps.py | 17 | 1432 | import json
from django.core.management import call_command
from leaves.models import *
from lettuce import after, step
from lettuce.django.steps.models import *
from nose.tools import assert_equals
after.each_scenario(clean_db)
max_rego = 0
@creates_models(Harvester)
def create_with_rego(step):
data = hashes_data(step)
for hash_ in data:
hash_['rego'] = hash_['make'][:3].upper() + "001"
create_models(Harvester, data)
@checks_existence(Harvester)
def check_with_rego(step):
data = hashes_data(step)
for hash_ in data:
try:
hash_['rego'] = hash_['rego'].upper()
except KeyError:
pass
models_exist(Harvester, data)
@step(r'The database dump is as follows')
def database_dump(step):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
output = StringIO()
call_command('dumpdata', stdout=output, indent=2)
output = output.getvalue()
assert_equals(json.loads(output), json.loads(step.multiline))
@step(r'I have populated the database')
def database_populated(step):
pass
@step(r'I count the harvesters')
def count_harvesters(step):
print "Harvester count: %d" % Harvester.objects.count()
@creates_models(Panda)
def create_pandas(step):
data = hashes_data(step)
if 'name' in data:
data['name'] += ' Panda'
return create_models(Panda, data)
| gpl-3.0 |
DamCB/tyssue | tests/geometry/test_planar_geometry.py | 2 | 2016 | import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from tyssue import config
from tyssue.core import Epithelium
from tyssue.generation import (
three_faces_sheet,
extrude,
hexa_grid3d,
hexa_grid2d,
subdivide_faces,
)
from tyssue.geometry.planar_geometry import PlanarGeometry
from numpy import pi
def test_face_projected_pos():
datasets = {}
tri_verts = [[0, 0], [1, 0], [-0.5, 0.75], [-0.5, -0.75]]
tri_edges = [
[0, 1, 0],
[1, 2, 0],
[2, 0, 0],
[0, 3, 1],
[3, 1, 1],
[1, 0, 1],
[0, 2, 2],
[2, 3, 2],
[3, 0, 2],
]
datasets["edge"] = pd.DataFrame(
data=np.array(tri_edges), columns=["srce", "trgt", "face"]
)
datasets["edge"].index.name = "edge"
datasets["face"] = pd.DataFrame(data=np.zeros((3, 2)), columns=["x", "y"])
datasets["face"].index.name = "face"
datasets["vert"] = pd.DataFrame(data=np.array(tri_verts), columns=["x", "y"])
datasets["vert"].index.name = "vert"
specs = config.geometry.planar_spec()
eptm = Epithelium("extra", datasets, specs, coords=["x", "y"])
PlanarGeometry.update_all(eptm)
res_rot_pos_pi2 = PlanarGeometry.face_projected_pos(eptm, 0, pi / 2.0)
res_rot_pos_face1_2pi = PlanarGeometry.face_projected_pos(eptm, 1, 2.0 * pi)
expected_rot_pos_pi2 = pd.DataFrame.from_dict(
{
"vert": [0, 1, 2, 3],
"x": [0.25, 0.25, -0.5, 1.0],
"y": [-0.166667, 0.8333333, -0.666667, -0.666667],
}
).set_index("vert")
expected_rot_pos_face1_2pi = pd.DataFrame.from_dict(
{
"vert": [0, 1, 2, 3],
"x": [-0.166667, 0.833333, -0.666667, -0.666667],
"y": [0.25, 0.25, 1.00, -0.5],
}
).set_index("vert")
tolerance = 1e-16
assert all((expected_rot_pos_pi2 - res_rot_pos_pi2) ** 2 < tolerance)
assert all((expected_rot_pos_face1_2pi - res_rot_pos_face1_2pi) ** 2 < tolerance)
| gpl-3.0 |
aewhatley/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 129 | 7848 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
kaushikcfd/eikonal-unstructured | Debug/debugValues/plotValues.py | 1 | 1783 | # Reference: http://matplotlib.org/examples/pylab_examples/triplot_demo.html
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Times']})
x = []
y = []
T = []
triangles = []
#-----------------------------------------------------------------------------
# Reading values from the files
#-----------------------------------------------------------------------------
with open('square-refined-Nodes.dat', 'r') as f:
noNodes = int(f.readline())
for i in range(noNodes):
x_y = map(float, f.readline().strip().split())
x.append(x_y[0])
y.append(x_y[1])
with open('square-refined-Elements.dat', 'r') as f:
noElements = int(f.readline())
for i in range(noElements):
node1_2_3 = map(int, f.readline().strip().split())
triangles.append(node1_2_3)
with open('square-refined.dat', 'r') as f:
noValues = int(f.readline())
for i in range(noValues):
t = float(f.readline())
T.append(t)
#-----------------------------------------------------------------------------
# Defining the triangulation, with the help of the values that have been read.
#-----------------------------------------------------------------------------
triang = tri.Triangulation(x, y, triangles)
#-----------------------------------------------------------------------------
# Plotting the figure
#-----------------------------------------------------------------------------
plt.figure()
plt.gca().set_aspect('equal')
plt.tricontour(triang, T, 10)
plt.colorbar()
#plt.title("Planar Wavefront on Unstructured Grid")
plt.title(r"$F = 2 - \frac{1}{2}\cos^2\left(\pi\left(y-\frac{1}{2}\right)\right)$- Rectangular Structured Grid")
plt.show()
| gpl-3.0 |
casimp/pyxe | bin/data_creation.py | 3 | 3978 | import numpy as np
import matplotlib.pyplot as plt
import time
from pyxe.williams import sigma_xx, sigma_yy, sigma_xy, cart2pol
from pyxe.fitting_functions import strain_transformation, shear_transformation
def plane_strain_s2e(sigma_xx, sigma_yy, sigma_xy, E, v, G=None):
if G is None:
G = E / (2 * (1 + v))  # shear modulus of an isotropic material
e_xx = (1 / E) * (sigma_xx - v*sigma_yy)
e_yy = (1 / E) * (sigma_yy - v*sigma_xx)
e_xy = sigma_xy / G
return e_xx, e_yy, e_xy
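# Note: the expressions above are the plane-stress form of Hooke's law, used here to
# generate fake data; the full plane-strain relations would instead read
#     e_xx = ((1 + v) / E) * ((1 - v) * sigma_xx - v * sigma_yy)
#     e_yy = ((1 + v) / E) * ((1 - v) * sigma_yy - v * sigma_xx)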
class StrainField(object):
def __init__(self, x, y, K, E, v, G=None, state='plane strain'):
self.K = K
self.x = x
self.y = y
self.r, self.theta = cart2pol(x, y)
self.sig_xx = sigma_xx(self.K, self.r, self.theta)
self.sig_yy = sigma_yy(self.K, self.r, self.theta)
self.sig_xy = sigma_xy(self.K, self.r, self.theta)
sigma_comp = self.sig_xx, self.sig_yy, self.sig_xy
stress2strain = plane_strain_s2e if state == 'plane strain' else None
data = stress2strain(*sigma_comp, E, v, G)
self.e_xx, self.e_yy, self.e_xy = data
def extract_strain_map(self, phi=np.pi/2, shear=False):
trans = strain_transformation if not shear else shear_transformation
e = trans(phi, self.e_xx, self.e_yy, self.e_xy)
return e
def plot_strain_map(self, phi=np.pi/2, shear=False):
e = self.extract_strain_map(phi, shear)
plt.contourf(self.x, self.y, e, 21)
plt.show()
def extract_stress_map(self, phi=np.pi/2, shear=False):
trans = strain_transformation if not shear else shear_transformation
sig = trans(phi, self.sig_xx, self.sig_yy, self.sig_xy)
return sig
def plot_stress_map(self, phi=np.pi/2, shear=False):
sig = self.extract_stress_map(phi, shear)
plt.contourf(self.x, self.y, sig, 21)
plt.show()
def extract_strain_array(self, phi):
"""
Add valus for phi
:param phi:
:return:
"""
strain = np.nan * np.ones((self.x.shape + (1,) + phi.shape))
for idx, tt in enumerate(phi):
e_xx1 = strain_transformation(tt, self.e_xx, self.e_yy, self.e_xy)
strain[:, :, 0, idx] = e_xx1
return strain
def create_nxs_shell(x, y, phi):
group = None
ss2_x = x
ss2_y = y
ss2_x = None
scan_command = [b'ss2_x', b'ss2_y']
phi = phi
q = 0
I = 0
# create nxs
# h5py.save
# load nxs and fill with data
def add_strain_field(data, K, E, v, G=None, state='plane strain'):
crack_field = StrainField(data.ss2_x, data.ss2_y, K, E, v, G, state)
data.strain = crack_field.extract_strain_array(data.phi)
data.strain_err = np.zeros_like(data.strain)
return crack_field
x = np.linspace(-0.5, 1, 100)
y = np.linspace(-0.75, 0.75, 100)
X, Y = np.meshgrid(x, y)
data = StrainField(X, Y, 20*10**6, 200*10**9, 0.3)
# Note: StrainField does not define a create_nxs method; the NeXus export above
# (create_nxs_shell / add_strain_field) is still a stub, so this call is commented out.
# data.create_nxs(np.linspace(0, np.pi, 10))
#sigma_array = np.nan * np.ones((y.size, x.size, 1, n_phi))
#for idx, tt in enumerate(np.linspace(0, np.pi, n_phi)):
# sigma_array[:, :, 0, idx] = strain_transformation(tt, *(sig_xx, sig_yy, sig_xy))
#e_xx, e_yy, e_xy = plane_strain_s2e(sig_xx, sig_yy, sig_xy, 200 * 10 **9, 0.3)
#strain_array = np.nan * np.ones((y.size, x.size, 1, n_phi))
#for idx, tt in enumerate(np.linspace(0, np.pi, n_phi)):
# e_xx1 = strain_transformation(tt, *(e_xx, e_yy, e_xy))
# strain_array[:, :, 0, idx] = e_xx1
# plt.figure()
# e_xx1[e_xx1>0.004]=0.004
# e_xx1[e_xx1 < -0.001] = -0.001
# plt.contourf(X, Y, e_xx1, np.linspace(-0.001, 0.004, 25))
# plt.colorbar()
# plt.contour(X, Y, e_xx1, np.linspace(-0.001, 0.004, 25), colors = 'k', linewidths=0.4, aplha=0.3)
# plt.savefig(r'C:\Users\lbq76018\Documents\Python Scripts\pyxe_fake\%03d.png' % idx)
#plt.show()
#plt.figure()
#c = plt.contourf(X, Y, sig_yy, 25)
#plt.colorbar()
#plt.figure()
#c = plt.contourf(X, Y, e_yy, 25)
#plt.colorbar()
#plt.show()
#print(sigma_array)
| mit |
morepj/numerical-mooc | lessons/02_spacetime/solutions/01.py | 3 | 1024 | import numpy #here we load numpy
import matplotlib.pyplot as plt #here we load matplotlib, calling it 'plt'
import time, sys #and load some utilities
nx = 41 # try changing this number from 41 to 81 and Run All ... what happens?
dx = 2./(nx-1)
nt = 25 #nt is the number of timesteps we want to calculate
dt = .025 #dt is the amount of time each timestep covers (delta t)
c = 1. #assume wavespeed of c = 1
u = numpy.ones(nx) #numpy function ones()
u[int(.5/dx) : int(1/dx+1)] = 2 #setting u = 2 between 0.5 and 1 as per our I.C.s
print(u)
un = numpy.ones(nx) #initialize a temporary array
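# The update below is the forward-time/backward-space (upwind) discretisation
# of the 1-D linear convection equation du/dt + c*du/dx = 0:
#   u[i] at step n+1  =  u[i] - c*dt/dx * (u[i] - u[i-1]) at step n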
for n in range(nt): #loop for values of n from 0 to nt, so it will run nt times
un = u.copy() ##copy the existing values of u into un
for i in range(1,nx): ## you can try commenting this line and...
#for i in range(nx): ## ... uncommenting this line and see what happens!
u[i] = un[i]-c*dt/dx*(un[i]-un[i-1])
plt.plot(numpy.linspace(0,2,nx),u)
plt.show()
| mit |
procoder317/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
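# Rationale (assuming _log_normalize performs the usual double-centering of
# log(X), i.e. subtracting row and column means and adding back the grand
# mean): every row and column of the result then sums to ~0, so shifting by
# any constant gives equal row sums and equal column sums.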
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(mat, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
tartavull/google-cloud-python | docs/conf.py | 2 | 9905 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 21 22:24:47 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import email
import os
import pkg_resources
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'google-cloud'
copyright = u'2014-2017, Google'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
distro = pkg_resources.get_distribution('google-cloud')
release = os.getenv('SPHINX_RELEASE', distro.version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'description': 'Google Cloud Client Libraries for Python',
'github_user': 'GoogleCloudPlatform',
'github_repo': 'google-cloud-python',
'github_banner': True,
'travis_button': True,
'font_family': "'Roboto', Georgia, sans",
'head_font_family': "'Roboto', Georgia, serif",
'code_font_family': "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/images/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_add_permalinks = '#'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'google-cloud-doc'
html_context = {}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
metadata = distro.get_metadata(distro.PKG_INFO)
author = email.message_from_string(metadata).get('Author')
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'google-cloud.tex', u'google-cloud Documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for warnings ------------------------------------------------------
suppress_warnings = []
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'google-cloud', u'google-cloud Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'google-cloud', u'google-cloud Documentation',
author, 'google-cloud', 'Python API for Google Cloud.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# This pulls class descriptions from the class docstring,
# and parameter definitions from the __init__ docstring.
autoclass_content = 'both'
# Configuration for intersphinx:
intersphinx_mapping = {
'google-auth': ('https://google-auth.readthedocs.io/en/stable', None),
'google-gax': ('https://gax-python.readthedocs.io/en/latest/', None),
'grpc': ('http://www.grpc.io/grpc/python/', None),
'requests': ('http://docs.python-requests.org/en/master/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'python': ('https://docs.python.org/3', None),
}
| apache-2.0 |
imperial-genomics-facility/data-management-python | test/process/reformat_samplesheet_file_test.py | 1 | 2785 | import unittest,os
import pandas as pd
from igf_data.utils.fileutils import get_temp_dir,remove_dir
from igf_data.process.metadata_reformat.reformat_samplesheet_file import Reformat_samplesheet_file,SampleSheet
class Reformat_samplesheet_file_testA(unittest.TestCase):
def setUp(self):
self.tmp_dir = get_temp_dir()
def tearDown(self):
remove_dir(self.tmp_dir)
def test_detect_tenx_barcodes(self):
description = \
Reformat_samplesheet_file.\
detect_tenx_barcodes(index='SI-GA-A1')
self.assertEqual(description,'10X')
def test_correct_samplesheet_data_row(self):
data = pd.Series(\
{'Lane':1,
'Sample_ID':'IGF1 ',
'Sample_Name':'samp_(1)',
'index':'SI-GA-A1',
'Sample_Project':'IGFQ scRNA-seq5primeFB',
'Description':''})
re_samplesheet = \
Reformat_samplesheet_file(\
infile='data/metadata_validation/metadata_reformatting/incorrect_samplesheet.csv')
data = \
re_samplesheet.\
correct_samplesheet_data_row(row=data)
self.assertEqual(data['Sample_ID'],'IGF1')
self.assertEqual(data['Sample_Name'],'samp-1')
self.assertEqual(data['Sample_Project'],'IGFQ-scRNA-seq5primeFB')
self.assertEqual(data['Description'],'10X')
def test_reformat_raw_samplesheet_file(self):
re_samplesheet = \
Reformat_samplesheet_file(\
infile='data/metadata_validation/metadata_reformatting/incorrect_samplesheet.csv',
remove_adapters=True)
output_file = os.path.join(self.tmp_dir,'samplesheet.csv')
re_samplesheet.\
reformat_raw_samplesheet_file(\
output_file=output_file)
sa = SampleSheet(infile=output_file)
self.assertFalse(sa.check_sample_header('Settings','Adapter'))
data = pd.DataFrame(sa._data)
sample1 = data[data['Sample_ID']=='IGF1']
self.assertEqual(sample1['Sample_Name'].values[0],'samp-1')
self.assertEqual(sample1['Sample_Project'].values[0],'IGFQ1-scRNA-seq5primeFB')
self.assertEqual(sample1['Description'].values[0],'10X')
def test_reformat_samplesheet_data_file(self):
re_samplesheet = \
Reformat_samplesheet_file(\
infile='data/metadata_validation/metadata_reformatting/incorrect_samplesheet_data.csv',
file_format='csv',
remove_adapters=True)
output_file = os.path.join(self.tmp_dir,'samplesheet.csv')
re_samplesheet.\
reformat_raw_samplesheet_file(\
output_file=output_file)
data = pd.read_csv(output_file)
sample1 = data[data['Sample_ID']=='IGF1']
self.assertEqual(sample1['Sample_Name'].values[0],'samp-1')
self.assertEqual(sample1['Sample_Project'].values[0],'IGFQ1-scRNA-seq5primeFB')
self.assertEqual(sample1['Description'].values[0],'10X')
if __name__ == '__main__':
unittest.main() | apache-2.0 |
iemejia/beam | sdks/python/apache_beam/runners/interactive/utils_test.py | 5 | 10771 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import unittest
from typing import NamedTuple
from unittest.mock import PropertyMock
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
import apache_beam as beam
from apache_beam import coders
from apache_beam.dataframe.convert import to_dataframe
from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import utils
from apache_beam.runners.interactive.testing.mock_ipython import mock_get_ipython
from apache_beam.testing.test_stream import WindowedValueHolder
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
class Record(NamedTuple):
order_id: int
product_id: int
quantity: int
def windowed_value(e):
from apache_beam.transforms.window import GlobalWindow
return WindowedValue(e, 1, [GlobalWindow()])
class ParseToDataframeTest(unittest.TestCase):
def test_parse_windowedvalue(self):
"""Tests that WindowedValues are supported but not present.
"""
els = [windowed_value(('a', 2)), windowed_value(('b', 3))]
actual_df = utils.elements_to_df(els, include_window_info=False)
expected_df = pd.DataFrame([['a', 2], ['b', 3]], columns=[0, 1])
# check_like so that ordering of indices doesn't matter.
pd.testing.assert_frame_equal(actual_df, expected_df, check_like=True)
def test_parse_windowedvalue_with_window_info(self):
"""Tests that WindowedValues are supported and have their own columns.
"""
els = [windowed_value(('a', 2)), windowed_value(('b', 3))]
actual_df = utils.elements_to_df(els, include_window_info=True)
expected_df = pd.DataFrame(
[['a', 2, int(1e6), els[0].windows, els[0].pane_info],
['b', 3, int(1e6), els[1].windows, els[1].pane_info]],
columns=[0, 1, 'event_time', 'windows', 'pane_info'])
# check_like so that ordering of indices doesn't matter.
pd.testing.assert_frame_equal(actual_df, expected_df, check_like=True)
def test_parse_windowedvalue_with_dicts(self):
"""Tests that dicts play well with WindowedValues.
"""
els = [
windowed_value({
'b': 2, 'd': 4
}),
windowed_value({
'a': 1, 'b': 2, 'c': 3
})
]
actual_df = utils.elements_to_df(els, include_window_info=True)
expected_df = pd.DataFrame(
[[np.nan, 2, np.nan, 4, int(1e6), els[0].windows, els[0].pane_info],
[1, 2, 3, np.nan, int(1e6), els[1].windows, els[1].pane_info]],
columns=['a', 'b', 'c', 'd', 'event_time', 'windows', 'pane_info'])
# check_like so that ordering of indices doesn't matter.
pd.testing.assert_frame_equal(actual_df, expected_df, check_like=True)
def test_parse_dataframes(self):
"""Tests that it correctly parses a DataFrame.
"""
deferred = to_dataframe(beam.Pipeline() | beam.Create([Record(0, 0, 0)]))
els = [windowed_value(pd.DataFrame(Record(n, 0, 0))) for n in range(10)]
actual_df = utils.elements_to_df(
els, element_type=deferred._expr.proxy()).reset_index(drop=True)
expected_df = pd.concat([e.value for e in els], ignore_index=True)
pd.testing.assert_frame_equal(actual_df, expected_df)
def test_parse_series(self):
"""Tests that it correctly parses a Pandas Series.
"""
deferred = to_dataframe(beam.Pipeline()
| beam.Create([Record(0, 0, 0)]))['order_id']
els = [windowed_value(pd.Series([n])) for n in range(10)]
actual_df = utils.elements_to_df(
els, element_type=deferred._expr.proxy()).reset_index(drop=True)
expected_df = pd.concat([e.value for e in els], ignore_index=True)
pd.testing.assert_series_equal(actual_df, expected_df)
class ToElementListTest(unittest.TestCase):
def test_test_stream_payload_events(self):
"""Tests that the to_element_list can limit the count in a single bundle."""
coder = coders.FastPrimitivesCoder()
def reader():
element_payload = [
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode(
WindowedValueHolder(WindowedValue(e, 0, []))),
timestamp=Timestamp.of(0).micros) for e in range(10)
]
event = TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=element_payload))
yield event
# The reader creates 10 elements in a single TestStreamPayload but we limit
# the number of elements read to 5 here. This tests that the to_element_list
# can limit the number of elements in a single bundle.
elements = utils.to_element_list(
reader(), coder, include_window_info=False, n=5)
self.assertSequenceEqual(list(elements), list(range(5)))
def test_element_limit_count(self):
"""Tests that the to_element_list can limit the count."""
elements = utils.to_element_list(
iter(range(10)), None, include_window_info=False, n=5)
self.assertSequenceEqual(list(elements), list(range(5)))
@unittest.skipIf(
not ie.current_env().is_interactive_ready,
'[interactive] dependency is not installed.')
class IPythonLogHandlerTest(unittest.TestCase):
def setUp(self):
utils.register_ipython_log_handler()
self._interactive_root_logger = logging.getLogger(
'apache_beam.runners.interactive')
def test_ipython_log_handler_not_double_registered(self):
utils.register_ipython_log_handler()
ipython_log_handlers = list(
filter(
lambda x: isinstance(x, utils.IPythonLogHandler),
[handler for handler in self._interactive_root_logger.handlers]))
self.assertEqual(1, len(ipython_log_handlers))
@patch('apache_beam.runners.interactive.utils.IPythonLogHandler.emit')
def test_default_logging_level_is_info(self, mock_emit):
# By default the logging level of loggers and log handlers are NOTSET. Also,
# the propagation is default to true for all loggers. In this scenario, all
# loggings from child loggers will be propagated to the interactive "root"
# logger which is set to INFO level that gets handled by the sole log
# handler IPythonLogHandler which is set to NOTSET. The effect will be
# everything >= info level will be logged through IPython.core.display to
# all frontends connected to current kernel.
dummy_logger = logging.getLogger('apache_beam.runners.interactive.dummy1')
dummy_logger.info('info')
mock_emit.assert_called_once()
dummy_logger.debug('debug')
# Emit is not called, so it's still called once.
mock_emit.assert_called_once()
@patch('apache_beam.runners.interactive.utils.IPythonLogHandler.emit')
def test_child_module_logger_can_override_logging_level(self, mock_emit):
# When a child logger's logging level is configured to something that is not
# NOTSET, it takes back the logging control from the interactive "root"
# logger by not propagating anything.
dummy_logger = logging.getLogger('apache_beam.runners.interactive.dummy2')
dummy_logger.setLevel(logging.DEBUG)
mock_emit.assert_not_called()
dummy_logger.debug('debug')
# Because the dummy child logger is configured to log at DEBUG level, it
# now propagates DEBUG loggings to the interactive "root" logger.
mock_emit.assert_called_once()
# When the dummy child logger is configured to log at CRITICAL level, it
# will only propagate CRITICAL loggings to the interactive "root" logger.
dummy_logger.setLevel(logging.CRITICAL)
# Error loggings will not be handled now.
dummy_logger.error('error')
# Emit is not called, so it's still called once.
mock_emit.assert_called_once()
@unittest.skipIf(
not ie.current_env().is_interactive_ready,
'[interactive] dependency is not installed.')
@pytest.mark.skipif(
not ie.current_env().is_interactive_ready,
reason='[interactive] dependency is not installed.')
class ProgressIndicatorTest(unittest.TestCase):
def setUp(self):
ie.new_env()
@patch('IPython.get_ipython', new_callable=mock_get_ipython)
@patch(
'apache_beam.runners.interactive.interactive_environment'
'.InteractiveEnvironment.is_in_notebook',
new_callable=PropertyMock)
def test_progress_in_plain_text_when_not_in_notebook(
self, mocked_is_in_notebook, unused):
mocked_is_in_notebook.return_value = False
with patch('IPython.core.display.display') as mocked_display:
@utils.progress_indicated
def progress_indicated_dummy():
mocked_display.assert_any_call('Processing...')
progress_indicated_dummy()
mocked_display.assert_any_call('Done.')
@patch('IPython.get_ipython', new_callable=mock_get_ipython)
@patch(
'apache_beam.runners.interactive.interactive_environment'
'.InteractiveEnvironment.is_in_notebook',
new_callable=PropertyMock)
def test_progress_in_HTML_JS_when_in_notebook(
self, mocked_is_in_notebook, unused):
mocked_is_in_notebook.return_value = True
with patch('IPython.core.display.HTML') as mocked_html,\
patch('IPython.core.display.Javascript') as mocked_js:
with utils.ProgressIndicator('enter', 'exit'):
mocked_html.assert_called()
mocked_js.assert_called()
@unittest.skipIf(
not ie.current_env().is_interactive_ready,
'[interactive] dependency is not installed.')
class MessagingUtilTest(unittest.TestCase):
SAMPLE_DATA = {'a': [1, 2, 3], 'b': 4, 'c': '5', 'd': {'e': 'f'}}
def setUp(self):
ie.new_env()
def test_as_json_decorator(self):
@utils.as_json
def dummy():
return MessagingUtilTest.SAMPLE_DATA
# As of Python 3.6, for the CPython implementation of Python,
# dictionaries remember the order of items inserted.
self.assertEqual(json.loads(dummy()), MessagingUtilTest.SAMPLE_DATA)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
wanglei828/apollo | modules/tools/navigation/planning/path_decider.py | 3 | 8453 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import math
from reference_path import ReferencePath
from local_path import LocalPath
from numpy.polynomial.polynomial import polyval
class PathDecider:
def __init__(self, enable_routing_aid, enable_nudge, enable_change_lane):
self.MINIMUM_PATH_LENGTH = 5
self.MAX_LAT_CHANGE = 0.1
self.last_init_lat = None
self.ref = ReferencePath()
self.enable_routing_aid = enable_routing_aid
self.enable_nudge = enable_nudge
self.enable_change_lane = enable_change_lane
self.path_range = 10
def get_path_by_lm(self, mobileye, adv):
return self.ref.get_ref_path_by_lm(mobileye, adv)
def get_path_by_lmr(self, perception, routing, adv):
path_x, path_y, path_len = self.ref.get_ref_path_by_lmr(perception,
routing,
adv)
if self.enable_nudge:
path_x, path_y, path_len = self.nudge_process(path_x, path_y,
path_len)
return path_x, path_y, path_len
def nudge_process(self, final_path, obstacle_decider):
obstacle_decider.process_path_obstacle(final_path)
left_dist = 999
right_dist = 999
for obs_id, lat_dist in obstacle_decider.obstacle_lat_dist.items():
if lat_dist < 0:
left_dist = lat_dist
else:
right_dist = lat_dist
print(left_dist, right_dist)
return final_path
def get(self, perception, routing, adv):
if self.enable_routing_aid:
return self.get_path_by_lmr(perception, routing, adv)
else:
return self.get_path_by_lm(perception, adv)
def get_path(self, perception, routing, adv, obstacle_decider=None):
self.path_range = self._get_path_range(adv.speed_mps)
if self.enable_routing_aid and adv.is_ready():
return self.get_routing_path(perception, routing, adv,
obstacle_decider)
else:
return self.get_lane_marker_path(perception)
def get_routing_path(self, perception, routing, adv, obstacle_decider):
routing_path = routing.get_local_path(adv, self.path_range + 1)
perception_path = perception.get_lane_marker_middle_path(
self.path_range + 1)
quality = perception.right_lm_quality + perception.left_lm_quality
quality = quality / 2.0
if routing_path.range() >= self.path_range \
and routing.human \
and routing_path.init_y() <= 3:
# "routing only"
init_y_routing = routing_path.init_y()
init_y = self._smooth_init_y(init_y_routing)
routing_path.shift(init_y - routing_path.init_y())
if self.enable_nudge:
obstacle_decider.process_path_obstacle(routing_path)
left_nudgable, right_nudgable = \
obstacle_decider.get_adv_left_right_nudgable_dist(
routing_path)
nudge_dist = obstacle_decider.get_nudge_distance(left_nudgable,
right_nudgable)
smoothed_nudge_dist = self._smooth_init_y(nudge_dist)
if smoothed_nudge_dist != 0:
print(smoothed_nudge_dist)
routing_path.shift(smoothed_nudge_dist)
return routing_path
init_y = self._smooth_init_y(perception_path.init_y())
if routing_path.range() < self.path_range:
# "perception only"
perception_path.shift(init_y - perception_path.init_y())
return perception_path
# "hybrid"
init_y = perception_path.init_y()
routing_path.shift(init_y - routing_path.init_y())
perception_path.shift(init_y - routing_path.init_y())
routing_path.merge(perception_path, quality)
return routing_path
def get_lane_marker_path(self, perception):
path = perception.get_lane_marker_middle_path(self.path_range)
init_y = path.init_y()
smoothed_init_y = self._smooth_init_y(init_y)
path.shift(smoothed_init_y - init_y)
return path
def _get_path_range(self, speed_mps):
path_length = self.MINIMUM_PATH_LENGTH
current_speed = speed_mps
if current_speed is not None:
if path_length < current_speed * 2:
path_length = math.ceil(current_speed * 2)
return int(path_length)
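# Descriptive note: the planning look-ahead is roughly two seconds of travel
# at the current speed, never shorter than MINIMUM_PATH_LENGTH (5 m).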
def _smooth_init_y(self, init_y):
if init_y > 0.2:
init_y = 0.2
if init_y < -0.2:
init_y = -0.2
return init_y
if __name__ == "__main__":
import rospy
from std_msgs.msg import String
import matplotlib.pyplot as plt
from modules.localization.proto import localization_pb2
from modules.canbus.proto import chassis_pb2
from ad_vehicle import ADVehicle
import matplotlib.animation as animation
from modules.drivers.proto import mobileye_pb2
from provider_routing import RoutingProvider
from provider_mobileye import MobileyeProvider
def localization_callback(localization_pb):
ad_vehicle.update_localization(localization_pb)
def routing_callback(routing_str):
routing.update(routing_str)
def chassis_callback(chassis_pb):
ad_vehicle.update_chassis(chassis_pb)
def mobileye_callback(mobileye_pb):
mobileye.update(mobileye_pb)
mobileye.process_lane_markers()
def update(frame):
if not ad_vehicle.is_ready():
return
left_path = mobileye.get_left_lane_marker_path()
left_x, left_y = left_path.get_xy()
left_lm.set_xdata(left_x)
left_lm.set_ydata(left_y)
right_path = mobileye.get_right_lane_marker_path()
right_x, right_y = right_path.get_xy()
right_lm.set_xdata(right_x)
right_lm.set_ydata(right_y)
middle_path = mobileye.get_lane_marker_middle_path(128)
middle_x, middle_y = middle_path.get_xy()
middle_lm.set_xdata(middle_x)
middle_lm.set_ydata(middle_y)
fpath = path_decider.get_path(mobileye, routing, ad_vehicle)
fpath_x, fpath_y = fpath.get_xy()
final_path.set_xdata(fpath_x)
final_path.set_ydata(fpath_y)
# ax.autoscale_view()
# ax.relim()
ad_vehicle = ADVehicle()
routing = RoutingProvider()
mobileye = MobileyeProvider()
path_decider = PathDecider(True, False, False)
rospy.init_node("path_decider_debug", anonymous=True)
rospy.Subscriber('/apollo/localization/pose',
localization_pb2.LocalizationEstimate,
localization_callback)
rospy.Subscriber('/apollo/navigation/routing',
String, routing_callback)
rospy.Subscriber('/apollo/canbus/chassis',
chassis_pb2.Chassis,
chassis_callback)
rospy.Subscriber('/apollo/sensor/mobileye',
mobileye_pb2.Mobileye,
mobileye_callback)
fig = plt.figure()
ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
left_lm, = ax.plot([], [], 'b-')
right_lm, = ax.plot([], [], 'b-')
middle_lm, = ax.plot([], [], 'k-')
final_path, = ax.plot([], [], 'r-')
ani = animation.FuncAnimation(fig, update, interval=100)
ax.set_xlim([-2, 128])
ax.set_ylim([-5, 5])
# ax2.axis('equal')
plt.show()
| apache-2.0 |
UNR-AERIAL/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
msingh172/pylearn2 | pylearn2/models/svm.py | 21 | 3386 | """Wrappers for SVM models."""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as np
import warnings
try:
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
except ImportError:
warnings.warn("Could not import sklearn.")
class OneVsRestClassifier(object):
"""
Dummy replacement for `sklearn.multiclass.OneVsRestClassifier`.
Parameters
----------
estimator : see `sklearn` doc.
See `sklearn` doc.
Notes
-----
This class is a dummy class included so that sphinx
can import DenseMulticlassSVM and document it even
when sklearn is not installed.
"""
def __init__(self, estimator):
raise RuntimeError("sklearn not available.")
class DenseMulticlassSVM(OneVsRestClassifier):
"""
sklearn does very different things behind the scenes depending
upon the exact identity of the class you use. The only way to
get an SVM implementation that works with dense data is to use
the `SVC` class, which implements one-against-one
classification. This wrapper uses it to implement one-against-
rest classification, which generally works better in my
experiments.
To avoid duplicating the training data, use only numpy ndarrays
whose tags.c_contigous flag is true, and which are in float64
format.
Parameters
----------
C : float
SVM regularization parameter.
See SVC.__init__ for details.
kernel : str
Type of kernel to use.
See SVC.__init__ for details.
gamma : float
Optional parameter of kernel.
See SVC.__init__ for details.
coef0 : float
Optional parameter of kernel.
See SVC.__init__ for details.
degree : int
Degree of kernel, if kernel is polynomial.
See SVC.__init__ for details.
"""
def __init__(self, C, kernel='rbf', gamma=1.0, coef0=1.0, degree=3):
estimator = SVC(C=C, kernel=kernel, gamma=gamma, coef0=coef0,
degree=degree)
super(DenseMulticlassSVM, self).__init__(estimator)
def fit(self, X, y):
"""
Fit underlying estimators.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples] or [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
-------
self
"""
super(DenseMulticlassSVM, self).fit(X, y)
return self
def decision_function(self, X):
"""
Returns the distance of each sample from the decision boundary for
each class.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A 2D ndarray with each row containing the input features for one
example.
Returns
-------
T : array-like, shape = [n_samples, n_classes]
"""
return np.column_stack([estimator.decision_function(X)
for estimator in self.estimators_])
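# Illustrative usage sketch (editorial addition, not part of pylearn2; assumes
# sklearn is installed and dense float64, C-contiguous inputs as noted above):
#   import numpy as np
#   X = np.ascontiguousarray(np.random.rand(20, 5))
#   y = np.random.randint(0, 3, size=20)
#   clf = DenseMulticlassSVM(C=1.0, kernel='rbf').fit(X, y)
#   scores = clf.decision_function(X)  # shape (n_samples, n_classes)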
| bsd-3-clause |
stkeky/valar-morghulis | examples/first_keras_example.py | 1 | 2721 | from __future__ import absolute_import
from __future__ import print_function
import pandas as pd
import numpy as np
from keras.utils import np_utils
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense, Dropout
data = np.array([
[0, 0, 0],
[1, 1, 0],
[2, 2, 0],
[3, 3, 0],
[4, 4, 0],
[5, 5, 1],
[6, 6, 1],
[7, 7, 1],
[8, 8, 1],
[9, 9, 1],
])
# Set data
data = np.vstack((data, data, data, data)) # Just for sufficient input
data = pd.DataFrame(data, columns=['x', 'y', 'class'])
# Split X and y
X = data.iloc[:, :-1].values
y = data.iloc[:, -1:].values
# Get dimensions of input and output
dimof_input = X.shape[1]
dimof_output = np.max(y) + 1
print('dimof_input: ', dimof_input)
print('dimof_output: ', dimof_output)
# Set y categorical
y = np_utils.to_categorical(y, dimof_output)
# Set constants
batch_size = 128
dimof_middle = 100
dropout = 0.2
countof_epoch = 1000
verbose = 0
print('batch_size: ', batch_size)
print('dimof_middle: ', dimof_middle)
print('dropout: ', dropout)
print('countof_epoch: ', countof_epoch)
print('verbose: ', verbose)
print()
# Set model
model = Sequential()
model.add(Dense(dimof_middle, input_dim=dimof_input, init='uniform', activation='tanh'))
model.add(Dropout(dropout))
model.add(Dense(dimof_middle, init='uniform', activation='tanh'))
model.add(Dropout(dropout))
model.add(Dense(dimof_output, init='uniform', activation='softmax'))
model.compile(loss='mse', optimizer='sgd', metrics=['accuracy'])
model.summary()
# Train
model.fit(
X, y,
validation_split=0.2,
batch_size=batch_size, nb_epoch=countof_epoch, verbose=verbose)
# Evaluate
loss, accuracy = model.evaluate(X, y, verbose=verbose)
print('loss: ', loss)
print('accuracy: ', accuracy)
print()
# Predict
# model.predict_classes(X, verbose=verbose)
print('prediction of [1, 1]: ', model.predict_classes(np.array([[1, 1]]), verbose=verbose))
print('prediction of [8, 8]: ', model.predict_classes(np.array([[8, 8]]), verbose=verbose))
print('prediction of [5, 5]: ', model.predict_classes(np.array([[5, 5]]), verbose=verbose))
print('prediction of [6, 6]: ', model.predict_classes(np.array([[6, 6]]), verbose=verbose))
print('prediction of [2, 10]: ', model.predict_classes(np.array([[2, 10]]), verbose=verbose))
# Plot
sns.lmplot('x', 'y', data, 'class', fit_reg=False).set(title='Data')
data_ = data.copy()
data_['class'] = model.predict_classes(X, verbose=0)
sns.lmplot('x', 'y', data_, 'class', fit_reg=False).set(title='Trained Result')
data_['class'] = ['Error' if is_error else 'Non Error' for is_error in data['class'] != data_['class']]
sns.lmplot('x', 'y', data_, 'class', fit_reg=False).set(title='Errors')
None
| mit |
jmetzen/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
musically-ut/statsmodels | statsmodels/graphics/tukeyplot.py | 33 | 2473 | from statsmodels.compat.python import range
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.lines as lines
def tukeyplot(results, dim=None, yticklabels=None):
npairs = len(results)
fig = plt.figure()
fsp = fig.add_subplot(111)
fsp.axis([-50,50,0.5,10.5])
fsp.set_title('95 % family-wise confidence level')
fsp.title.set_y(1.025)
fsp.set_yticks(np.arange(1,11))
fsp.set_yticklabels(['V-T','V-S','T-S','V-P','T-P','S-P','V-M',
'T-M','S-M','P-M'])
#fsp.yaxis.set_major_locator(mticker.MaxNLocator(npairs))
fsp.yaxis.grid(True, linestyle='-', color='gray')
fsp.set_xlabel('Differences in mean levels of Var', labelpad=8)
fsp.xaxis.tick_bottom()
fsp.yaxis.tick_left()
xticklines = fsp.get_xticklines()
for xtickline in xticklines:
xtickline.set_marker(lines.TICKDOWN)
xtickline.set_markersize(10)
xlabels = fsp.get_xticklabels()
for xlabel in xlabels:
xlabel.set_y(-.04)
yticklines = fsp.get_yticklines()
for ytickline in yticklines:
ytickline.set_marker(lines.TICKLEFT)
ytickline.set_markersize(10)
ylabels = fsp.get_yticklabels()
for ylabel in ylabels:
ylabel.set_x(-.04)
for pair in range(npairs):
data = .5+results[pair]/100.
#fsp.axhline(y=npairs-pair, xmin=data[0], xmax=data[1], linewidth=1.25,
fsp.axhline(y=npairs-pair, xmin=data.mean(), xmax=data[1], linewidth=1.25,
color='blue', marker="|", markevery=1)
fsp.axhline(y=npairs-pair, xmin=data[0], xmax=data.mean(), linewidth=1.25,
color='blue', marker="|", markevery=1)
#for pair in range(npairs):
# data = .5+results[pair]/100.
# data = results[pair]
# data = np.r_[data[0],data.mean(),data[1]]
# l = plt.plot(data, [npairs-pair]*len(data), color='black',
# linewidth=.5, marker="|", markevery=1)
fsp.axvline(x=0, linestyle="--", color='black')
fig.subplots_adjust(bottom=.125)
results = np.array([[-10.04391794, 26.34391794],
[-21.45225794, 14.93557794],
[ 5.61441206, 42.00224794],
[-13.40225794, 22.98557794],
[-29.60225794, 6.78557794],
[ -2.53558794, 33.85224794],
[-21.55225794, 14.83557794],
[ 8.87275206, 45.26058794],
[-10.14391794, 26.24391794],
[-37.21058794, -0.82275206]])
#plt.show()
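# Hedged usage sketch (not part of the original module): draw the example
# intervals defined above when the file is run directly.
if __name__ == "__main__":
    tukeyplot(results)
    plt.show()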
| bsd-3-clause |
jnez71/demos | signals/fourier_transform.py | 1 | 1952 | #!/usr/bin/env python3
"""
Using a typical FFT routine and showing the principle
behind the DTFT computation.
"""
import numpy as np
from matplotlib import pyplot
##################################################
# Efficient practical usage
def fft(values, dt):
freqs = np.fft.rfftfreq(len(values), dt)
coeffs = np.sqrt(2.0/len(values)) * np.fft.rfft(values) # scaled for unitarity
coeffs[0] /= np.sqrt(2.0) # don't "double count" the DC alias
return (freqs, coeffs)
# Working principle
def dtft(values, dt):
times = dt * np.arange(len(values))
nyquist = 1.0/(2.0*dt)
df = nyquist / (len(values)/2.0)
freqs = np.arange(0.0, nyquist+df, df)
# (rad/s)/Hz all f*t products
dtft_matrix = np.exp(-1j * (2.0*np.pi) * np.outer(freqs, times))
coeffs = np.sqrt(2.0/len(values)) * dtft_matrix.dot(values) # scaled for unitarity
coeffs[0] /= np.sqrt(2.0) # don't "double count" the DC alias
return (freqs, coeffs)
##################################################
def function(time):
w = 20*np.pi
value = 0.0
for k in range(5):
value += (k+1)*np.cos((k*w)*time)
return value
dt = 0.001
times = np.arange(0.0, 0.2, dt)
values = function(times)
##################################################
fft_freqs, fft_coeffs = fft(values, dt)
dtft_freqs, dtft_coeffs = dtft(values, dt)
assert np.allclose(fft_freqs, dtft_freqs)
assert np.allclose(fft_coeffs, dtft_coeffs)
##################################################
# Demonstrate Parseval's theorem
print(np.linalg.norm(values))
print(np.linalg.norm(dtft_coeffs))
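# Added sanity check (a sketch): with the unitary scaling used above, the two
# norms printed by the Parseval demonstration should agree closely.
assert np.isclose(np.linalg.norm(values), np.linalg.norm(dtft_coeffs))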
##################################################
fig = pyplot.figure()
ax = fig.add_subplot(2, 1, 1)
ax.plot(times, values)
ax.set_xlabel("Time (s)", fontsize=16)
ax.grid(True)
ax = fig.add_subplot(2, 1, 2)
ax.scatter(dtft_freqs, np.abs(dtft_coeffs))
ax.set_xlabel("Freq (Hz)", fontsize=16)
ax.grid(True)
pyplot.show()
| mit |
mne-tools/mne-tools.github.io | 0.20/_downloads/ad24d853f2b5d1e965cb721301884c03/plot_40_visualize_raw.py | 4 | 8744 | # -*- coding: utf-8 -*-
"""
.. _tut-visualize-raw:
Built-in plotting methods for Raw objects
=========================================
This tutorial shows how to plot continuous data as a time series, how to plot
the spectral density of continuous data, and how to plot the sensor locations
and projectors stored in :class:`~mne.io.Raw` objects.
.. contents:: Page contents
:local:
:depth: 2
As usual we'll start by importing the modules we need, loading some
:ref:`example data <sample-dataset>`, and cropping the :class:`~mne.io.Raw`
object to just 60 seconds before loading it into RAM to save memory:
"""
import os
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=60).load_data()
###############################################################################
# We've seen in :ref:`a previous tutorial <tut-raw-class>` how to plot data
# from a :class:`~mne.io.Raw` object using :doc:`matplotlib
# <matplotlib:index>`, but :class:`~mne.io.Raw` objects also have several
# built-in plotting methods:
#
# - :meth:`~mne.io.Raw.plot`
# - :meth:`~mne.io.Raw.plot_psd`
# - :meth:`~mne.io.Raw.plot_psd_topo`
# - :meth:`~mne.io.Raw.plot_sensors`
# - :meth:`~mne.io.Raw.plot_projs_topomap`
#
# The first three are discussed here in detail; the last two are shown briefly
# and covered in-depth in other tutorials.
#
#
# Interactive data browsing with ``Raw.plot()``
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The :meth:`~mne.io.Raw.plot` method of :class:`~mne.io.Raw` objects provides
# a versatile interface for exploring continuous data. For interactive viewing
# and data quality checking, it can be called with no additional parameters:
raw.plot()
###############################################################################
# It may not be obvious when viewing this tutorial online, but by default, the
# :meth:`~mne.io.Raw.plot` method generates an *interactive* plot window with
# several useful features:
#
# - It spaces the channels equally along the y-axis.
#
# - 20 channels are shown by default; you can scroll through the channels
# using the :kbd:`↑` and :kbd:`↓` arrow keys, or by clicking on the
# colored scroll bar on the right edge of the plot.
#
# - The number of visible channels can be adjusted by the ``n_channels``
# parameter, or changed interactively using :kbd:`page up` and :kbd:`page
# down` keys.
#
# - You can toggle the display to "butterfly" mode (superimposing all
# channels of the same type on top of one another) by pressing :kbd:`b`,
# or start in butterfly mode by passing the ``butterfly=True`` parameter.
#
# - It shows the first 10 seconds of the :class:`~mne.io.Raw` object.
#
# - You can shorten or lengthen the window length using :kbd:`home` and
# :kbd:`end` keys, or start with a specific window duration by passing the
# ``duration`` parameter.
#
# - You can scroll in the time domain using the :kbd:`←` and
# :kbd:`→` arrow keys, or start at a specific point by passing the
# ``start`` parameter. Scrolling using :kbd:`shift`:kbd:`→` or
# :kbd:`shift`:kbd:`←` scrolls a full window width at a time.
#
# - It allows clicking on channels to mark/unmark as "bad".
#
# - When the plot window is closed, the :class:`~mne.io.Raw` object's
# ``info`` attribute will be updated, adding or removing the newly
# (un)marked channels to/from the :class:`~mne.Info` object's ``bads``
# field (A.K.A. ``raw.info['bads']``).
#
# .. TODO: discuss annotation snapping in the below bullets
#
# - It allows interactive :term:`annotation <annotations>` of the raw data.
#
# - This allows you to mark time spans that should be excluded from future
# computations due to large movement artifacts, line noise, or other
# distortions of the signal. Annotation mode is entered by pressing
# :kbd:`a`. See :ref:`annotations-tutorial` for details.
#
# - It automatically applies any :term:`projectors <projector>` before plotting
# the data.
#
# - These can be enabled/disabled interactively by clicking the ``Proj``
# button at the lower right corner of the plot window, or disabled by
# default by passing the ``proj=False`` parameter. See
# :ref:`tut-projectors-background` for more info on projectors.
#
# These and other keyboard shortcuts are listed in the Help window, accessed
# through the ``Help`` button at the lower left corner of the plot window.
# Other plot properties (such as color of the channel traces, channel order and
# grouping, simultaneous plotting of :term:`events`, scaling, clipping,
# filtering, etc.) can also be adjusted through parameters passed to the
# :meth:`~mne.io.Raw.plot` method; see the docstring for details.
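# As a rough, added illustration (not part of the original tutorial), the
# window duration, start time, number of visible channels and projector
# handling described above can also be set directly when calling the method:
raw.plot(duration=5, start=10, n_channels=30, proj=True)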
#
#
# Plotting spectral density of continuous data
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# To visualize the frequency content of continuous data, the
# :class:`~mne.io.Raw` object provides a :meth:`~mne.io.Raw.plot_psd` to plot
# the `spectral density`_ of the data.
raw.plot_psd(average=True)
###############################################################################
# If the data have been filtered, vertical dashed lines will automatically
# indicate filter boundaries. The spectrum for each channel type is drawn in
# its own subplot; here we've passed the ``average=True`` parameter to get a
# summary for each channel type, but it is also possible to plot each channel
# individually, with options for how the spectrum should be computed,
# color-coding the channels by location, and more. For example, here is a plot
# of just a few sensors (specified with the ``picks`` parameter), color-coded
# by spatial location (via the ``spatial_colors`` parameter, see the
# documentation of :meth:`~mne.io.Raw.plot_psd` for full details):
midline = ['EEG 002', 'EEG 012', 'EEG 030', 'EEG 048', 'EEG 058', 'EEG 060']
raw.plot_psd(picks=midline)
###############################################################################
# Alternatively, you can plot the PSD for every sensor on its own axes, with
# the axes arranged spatially to correspond to sensor locations in space, using
# :meth:`~mne.io.Raw.plot_psd_topo`:
raw.plot_psd_topo()
###############################################################################
# This plot is also interactive; hovering over each "thumbnail" plot will
# display the channel name in the bottom left of the plot window, and clicking
# on a thumbnail plot will create a second figure showing a larger version of
# the selected channel's spectral density (as if you had called
# :meth:`~mne.io.Raw.plot_psd` on that channel).
#
# By default, :meth:`~mne.io.Raw.plot_psd_topo` will show only the MEG
# channels if MEG channels are present; if only EEG channels are found, they
# will be plotted instead:
raw.copy().pick_types(meg=False, eeg=True).plot_psd_topo()
###############################################################################
# Plotting sensor locations from ``Raw`` objects
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The channel locations in a :class:`~mne.io.Raw` object can be easily plotted
# with the :meth:`~mne.io.Raw.plot_sensors` method. A brief example is shown
# here; notice that channels in ``raw.info['bads']`` are plotted in red. More
# details and additional examples are given in the tutorial
# :ref:`tut-sensor-locations`.
raw.plot_sensors(ch_type='eeg')
###############################################################################
# .. _`tut-section-raw-plot-proj`:
#
# Plotting projectors from ``Raw`` objects
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# As seen in the output of :meth:`mne.io.read_raw_fif` above, there are
# :term:`projectors <projector>` included in the example :class:`~mne.io.Raw`
# file (representing environmental noise in the signal, so it can later be
# "projected out" during preprocessing). You can visualize these projectors
# using the :meth:`~mne.io.Raw.plot_projs_topomap` method. By default it will
# show one figure per channel type for which projectors are present, and each
# figure will have one subplot per projector. The three projectors in this file
# were only computed for magnetometers, so one figure with three subplots is
# generated. More details on working with and plotting projectors are given in
# :ref:`tut-projectors-background` and :ref:`tut-artifact-ssp`.
raw.plot_projs_topomap(colorbar=True)
###############################################################################
# .. LINKS
#
# .. _spectral density: https://en.wikipedia.org/wiki/Spectral_density
| bsd-3-clause |
SpatialTranscriptomicsResearch/st_analysis | scripts/merge_replicates.py | 1 | 2837 | #! /usr/bin/env python
"""
This script merges two ST datasets (technical replicates
from the same individual).
It keeps only the genes that are in both datasets
(summing their counts or averaging them).
Assumes that both matrices have the same order of genes and spots
and that the spots of both datasets are located in the same part of the tissue (aligned).
The spots coordinates of the merged dataset will be the ones present in the first
dataset.
merge_replicates.py --input-files datasetA.tsv datasetB.tsv --outfile merged.tsv
@Author Jose Fernandez Navarro <jose.fernandez.navarro@scilifelab.se>
"""
import argparse
import sys
import os
import pandas as pd
from stanalysis.preprocessing import merge_datasets
def main(input_files, outfile, merging_action):
if len(input_files) != 2 or any([not os.path.isfile(f) for f in input_files]):
sys.stderr.write("Error, input file not present or invalid format\n")
sys.exit(1)
if not outfile:
outfile = "merged.tsv"
# Read the data frames (genes as columns)
counts_tableA = pd.read_table(input_files[0], sep="\t", header=0, index_col=0)
counts_tableB = pd.read_table(input_files[1], sep="\t", header=0, index_col=0)
    num_spotsA = len(counts_tableA.index)
    num_spotsB = len(counts_tableB.index)
    num_genesA = len(counts_tableA.columns)
    num_genesB = len(counts_tableB.columns)
    if num_spotsA != num_spotsB:
        sys.stderr.write("Error, datasets have different number of spots "
                         "{} and {}\n".format(num_spotsA, num_spotsB))
        sys.exit(1)
    print("Merging dataset {} with {} spots and {} genes with "
          "dataset {} with {} spots and {} genes".format(input_files[0], num_spotsA, num_genesA,
                                                         input_files[1], num_spotsB, num_genesB))
# Merge the two datasets
merged_table = merge_datasets(counts_tableA, counts_tableB, merging_action)
# Write merged table
merged_table.to_csv(outfile, sep='\t')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--input-files", required=True, nargs='+', type=str,
help="Two ST datasets (matrix of counts in TSV format)")
parser.add_argument("--outfile", help="Name of the output file")
parser.add_argument("--merging-action", default="Sum", metavar="[STR]",
type=str, choices=["Sum", "Median"],
help="How to merge the counts of common genes in both datasets.\n"
"Sum will sum the counts of both and Median will sum the counts and "
"divided by 2 (default: %(default)s).")
args = parser.parse_args()
main(args.input_files, args.outfile, args.merging_action)
| mit |
dialounke/pylayers | pylayers/network/examples/ex_network.py | 3 | 3774 | from pylayers.gis.layout import *
from pylayers.mobility.agent import *
from pylayers.network.network import *
from pylayers.network.emsolver import *
import matplotlib.pyplot as plt
# ## Layout Creation
#
# First we need to load a layout structure.
#
# A place where nodes from a network can move and communicate
# <codecell>
L=Layout('TA-Office.ini')
try:
L.dumpr() # build 's'tructure, 't'opological and 'r'oom graphs
except:
L.build()
L.dumpw()
fig,ax=L.showG('s')
# <markdowncell>
# ## Network Creation
#
# Now we must instantiate an empty Network.
#
# A Network gathers all the radio access technologies (RATs) of all agents.
# The Network class inherits from networkx.
# <codecell>
N=Network()
# <markdowncell>
# ## Agents Creation
#
# Then we need to instantiate agents (nodes) into that layout.
# An agent is either a moving person or a static access point, and it has one or more RATs.
#
# - For this tutorial we create 3 agents in rooms **0, 3 and 5** respectively.
# - We assume they all communicate on the same, unique RAT: **'rat1'**
# - The agents will evolve within the previously created network **N**.
#
# <codecell>
Ag=[]
nb_agent = 3
room_init=[0,3,5]
# pdb.set_trace()  # debugging leftover disabled: pdb is not imported and the trace would halt the example
for na in range(nb_agent):
Ag.append(
Agent(ID = na,
Layout = L,
net = N,
roomId = room_init[na],
RAT=['rat1']
)
)
# <markdowncell>
# ## Connection Creation
#
# Now we can create the Network, that is, we connect all agents together according to their common RATs.
# <codecell>
N.create()
# show the Layout
fig2,ax2=L.showG('')
# show the network layer
N.show(fig = fig2, legend = True)
# <markdowncell>
# ## Nodes and edges
# <markdowncell>
# Because Network inherits from networkx, you can query information about nodes and edges as shown below.
# <markdowncell>
# A node is a dictionary which contains the following keys:
#
# * 'PN' : Its Personal Network (described in the following)
# * 'RAT' : A list of the RATs to which it belongs
# * 'p' : Its true position
# * 'pe' : Its estimated position, if it has computed one by itself (cf. the location tutorial - IN CONSTRUCTION -)
# * 't' : A time stamp
# * 'type': Its type ('ag' for agent or 'ap' for access point)
#
# example with node '0'
# <codecell>
N.node[0]
# <markdowncell>
# An edge is a dictionary of dictionaries.
# Each RAT is a key of the first dictionary.
# Each location-dependent parameter (LDP) is a key of the second.
#
# example with edge 0-1
# <codecell>
N.edge[0][1]
# <markdowncell>
# The LDP values are still empty, because no location-dependent parameters have been computed yet.
# <markdowncell>
# # Compute Location dependent parameters (LDPs)
#
# LDPs are radio measurements between agents. They can be either:
#
# 1. Time of Arrival (**TOA**)
# 2. Received Power (**Pr**)
# <markdowncell>
# ## EMS initialization
#
# We first initialize the electromagnetic solver (EMS) for the given Network N.
# We must give it a Layout structure in order to be able to compute accurate LDPs.
#
# .. note:: this could also have been done during the Network instantiation
# <codecell>
N.EMS=EMSolver(L)
# <markdowncell>
# ## Computation
#
# Then we compute the *TOA* and *received power* dependent parameters, and display them for link 0-1
# <codecell>
for ldp in ['Pr','TOA']:
N.compute_LDPs(N.nodes(),RAT='rat1',LDP=ldp)
N.edge[0][1]
# <markdowncell>
# # tests on PN
# <codecell>
N.update_PN()
N.node[0]['PN'].node[0]['pe']=np.array((4,4))
N.node[0]['PN'].node[1]['pe']=np.array((8,8))
N.node[0]['PN'].node[2]['pe']=np.array((30,8))
fig3,ax3=L.showG('')
# show the network layer
N.node[0]['PN'].show(fig = fig3, legend = True)
# <codecell>
plt.show()
| mit |
arjoly/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
liberatorqjw/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
paultopia/auto-sklearn | autosklearn/data/data_manager.py | 5 | 3374 | import numpy as np
import scipy.sparse
from ParamSklearn.implementations.OneHotEncoder import OneHotEncoder
from autosklearn.data import util as data_util
class DataManager(object):
def __init__(self):
self._data = dict()
self._info = dict()
@property
def data(self):
return self._data
@property
def info(self):
return self._info
def perform1HotEncoding(self):
if not hasattr(self, "data"):
raise ValueError("perform1HotEncoding can only be called when "
"data is loaded")
if hasattr(self, "encoder_"):
raise ValueError("perform1HotEncoding can only be called on "
"non-encoded data.")
self.encoder_ = None
sparse = True if self.info['is_sparse'] == 1 else False
has_missing = True if self.info['has_missing'] else False
to_encode = ['categorical']
if has_missing:
to_encode += ['binary']
encoding_mask = [feat_type.lower() in to_encode
for feat_type in self.feat_type]
categorical = [True if feat_type.lower() == 'categorical' else False
for feat_type in self.feat_type]
predicted_RAM_usage = float(
data_util.predict_RAM_usage(
self.data['X_train'], categorical)) / 1024 / 1024
if predicted_RAM_usage > 1000:
sparse = True
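            # Descriptive note (added): the division by 1024*1024 above suggests the
            # estimate is in MB, so values over ~1000 (about 1 GB) force a sparse
            # one-hot encoded representation to keep memory usage manageable.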
if any(encoding_mask):
encoder = OneHotEncoder(categorical_features=encoding_mask,
dtype=np.float32, sparse=sparse)
self.data['X_train'] = encoder.fit_transform(self.data['X_train'])
if 'X_valid' in self.data:
self.data['X_valid'] = encoder.transform(self.data['X_valid'])
if 'X_test' in self.data:
self.data['X_test'] = encoder.transform(self.data['X_test'])
if not sparse and scipy.sparse.issparse(self.data['X_train']):
self.data['X_train'] = self.data['X_train'].todense()
if 'X_valid' in self.data:
self.data['X_valid'] = self.data['X_valid'].todense()
if 'X_test' in self.data:
self.data['X_test'] = self.data['X_test'].todense()
self.encoder_ = encoder
self.info['is_sparse'] = 1 if sparse else 0
def __repr__(self):
return "DataManager : " + self.basename
def __str__(self):
val = "DataManager : " + self.basename + "\ninfo:\n"
for item in self.info:
val = val + "\t" + item + " = " + str(self.info[item]) + "\n"
val = val + "data:\n"
for subset in self.data:
val = val + "\t%s = %s %s %s\n" % (subset, type(self.data[subset]), \
str(self.data[subset].shape),
str(self.data[subset].dtype))
if isinstance(self.data[subset], scipy.sparse.spmatrix):
val = val + "\tdensity: %f\n" % \
(float(len(self.data[subset].data)) /
self.data[subset].shape[0] /
self.data[subset].shape[1])
val = val + "feat_type:\tarray" + str(self.feat_type.shape) + "\n"
return val | bsd-3-clause |
mtwharmby/lucky | Lucky/src_Mike_GUI_Total/trials/PreLucky_variant.py | 1 | 6919 | import math
pi = math.pi
##from math import pi
##Constants:
h=6.626*10**(-34)
c=3*10**8
Kb=1.38*10**(-23)
##Session where I define all the functions needed
def Planck(x,e,T):
    # np.expm1 already returns exp(.) - 1, i.e. the Planck denominator
    a=np.expm1(0.0144043/(x*10**(-9))/T)
    P=e/(x*10**(-9))**5*3.74691*10**(-16)/a
    return P
#Defined Wien function
def Wien(Int,x):
W=Kb/h/c*np.log((x*10**(-9))**5*Int/2/pi/h/c**2)
return W
#Defined two-colour function
def TwoCol(Int,x):
count=len(x)
delta=200
k=count-delta
TTwo=[]*count
#while (i<k):
for i in range (0,k):#(0,count-1):
f1=1/(x[i]*10**(-9))
f2=1/(x[i+delta]*10**(-9))
i1=np.log(Int[i]/2/pi/h/c**2/f1**5)*Kb/h/c
i2=np.log(Int[i+delta]/2/pi/h/c**2/f2**5)*Kb/h/c
TTwo.append(abs((f2-f1)/(i2-i1)))
for i in range (k,count):
a = float('nan')
TTwo.append(a)
return TTwo
#Defined linear fit for Wien function
def FWien(x,e,T):
a=1/T
b=Kb/h/c*np.log(e)
W=b-a*x
return W
#Defined Gauss fit
def gaus(x, a, x0, sigma):
return np.real(a*np.exp(-(x-x0)**2/(2*sigma**2)))
#Program that loads a (raw) temperature file, normalizes it and
#plots it in order to obtain my Planck function
from scipy.optimize import curve_fit
import numpy as np
import matplotlib.pyplot as plt
import sys
#standard integration range(i) 315-->1146
start = 315
end= 800
#Session where I load the file I need from the folder where the software is
x,y=np.loadtxt('../../test/Lucky/testData/T_62_1.txt',unpack=True) ##Raw file
xC,yC=np.loadtxt('../../test/Lucky/testData/Calib.txt',unpack=True) ##Calib file
#Session with all the normalization for the collected data
P=Planck(x,1,2436) ##Ideal Planck
#TPlanck ID24 =2436
#TPlanck I15 = 3200
Norm=y/yC*P #Normalization file
invX=1/x*10**9 #Inverse of wavelength for Wien function
W=Wien(Norm,x)#Define Wien function
#test=W
#for w in W:
# test[numpy.isnan(w)]=[]
Two=TwoCol(Norm,x)
#array = np.array(a,dtype='float')
Two2=np.array(Two,dtype='float')
TwoInt=Two2[start:end]
bins=range(1000,3000,1)
hist=np.histogram(TwoInt,bins,density=False)
freq=np.array(hist[0])
control=len(hist[1])-1
value=np.array(np.delete(hist[1],control,0))
#del value[-1]
#xhist=numpy.histogram(x,density=False)
#Session where we save in the same folder .txt files with the new data
np.savetxt('TwoCol.txt',Two)
np.savetxt('Norm.txt',Norm)
np.savetxt('Wien.txt',W)
np.savetxt('frequence.txt',hist[0])
#Session dedicated to all the needed fits
#Planck fit
p0=[1,2000]
#Fit Planck in the range [start:end]
bestP,covarP = curve_fit(Planck, x[start:end], Norm[start:end], p0)
TP=round(bestP[1],2)#Save Planck Temperature
eP=bestP[0]#Save planck Emissivity
xp=x[start:end]
FP=Planck(xp,eP,TP)#Create the new Planck with the fit parameters
PRes=abs(Norm[start:end]-FP)#Planck Residual
#Wien fit
invX1=invX[start:end]
W1=W[start:end]
#Fit Wien and check that there are no inf or nan arguments in the fit
bestW,covarW = curve_fit(FWien,invX1[(np.isfinite(W1))],W1[(np.isfinite(W1))],p0=[1,TP])
Residual=W1-FWien(invX1[(np.isfinite(W1))],*bestW)
#Save Wien temperature
TW=round(bestW[1])
#Gaussian fit to the histogram two-colours
popt,pcov = curve_fit(gaus,value,freq,p0=[1000,TP,100])
Thist=round(popt[1],2)#Save Histogram temperature
errTot=round(popt[2])#Save histogram FWHM
#create a file with all the results needed
Results=(TP,errTot,TW,abs(((TP-TW)/TP*100)),Thist)
np.savetxt('Results.txt',Results,newline='\n',header="Name Planck STD Wien errPerc Hist")
print 'Planck:',TP,'K ','+-',errTot,'K ','Wien:',TW,'K','Hist:',Thist,'K'
#Defines the method for plotting all the graphs
def plots(x,y,yC,Norm,xp,FP,PFrom,PTo,invX,W,invX1,bestW,Two2,value,freq,popt,TwoInt,Residual,TP):
fig=plt.figure(figsize=(8,11))#Defines dimension of the figure
#Adding subplots to show
ax1 = fig.add_subplot(3, 2, 1)
ax2 = fig.add_subplot(3, 2, 2)
ax3 = fig.add_subplot(3, 2, 3)
ax4 = fig.add_subplot(3, 2, 4)
ax5 = fig.add_subplot(3, 2, 5)
plt.subplots_adjust(wspace=0.3,hspace=0.3)
#Raw and calibration data subgraph
ax1.plot(x, y, x, yC,'red')
ax1.set_title('Raw vs Calib data')
ax1.set_xlabel('wavelength (nm)')
ax1.set_ylim(0,50000)
ax1.grid(True, linestyle='-')
ticklines = ax1.get_xticklines()
ticklines.extend( ax1.get_yticklines() )
gridlines = ax1.get_xgridlines()
gridlines.extend( ax1.get_ygridlines() )
ticklabels = ax1.get_xticklabels()
ticklabels.extend( ax1.get_yticklabels() )
# for line in ticklines:
# line.set_linewidth(3)
#
# for line in gridlines:
# line.set_linestyle('-')
#
# for label in ticklabels:
# label.set_color('black')
# label.set_fontsize('medium')
# txt=plt.text(4500,33,TP)
# txt1=plt.text(4200,33,'T=')
# txt2=plt.text(2000,17,TW)
# txt3=plt.text(1800,17,'T=')
# txt.set_size(15)
# txt1.set_size(15)
# txt2.set_size(15)
# txt3.set_size(15)
# fig.canvas.draw()
#Planck subgraph
ax2.plot(x, Norm, xp, FP,'red')
ax2.set_title('Planck')
ax2.set_xlabel('wavelength (nm)')
ax2.set_xlim(PFrom,PTo)
ax2.set_yticks([])
#ax2.grid(True)
def on_button_press(event):
#print dir(event)
#print "BADGER"
#print "Button:", event.button
#print "Figure coordinates:", event.x, event.y
print "Data coordinates:", event.xdata, event.ydata
#start=event.xdata
sys.stdout.flush()
#Wien subgraph
ax3.plot(invX,W,invX1,FWien(invX1,*bestW),'red',invX1,Residual)
ax3.set_title('Wien')
ax3.set_xlabel('1/wavelength (1/m)')
ax3.set_ylabel("Wien function")
ax3.set_xlim(10**9/PTo,10**9/PFrom)
ax3.set_yticks([])
#ax3.grid(True)
#Two Colours subgraph
ax4.plot(x,Two2,x[start:end],TwoInt,'red')
ax4.set_title('Sliding Two-Colours')
ax4.set_xlabel('wavelength (nm)')
ax4.set_ylabel('T (K)')
ax4.set_xlim(PFrom,PTo)
ax4.grid(True)
ticklines4 = ax4.get_xticklines()
ticklines4.extend( ax4.get_yticklines() )
gridlines4 = ax4.get_xgridlines()
gridlines4.extend( ax4.get_ygridlines() )
ticklabels4 = ax4.get_xticklabels()
ticklabels4.extend( ax4.get_yticklabels() )
for line in ticklines4:
line.set_linewidth(3)
for line in gridlines4:
line.set_linestyle('-')
for label in ticklabels4:
label.set_color('black')
label.set_fontsize('medium')
#Histogram subgraph
ax5.plot(value,freq,value,gaus(value,*popt),'red')
ax5.set_title('Histogram')
ax5.set_xlabel('T(K)')
ax5.set_ylabel('# Counts')
#pylab.show() #it plots everything
fig.canvas.mpl_connect('button_press_event', on_button_press)
plt.show()
#Plot range
PFrom=500
PTo=1000
plots(x,y,yC,Norm,xp,FP,PFrom,PTo,invX,W,invX1,bestW,Two2,value,freq,popt,TwoInt,Residual,TP) | apache-2.0 |
rizkiarm/LipNet | evaluation/confusion.py | 1 | 4493 | import nltk
import sys
import string
import os
import numpy as np
from sklearn import metrics
import matplotlib.pyplot as plt
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
# print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
arpabet = nltk.corpus.cmudict.dict()
def get_phonemes(word):
return [str(phoneme).translate(None, string.digits) for phoneme in arpabet[word][0]]
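# For example (assuming the standard CMUdict entry for the word), get_phonemes('bin')
# would return ['B', 'IH', 'N'] with the stress digits stripped.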
with open('phonemes.txt') as f:
labels = f.read().splitlines()
V1 = labels[0:7]
V2 = labels[7:10]
V3 = labels[10:14]
V4 = labels[14:17]
A = labels[17:21]
B = labels[21:23]
C = labels[23:27]
D = labels[27:31]
E = labels[31:34]
F = labels[34:36]
G = labels[36:38]
H = labels[38:42]
SCENARIOS = [
('Phonemes', labels),
('Lip-rounding based vowels', V1+V2+V3+V4),
('Alveolar-semivowels', A),
('Alveolar-fricatives', B),
('Alveolar', C),
('Palato-alveolar', D),
('Bilabial', E),
('Dental', F),
('Labio-dental', G),
('Velar', H)
]
def get_viseme(word):
phonemes = get_phonemes(word)
visemes = []
for phoneme in phonemes:
if phoneme in V1+V2+V3+V4:
visemes.append('V')
elif phoneme in A:
visemes.append('A')
elif phoneme in B:
visemes.append('B')
elif phoneme in C:
visemes.append('C')
elif phoneme in D:
visemes.append('D')
elif phoneme in E:
visemes.append('E')
elif phoneme in F:
visemes.append('F')
elif phoneme in G:
visemes.append('G')
elif phoneme in H:
visemes.append('H')
return visemes
def get_confusion_matrix(y_true, y_pred, labels, func):
# confusion_matrix = np.identity(len(labels))
confusion_matrix = np.zeros((len(labels),len(labels)))
for i in range(0,len(y_true)):
words_true = y_true[i].split(" ")
words_pred = y_pred[i].split(" ")
for j in range(0, len(words_true)):
phonemes_true = func(words_true[j])
phonemes_pred = func(words_pred[j])
max_length = min(len(phonemes_true),len(phonemes_pred))
phonemes_true = phonemes_true[:max_length]
phonemes_pred = phonemes_pred[:max_length]
try:
confusion_matrix = np.add(
confusion_matrix,
metrics.confusion_matrix(phonemes_true, phonemes_pred, labels=labels)
)
except:
continue
return confusion_matrix
y_true_path = sys.argv[1]
y_pred_path = sys.argv[2]
with open(y_true_path) as f:
y_true_r = f.read().splitlines()
with open(y_pred_path) as f:
y_pred_r = f.read().splitlines()
y_true = []
y_pred = []
for i in range(0,len(y_true_r)):
if y_true_r[i] in y_true:
continue
y_true.append(y_true_r[i])
y_pred.append(y_pred_r[i])
for k in range(0, len(SCENARIOS)):
_name = SCENARIOS[k][0]
_labels = SCENARIOS[k][1]
confusion_matrix = get_confusion_matrix(y_true,y_pred,_labels,get_phonemes)
plt.figure()
plot_confusion_matrix(confusion_matrix, classes=_labels, normalize=True,
title=_name)
# plt.show()
savepath = os.path.join('confusions', _name + '.png')
print savepath
plt.savefig(savepath, bbox_inches='tight')
# INTRA-VISEMES
viseme_name = 'Intra-visemes'
viseme_labels = ['V', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
confusion_matrix = get_confusion_matrix(y_true,y_pred,viseme_labels,get_viseme)
plt.figure()
plot_confusion_matrix(confusion_matrix, classes=viseme_labels, normalize=True,
title=viseme_name)
# plt.show()
savepath = os.path.join('confusions', viseme_name + '.png')
print savepath
plt.savefig(savepath, bbox_inches='tight') | mit |
james4424/nest-simulator | pynest/examples/intrinsic_currents_subthreshold.py | 4 | 7182 | # -*- coding: utf-8 -*-
#
# intrinsic_currents_subthreshold.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Intrinsic currents subthreshold
-------------------------------
This example illustrates how to record from a model with multiple
intrinsic currents and visualize the results. This is illustrated
using the `ht_neuron` which has four intrinsic currents: I_NaP,
I_KNa, I_T, and I_h. It is a slightly simplified implementation of
neuron model proposed in Hill and Tononi (2005) **Modeling Sleep
and Wakefulness in the Thalamocortical System** *J Neurophysiol* 93:1671
http://dx.doi.org/10.1152/jn.00915.2004 .
The neuron is driven by DC current, which is alternated
between depolarizing and hyperpolarizing. Hyperpolarization
intervals become increasingly longer.
See also: intrinsic_currents_spiking.py
'''
'''
We imported all necessary modules for simulation, analysis and
plotting.
'''
import nest
import numpy as np
import matplotlib.pyplot as plt
'''
Additionally, we set the verbosity using `set_verbosity` to
suppress info messages. We also reset the kernel to be sure to start
with a clean NEST.
'''
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
'''
We define simulation parameters:
- The length of depolarization intervals
- The length of hyperpolarization intervals
- The amplitude for de- and hyperpolarizing currents
- The end of the time window to plot
'''
n_blocks = 5
t_block = 20.
t_dep = [t_block] * n_blocks
t_hyp = [t_block * 2 ** n for n in range(n_blocks)]
I_dep = 10.
I_hyp = -5.
t_end = 500.
'''
We create the one neuron instance and the DC current generator
and store the returned handles.
'''
nrn = nest.Create('ht_neuron')
dc = nest.Create('dc_generator')
'''
We create a multimeter to record
- membrane potential `V_m`
- threshold value `Theta`
- intrinsic currents `I_NaP`, `I_KNa`, `I_T`, `I_h`
by passing these names in the `record_from` list.
To find out which quantities can be recorded from a given neuron,
run::
nest.GetDefaults('ht_neuron')['recordables']
The result will contain an entry like::
<SLILiteral: V_m>
for each recordable quantity. You need to pass the value of the `SLILiteral`,
in this case `V_m` in the `record_from` list.
We want to record values with 0.1 ms resolution, so we set the
recording interval as well; the default recording resolution is 1 ms.
'''
# create multimeter and configure it to record all information
# we want at 0.1ms resolution
mm = nest.Create('multimeter',
params={'interval': 0.1,
'record_from': ['V_m', 'Theta',
'I_NaP', 'I_KNa', 'I_T', 'I_h']}
)
'''
We connect the DC generator and the multimeter to the neuron.
Note that the multimeter, just like the voltmeter is connected
to the neuron, not the neuron to the multimeter.
'''
nest.Connect(dc, nrn)
nest.Connect(mm, nrn)
'''
We are ready to simulate. We alternate between driving the neuron
with depolarizing and hyperpolarizing currents. Before each simulation
interval, we set the amplitude of the DC generator to the correct value.
'''
for t_sim_dep, t_sim_hyp in zip(t_dep, t_hyp):
nest.SetStatus(dc, {'amplitude': I_dep})
nest.Simulate(t_sim_dep)
nest.SetStatus(dc, {'amplitude': I_hyp})
nest.Simulate(t_sim_hyp)
'''
We now fetch the data recorded by the multimeter. The data are
returned as a dictionary with entry ``'times'`` containing timestamps
for all recorded data, plus one entry per recorded quantity.
All data is contained in the ``'events'`` entry of the status dictionary
returned by the multimeter. Because all NEST function return arrays,
we need to pick out element ``0`` from the result of `GetStatus`.
'''
data = nest.GetStatus(mm)[0]['events']
t = data['times']
'''
The next step is to plot the results. We create a new figure, add a
single subplot and plot at first membrane potential and threshold.
'''
fig = plt.figure()
Vax = fig.add_subplot(111)
Vax.plot(t, data['V_m'], 'b-', lw=2, label=r'$V_m$')
Vax.plot(t, data['Theta'], 'g-', lw=2, label=r'$\Theta$')
Vax.set_ylim(-80., 0.)
Vax.set_ylabel('Voltage [mV]')
Vax.set_xlabel('Time [ms]')
'''
To plot the input current, we need to create an input
current trace. We construct it from the durations of the de- and
hyperpolarizing inputs and add the delay in the connection between
DC generator and neuron:
1. We find the delay by checking the status of the dc->nrn connection.
2. We find the resolution of the simulation from the kernel status.
3. Each current interval begins one time step after the previous interval,
   is delayed by the delay and effective for the given duration.
4. We build the time axis incrementally. We only add the delay when adding
   the first time point after t=0. All subsequent points are then automatically
   shifted by the delay.
'''
delay = nest.GetStatus(nest.GetConnections(dc, nrn))[0]['delay']
dt = nest.GetKernelStatus('resolution')
t_dc, I_dc = [0], [0]
for td, th in zip(t_dep, t_hyp):
t_prev = t_dc[-1]
t_start_dep = t_prev + dt if t_prev > 0 else t_prev + dt + delay
t_end_dep = t_start_dep + td
t_start_hyp = t_end_dep + dt
t_end_hyp = t_start_hyp + th
t_dc.extend([t_start_dep, t_end_dep, t_start_hyp, t_end_hyp])
I_dc.extend([I_dep, I_dep, I_hyp, I_hyp])
'''
The following function turns a name such as I_NaP into proper TeX code
$I_{\mathrm{NaP}}$ for a pretty label.
'''
def texify_name(name):
return r'${}_{{\mathrm{{{}}}}}$'.format(*name.split('_'))
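# For instance, texify_name('I_NaP') returns r'$I_{\mathrm{NaP}}$' (an added
# illustration of the transformation described in the comment block above).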
'''
Next, we add a right vertical axis and plot the currents with respect
to that axis.
'''
Iax = Vax.twinx()
Iax.plot(t_dc, I_dc, 'k-', lw=2, label=texify_name('I_DC'))
for iname, color in (('I_h', 'maroon'), ('I_T', 'orange'),
('I_NaP', 'crimson'), ('I_KNa', 'aqua')):
Iax.plot(t, data[iname], color=color, lw=2, label=texify_name(iname))
Iax.set_xlim(0, t_end)
Iax.set_ylim(-10., 15.)
Iax.set_ylabel('Current [pA]')
Iax.set_title('ht_neuron driven by DC current')
'''
We need to make a little extra effort to combine lines from the two axis
into one legend.
'''
lines_V, labels_V = Vax.get_legend_handles_labels()
lines_I, labels_I = Iax.get_legend_handles_labels()
try:
Iax.legend(lines_V + lines_I, labels_V + labels_I, fontsize='small')
except TypeError:
# work-around for older Matplotlib versions
Iax.legend(lines_V + lines_I, labels_V + labels_I)
'''
Note that I_KNa is not activated in this example because the neuron does
not spike. I_T has only a very small amplitude.
'''
| gpl-2.0 |
anisyonk/pilot | Experiment.py | 3 | 37135 | # Class definition:
# Experiment
# This class is the main experiment class; ATLAS etc will inherit from this class
# Instances are generated with ExperimentFactory
# Subclasses should implement all needed methods prototyped in this class
# Note: not compatible with Singleton Design Pattern due to the subclassing
import os
import re
import time
import commands
from subprocess import Popen, PIPE
from PilotErrors import PilotErrors
from pUtil import tolog # Dump to pilot log
from pUtil import readpar # Used to read values from the schedconfig DB (queuedata)
from pUtil import getCmtconfig # cmtconfig (move to subclass)
from pUtil import getDirectAccessDic # Get the direct access dictionary
from pUtil import isBuildJob # Is the current job a build job?
from pUtil import remove # Used to remove redundant file before log file creation
from pUtil import getPilotlogFilename # Used in the subprocess arguments method
from pUtil import extractHPCInfo            # Used by getSubprocessName() to determine HPC plug-in if necessary
from glob import glob                       # Needed by removeRedundantFiles() below
class Experiment(object):
# experiment = "generic" # String defining the experiment
# private data members
__experiment = "generic" # String defining the experiment
__instance = None # Boolean used by subclasses to become a Singleton
__error = PilotErrors() # PilotErrors object
__doFileLookups = False # True for LFC based file lookups (basically a dummy data member here since singleton object is static)
__cache = "" # Cache URL used e.g. by LSST
# Required methods
def __init__(self, *args, **kwargs):
""" Default initialization """
# e.g. self.__errorLabel = errorLabel
# self.experiment = kwargs.get('experiment')
pass
def getExperiment(self):
""" Return a string with the experiment name """
# return self.experiment
return self.__experiment
def getJobExecutionCommand(self):
""" Define and test the command(s) that will be used to execute the payload """
# E.g. cmd = "source <path>/setup.sh; <path>/python <script>"
cmd = ""
return cmd
def getFileLookups(self):
""" Return the file lookup boolean """
return self.__doFileLookups
def doFileLookups(self, doFileLookups):
""" Update the file lookups boolean """
# Only implement this method if class really wants to update the __doFileLookups boolean
# ATLAS wants to implement this, but not CMS
# Method is used by Mover
# self.__doFileLookups = doFileLookups
pass
def willDoAlternativeFileLookups(self):
""" Should file lookups be done using alternative methods? """
# E.g. in the migration period where LFC lookups are halted in favour of other methods in the Rucio API
# (for ATLAS), this method could be useful. See the usage in Mover::getReplicaDictionary() which is called
# after Experiment::willDoFileLookups() defined above. The motivation is that direct LFC calls are not to be
# used any longer by the pilot, and in the migration period the actual LFC calls will be done in the Rucio
# API. Eventually this API will switch to alternative file lookups.
return False
def willDoFileLookups(self):
""" Should (LFC) file lookups be done by the pilot or not? """
return self.__doFileLookups
def willDoFileRegistration(self):
""" Should (LFC) file registration be done by the pilot or not? """
return False
def getFileCatalog(self):
""" Return the default file catalog to use (e.g. for replica lookups) """
# See usage in Mover.py
# e.g. 'lfc://prod-lfc-atlas.cern.ch:/grid/atlas'
return ""
# Additional optional methods
# These methods are optional and can be left as they are here, or modified according to special needs
def verifyProxy(self, envsetup="", limit=None):
""" Check for a valid voms/grid proxy longer than N hours """
# Use 'limit' to set required length
tolog("(verifyProxy() is not implemented)")
exitcode = 0
pilotErrorDiag = ""
return exitcode, pilotErrorDiag
def removeRedundantFiles(self, workdir):
""" Remove redundant files and directories """
# List of files and directories to be removed from work directory prior to log file creation
# Make sure that any large files or directories that are not wanted in the log file are included in this list
dir_list = [
"buildJob*",
"external",
"fort.*",
"home",
"python",
"share",
"workdir",
"*.py",
"*.pyc",
"*.root*",
"JEM",
"tmp*",
"*.tmp",
"*.TMP",
"scratch",
]
for _dir in dir_list:
files = glob(os.path.join(workdir, _dir))
rc = remove(files)
if not rc:
tolog("IGNORE: Failed to remove redundant file(s): %s" % (files))
def getPayloadName(self, job):
""" Set a suitable name for the payload stdout """
# The payload <name> gets translated into <name>_stdout.txt
# which is the name of the stdout file produced by the payload execution
# (essentially commands.getoutput("<setup>; <payload executable> [options] > <name>_stdout.txt"))
# The job object can be used to create more precise stdout names (see e.g. the ATLASExperiment implementation)
return "payload"
def isOutOfMemory(self, **kwargs):
""" Try to identify out of memory errors in the stderr/out """
return False
def getNumberOfEvents(self, **kwargs):
""" Return the number of events """
return 0
def specialChecks(self, **kwargs):
""" Implement special checks here """
# Return False if fatal failure, otherwise return True
# The pilot will abort if this method returns a False
# On an HPC system, it might be good to skip certain checks (e.g. CVMFS, LFC, etc). Refer to schedconfig.resourcetype, set to 'hpc' on an HPC queue
status = False
tolog("No special checks for \'%s\'" % (self.experiment))
return True # obviously change this to 'status' once implemented
def checkSpecialEnvVars(self, sitename):
""" Check special environment variables """
ec = 0
tolog("No special env var checks for site %s" % (sitename))
return ec
def setINDS(self, realDatasetsIn):
""" Extract the dataset as set by pathena option --inDS and set the INDS environmental variable """
# Needed by pathena (move to ATLASExperiment later)
inDS = ""
for ds in realDatasetsIn:
if "DBRelease" not in ds and ".lib." not in ds:
inDS = ds
break
if inDS != "":
tolog("Setting INDS env variable to: %s" % (inDS))
os.environ['INDS'] = inDS
else:
tolog("INDS unknown")
def getValidBaseURLs(self, order=None):
""" Return list of valid base URLs """
# if order is defined, return given item first
# e.g. order=http://atlpan.web.cern.ch/atlpan -> ['http://atlpan.web.cern.ch/atlpan', ...]
validBaseURLs = []
_validBaseURLs = ["http://www.usatlas.bnl.gov",\
"https://www.usatlas.bnl.gov",\
"http://pandaserver.cern.ch",\
"http://atlpan.web.cern.ch/atlpan",\
"https://atlpan.web.cern.ch/atlpan",\
"http://classis01.roma1.infn.it",\
"http://atlas-install.roma1.infn.it"]
if order:
validBaseURLs.append(order)
for url in _validBaseURLs:
if url != order:
validBaseURLs.append(url)
else:
validBaseURLs = _validBaseURLs
tolog("getValidBaseURLs will return: %s" % str(validBaseURLs))
return validBaseURLs
def downloadTrf(self, wgetCommand, jobTrf):
""" Download the trf """
status = False
pilotErrorDiag = ""
cmd = "%s %s" % (wgetCommand, jobTrf)
trial = 1
max_trials = 3
# try to download the trf a maximum of 3 times
while trial <= max_trials:
tolog("Executing command [Trial %d/%d]: %s" % (trial, max_trials, cmd))
ec, rets = commands.getstatusoutput(cmd)
if not rets:
rets = "(None)"
if ec != 0:
# Analyze exit code / output
from futil import check_syserr
check_syserr(ec, rets)
pilotErrorDiag = "wget command failed: %d, %s" % (ec, rets)
tolog("!!WARNING!!3000!! %s" % (pilotErrorDiag))
if trial == max_trials:
tolog("!!FAILED!!3000!! Could not download trf: %s" % (rets))
status = False
break
else:
tolog("Will try again after 60s..")
from time import sleep
sleep(60)
else:
pilotErrorDiag = ""
tolog("wget command returned: %s" % (rets))
status = True
break
trial += 1
return status, pilotErrorDiag
def getAnalysisTrf(self, wgetCommand, origTRF, pilot_initdir):
""" Get the trf to be used for analysis jobs """
pilotErrorDiag = ""
trfName = origTRF.split('/')[-1]
tolog("trfName = %s" % (trfName))
origBaseURL = ""
# Copy trf from pilot init dir if distributed with pilot code
fname = os.path.join(pilot_initdir, trfName)
status = False
if os.path.exists(fname):
from shutil import copy2
try:
copy2(fname, os.getcwd())
except Exception, e:
tolog("!!WARNING!!2999!! Could not copy trf from pilot init dir: %s" % str(e))
else:
tolog("Copied trf (%s) from pilot init dir" % (fname))
status = True
# Download trf
if not status:
# verify the base URL
for baseURL in self.getValidBaseURLs():
if origTRF.startswith(baseURL):
origBaseURL = baseURL
break
if origBaseURL == "":
pilotErrorDiag = "Invalid base URL: %s" % (origTRF)
return self.__error.ERR_TRFDOWNLOAD, pilotErrorDiag, ""
else:
tolog("Verified the trf base URL: %s" % (origBaseURL))
# try to download from the required location, if not - switch to backup
for baseURL in self.getValidBaseURLs(order=origBaseURL):
trf = re.sub(origBaseURL, baseURL, origTRF)
tolog("Attempting to download trf: %s" % (trf))
status, pilotErrorDiag = self.downloadTrf(wgetCommand, trf)
if status:
break
if not status:
return self.__error.ERR_TRFDOWNLOAD, pilotErrorDiag, ""
tolog("Successfully downloaded trf")
tolog("Changing permission of %s to 0755" % (trfName))
try:
os.chmod(trfName, 0755)
except Exception, e:
pilotErrorDiag = "Failed to chmod %s: %s" % (trfName, str(e))
return self.__error.ERR_CHMODTRF, pilotErrorDiag, ""
return 0, pilotErrorDiag, trfName
def getAnalysisRunCommand(self, job, jobSite, trfName):
""" Get the run command for analysis jobs """
# The run command is used to setup up the user job transform
ec = 0
pilotErrorDiag = ""
run_command = ""
return ec, pilotErrorDiag, run_command
def getFileTransferInfo(self, transferType, buildJob):
""" Get all relevant fields related to file transfer """
copysetup = readpar('copysetupin')
# create the direct access dictionary
fileTransferInfo = getDirectAccessDic(copysetup)
# if copysetupin did not contain direct access info, try the copysetup instead
if not fileTransferInfo:
copysetup = readpar('copysetup')
fileTransferInfo = getDirectAccessDic(copysetup)
# should the copytool be used?
useCopyTool = False
useFileStager = False
useDirectAccess = False
oldPrefix = ""
newPrefix = ""
dInfo = None
if fileTransferInfo:
dInfo = True
# no direct access / remote I/O, use standard copytool (copy-to-scratch)
if fileTransferInfo['useCopyTool']:
useCopyTool = True
# do not set the LFC host for file stager
if fileTransferInfo['useFileStager']:
useFileStager = True
if fileTransferInfo['directIn']:
useDirectAccess = True
oldPrefix = fileTransferInfo['oldPrefix']
newPrefix = fileTransferInfo['newPrefix']
# override settings for transferType direct
if transferType == 'direct':
useCopyTool = False
useFileStager = False
useDirectAccess = True
# should pilot create TURL based PFC? (not done here, but setup needs to be aware of it)
# if dInfo and useDirectAccess and oldPrefix == "" and newPrefix == "":
if (transferType == 'direct' or (useFileStager and useDirectAccess)) and (oldPrefix == "" and newPrefix == "") and not buildJob:
# if (transferType == 'direct' or (not useFileStager and useDirectAccess)) and (oldPrefix == "" and newPrefix == ""):
usePFCTurl = True
else:
usePFCTurl = False
# force usePFCTurl for all jobs
if not buildJob and useDirectAccess:
tolog("Forced usePFCTurl (reset old/newPrefix)")
usePFCTurl = True
oldPrefix = ""
newPrefix = ""
if os.environ.get("TestXRootD", 'False') == 'True':
import re
            copysetup = re.sub(r'\/xrootdsetup\.sh', '/xrootdsetup-dev.sh', copysetup)
return dInfo, useCopyTool, useDirectAccess, useFileStager, oldPrefix, newPrefix, copysetup, usePFCTurl
def getGuidsFromJobPars(self, jobPars, inputFiles, inFilesGuids):
""" Extract the correct guid from the input file list """
# the guids list is used for direct reading in an LFC environment
# 1. extract input file list for direct reading from jobPars
# 2. for each input file in this list, find the corresponding guid from the input file guid list
# since jobPars is entered by a human, the order of the input files might not be the same
guidList = []
jobPars = jobPars.replace("'","")
jobPars = jobPars.replace(", ",",")
pattern = re.compile(r'\-i \"\[([A-Za-z0-9.,_-]+)\]\"')
directReadingInputFiles = re.findall(pattern, jobPars)
inFiles = []
if directReadingInputFiles != []:
inFiles = directReadingInputFiles[0].split(",")
else:
match = re.search("-i ([A-Za-z0-9.\[\],_-]+) ", jobPars)
if match != None:
compactInFiles = match.group(1)
match = re.search('(.*)\[(.+)\](.*)\[(.+)\]', compactInFiles)
if match != None:
inputFiles = []
head = match.group(1)
tail = match.group(3)
body = match.group(2).split(',')
attr = match.group(4).split(',')
for idx in range(len(body)):
lfn = '%s%s%s%s' % (head, body[idx], tail, attr[idx])
inputFiles.append(lfn)
else:
inputFiles = [compactInFiles]
if inFiles != []:
for inFile in inFiles:
# get the corresponding index from the inputFiles list, which has the same order as inFilesGuids
try:
index = inputFiles.index(inFile)
except Exception, e:
tolog("!!WARNING!!2999!! Exception caught: %s (direct reading will fail)" % str(e))
else:
# add the corresponding guid to the list
guidList.append(inFilesGuids[index])
return guidList
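# Illustrative sketch (hypothetical job parameters, added for clarity):
#   jobPars contains: -i "[AOD.01.pool.root,AOD.02.pool.root]"
#   inputFiles = ['AOD.01.pool.root', 'AOD.02.pool.root']
#   inFilesGuids = ['guid-1', 'guid-2']
# -> the method returns ['guid-1', 'guid-2'], i.e. the guids reordered to match the
#    direct-reading file list extracted from jobPars.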
def getMetadataForRegistration(self, guid):
""" Return metadata for [LFC] file registration """
# This method can insert special metadata into the metadata.xml file
# E.g. it can add preliminary XML tags for info that will only be known
# at a later time, such as "<metadata att_name="surl" att_value="%s-surltobeset"/>\n' % (guid)"
# The <guid>-surltobeset will be replaced by the pilot with the appropriate value once it is known
# Inputs:
# guid = file guid
# Returns:
# metadata string
# See e.g. the CMSExperiment implementation
# The method is called from pUtil::PFCxml() during metadata file creation
return ""
def getAttrForRegistration(self):
""" Return the attribute of the metadata XML to be updated with surl value """
# Used in combination with Experiment::getMetadataForRegistration()
# The attribute (default 'surl') will be copied into the metadata string used for pattern matching
# E.g. re.compile('\<metadata att\_name\=\"%s\" att\_value\=\"([a-zA-Z0-9-]+)\-surltobeset\"\/\>' % (attribute))
return 'surl'
def getExpSpecificMetadata(self, job, workdir):
""" Return experiment specific metadata """
# Inputs:
# job = PanDA pilot job object (see Job class)
# workdir = relevant work directory where the metadata is located
# Returns:
# metadata xml string
# See e.g. implementation in CMSExperiment
return ""
def getFileCatalogHosts(self):
""" Return a list of file catalog hosts """
# The method is used in combination with federated xrootd (FAX).
# In case FAX is allowed on a given site, the pilot might need to look up
# replica information in more than one LFC catalog. Normally a site has only
# one LFC (as set in schedconfig.lfchost). Providing a list of hosts will increase
# the probability that FAX will succeed
# See e.g. ATLASExperiment implementation
return []
def verifySwbase(self, appdir):
""" Confirm existence of appdir/swbase """
# appdir/swbase is a queuedata parameter specifying the base location of physics analysis / release software
# This method will simply verify that the corresponding directory exists
#
# Input:
# appdir = application/software/release directory (e.g. /cvmfs/atlas.cern.ch/repo/sw)
# Return:
# error code (0 for success)
return 0
def interpretPayloadStdout(self, job, res, getstatusoutput_was_interrupted, current_job_number, runCommandList, failureCode):
""" Payload error interpretation and handling """
# NOTE: TODO, hide argument complexity with **kwargs
# This method can be used to interpret special errors that only occur in actual payload stdout, e.g. memory errors that have
# caused the payload to crash
#
# Inputs:
# job = PanDA pilot job object (see Job class)
# res =
# getstatusoutput_was_interrupted = True in case the payload execution command was aborted (e.g. keyboard CTRL-C)
# current_job_number = current job number, in case of multi-trf (ATLAS)
# runCommandList = list of payload execution commands (e.g. used by ATLAS to get to a setup file)
# failureCode = signal error code
# Returns:
# Updated PanDA pilot job object with proper payload error information, if needed
#
# The following Job attributes can be updated here
# result = tuple of size 3 that contain the standard error info: result[0] = current job status (e.g. failed, finished, holding),
# result[1] = payload error code, result[2] = PanDA pilot error code
# pilotErrorDiag = error diagnostics (string of up to 256 characters that will appear on the PanDA monitor job web page for a failed job)
# exeError
return job
def getSubprocessName(self, eventService):
""" Select which subprocess is to be run by the Monitor """
# The default subprocess is RunJob (name='Normal', which performs payload setup, stage-in, payload execution and stage-out).
# An alternative subprocess is the runEvent module which downloads events from an Event Server, executes a payload
# and stages out output files asynchronously as they are ready.
# Note: send the entire job object to this method since there might be other subprocesses created at a later time which
# will be identified by this method using some other job data member
# Default subprocess name
name = "RunJob"
# Select alternative subprocess names for HPCs
isHPC, _name = extractHPCInfo(readpar('catchall'))
if isHPC:
name = "RunJob" + _name # e.g. "RunJobTitan" is the proper subprocess name for the Titan plug-in
# for es merge jobs
if _name and _name.startswith("Hpc"):
name = "RunJob"
# Are we going to run an event service job?
if eventService:
tolog("Encountered an event service job")
if isHPC:
name = "RunJob%sEvent" % (_name)
else:
name = "RunJobEvent"
tolog("Selected subprocess: %s" % (name))
return name
def getSubprocessArguments(self, env, port, subprocessName="RunJob"):
""" Argument list needed to launch the subprocess by the pilot/Monitor """
# The pilot/Monitor is forking a subprocess which will be monitored for work dir size, hanging processes etc
# This method returns the arguments needed to execute the subprocess (python <subprocess name> <arguments>)
# By default the pilot has implementations for RunJob.py (standard job) and RunJobEvent.py (event server job)
# If a new subprocess module is added, its startup arguments need to be specified here
jobargs = None
tolog("Will set up subprocess arguments for type: %s" % (subprocessName))
url = '%s:%s/server/panda' % (env['pshttpurl'], str(env['psport']))
if subprocessName == "RunJobEvent":
jobargs = [env['pyexe'], "RunJobEvent.py",
"-a", env['thisSite'].appdir,
"-b", env['queuename'],
"-d", env['jobDic']["prod"][1].workdir,
"-g", env['inputDir'],
"-i", env['jobDic']["prod"][1].tarFileGuid,
"-k", getPilotlogFilename(),
"-l", env['pilot_initdir'],
"-m", env['outputDir'],
"-o", env['thisSite'].workdir,
"-p", str(port),
"-s", env['thisSite'].sitename,
"-t", str(env['proxycheckFlag']),
"-x", str(env['stageinretry']),
"-E", str(env['stageoutretry']),
"-F", env['experiment'],
"-H", env['cache'],
"-W", url]
else:
jobargs = [env['pyexe'], "%s.py" % (subprocessName),
"-a", env['thisSite'].appdir,
"-b", env['queuename'],
"-d", env['jobDic']["prod"][1].workdir,
"-g", env['inputDir'],
"-i", env['jobDic']["prod"][1].tarFileGuid,
"-k", getPilotlogFilename(),
"-l", env['pilot_initdir'],
"-m", env['outputDir'],
"-o", env['thisSite'].workdir,
"-p", str(port),
"-s", env['thisSite'].sitename,
"-t", str(env['proxycheckFlag']),
"-x", str(env['stageinretry']),
"-E", str(env['stageoutretry']),
"-F", env['experiment'],
"-H", env['cache'],
"-W", url]
if 'yodaNodes' in env and subprocessName == "RunJobHpcEvent":
jobargs.append("-N")
jobargs.append(str(env['yodaNodes']))
if 'yodaQueue' in env and subprocessName == "RunJobHpcEvent":
jobargs.append("-Q")
jobargs.append(str(env['yodaQueue']))
tolog("Will use arguments: %s" % str(jobargs))
return jobargs
# Optional
def doSpecialLogFileTransfer(self, **argdict):
""" Should the log file be transfered to a special SE? """
# The log file can at the end of the job be stored in a special SE - in addition to the normal stage-out of the log file
# If this method returns True, the JobLog class will attempt to store the log file in a secondary SE after the transfer of
# the log to the primary/normal SE. Additional information about the secondary SE is required and can be specified in
# another optional method defined in the *Experiment classes
# eventService = argdict.get('eventService', False)
return False
# Optional
def getSchedconfigURL(self, protocol="http://"):
""" Define the URL for the schedconfig / PanDA server"""
# This method gets called from SiteInformation in case the URL is not set (by the wrapper)
return protocol + "pandaserver.cern.ch"
# Optional
def getSubprocess(self, cmd, stdout=None, stderr=None):
""" Execute and return a subprocess """
process = None
try:
tolog("Executing command: %s" % (cmd))
if stdout and stderr:
# use stdout/stdout file objects to redirect the stdout/stderr streams
process = Popen(cmd, shell=True, stdout=stdout, stderr=stderr, preexec_fn=os.setsid)
else:
process = Popen(cmd, shell=True)
except Exception, e:
tolog("!!WARNING!!2344!! Caught exception: %s" % (e))
else:
tolog("Subprocess is running")
return process
# Optional
def getJobExecutionCommand4EventService(self):
""" Define and test the command(s) that will be used to execute the payload for the event service """
# E.g. cmd = ["source <path>/setup.sh; <path>/python <script>"]
# The command returned from this method is executed using subprocess.Popen() from the runEvent module
# Note: this optional method only needs to be defined in case the event service is to be used
# As of March 2014, this is not yet functional or documented.
# The actual command must be declared as a list since that is expected by Popen()
cmd = [""]
return cmd
# Optional
def postGetJobActions(self, job):
""" Perform any special post-job definition download actions here """
# This method is called after the getJob() method has successfully downloaded a new job (job definition) from
# the server. If the job definition e.g. contains information that contradicts WN specifics, this method can
# be used to fail the job
# Return any error code using ec, and any error message using pilotErrorDiag
ec = 0
pilotErrorDiag = ""
return ec, pilotErrorDiag
# Optional
def useTracingService(self):
return False
# Optional
def updateJobSetupScript(self, workdir, create=False, to_script=None):
""" Create or update the job setup script (used to recreate the job locally if needed) """
# If create=True, this step will only create the file with the script header (bash info)
if create:
filename = os.path.basename(self.getJobSetupScriptName(workdir))
tolog("Creating job setup script with stage-in and payload execution commands: %s" % (filename))
to_script = "#!/bin/bash\n# %s %s\n\n" % (filename, time.strftime("%d %b %Y %H:%M:%S", time.gmtime(time.time())))
# Add the string to the setup script
if to_script:
self.addToJobSetupScript(to_script, workdir)
# Optional
def getJobSetupScriptName(self, workdir):
""" return the name of the job setup file """
return os.path.join(workdir, "job_setup.sh")
# Optional
def addToJobSetupScript(self, cmd, workdir):
""" add/append command to job setup file """
filename = self.getJobSetupScriptName(workdir)
fp = None  # make sure fp is defined even if the open() calls below fail
if not os.path.exists(filename):
try:
fp = open(filename, "w")
except OSError, e:
tolog("!!WARNING!!1880!! Could not open job setup file for writing: %s" % str(e))
else:
try:
fp = open(filename, "a")
except OSError, e:
tolog("!!WARNING!!1880!! Could not open job setup file for appending: %s" % str(e))
if fp:
fp.write(cmd)
fp.write("\n\n")
fp.close()
tolog("Updated %s: %s" % (filename, cmd))
# Optional
def getRelease(self, release):
""" Return a list of the software release id's """
# Assuming 'release' is a string that separates release id's with '\n'
# Used in the case of payload using multiple steps with different release versions
# E.g. release = "19.0.0\n19.1.0" -> ['19.0.0', '19.1.0']
return release.split("\n")
# Optional
def formatReleaseString(self, release):
""" Return a special formatted release string """
# E.g. release = "Atlas-19.0.0" -> "19.0.0"
# This method is required for ATLAS but is probably of no interest for any other PanDA user
return release
# Optional
def setCache(self, cache):
""" Cache URL """
# Used e.g. by LSST
self.__cache = cache
# Optional
def getCache(self):
""" Return the cache URL """
# Used e.g. by LSST
return self.__cache
# Optional
def useTracingService(self):
""" Use the Rucio Tracing Service """
# A service provided by the Rucio system that allows for file transfer tracking; all file transfers
# are reported by the pilot to the Rucio Tracing Service if this method returns True
return False
# Optional
def updateJobDefinition(self, job, filename):
""" Update the job definition file and object before using it in RunJob """
# This method is called from Monitor, before RunJob is launched, which makes it possible to modify the job object after it has been downloaded from the job dispatcher
# (used within Monitor) and the job definition file (which is used from RunJob to recreate the same job object as is used in Monitor).
# 'job' is the job object, defined in Job.py, while 'filename' is the name of the file containing the job definition information.
return job
# Optional
def shouldExecuteUtility(self):
""" Determine whether a special utility should be executed """
# The RunJob class has the possibility to execute a special utility, e.g. a memory monitor, that runs in parallel
# to the payload (launched after the main payload process).
# The utility is executed if this method returns True. The utility is currently expected to produce
# a summary JSON file whose name is defined by the getUtilityJSONFilename() method. The contents of
# this file (ie. the full JSON dictionary) will be added to the job update.
#
# Example of summary JSON file (ATLAS case):
# {"Max":{"maxVMEM":40058624,"maxPSS":10340177,"maxRSS":16342012,"maxSwap":16235568},
# "Avg":{"avgVMEM":19384236,"avgPSS":5023500,"avgRSS":6501489,"avgSwap":5964997}}
#
# While running, the MemoryMonitor also produces a regularly updated text file with the following format: (tab separated)
# Time VMEM PSS RSS Swap (first line in file)
# 1447960494 16099644 3971809 6578312 1978060
return False
# Optional
def getUtilityOutputFilename(self):
""" Return the filename of a utility output file """
# For explanation, see shouldExecuteUtility()
return "memory_monitor_output.txt"
# Optional
def getUtilityJSONFilename(self):
""" Return the filename of a utility JSON file """
# For explanation, see shouldExecuteUtility()
return "utility_summary.json"
# Optional
def getUtilityInfo(self, workdir, pilot_initdir, allowTxtFile=False):
""" Add the utility info to the node structure if available """
# Extract the relevant information from the utility tool output and add it to the dictionary
# returned by this method. The dictionary will be merged with the node dictionary in
# PandaServerClient::getNodeStructure() and sent to the PanDA server
return {}
# Optional
def getUtilityCommand(self, **argdict):
""" Prepare a utility command string """
# This method can be used to prepare a setup string for an optional utility tool, e.g. a memory monitor,
# that will be executed by the pilot in parallel with the payload.
# The pilot will look for an output JSON file (summary.json) and will extract pre-determined fields
# from it and report them with the job updates. Currently the pilot expects to find fields related
# to memory information.
# pid = argdict.get('pid', 0)
return ""
# Optional
def getGUIDSourceFilename(self):
""" Return the filename of the file containing the GUIDs for the output files """
# In the case of ATLAS, Athena produces an XML file containing the GUIDs of the output files. The name of this
# file is PoolFileCatalog.xml. If this method returns an empty string (ie the default), the GUID generation will
# be done by the pilot in RunJobUtilities::getOutFilesGuids()
return ""
# Optional
def buildFAXPath(self, **argdict):
""" Build a proper FAX path """
# This method builds proper FAX paths and is used in pure FAX mode (i.e. when FAX is used in forced mode),
# particularly when the PoolFileCatalog.xml is built prior to stage-in
# Only needed if FAX mechanism is used in forced mode (i.e. when copytoolin='fax')
lfn = argdict.get('lfn', 'default_lfn')
scope = argdict.get('scope', 'default_scope')
subpath = argdict.get('subpath', 'atlas/rucio/')
pandaID = argdict.get('pandaID', '')
sourceSite = argdict.get('sourceSite', 'default_sourcesite')
computingSite = argdict.get('computingSite', 'default_computingsite')
# Get the proper FAX redirector (default ATLAS implementation)
from FAXTools import getFAXRedirectors
# First get the global redirectors (several, since the lib file might not be at the same place for overflow jobs)
fax_redirectors_dictionary = getFAXRedirectors(computingSite, sourceSite, pandaID)
tolog("fax_redirectors_dictionary=%s"%str(fax_redirectors_dictionary))
# select the proper fax redirector
if ".lib." in lfn:
redirector = fax_redirectors_dictionary['computingsite']
else:
redirector = fax_redirectors_dictionary['sourcesite']
# Make sure the redirector ends with a double slash
if not redirector.endswith('//'):
if redirector.endswith('/'):
redirector += "/"
else:
redirector += "//"
# Make sure that the subpath does not begin with a slash
if subpath.startswith('/') and len(subpath) > 1:
subpath = subpath[1:]
tolog("redirector=%s"%(redirector))
tolog("subpath=%s"%(subpath))
tolog("scope=%s"%(scope))
tolog("lfn=%s"%(lfn))
return redirector + subpath + scope + ":" + lfn
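# Illustrative sketch (hypothetical redirector and file, added for clarity): with
#   redirector = 'root://fax.example.org:1094//', subpath = 'atlas/rucio/',
#   scope = 'mc15_13TeV', lfn = 'AOD.01.pool.root'
# the method would return
#   'root://fax.example.org:1094//atlas/rucio/mc15_13TeV:AOD.01.pool.root'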
if __name__ == "__main__":
a=Experiment()
print a.getSubprocessName(False)
| apache-2.0 |
aflaxman/scikit-learn | examples/mixture/plot_gmm_sin.py | 103 | 6101 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example demonstrates the behavior of Gaussian mixture models fit on data
that was not sampled from a mixture of Gaussian random variables. The dataset
is formed by 100 points loosely spaced following a noisy sine curve. There is
therefore no ground truth value for the number of Gaussian components.
The first model is a classical Gaussian Mixture Model with 10 components fit
with the Expectation-Maximization algorithm.
The second model is a Bayesian Gaussian Mixture Model with a Dirichlet process
prior fit with variational inference. The low value of the concentration prior
makes the model favor a lower number of active components. This model
"decides" to focus its modeling power on the big picture of the structure of
the dataset: groups of points with alternating directions modeled by
non-diagonal covariance matrices. Those alternating directions roughly capture
the alternating nature of the original sine signal.
The third model is also a Bayesian Gaussian mixture model with a Dirichlet
process prior but this time the value of the concentration prior is higher
giving the model more liberty to model the fine-grained structure of the data.
The result is a mixture with a larger number of active components that is
similar to the first model where we arbitrarily decided to fix the number of
components to 10.
Which model is the best is a matter of subjective judgement: do we want to
favor models that only capture the big picture to summarize and explain most of
the structure of the data while ignoring the details or do we prefer models
that closely follow the high density regions of the signal?
The last two panels show how we can sample from the last two models. The
resulting samples distributions do not look exactly like the original data
distribution. The difference primarily stems from the approximation error we
made by using a model that assumes that the data was generated by a finite
number of Gaussian components instead of a continuous noisy sine curve.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
print(__doc__)
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y, means, covariances, index, title):
splot = plt.subplot(5, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y == i):
continue
plt.scatter(X[Y == i, 0], X[Y == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6., 4. * np.pi - 6.)
plt.ylim(-5., 5.)
plt.title(title)
plt.xticks(())
plt.yticks(())
def plot_samples(X, Y, n_components, index, title):
plt.subplot(5, 1, 4 + index)
for i, color in zip(range(n_components), color_iter):
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y == i):
continue
plt.scatter(X[Y == i, 0], X[Y == i, 1], .8, color=color)
plt.xlim(-6., 4. * np.pi - 6.)
plt.ylim(-5., 5.)
plt.title(title)
plt.xticks(())
plt.yticks(())
# Parameters
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4. * np.pi / n_samples
for i in range(X.shape[0]):
x = i * step - 6.
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3. * (np.sin(x) + np.random.normal(0, .2))
plt.figure(figsize=(10, 10))
plt.subplots_adjust(bottom=.04, top=0.95, hspace=.2, wspace=.05,
left=.03, right=.97)
# Fit a Gaussian mixture with EM using ten components
gmm = mixture.GaussianMixture(n_components=10, covariance_type='full',
max_iter=100).fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Expectation-maximization')
dpgmm = mixture.BayesianGaussianMixture(
n_components=10, covariance_type='full', weight_concentration_prior=1e-2,
weight_concentration_prior_type='dirichlet_process',
mean_precision_prior=1e-2, covariance_prior=1e0 * np.eye(2),
init_params="random", max_iter=100, random_state=2).fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
"Bayesian Gaussian mixture models with a Dirichlet process prior "
r"for $\gamma_0=0.01$.")
X_s, y_s = dpgmm.sample(n_samples=2000)
plot_samples(X_s, y_s, dpgmm.n_components, 0,
"Gaussian mixture with a Dirichlet process prior "
r"for $\gamma_0=0.01$ sampled with $2000$ samples.")
dpgmm = mixture.BayesianGaussianMixture(
n_components=10, covariance_type='full', weight_concentration_prior=1e+2,
weight_concentration_prior_type='dirichlet_process',
mean_precision_prior=1e-2, covariance_prior=1e0 * np.eye(2),
init_params="kmeans", max_iter=100, random_state=2).fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 2,
"Bayesian Gaussian mixture models with a Dirichlet process prior "
r"for $\gamma_0=100$")
X_s, y_s = dpgmm.sample(n_samples=2000)
plot_samples(X_s, y_s, dpgmm.n_components, 1,
"Gaussian mixture with a Dirichlet process prior "
r"for $\gamma_0=100$ sampled with $2000$ samples.")
plt.show()
| bsd-3-clause |
TomAugspurger/pandas | pandas/core/groupby/grouper.py | 1 | 28910 | """
Provide user facing operators for doing the split part of the
split-apply-combine paradigm.
"""
from typing import Dict, Hashable, List, Optional, Tuple
import warnings
import numpy as np
from pandas._typing import FrameOrSeries
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
import pandas.core.algorithms as algorithms
from pandas.core.arrays import Categorical, ExtensionArray
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.groupby import ops
from pandas.core.groupby.categorical import recode_for_groupby, recode_from_groupby
from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.series import Series
from pandas.io.formats.printing import pprint_thing
class Grouper:
"""
A Grouper allows the user to specify a groupby instruction for an object.
This specification will select a column via the key parameter, or if the
level and/or axis parameters are given, a level of the index of the target
object.
If `axis` and/or `level` are passed as keywords to both `Grouper` and
`groupby`, the values passed to `Grouper` take precedence.
Parameters
----------
key : str, defaults to None
Groupby key, which selects the grouping column of the target.
level : name/number, defaults to None
The level for the target index.
freq : str / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
axis : str, int, defaults to 0
Number/name of the axis.
sort : bool, default to False
Whether to sort the resulting labels.
closed : {'left' or 'right'}
Closed end of interval. Only when `freq` parameter is passed.
label : {'left' or 'right'}
Interval boundary to use for labeling.
Only when `freq` parameter is passed.
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex and `freq` parameter is passed.
base : int, default 0
Only when `freq` parameter is passed.
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
.. deprecated:: 1.1.0
The new arguments that you should use are 'offset' or 'origin'.
loffset : str, DateOffset, timedelta object
Only when `freq` parameter is passed.
.. deprecated:: 1.1.0
loffset is only working for ``.resample(...)`` and not for
Grouper (:issue:`28302`).
However, loffset is also deprecated for ``.resample(...)``
See: :class:`DataFrame.resample`
origin : {'epoch', 'start', 'start_day'}, Timestamp or str, default 'start_day'
The timestamp on which to adjust the grouping. The timezone of origin must
match the timezone of the index.
If a timestamp is not used, these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
.. versionadded:: 1.1.0
offset : Timedelta or str, default is None
An offset timedelta added to the origin.
.. versionadded:: 1.1.0
Returns
-------
A specification for a groupby instruction
Examples
--------
Syntactic sugar for ``df.groupby('A')``
>>> df = pd.DataFrame(
... {
... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"],
... "Speed": [100, 5, 200, 300, 15],
... }
... )
>>> df
Animal Speed
0 Falcon 100
1 Parrot 5
2 Falcon 200
3 Falcon 300
4 Parrot 15
>>> df.groupby(pd.Grouper(key="Animal")).mean()
Speed
Animal
Falcon 200
Parrot 10
Specify a resample operation on the column 'Publish date'
>>> df = pd.DataFrame(
... {
... "Publish date": [
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-02"),
... pd.Timestamp("2000-01-09"),
... pd.Timestamp("2000-01-16")
... ],
... "ID": [0, 1, 2, 3],
... "Price": [10, 20, 30, 40]
... }
... )
>>> df
Publish date ID Price
0 2000-01-02 0 10
1 2000-01-02 1 20
2 2000-01-09 2 30
3 2000-01-16 3 40
>>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean()
ID Price
Publish date
2000-01-02 0.5 15.0
2000-01-09 2.0 30.0
2000-01-16 3.0 40.0
If you want to adjust the start of the bins based on a fixed timestamp:
>>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
>>> rng = pd.date_range(start, end, freq='7min')
>>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
>>> ts
2000-10-01 23:30:00 0
2000-10-01 23:37:00 3
2000-10-01 23:44:00 6
2000-10-01 23:51:00 9
2000-10-01 23:58:00 12
2000-10-02 00:05:00 15
2000-10-02 00:12:00 18
2000-10-02 00:19:00 21
2000-10-02 00:26:00 24
Freq: 7T, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min')).sum()
2000-10-01 23:14:00 0
2000-10-01 23:31:00 9
2000-10-01 23:48:00 21
2000-10-02 00:05:00 54
2000-10-02 00:22:00 24
Freq: 17T, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum()
2000-10-01 23:18:00 0
2000-10-01 23:35:00 18
2000-10-01 23:52:00 27
2000-10-02 00:09:00 39
2000-10-02 00:26:00 24
Freq: 17T, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum()
2000-10-01 23:24:00 3
2000-10-01 23:41:00 15
2000-10-01 23:58:00 45
2000-10-02 00:15:00 45
Freq: 17T, dtype: int64
If you want to adjust the start of the bins with an `offset` Timedelta, the two
following lines are equivalent:
>>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17T, dtype: int64
>>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum()
2000-10-01 23:30:00 9
2000-10-01 23:47:00 21
2000-10-02 00:04:00 54
2000-10-02 00:21:00 24
Freq: 17T, dtype: int64
To replace the use of the deprecated `base` argument, you can now use `offset`,
in this example it is equivalent to have `base=2`:
>>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum()
2000-10-01 23:16:00 0
2000-10-01 23:33:00 9
2000-10-01 23:50:00 36
2000-10-02 00:07:00 39
2000-10-02 00:24:00 24
Freq: 17T, dtype: int64
"""
_attributes: Tuple[str, ...] = ("key", "level", "freq", "axis", "sort")
def __new__(cls, *args, **kwargs):
if kwargs.get("freq") is not None:
from pandas.core.resample import TimeGrouper
# Deprecation warning of `base` and `loffset` since v1.1.0:
# we are raising the warning here to be able to set the `stacklevel`
# properly since we need to raise the `base` and `loffset` deprecation
# warning from three different cases:
# core/generic.py::NDFrame.resample
# core/groupby/groupby.py::GroupBy.resample
# core/groupby/grouper.py::Grouper
# raising these warnings from TimeGrouper directly would fail the test:
# tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base
# hacky way to set the stacklevel: if cls is TimeGrouper it means
# that the call comes from a pandas internal call of resample,
# otherwise it comes from pd.Grouper
stacklevel = 4 if cls is TimeGrouper else 2
if kwargs.get("base", None) is not None:
warnings.warn(
"'base' in .resample() and in Grouper() is deprecated.\n"
"The new arguments that you should use are 'offset' or 'origin'.\n"
'\n>>> df.resample(freq="3s", base=2)\n'
"\nbecomes:\n"
'\n>>> df.resample(freq="3s", offset="2s")\n',
FutureWarning,
stacklevel=stacklevel,
)
if kwargs.get("loffset", None) is not None:
warnings.warn(
"'loffset' in .resample() and in Grouper() is deprecated.\n"
'\n>>> df.resample(freq="3s", loffset="8H")\n'
"\nbecomes:\n"
"\n>>> from pandas.tseries.frequencies import to_offset"
'\n>>> df = df.resample(freq="3s").mean()'
'\n>>> df.index = df.index.to_timestamp() + to_offset("8H")\n',
FutureWarning,
stacklevel=stacklevel,
)
cls = TimeGrouper
return super().__new__(cls)
def __init__(
self, key=None, level=None, freq=None, axis=0, sort=False, dropna=True
):
self.key = key
self.level = level
self.freq = freq
self.axis = axis
self.sort = sort
self.grouper = None
self.obj = None
self.indexer = None
self.binner = None
self._grouper = None
self.dropna = dropna
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj, validate: bool = True):
"""
Parameters
----------
obj : the subject object
validate : boolean, default True
if True, validate the grouper
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
self.grouper, _, self.obj = get_grouper(
self.obj,
[self.key],
axis=self.axis,
level=self.level,
sort=self.sort,
validate=validate,
dropna=self.dropna,
)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj: FrameOrSeries, sort: bool = False):
"""
given an object and the specifications, setup the internal grouper
for this particular specification
Parameters
----------
obj : Series or DataFrame
sort : bool, default False
whether the resulting grouper should be sorted
"""
assert obj is not None
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# Keep self.grouper value before overriding
if self._grouper is None:
self._grouper = self.grouper
# the key must be a valid info item
if self.key is not None:
key = self.key
# The 'on' is already defined
if getattr(self.grouper, "name", None) == key and isinstance(
obj, ABCSeries
):
ax = self._grouper.take(obj.index)
else:
if key not in obj._info_axis:
raise KeyError(f"The grouper name {key} is not found")
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax._get_level_values(level), name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError(f"The level {level} is not valid")
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
# use stable sort to support first, last, nth
indexer = self.indexer = ax.argsort(kind="mergesort")
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis)
self.obj = obj
self.grouper = ax
return self.grouper
@property
def groups(self):
return self.grouper.groups
def __repr__(self) -> str:
attrs_list = (
f"{attr_name}={repr(getattr(self, attr_name))}"
for attr_name in self._attributes
if getattr(self, attr_name) is not None
)
attrs = ", ".join(attrs_list)
cls_name = type(self).__name__
return f"{cls_name}({attrs})"
class Grouping:
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj Union[DataFrame, Series]:
name : Label
level :
observed : bool, default False
If we are a Categorical, use the observed values
in_axis : if the Grouping is a column in self.obj and hence among
Groupby.exclusions list
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* codes : ndarray, group codes
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(
self,
index: Index,
grouper=None,
obj: Optional[FrameOrSeries] = None,
name=None,
level=None,
sort: bool = True,
observed: bool = False,
in_axis: bool = False,
dropna: bool = True,
):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.all_grouper = None
self.index = index
self.sort = sort
self.obj = obj
self.observed = observed
self.in_axis = in_axis
self.dropna = dropna
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper._values
# we have a single grouper which may be a myriad of things,
# some of which are dependent on the passing in level
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError(f"Level {level} not in index")
level = index.names.index(level)
if self.name is None:
self.name = index.names[level]
(
self.grouper,
self._codes,
self._group_index,
) = index._get_grouper_for_level(self.grouper, level)
# a passed Grouper like, directly get the grouper in the same way
# as single grouper groupby, use the group_info to get codes
elif isinstance(self.grouper, Grouper):
# get the new grouper; we already have disambiguated
# what key/level refer to exactly, don't need to
# check again as we have by this point converted these
# to an actual value (rather than a pd.Grouper)
_, grouper, _ = self.grouper._get_grouper(self.obj, validate=False)
if self.name is None:
self.name = grouper.result_index.name
self.obj = self.grouper.obj
self.grouper = grouper._get_grouper()
else:
if self.grouper is None and self.name is not None and self.obj is not None:
self.grouper = self.obj[self.name]
elif isinstance(self.grouper, (list, tuple)):
self.grouper = com.asarray_tuplesafe(self.grouper)
# a passed Categorical
elif is_categorical_dtype(self.grouper):
self.grouper, self.all_grouper = recode_for_groupby(
self.grouper, self.sort, observed
)
categories = self.grouper.categories
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
self._codes = self.grouper.codes
if observed:
codes = algorithms.unique1d(self.grouper.codes)
codes = codes[codes != -1]
if sort or self.grouper.ordered:
codes = np.sort(codes)
else:
codes = np.arange(len(categories))
self._group_index = CategoricalIndex(
Categorical.from_codes(
codes=codes, categories=categories, ordered=self.grouper.ordered
),
name=self.name,
)
# we are done
if isinstance(self.grouper, Grouping):
self.grouper = self.grouper.grouper
# no level passed
elif not isinstance(
self.grouper, (Series, Index, ExtensionArray, np.ndarray)
):
if getattr(self.grouper, "ndim", 1) != 1:
t = self.name or str(type(self.grouper))
raise ValueError(f"Grouper for '{t}' not 1-dimensional")
self.grouper = self.index.map(self.grouper)
if not (
hasattr(self.grouper, "__len__")
and len(self.grouper) == len(self.index)
):
grper = pprint_thing(self.grouper)
errmsg = (
"Grouper result violates len(labels) == "
f"len(data)\nresult: {grper}"
)
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have
# Timestamps like
if getattr(self.grouper, "dtype", None) is not None:
if is_datetime64_dtype(self.grouper):
self.grouper = self.grouper.astype("datetime64[ns]")
elif is_timedelta64_dtype(self.grouper):
self.grouper = self.grouper.astype("timedelta64[ns]")
def __repr__(self) -> str:
return f"Grouping({self.name})"
def __iter__(self):
return iter(self.indices)
_codes: Optional[np.ndarray] = None
_group_index: Optional[Index] = None
@property
def ngroups(self) -> int:
return len(self.group_index)
@cache_readonly
def indices(self):
# we have a list of groupers
if isinstance(self.grouper, ops.BaseGrouper):
return self.grouper.indices
values = Categorical(self.grouper)
return values._reverse_indexer()
@property
def codes(self) -> np.ndarray:
if self._codes is None:
self._make_codes()
return self._codes
@cache_readonly
def result_index(self) -> Index:
if self.all_grouper is not None:
return recode_from_groupby(self.all_grouper, self.sort, self.group_index)
return self.group_index
@property
def group_index(self) -> Index:
if self._group_index is None:
self._make_codes()
assert self._group_index is not None
return self._group_index
def _make_codes(self) -> None:
if self._codes is None or self._group_index is None:
# we have a list of groupers
if isinstance(self.grouper, ops.BaseGrouper):
codes = self.grouper.codes_info
uniques = self.grouper.result_index
else:
codes, uniques = algorithms.factorize(
self.grouper, sort=self.sort, dropna=self.dropna
)
uniques = Index(uniques, name=self.name)
self._codes = codes
self._group_index = uniques
@cache_readonly
def groups(self) -> Dict[Hashable, np.ndarray]:
return self.index.groupby(Categorical.from_codes(self.codes, self.group_index))
def get_grouper(
obj: FrameOrSeries,
key=None,
axis: int = 0,
level=None,
sort: bool = True,
observed: bool = False,
mutated: bool = False,
validate: bool = True,
dropna: bool = True,
) -> "Tuple[ops.BaseGrouper, List[Hashable], FrameOrSeries]":
"""
Create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis,level,sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passing in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
If observed & we have a categorical grouper, only show the observed
values.
If validate, then check for key/level overlaps.
"""
group_axis = obj._get_axis(axis)
# validate that the passed single level is compatible with the passed
# axis of the object
if level is not None:
# TODO: These if-block and else-block are almost same.
# MultiIndex instance check is removable, but it seems that there are
# some processes only for non-MultiIndex in else-block,
# eg. `obj.index.name != level`. We have to consider carefully whether
# these are applicable for MultiIndex. Even if these are applicable,
# we need to check if it makes no side effect to subsequent processes
# on the outside of this condition.
# (GH 17621)
if isinstance(group_axis, MultiIndex):
if is_list_like(level) and len(level) == 1:
level = level[0]
if key is None and is_scalar(level):
# Get the level values from group_axis
key = group_axis.get_level_values(level)
level = None
else:
# allow level to be a length-one list-like object
# (e.g., level=[0])
# GH 13901
if is_list_like(level):
nlevels = len(level)
if nlevels == 1:
level = level[0]
elif nlevels == 0:
raise ValueError("No group keys passed!")
else:
raise ValueError("multiple levels only valid with MultiIndex")
if isinstance(level, str):
if obj._get_axis(axis).name != level:
raise ValueError(
f"level name {level} is not the name "
f"of the {obj._get_axis_name(axis)}"
)
elif level > 0 or level < -1:
raise ValueError("level > 0 or level < -1 only valid with MultiIndex")
# NOTE: `group_axis` and `group_axis.get_level_values(level)`
# are same in this section.
level = None
key = group_axis
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj, validate=False)
if key.key is None:
return grouper, [], obj
else:
return grouper, [key.key], obj
# already have a BaseGrouper, just return it
elif isinstance(key, ops.BaseGrouper):
return key, [], obj
if not isinstance(key, list):
keys = [key]
match_axis_length = False
else:
keys = key
match_axis_length = len(keys) == len(group_axis)
# what are we after, exactly?
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_groupers = any(isinstance(g, Grouper) for g in keys)
any_arraylike = any(
isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys
)
# is this an index replacement?
if (
not any_callable
and not any_arraylike
and not any_groupers
and match_axis_length
and level is None
):
if isinstance(obj, DataFrame):
all_in_columns_index = all(
g in obj.columns or g in obj.index.names for g in keys
)
else:
assert isinstance(obj, Series)
all_in_columns_index = all(g in obj.index.names for g in keys)
if not all_in_columns_index:
keys = [com.asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings: List[Grouping] = []
exclusions: List[Hashable] = []
# if the actual grouper should be obj[key]
def is_in_axis(key) -> bool:
if not _is_label_like(key):
# items -> .columns for DataFrame, .index for Series
items = obj.axes[-1]
try:
items.get_loc(key)
except (KeyError, TypeError, InvalidIndexError):
# TypeError shows up here if we pass e.g. Int64Index
return False
return True
# if the grouper is obj[name]
def is_in_obj(gpr) -> bool:
if not hasattr(gpr, "name"):
return False
try:
return gpr is obj[gpr.name]
except (KeyError, IndexError, ValueError):
# TODO: ValueError: Given date string not likely a datetime.
# should be KeyError?
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis, name = True, gpr.name
exclusions.append(name)
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
if validate:
obj._check_label_or_level_ambiguity(gpr, axis=axis)
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
elif obj._is_level_reference(gpr, axis=axis):
in_axis, name, level, gpr = False, None, gpr, None
else:
raise KeyError(gpr)
elif isinstance(gpr, Grouper) and gpr.key is not None:
# Add key to exclusions
exclusions.append(gpr.key)
in_axis, name = False, None
else:
in_axis, name = False, None
if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
raise ValueError(
f"Length of grouper ({len(gpr)}) and axis ({obj.shape[axis]}) "
"must be same length"
)
# create the Grouping
# allow us to passing the actual Grouping as the gpr
ping = (
Grouping(
group_axis,
gpr,
obj=obj,
name=name,
level=level,
sort=sort,
observed=observed,
in_axis=in_axis,
dropna=dropna,
)
if not isinstance(gpr, Grouping)
else gpr
)
groupings.append(ping)
if len(groupings) == 0 and len(obj):
raise ValueError("No group keys passed!")
elif len(groupings) == 0:
groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp)))
# create the internals grouper
grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
return grouper, exclusions, obj
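# Illustrative sketch (hypothetical frame, internal API, added for clarity): for
#   df = DataFrame({"A": ["x", "x", "y"], "B": [1, 2, 3]})
# get_grouper(df, key="A") returns a BaseGrouper holding a single Grouping on column
# "A", the exclusions list ["A"] and the (unchanged) df.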
def _is_label_like(val) -> bool:
return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val))
def _convert_grouper(axis: Index, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper._values
else:
return grouper.reindex(axis)._values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise ValueError("Grouper and axis must be same length")
return grouper
else:
return grouper
| bsd-3-clause |
jmhsi/justin_tinker | data_science/j_utils.py | 1 | 19568 |
# File with utility functions for models in pytorch
# Imports ___________________________________________________________________
import os
import re
import torch
import torchvision.datasets as datasets
from torch.utils.data import Dataset, DataLoader
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm_notebook
from torchvision.utils import make_grid
from torch.autograd import Variable
import pandas as pd #for some debugging
import shutil, errno
# copy function
def copyanything(src, dst):
try:
shutil.copytree(src, dst)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else: raise
# Optimizers not put in master yet __________________________________________
class Nadam(torch.optim.Optimizer):
"""Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).
It has been proposed in `Incorporating Nesterov Momentum into Adam`__.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 2e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
schedule_decay (float, optional): momentum schedule decay (default: 4e-3)
__ http://cs229.stanford.edu/proj2015/054_report.pdf
__ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf
"""
def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, schedule_decay=4e-3):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, schedule_decay=schedule_decay)
super(Nadam, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['m_schedule'] = 1.
state['exp_avg'] = grad.new().resize_as_(grad).zero_()
state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()
# Warming momentum schedule
m_schedule = state['m_schedule']
schedule_decay = group['schedule_decay']
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
eps = group['eps']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
momentum_cache_t = beta1 * \
(1. - 0.5 * (0.96 ** (state['step'] * schedule_decay)))
momentum_cache_t_1 = beta1 * \
(1. - 0.5 *
(0.96 ** ((state['step'] + 1) * schedule_decay)))
m_schedule_new = m_schedule * momentum_cache_t
m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1
state['m_schedule'] = m_schedule_new
# Decay the first and second moment running average coefficient
bias_correction2 = 1 - beta2 ** state['step']
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg_sq_prime = exp_avg_sq.div(1. - bias_correction2)
denom = exp_avg_sq_prime.sqrt_().add_(group['eps'])
p.data.addcdiv_(-group['lr'] * (1. - momentum_cache_t) /
(1. - m_schedule_new), grad, denom)
p.data.addcdiv_(-group['lr'] * momentum_cache_t_1 /
(1. - m_schedule_next), exp_avg, denom)
return loss
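# The helper below is an illustrative usage sketch for the Nadam optimizer defined
# above; it is not called anywhere in this module and all of its arguments are
# placeholders supplied by the caller.
def _nadam_usage_sketch(model, inputs, targets, criterion):
    """Run a single optimization step with Nadam (illustrative only)."""
    optimizer = Nadam(model.parameters(), lr=2e-3, betas=(0.9, 0.999), eps=1e-8)
    optimizer.zero_grad()
    loss = criterion(model(inputs), targets)
    loss.backward()
    optimizer.step()
    return loss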
# Visualizing Functions _____________________________________________________
def show_image(image):
'''Shows a PIL image'''
plt.figure()
plt.imshow(image)
plt.show()
def get_example_image(image_fullpaths):
'''From an array-like, choose one image'''
return Image.open(np.random.choice(image_fullpaths))
# Making Datasets ___________________________________________________________
class TestDataset(Dataset):
"""Args: path to dir, transforms; makes test dataset for images"""
def __init__(self, root_dir, transform=None):
self.root_dir = root_dir
self.transform = transform
self.samples = [filename for filename in os.listdir(root_dir) if '.jpg' in filename]
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
img_name = os.path.join(self.root_dir, self.samples[idx])
image = Image.open(img_name)
if self.transform:
image = self.transform(image)
#image = Image.fromarray(image)
return image, int(re.findall(r'\d+', self.samples[idx])[0])
class DfNumpyDataset(Dataset):
'''Makes dataset from df or numpy array of data and targets'''
def __init__(self, data, targets):
self.data = data
self.targets = targets
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx,:], self.targets[idx,:]
def get_dataset(data, targets):
return DfNumpyDataset(data, targets)
def get_image_dataset(root, tsfm=None):
'''Makes dataset from images following structure of root/class/img.png'''
return datasets.ImageFolder(root, transform=tsfm)
def get_loader(dataset, use_cuda=True, batch_size=64, shuffle=False):
'''Makes an iterator/batcher for looping over a dataset when training a nn. Feed it a
Dataset (e.g. one built with get_image_dataset)'''
return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, pin_memory=use_cuda)
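# The helper below is an illustrative sketch (not used in this module) showing how
# get_image_dataset and get_loader fit together; the root path and transform are
# placeholder values.
def _image_loader_sketch(root='data/train', batch_size=64):
    """Build an image DataLoader from a root/class/img.png folder (illustrative only)."""
    import torchvision.transforms as transforms
    tsfm = transforms.Compose([transforms.ToTensor()])
    dataset = get_image_dataset(root, tsfm=tsfm)
    return get_loader(dataset, use_cuda=True, batch_size=batch_size, shuffle=True)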
def precompute_vals(model, data_loader, test_set=False):
'''Given a model (up to where computation is wanted) and data loader, precomputes values'''
outputs_list = []
labels_list = []
for i, data in tqdm_notebook(enumerate(data_loader)):
# get inputs and labels
inputs, labels = data
# wrap in Variable
try:
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
labels = labels.view(-1)
except AttributeError:
inputs = Variable(inputs.cuda())
# forward pass
outputs = model(inputs)
outputs_list.append(outputs)
labels_list.append(labels)
del inputs
del labels
outputs_ret = torch.cat(outputs_list)
try:
labels_ret = torch.cat(labels_list)
except TypeError:
labels_ret = [ids for sublist in labels_list for ids in sublist]
return outputs_ret, labels_ret
# if test_set==False:
# else:
# # for test sets
# outputs_list = []
# ids_list = []
# for i, data in tqdm_notebook(enumerate(data_loader)):
# # get inputs and ids
# inputs, ids = data
# # wrap in Variable
# inputs, ids = Variable(inputs.cuda()), ids
# # forward pass
# outputs = model(inputs)
# outputs_list.append(outputs)
# ids_list.extend(ids)
# del inputs
# del ids
# return torch.cat(outputs_list), ids_list
def save_precompute(X, y, path, model_name, filename):
precom_savedir = make_savedir(path, 'precom_'+model_name)
X_converted = X.data.cpu().numpy()
try:
y_converted = y.data.cpu().numpy().reshape(-1,1)
except AttributeError:
y_converted = np.array(y).reshape(-1,1)
torch.save((X_converted,y_converted), os.path.join(precom_savedir, filename))
print('Saved at {0}'.format(os.path.join(precom_savedir, filename)))
# Image Standardization _____________________________________________________
def get_mean_rgb(train_paths):
'''given an array-like of all paths to train, under structure of
root/class/img, will return mean rgb scaled to 0-1 (from 0-255)'''
return np.array([(np.array(Image.open(path)) / 255).mean(0).mean(0) for path in tqdm_notebook(train_paths)]).mean(0)
def get_std_dev_rgb(train_paths, mean_rgb):
'''given an array-like of all paths to train, under structure of
root/class/img, will return std_dev rgb scaled to 0-1 (from 0-255)'''
return np.array([(((np.array(Image.open(path)) / 255) - mean_rgb)**2).mean(0).mean(0) for path in tqdm_notebook(train_paths)]).mean(0)**.5
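# The helper below is an illustrative sketch (not used in this module) showing how the
# computed per-channel statistics feed a Normalize transform; train_paths is a
# placeholder list of image paths supplied by the caller.
def _normalization_sketch(train_paths):
    """Build a Normalize transform from dataset statistics (illustrative only)."""
    import torchvision.transforms as transforms
    mean_rgb = get_mean_rgb(train_paths)
    std_rgb = get_std_dev_rgb(train_paths, mean_rgb)
    return transforms.Normalize(mean=list(mean_rgb), std=list(std_rgb))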
class UnNormalize(object):
'''To undo a Normalize transform.'''
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
# The normalize code -> t.sub_(m).div_(s)
return tensor
# Dir Functions _____________________________________________________________
# different dir functions for classification tasks vs neural style transfer
def get_image_classes_clf(root):
'''Returns a list of classes when structure is root/class/...'''
return [file for file in os.listdir(root) if '.DS_Store' not in file]
def get_image_fullpaths_clf(root, img_fmt):
'''Returns list of full paths to samples in root assuming root/class/...'''
return [os.path.join(root, classes, path) for classes in get_image_classes_clf(root) for path in os.listdir(
os.path.join(root, classes)) if img_fmt in path]
def get_image_fullpaths_nst(root, img_fmt):
'''Returns list of full paths to samples in root assuming root/img.jpeg'''
return [os.path.join(root, file) for file in os.listdir(root) if img_fmt in file]
# Training Models ___________________________________________________________
def make_savedir(path, model_name):
'''Makes a savedir to hold saves if the dir does not exist. returns savedir path'''
savedir = os.path.join(path, 'save_{0}'.format(model_name))
if not os.path.isdir(savedir):
os.mkdir(savedir)
return savedir
def train_model(model, model_name, train_loader, valid_loader, optimizer, criterion, n_epochs, save_epoch, savedir, variance_pct_thrsh, patience_epoch, pct_change, decay_rate, continue_training=False, g_epoch=1, verbose=False, lr_scheduler=False, early_stop=False):
'''Basic setup for training models and saving every multiple of save_epoch.
This assumes the full model can fit on one gpu. If different parts are
on different GPUs, will need to specifically set devices numbers in
cuda calls based on architecture of model.
model = pytorch model
model_name = model name
train_loader = loader made from train dataset
valid_loader = loader made from valid dataset
optimizer = choice of optimizer
criterion = loss function
n_epochs = number of epochs to train for
save_epochs = save a state dict every save_epochs
savedir = dir to save state dicts in
patience_epoch = epochs to wait where change in loss is below pct_change
before decaying learning rate
pct_change = the percent change difference desired in loss
decay_rate = float from 0-1 which will multiply the current learning rate
continue_training = specifies of training is continuing or fresh
g_epoch = what global train epoch number. 1 if training fresh.
returns g_epoch to keep track of how many training epochs have been run
'''
epoch_list = []
loss_list = []
train_accuracy_list = []
valid_accuracy_list = []
lr_list = []
early_stopping_flag = 0
fig = plt.figure()
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
plt.ion()
fig.show()
fig.canvas.draw()
k = 0
if continue_training:
try:
g_epoch = g_epoch[0]
except:
pass
epochs = range(g_epoch, g_epoch + n_epochs)
for epoch in tqdm_notebook(epochs):
# epoch stats for plotting
correct = 0
seen = 0
current_loss = 0.0
g_epoch += 1
for i, data in enumerate(train_loader):
# get inputs and labels
inputs, labels = data
# wrap in Variable
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
labels = labels.view(-1)
# zero the gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = model(inputs)
# import pdb; pdb.set_trace()
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# statistic updates
current_loss += loss.data[0]
seen += len(inputs)
# import pdb; pdb.set_trace()
correct += (labels == outputs.max(1)[1]).cpu().data.numpy().sum()
# Plotting ___________________________________________________________
epoch_list.append(epoch)
if not loss_list:
last_loss = 9999999
else:
last_loss = loss_list[-1]
valid_accuracy = check_accuracy(valid_loader, model)
try:
if valid_accuracy > highest_valid_acc:
save_best = True
highest_valid_acc = valid_accuracy
else:
save_best = False
except NameError:  # first epoch, highest_valid_acc not yet defined
highest_valid_acc = 0
save_best = False
train_accuracy = float(correct)/float(seen)
if verbose:
print('epoch: {0}, loss: {3}, train_acc: {1}, valid_acc: {2}'.format(epoch,train_accuracy, valid_accuracy, current_loss))
loss_list.append(current_loss)
train_accuracy_list.append(train_accuracy)
valid_accuracy_list.append(valid_accuracy)
lr_list.append(optimizer.param_groups[0]['lr'])
ax1.clear()
ax1.plot(epoch_list, loss_list)
ax2.clear()
ax2.plot(epoch_list, train_accuracy_list, 'ro', label='train')
ax2.plot(epoch_list, valid_accuracy_list, label='valid')
ax3.plot(epoch_list, lr_list)
plt.title("Epoch: {0}, loss left, accuracy middle, learning rate right".format(epoch))
ax2.legend(loc='best')
fig.canvas.draw()
# To decay learning rate _____________________________________________
decrease_lr = 0
if lr_scheduler:
if train_accuracy > .93:
if decrease_lr % patience_epoch == 0:
optimizer.param_groups[0]['lr'] *= decay_rate
decrease_lr += 1
# if abs((last_loss - current_loss) / last_loss) < pct_change:
# k += 1
# if k >= patience_epoch:
# k = 0
# optimizer.param_groups[0]['lr'] *= decay_rate
# else:
# k = 0
# Conditionally save if variance is starting to grow btwn datasets, early stopping as well
if early_stop:
if (valid_accuracy < train_accuracy) & (((train_accuracy - valid_accuracy)/train_accuracy) > variance_pct_thrsh):
print("Epoch passing variance cutoff: {0}".format(epoch))
early_stopping_flag += 1
torch.save(model.state_dict(), os.path.join(
savedir, model_name + '_{0}'.format(epoch)))
if early_stopping_flag > patience_epoch*2:
print('Triggered early stopping flag')
break
else:
early_stopping_flag = 0
# Saving _____________________________________________________________
if (epoch) % save_epoch == 0:
torch.save(model.state_dict(), os.path.join(
savedir, model_name + '_{0}'.format(epoch)))
if save_best:
torch.save(model.state_dict(), os.path.join(
savedir, model_name + '_best'))
# Free up cuda memory again ______________________________________________
del inputs
del labels
del loss
del optimizer
del criterion
return g_epoch, epoch_list, loss_list, train_accuracy_list, valid_accuracy_list
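# Illustrative call sketch for train_model (kept as comments because every
# name prefixed with my_ is an assumption, not defined in this module):
# g_epoch, epochs, losses, train_accs, valid_accs = train_model(
#     model=my_model, model_name='resnet_demo',
#     train_loader=my_train_loader, valid_loader=my_valid_loader,
#     optimizer=my_optimizer, criterion=my_criterion,
#     n_epochs=20, save_epoch=5, savedir=make_savedir('.', 'resnet_demo'),
#     variance_pct_thrsh=0.2, patience_epoch=3, pct_change=0.01,
#     decay_rate=0.5, verbose=True)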
# Validation Functions ______________________________________________________
def get_classes_strings(classes, labels_ids):
# returns the classes in string format
return [classes[label_id] for label_id in labels_ids]
def get_prediction_classes_ids(predictions):
# returns the predictions in id format
predictions_ids = predictions.cpu().data.numpy().argmax(1)
return predictions_ids
def get_prediction_classes_strings(classes, predictions):
# returns the predictions in string format
return get_classes_strings(classes, get_prediction_classes_ids(predictions))
def show_iter(img):
plt.figure()
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest')
plt.show()
def predictions_vs_actuals(iterator, model, rvrs_tsfm, classes):
model.eval()
images, labels = iterator.next()
img_list = [rvrs_tsfm(img) for img in images]
labels_string = get_classes_strings(classes, labels.numpy())
show_iter(make_grid(img_list, padding=10))
# display the predictons for the images above
predictions = model(Variable(images.cuda()))
predictions_string = get_prediction_classes_strings(classes, predictions)
print('Actuals: ', labels_string)
print('Predictions: ', predictions_string)
del predictions
del images
del labels
def make_predictions(data_loader, model):
model.eval()
pred_list = []
for i, data in enumerate(data_loader):
images, labels = data
labels = labels.view(-1)
predictions = list(model(Variable(images.cuda())).max(1)[1].cpu().data)
pred_list.extend(predictions)
return pred_list
def check_accuracy(data_loader, model):
model.eval()
correct = 0
seen = 0
total_len = len(data_loader)
for i, data in enumerate(data_loader):
images, labels = data
labels = labels.view(-1)
seen += len(images)
predictions = model(Variable(images.cuda()))
# labels is tensor, predictions is variable; predictions pull data out to numpy
correct += (labels.numpy() == predictions.max(1)[1].cpu().data.numpy()).sum() #predictions.max(1)[1] returns indices of max preds
# import pdb; pdb.set_trace()
del images
del labels
del predictions
# print('Accuracy: {0}, Saw: {1}, Correct: {2}'.format(correct/seen, seen, correct))
return float(correct)/float(seen) | apache-2.0 |
crystal150/CS350 | generator.py | 1 | 4662 | import pickle
import csv
import re
from sklearn import linear_model
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.sparse import hstack
TRAIN_FILE = "train.csv"
########### Ancillary functions ###########
# From .csv file to array
def readCsv(fname, skipFirst = True, delimiter = ","):
reader = csv.reader (open (fname, "rb"), delimiter = delimiter)
rows = []
for row in reader :
if skipFirst :
skipFirst = False
continue
rows.append(row)
return rows
# Natural language normalizing
def normalize(f, stemmer = None):
f = [x.lower() for x in f]
f = [x.replace("\\n"," ") for x in f]
f = [x.replace("\\t"," ") for x in f]
f = [x.replace("\\xa0"," ") for x in f]
f = [x.replace("\\xc2"," ") for x in f]
f = [x.replace(" u "," you ") for x in f]
f = [x.replace(" em "," them ") for x in f]
f = [x.replace(" da "," the ") for x in f]
f = [x.replace(" yo "," you ") for x in f]
f = [x.replace(" ur "," you ") for x in f]
f = [x.replace("won't", "will not") for x in f]
f = [x.replace("can't", "cannot") for x in f]
f = [x.replace("i'm", "i am") for x in f]
f = [x.replace(" im ", " i am ") for x in f]
f = [x.replace("ain't", "is not") for x in f]
f = [x.replace("'ll", " will") for x in f]
f = [x.replace("'t", " not") for x in f]
f = [x.replace("'ve", " have") for x in f]
f = [x.replace("'s", " is") for x in f]
f = [x.replace("'re", " are") for x in f]
f = [x.replace("'d", " would") for x in f]
# Stemming
if stemmer is None :
f = [re.subn("ies( |$)", "y ", x)[0].strip() for x in f]
f = [re.subn("s( |$)", " ", x)[0].strip() for x in f]
f = [re.subn("ing( |$)", " ", x)[0].strip() for x in f]
f = [x.replace("tard ", " ") for x in f]
f = [re.subn(" [*$%&#@][*$%&#@]+"," xexp ", x)[0].strip() for x in f]
f = [re.subn(" [0-9]+ "," DD ", x)[0].strip() for x in f]
f = [re.subn("<\S*>","", x)[0].strip() for x in f]
else :
import nltk  # needed only when a stemmer is supplied; nltk is not imported at module level
for i in range(len(f)):
sen = list()
for w in nltk.word_tokenize(f[i]):
sen.append(stemmer.stem(w))
f[i] = " ".join(sen)
return f
#############################################
########### Machine Learning Part ###########
# Machine learns for n-grams ( char or word )
def ngrams(train_sen, train_label, start, final, analyzer_char = False):
analyzer_type = 'word'
if analyzer_char:
analyzer_type = 'char'
train_sen = normalize(train_sen)
# TF IDF --> frequent words are less important
vtzer = TfidfVectorizer (ngram_range = (start, final), stop_words = 'english', analyzer = analyzer_type, sublinear_tf = True)
X_train = vtzer.fit_transform (train_sen)
Y_train = train_label
return X_train, vtzer
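# Illustrative sketch (not part of the original pipeline): normalize two
# made-up comments and vectorize them with unigram TF-IDF. The sentences and
# labels below are placeholders, not data from train.csv.
def _demo_normalize_and_ngrams():
    demo_sen = ["hey u r dumb, won't listen", "this is a perfectly normal comment"]
    demo_label = ["1", "0"]
    cleaned = normalize(demo_sen)  # applies e.g. " u " -> " you ", "won't" -> "will not"
    X_demo, vtzer_demo = ngrams(demo_sen, demo_label, 1, 1)
    return cleaned, X_demo.shape  # sparse matrix with one row per sentence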
# Machine learns for special cases
def specialCases(train_sen, train_label):
g = [x.lower().replace("you are"," SSS ").replace("you're"," SSS ").replace(" ur ", " SSS ").split("SSS")[1:] for x in train_sen]
f = []
for x in g:
fts = " "
x = normalize(x)
for y in x:
w = y.strip().replace("?",".").split(".")
fts = fts + " " + w[0]
f.append(fts)
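# note: the vectorizer below is fit on the full sentences (train_sen),
# not on the "you are ..." fragments assembled in f above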
X_train, vtzer = ngrams(train_sen, train_label, 1, 1)
return X_train, vtzer
# Returns model which consider significant cases
def learn(train_sen, train_label, vector_add = False):
print "Unigram word learning..."
X_train1, vtzer1 = ngrams(train_sen, train_label, 1, 1)
print "Bigram word learning..."
X_train2, vtzer2 = ngrams(train_sen, train_label, 2, 2)
print "Trigram word learning..."
X_train3, vtzer3 = ngrams(train_sen, train_label, 3, 3)
print "Quadrigram char learning..."
X_train4, vtzer4 = ngrams(train_sen, train_label, 4, 4, analyzer_char = True)
print "Quinquegram char learning..."
X_train5, vtzer5 = ngrams(train_sen, train_label, 5, 5, analyzer_char = True)
print "Special case learning..."
X_train6, vtzer6 = specialCases(train_sen, train_label)
# Sum of all model
vtzers = [vtzer1, vtzer2, vtzer3, vtzer4, vtzer5, vtzer6]
X_train = hstack([X_train1, X_train2, X_train3, X_train4, X_train5, X_train6])
Y_train = train_label
print "Construct LogisticRegression.."
model = linear_model.LogisticRegression( C = 3 )
model.fit(X_train, Y_train)
return model, vtzers
###########################################
################ Main Part ################
if __name__ == "__main__":
train_data = readCsv(TRAIN_FILE)
train_sen = [x[2] for x in train_data]
train_label = [x[0] for x in train_data]
print("============================== Insult Comment Training ==============================");
model, vtzers = learn(train_sen, train_label)
model_file = open('model.pickle', 'wb')
vtzers_file = open('vtzers.pickle', 'wb')
pickle.dump(model, model_file)
pickle.dump(vtzers, vtzers_file)
| mit |
ForestClaw/forestclaw | applications/geoclaw/tohoku/maketopo.py | 1 | 2298 | """
Create topo and dtopo files needed for this example:
etopo10min120W60W60S0S.asc download from GeoClaw topo repository
dtopo_usgs100227.tt3 create using Okada model
Prior to Clawpack 5.2.1, the fault parameters were specified in a .cfg file,
but now they are explicit below.
Call functions with makeplots==True to create plots of topo, slip, and dtopo.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import clawpack.clawutil.data
try:
FCLAW = os.environ['FCLAW']
except KeyError:
raise Exception("*** Must first set FCLAW environment variable")
# Scratch directory for storing topo and dtopo files:
scratch_dir = os.path.join(FCLAW, 'applications', 'geoclaw', 'scratch')
def get_topo(makeplots=False):
"""
Retrieve the topo file from the GeoClaw repository.
"""
from clawpack.geoclaw import topotools
topo_fname = 'etopo1min130E210E0N60N.asc'
url = 'http://depts.washington.edu/clawpack/geoclaw/topo/etopo/' + topo_fname
clawpack.clawutil.data.get_remote_file(url, output_dir=scratch_dir,
file_name=topo_fname, verbose=True)
if 0:
# this topo file isn't really needed for modeling around Kahului
topo_fname = 'hawaii_6s.txt'
url = 'http://depts.washington.edu/clawpack/geoclaw/topo/hawaii/' + topo_fname
clawpack.clawutil.data.get_remote_file(url, output_dir=scratch_dir,
file_name=topo_fname, verbose=True)
topo_fname = 'kahului_1s.txt'
url = 'http://depts.washington.edu/clawpack/geoclaw/topo/hawaii/' + topo_fname
clawpack.clawutil.data.get_remote_file(url, output_dir=scratch_dir,
file_name=topo_fname, verbose=True)
topo_fname = 'fujii.txydz'
url = 'http://depts.washington.edu/clawpack/geoclaw/dtopo/tohoku/' + topo_fname
clawpack.clawutil.data.get_remote_file(url, output_dir=scratch_dir,
file_name=topo_fname, verbose=True)
if makeplots:
from matplotlib import pyplot as plt
topo = topotools.Topography(os.path.join(scratch_dir,topo_fname), topo_type=2)
topo.plot()
fname = os.path.splitext(topo_fname)[0] + '.png'
plt.savefig(fname)
print("Created ",fname)
if __name__=='__main__':
get_topo(False)
| bsd-2-clause |
rosswhitfield/mantid | scripts/DiamondAttenuationCorrection/FitTransReadUB.py | 3 | 47813 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
'''
1. all the functions are defined and built consistently.
Data types:
- Use only numpy arrays to ensure consistency across formatting and type
*** x, x0 = parameters vector - 1D numpy array
*** setang1, setang2 = angles for refinement - 1D numpy arrays, 3 elements
*** hkl1, hkl2 = numpy arrays (3 columns) having all the hkl indices from the 2 diamonds
*** UB1, UB2 = numpy arrays (3x3) holds UB matrices from input files
***
'''
# Import all needed libraries
from matplotlib import pyplot as plt
import numpy as np
import itertools as itt
#import UBMatrixGenerator as UBMG
import scipy.optimize as sp
__author__ = 'cip'
# Define global variables
global hkl1, hkl2
global UB1, pkcalcint1
global UB2, pkcalcint2
global pktype
global lam, y, e, TOF
global L1
global ttot
global fxsamediam
global neqv1, eqvlab1, neqv2, eqvlab2
global difa, function_verbose
global figure_name_attenuation, run_number
def dlmread(filename):
'''
Function to read parameters from file after previous fit
'''
content = []
with open(filename, "r") as f:
for line in f.readlines():
content.append(float(line))
return np.array(content)
def calcDspacing(a, b, c, alp, bet, gam, h, k, l):
'''
%CALCDSPACING for general unit cell: a,b,c,alp,bet,gam returns d-spacing for
%reflection h,k,l
%
'''
ca = np.cos(np.radians(alp))
cb = np.cos(np.radians(bet))
cg = np.cos(np.radians(gam))
sa = np.sin(np.radians(alp))
sb = np.sin(np.radians(bet))
sg = np.sin(np.radians(gam))
oneoverdsq = (1.0 - ca ** 2 - cb ** 2 - cg ** 2 + 2 * ca * cb * cg) ** (-1) * \
((h * sa / a) ** 2 + (k * sb / b) ** 2 + (l * sg / c) ** 2
+ (2 * k * l / (b * c)) * (cb * cg - ca) + (2 * l * h / (c * a)) * (cg * ca - cb)
+ (2 * h * k / (a * b)) * (ca * cb - cg))
d = np.sqrt(1.0 / oneoverdsq)
return d
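# Illustrative check (approximate values, not used by the fit): for a cubic
# cell the general expression above reduces to d = a / sqrt(h^2 + k^2 + l^2).
def _demo_calcDspacing():
    '''Sketch only: diamond (111) with a = 3.56683 A gives d of about 2.06 A.'''
    return calcDspacing(3.56683, 3.56683, 3.56683, 90, 90, 90, 1, 1, 1)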
def genhkl(hmin, hmax, kmin, kmax, lmin, lmax):
'''
genhkl generates array of hkl values
total number of points will be (hmax-hmin+1)*(kmax-kmin+1)*(lmax-lmin+1)
'''
hvals = np.arange(hmin, hmax + 1, 1)
kvals = np.arange(kmin, kmax + 1, 1)
lvals = np.arange(lmin, lmax + 1, 1)
nh = len(hvals)
nk = len(kvals)
nl = len(lvals)
l = 0
hkl = np.zeros(shape=(nh * nl * nk, 3))
for i in range(nh):
for j in range(nk):
for k in range(nl):
hkl[l][0] = hvals[i]
hkl[l][1] = kvals[j]
hkl[l][2] = lvals[k]
l += 1
return hkl
def mod(a, b):
return a % b
def forbidden(h, k, l):
'''
%returns logical positive if this hkl is forbidden according to
% diamond reflection conditions....
'''
ah = abs(h)
ak = abs(k)
al = abs(l)
if ((h == 0) and (k == 0) and (l == 0)):
result = 1
boolresult = bool(result)
return boolresult
else:
result = 0
if ((ah == 2) and (ak == 2) and (al == 2)): # allowed, but vanishingly weak
result = 1
boolresult = bool(result)
return boolresult
else:
result = 0
# condition 1
if ((h != 0) and (k != 0) and (l != 0)): # general hkl
term1 = h + k
term2 = h + l # all have to be even
term3 = k + l
if not ((term1 % 2) == 0 and (term2 % 2) == 0 and (term3 % 2) == 0):
result = 1
boolresult = bool(result)
return boolresult
else:
result = 0
# % condition 2
if ((h == 0) and (k != 0) and (l != 0)): # 0kl reflections
term1 = k + l
mod4 = mod(term1, 4)
if not (mod4 == 0 and mod(k, 2) == 0 and mod(l, 2) == 0):
result = 1
boolresult = bool(result)
return boolresult
else:
result = 0
# condition 3
if (h == k): # hhl reflections
if not (mod(h + l, 2) == 0):
result = 1
boolresult = bool(result)
return boolresult
else:
result = 0
# condition 4
if ((h == 0) and (k == 0) and (l != 0)): # 00l reflections not including 000
mod4 = mod(l, 4)
if not (mod4 == 0):
result = 1
boolresult = bool(result)
return boolresult
else:
result = 0
boolresult = bool(result)
return boolresult
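# Illustrative check of the extinction rules above (values follow directly
# from the conditions coded in forbidden(), not from an external table):
def _demo_forbidden():
    '''Sketch only: (1,1,1) is allowed, (0,0,2) is forbidden (l not a multiple
    of 4), (0,0,4) is allowed and (2,2,2) is flagged because it is vanishingly
    weak. allowedDiamRefs() below also tests cyclic permutations, so e.g.
    (2,0,0) is rejected via forbidden(0,0,2).'''
    # expected: (False, True, False, True)
    return (forbidden(1, 1, 1), forbidden(0, 0, 2),
            forbidden(0, 0, 4), forbidden(2, 2, 2))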
def allowedDiamRefs(hmin, hmax, kmin, kmax, lmin, lmax):
'''
%allowedDiamRefs generates a list of allowed reflections for diamond between
% limits provided sorted descending according to d-spacing
'''
# obtain all hkl within limits...
allhkl = genhkl(hmin, hmax, kmin, kmax, lmin, lmax)
# now purge those violating extinction conditions...
n = len(allhkl)
# set all forbidden hkl's to zero
# hkl or lhk or klh
for i in range(n):
h = allhkl[i][0]
k = allhkl[i][1]
l = allhkl[i][2]
if forbidden(h, k, l) or forbidden(l, h, k) or forbidden(k, l, h):
allhkl[i] = 0 # set equal to zero
k = 0
d = [] # np.zeros(0)
# create new array with all h!=0 k!=0 l!=0
hkl = np.zeros(shape=(0, 3))
for i in range(n):
if not (allhkl[i][0] == 0 and allhkl[i][1] == 0 and allhkl[i][2] == 0):
hkl = np.vstack((hkl, [allhkl[i][0], allhkl[i][1], allhkl[i][2]]))
d.append(calcDspacing(3.56683, 3.56683, 3.56683, 90,
90, 90, hkl[k][0], hkl[k][1], hkl[k][2]))
k += 1
d = np.array(d)
# ORDER hkl according to d-spacing
B = sorted(d)[::-1] # returns d sorted in descending order
IX = np.argsort(d)[::-1] # and corresponding elements
sorthkl = np.zeros(shape=(k, 3))
for i in range(k):
sorthkl[i] = hkl[IX[i]]
d[i] = B[i]
# print('hkl: {0:0.3f} {1:0.3f} {2:0.3f} d-spacing: {3:0.3f} A'.format(sorthkl[i][0], sorthkl[i][1],
# sorthkl[i][2], d[i]))
return sorthkl
def getISAWub(fullfilename):
'''
%getISAWub reads UB determined by ISAW and stored in file "fname"
% Detailed explanation goes here
% [filename pathname ~] = ...
% uigetfile('*.dat','Choose UB file (generated by ISAW)');
% fullfilename = [pathname filename];
'''
try:
    f = open(fullfilename, "r")
except IOError:
    print(('Error opening file: ' + fullfilename))
    raise
lines = f.readlines()
f.close()
# Build UB matrix and lattice
UB = np.zeros(shape=(3, 3))
lattice = np.zeros(shape=(2, 6))
for i in range(3):
UB[i][0], UB[i][1], UB[i][2] = lines[i].split()
UB = UB.transpose()
for i in range(3, 5):
lattice[i - 3][0], lattice[i - 3][1], \
lattice[i - 3][2], lattice[i - 3][3], \
lattice[i - 3][4], lattice[i - 3][5], \
non = lines[i].split()
print('Successfully got UB and lattice')
return UB, lattice
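# Illustrative sketch of the file layout getISAWub() expects (the numbers
# below are made up for an ideal cubic diamond and the layout is an assumption
# read off the parser above: three UB rows, then lattice constants and their
# esds as "a b c alpha beta gamma volume"):
def _demo_getISAWub():
    import os
    import tempfile
    demo_text = ("0.2804 0.0 0.0\n"
                 "0.0 0.2804 0.0\n"
                 "0.0 0.0 0.2804\n"
                 "3.5668 3.5668 3.5668 90.0 90.0 90.0 45.38\n"
                 "0.001 0.001 0.001 0.01 0.01 0.01 0.05\n")
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.dat', delete=False)
    tmp.write(demo_text)
    tmp.close()
    UB_demo, lattice_demo = getISAWub(tmp.name)
    os.remove(tmp.name)
    return UB_demo, lattice_demo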
def pkintread(hkl, loc):
'''
%reads calculated Fcalc and converts to
%Fobs using Buras-Gerward Eqn.
%inputs are hkl(nref,3) and
% loc(nref,3), which contains, lambda, d-spacing and ttheta for
% each of the nref reflections.
% get Fcalcs for diamond, generated by GSAS (using lattice parameter 3.5668
% and Uiso(C) = 0.0038
% disp('in pkintread');
returns pkint = np. array - 1D vector
'''
# A = np.genfromtxt('diamond_reflist.csv', delimiter=',', skip_header=True)
# print A
A = np.array([[1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 8.00000000e+00,
2.06110000e+00, 5.54000000e+04],
[2.00000000e+00, 2.00000000e+00, 0.00000000e+00, 1.20000000e+01,
1.26220000e+00, 7.52000000e+04],
[3.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01,
1.07640000e+00, 2.98000000e+04],
[2.00000000e+00, 2.00000000e+00, 2.00000000e+00, 8.00000000e+00,
1.03060000e+00, 2.50000000e-25],
[4.00000000e+00, 0.00000000e+00, 0.00000000e+00, 6.00000000e+00,
8.92500000e-01, 4.05000000e+04],
[3.00000000e+00, 3.00000000e+00, 1.00000000e+00, 2.40000000e+01,
8.19000000e-01, 1.61000000e+04],
[4.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.40000000e+01,
7.28700000e-01, 2.18000000e+04],
[5.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01,
6.87000000e-01, 8.64000000e+03],
[3.00000000e+00, 3.00000000e+00, 3.00000000e+00, 8.00000000e+00,
6.87000000e-01, 8.64000000e+03],
[4.00000000e+00, 4.00000000e+00, 0.00000000e+00, 1.20000000e+01,
6.31100000e-01, 1.17000000e+04],
[5.00000000e+00, 3.00000000e+00, 1.00000000e+00, 4.80000000e+01,
6.03400000e-01, 4.65000000e+03],
[4.00000000e+00, 4.00000000e+00, 2.00000000e+00, 2.40000000e+01,
5.95000000e-01, 1.83000000e-12],
[6.00000000e+00, 2.00000000e+00, 0.00000000e+00, 2.40000000e+01,
5.64500000e-01, 6.31000000e+03],
[5.00000000e+00, 3.00000000e+00, 3.00000000e+00, 2.40000000e+01,
5.44400000e-01, 2.50000000e+03],
[6.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.40000000e+01,
5.38200000e-01, 8.80000000e-26],
[4.00000000e+00, 4.00000000e+00, 4.00000000e+00, 8.00000000e+00,
5.15300000e-01, 3.40000000e+03],
[5.00000000e+00, 5.00000000e+00, 1.00000000e+00, 2.40000000e+01,
4.99900000e-01, 1.35000000e+03],
[7.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01,
4.99900000e-01, 1.35000000e+03],
[6.00000000e+00, 4.00000000e+00, 2.00000000e+00, 4.80000000e+01,
4.77100000e-01, 1.83000000e+03],
[7.00000000e+00, 3.00000000e+00, 1.00000000e+00, 4.80000000e+01,
4.64800000e-01, 7.25000000e+02],
[5.00000000e+00, 5.00000000e+00, 3.00000000e+00, 2.40000000e+01,
4.64800000e-01, 7.25000000e+02],
[8.00000000e+00, 0.00000000e+00, 0.00000000e+00, 6.00000000e+00,
4.46200000e-01, 9.84000000e+02],
[7.00000000e+00, 3.00000000e+00, 3.00000000e+00, 2.40000000e+01,
4.36100000e-01, 3.90000000e+02],
[6.00000000e+00, 4.00000000e+00, 4.00000000e+00, 2.40000000e+01,
4.32900000e-01, 1.53000000e-13],
[6.00000000e+00, 6.00000000e+00, 0.00000000e+00, 1.20000000e+01,
4.20700000e-01, 5.30000000e+02],
[8.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.40000000e+01,
4.20700000e-01, 5.30000000e+02],
[5.00000000e+00, 5.00000000e+00, 5.00000000e+00, 8.00000000e+00,
4.12200000e-01, 2.10000000e+02],
[7.00000000e+00, 5.00000000e+00, 1.00000000e+00, 4.80000000e+01,
4.12200000e-01, 2.10000000e+02],
[6.00000000e+00, 6.00000000e+00, 2.00000000e+00, 2.40000000e+01,
4.09500000e-01, 1.98000000e-26],
[8.00000000e+00, 4.00000000e+00, 0.00000000e+00, 2.40000000e+01,
3.99100000e-01, 2.85000000e+02],
[7.00000000e+00, 5.00000000e+00, 3.00000000e+00, 4.80000000e+01,
3.91900000e-01, 1.13000000e+02],
[9.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01,
3.91900000e-01, 1.13000000e+02],
[8.00000000e+00, 4.00000000e+00, 2.00000000e+00, 4.80000000e+01,
3.89500000e-01, 4.44000000e-14],
[6.00000000e+00, 6.00000000e+00, 4.00000000e+00, 2.40000000e+01,
3.80600000e-01, 1.53000000e+02],
[9.00000000e+00, 3.00000000e+00, 1.00000000e+00, 4.80000000e+01,
3.74200000e-01, 6.08000000e+01],
[8.00000000e+00, 4.00000000e+00, 4.00000000e+00, 2.40000000e+01,
3.64400000e-01, 8.26000000e+01],
[9.00000000e+00, 3.00000000e+00, 3.00000000e+00, 2.40000000e+01,
3.58800000e-01, 3.27000000e+01],
[7.00000000e+00, 5.00000000e+00, 5.00000000e+00, 2.40000000e+01,
3.58800000e-01, 3.27000000e+01],
[7.00000000e+00, 7.00000000e+00, 1.00000000e+00, 2.40000000e+01,
3.58800000e-01, 3.27000000e+01]])
diamd = A[:, 4]
# diamMult = A[:, 3] # unused variable
diamFCalcSq = A[:, 5]
nref = hkl.shape[0]
# % disp(['there are: ' num2str(nref) ' reflections']);
# % whos loc
'''
% [i,j] = size(x);
% dipspec = zeros(i,j); %array containing dip spectrum
% difspec = zeros(i,j); %array containing diffraction spectrum
% d = x/sqrt(2); %dspacings for this lamda range at 90 degrees
% In order to calculate the scattered intensity I from the Fcalc^2, need to
% apply the Buras-Gerward formula:
%
% Fcalc^2 = I*2*sin(theta)^2/(lamda^2*A*E*incFlux*detEffic)
'''
pkint = np.zeros(nref)
for i in range(nref):
if loc[i][0] > 0:
# % satisfies Bragg condition (otherwise ignore)
Fsq = Fsqcalc(loc[i][1], diamd, diamFCalcSq)
# % Fsq = 1;
L = (np.sin(np.radians(loc[i][2] / 2.0))) ** 2 # Lorentz correction
R = 1.0 # %dipLam(i)^4; %reflectivity correction
A = 1.0 # %Absorption correction
Ecor = 1
pkint[i] = Fsq * R * A / (L * Ecor) # %scattered intensity
'''
% whos difspec
% whos van
% whos dipspec
% difspec = difspec.*van;
% dipspec = dipspec.*van;
% figure(1)
% plot(d,difspec)
'''
return pkint
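# Illustrative sketch (single made-up reflection, not from a refinement): the
# loc columns are lambda, d and 2theta as documented above; the returned
# intensity is Fcalc^2 for the nearest tabulated d divided by the Lorentz
# factor sin^2(theta), so here it comes out of order 1e5.
def _demo_pkintread():
    hkl_demo = np.array([[1., 1., 1.]])
    loc_demo = np.array([[2.38, 2.06, 70.5]])
    return pkintread(hkl_demo, loc_demo)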
def Fsqcalc(d, diamd, diamFCalcSq):
'''
% diamond reflections are identified according to their d-spacing
% and corresponding calculated Fsq are returned
% global sf111 sf220 sf311 sf400 sf331
'''
# n = len(diamd) # unused variable
ref = d
dif = abs(diamd - ref)
i = dif.argmin(0) # i is index of diamd closest to d
Fsq = diamFCalcSq[i]
return Fsq
def pkposcalc(hkl, UB, setang):
'''
% calculates some useful numbers from (ISAW calculated) UB
% hkl is a 2D array containing all hkl's
%
'''
ome = setang[0]
phi = setang[1]
chi = setang[2]
thkl = hkl.transpose()
Q = UB.dot(thkl)
Rx = np.array([[1, 0, 0], [0, np.cos(np.radians(ome)), -np.sin(np.radians(ome))],
[0, np.sin(np.radians(ome)), np.cos(np.radians(ome))]])
Ry = np.array([[np.cos(np.radians(phi)), 0, np.sin(np.radians(phi))], [0, 1, 0],
[-np.sin(np.radians(phi)), 0, np.cos(np.radians(phi))]])
Rz = np.array([[np.cos(np.radians(chi)), -np.sin(np.radians(chi)), 0],
[np.sin(np.radians(chi)), np.cos(np.radians(chi)), 0], [0, 0, 1]])
Rall = Rz.dot(Ry).dot(Rx) # all three rotations
# str = sprintf('initial: %6.4f %6.4f %6.4f',Q);
# disp(str)
Q = Rall.dot(Q)
magQ = np.sqrt((Q * Q).sum(axis=0))
'''
# str = sprintf('Scattering vector: %6.4f %6.4f %6.4f',Q);
# if show==1
# disp(str)
# end
% %calculate angle with incident beam i.e. (-1 0 0)
% beam = [1 0 0];
% alpha = acosd(dot(Q,beam)/norm(Q));
% str = sprintf('Angle scat. vect. to beam: %6.4f',alpha);
% if show==1
% disp(str)
% end
% beam = [0 1 0];
% alpha = acosd(dot(Q,beam)/norm(Q));
% str = sprintf('Angle scat. vect. to y: %6.4f',alpha);
% if show==1
% disp(str)
% end
% beam = [0 0 1];
% alpha = acosd(dot(Q,beam)/norm(Q));
% str = sprintf('Angle scat. vect. to z: %6.4f',alpha);
% if show==1
% disp(str)
% end
% Q is a vector pointing to the reciprocal lattice point corresponding to
% vector hkl. The coordinate system is in frame I that is right handed with x pointing along
% the beam direction and z vertical.
'''
d = (1.0 / magQ) # by definition (note ISAW doesn't use 2pi factor)
d = d.transpose()
'''
% In frame I the incident beam vector will be of the form [k 0 0]
% where k = 1/lambda
% by considering the scattering relation that Q=k_f-k_i, can show that the dot product of
% -k_i.Q gives the scattering angle 2theta, thus:
'''
ttheta = 180 - 2 * np.degrees(np.arccos(-Q[0, :] / magQ))
ttheta = ttheta.transpose()
# and Bragg's law gives:
lambda_1 = 2 * d * np.sin(np.radians(ttheta / 2))
lambda_1 = lambda_1.transpose()
'''
%
% str = sprintf('for hkl: %3i%3i%3i',hkl(1),hkl(2),hkl(3));
% disp(str)
% str = sprintf('d-spacing is: %6.4f',d);
% disp(str)
% str = sprintf('ttheta is: %6.4f',ttheta);
% disp(str)
% str = sprintf('lambda is: %6.4f',lambda);
% disp(str)
'''
return lambda_1, d, ttheta
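# Illustrative sketch (the UB below is an assumption, not a refined matrix):
# for an unrotated cubic diamond the ISAW-convention UB is roughly
# identity / a. For the (-1,-1,-1) reflection this yields d of about 2.06 A,
# 2theta of about 70.5 deg and, via Bragg's law, lambda of about 2.38 A.
def _demo_pkposcalc():
    UB_demo = np.eye(3) / 3.56683
    hkl_demo = np.array([[-1., -1., -1.]])
    return pkposcalc(hkl_demo, UB_demo, np.zeros(3))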
def getMANTIDdat_keepbinning(csvfile):
'''
getMANTIDdat reads data from mantid "SaveAscii" output
% input file name should be 'csvfilename'.csv
% data are returned with binning (xmin:xbin:xmax)
returns TOF, y , e
'''
fid = open(csvfile, "r")
lines = fid.readlines()
x = []
y = []
e = []
if fid < 0:
print(('Error opening file: ' + csvfile))
for i in range(1, len(lines)):
a, b, c = lines[i].split(",")
x.append(float(a))
y.append(float(b))
e.append(float(c))
fid.close()
x = np.array(x)
y = np.array(y)
e = np.array(e)
return x, y, e
def findeqvs(hkl):
'''
FINDEQVS runs through array of hkls and labels those that are equivalent
%in the m-3m point group.
%
% there are n reflections.
% hkl has dimensions nx3
% eqvlab has dimensions nx1
'''
n, m = hkl.shape
eqvlab = np.zeros(n)
lab = 1
for i in range(n):
if eqvlab[i] == 0: # then it's not been checked yet, so check it
eqvlab[i] = lab
refhkl = np.array([abs(hkl[i][0]), abs(hkl[i][1]), abs(hkl[i][2])])
for j in range(i + 1, n): # check remaining indices
comphkl = np.array(
[abs(hkl[j][0]), abs(hkl[j][1]), abs(hkl[j][2])])
# all possible permutations
permcomphkl = list(itt.permutations(comphkl))
nperm = len(permcomphkl)
for k in range(nperm):
if refhkl[0] == permcomphkl[k][0] and refhkl[1] == permcomphkl[k][1] and \
refhkl[2] == permcomphkl[k][2]:
eqvlab[j] = lab
lab += 1
return eqvlab, lab
def showx3(x):
'''
%showx displays all parameters for refinement in reasonably intelligible
%form
Input : parameter vector and the sets of hkl indices for the diamonds
'''
global hkl1, hkl2
global UB1, pkcalcint1
global UB2, pkcalcint2
global pktype
global lam, y, e, TOF
global L1
global ttot
global fxsamediam
global neqv1, eqvlab1, neqv2, eqvlab2
global difa, function_verbose
# nref1 = hkl1.shape[0] # % number of reflections to integrate over # unused variable
# nref2 = hkl2.shape[0] # % number of reflections to integrate over # unused variable
# % returns array with same dim as input labelling equivs
eqvlab1, neqv1 = findeqvs(hkl1)
eqvlab2, neqv2 = findeqvs(hkl2)
setang1 = x[0:3]
pkmult1 = x[3:4 + neqv1 - 1]
setang2 = x[4 + neqv1 - 1:6 + neqv1]
pkmult2 = x[6 + neqv1:7 + neqv1 + neqv2 - 1]
sf = x[neqv1 + neqv2 + 7 - 1]
pkwid1 = x[neqv1 + neqv2 + 8 - 1]
# bgd = x[neqv1 + neqv2 + 8 - 1:neqv1 + neqv2 + 9 + 2 - 1] # unused variable
pkwid2 = x[neqv1 + neqv2 + 10]
# % if diamond intensities the same, allow single scale f
relsf = x[neqv1 + neqv2 + 11]
delam = x[neqv1 + neqv2 + 12]
L2 = x[neqv1 + neqv2 + 13]
print('_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/\n')
print(('Setting angles diam {0} : \nalp {1} bet {2} gam {3} \n'.format(
1, setang1[0], setang1[1], setang1[2])))
print(('pkmult1: {0}\n'.format(pkmult1)))
print(('Setting angles diam {0} : \nalp {1} bet {2} gam {3} \n'.format(
2, setang2[0], setang2[1], setang2[2])))
print(('pkmult2: {0}\n'.format(pkmult2)))
print(('Scale factor: {0}\n'.format(sf)))
print(('pkwid1: {0}\n'.format(pkwid1)))
print(('pkwid2: {0}\n'.format(pkwid2)))
print(('Rel. scale factor : {0}\n'.format(relsf)))
print(('Lambda multiplier: {0}\n'.format(delam)))
print(('L2 sample to detector: {0} m\n'.format(L2)))
print('_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/\n')
def SimTransOutput3(name, x):
'''
%SimTrans calculates transmission spectrum from two crystals
% lam - array containing wavelengths to calc over
% hkl - contains all Nref hkl's that calculation is performed for
% bgd - array containing coefficients of polynomial for background
% sf - overall scale factor
% pktype - 1 = gauss; 2 = lorentz; ...
% UB1 - UB matrix for first crystal
% setang1 - setting angles for first crystal (deviations from ideal UB
% location).
% pkpars1 - position(lambda), position(d-spacing), position(ttheta), width, intensity for each Nref reflections
% UB2,setang2,pkpars2 - as above, for second crystal
%
% M. Guthrie 21st Jan 2014
%
% calculate background profile
% determine number of coeffs bgd
'''
global hkl1, hkl2
global UB1, pkcalcint1
global UB2, pkcalcint2
global pktype
global lam, y, e, TOF
global L1
global ttot
global fxsamediam
global neqv1, eqvlab1, neqv2, eqvlab2
global difa, function_verbose
global figure_name_attenuation, run_number
nref1 = hkl1.shape[0] # % number of reflections to integrate over
nref2 = hkl2.shape[0] # % number of reflections to integrate over
# % returns array with same dim as input labelling equivs
eqvlab1, neqv1 = findeqvs(hkl1)
eqvlab2, neqv2 = findeqvs(hkl2)
setang1 = x[0:3]
pkmult1 = x[3:4 + neqv1 - 1]
setang2 = x[4 + neqv1 - 1:6 + neqv1]
sf = x[neqv1 + neqv2 + 7 - 1]
pkwid1 = x[neqv1 + neqv2 + 7]
bgd = x[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1]
pkwid2 = x[neqv1 + neqv2 + 10]
# % if diamond intensities the same, allow single scale f
relsf = x[neqv1 + neqv2 + 11]
if fxsamediam == 1:
x[6 + neqv1:7 + neqv1 + neqv2 - 1] = x[3:4 + neqv2 - 1] * relsf
pkmult2 = x[6 + neqv1:7 + neqv1 + neqv2 - 1]
else:
pkmult2 = x[6 + neqv1:7 + neqv1 + neqv2 - 1]
delam = x[neqv1 + neqv2 + 12]
L2 = x[neqv1 + neqv2 + 13]
shftlam = 0.0039558 * TOF / (L1 + L2) + difa * (TOF ** 2)
# number of lambda points to calculate over
npt = shftlam.shape[0]
# calculate information for peaks for crystal 1 using hkl,UB1, setang,
# pkpos
a, b, c = pkposcalc(hkl1, UB1, setang1)
pkpars1 = np.column_stack((a, b, c))
# calculate information for peaks for crystal 2 using hkl,UB1, setang,
# pkpos
a, b, c = pkposcalc(hkl2, UB2, setang2)
pkpars2 = np.column_stack((a, b, c))
# generate nptx,nco array containing, x^0,x^1,x^2,...x^nco for
# all nonzero background coefficients
bgdco = np.where(bgd != 0)[0]
nco = bgdco.shape[0]
nonzerobgd = np.zeros(nco)
X = np.ones(shape=(nco, npt))
for i in range(nco):
X[i, :] = shftlam ** (bgd[bgdco[i]] - 1)
nonzerobgd[i] = bgd[bgdco[i]]
# calculate background profile by multiplying this with coefficients
# themselves
bgdprof = nonzerobgd.dot(X)
# bgdprof = np.outer(nonzerobgd, X)
# print bgdprof
# bgdprof = bgdprof[0, :]
# calculate peaks for crystal 1
t1 = np.zeros(npt) # initialise array containing profile
for i in range(nref1):
if pktype == 1:
pkpars1[i][0] = pkpars1[i][0] * delam # linear lambda shift
sig = pkwid1 * pkpars1[i][0] + pkwid2 * (pkpars1[i][0] ** 2.) # const del(lambda)/lambda
extScl = pkpars1[i][0] ** 0 # lambda dependent extinction effect
t1 = t1 - extScl * pkmult1[int(eqvlab1[i])] * pkcalcint1[i] * (
np.exp(-((shftlam - pkpars1[i][0]) ** 2.) / (2 * (sig ** 2))))
# calculate peaks for crystal 2
t2 = np.zeros(npt) # initialise array containing profile
for i in range(nref2):
if pktype == 1:
pkpars2[i][0] = pkpars2[i][0] * delam # linear lambda shift
sig = pkwid1 * pkpars2[i][0] + pkwid2 * (pkpars2[i][0] ** 2.) # const del(lambda)/lambda
extScl = pkpars2[i][0] ** 0 # lambda dependent extinction effect
t2 = t2 - extScl * pkmult2[int(eqvlab2[i])] * pkcalcint2[i] * (
np.exp(-(shftlam - pkpars2[i][0]) ** 2. / (2 * (sig ** 2))))
# calculate final profile
ttot = (bgdprof + sf * t1) * (bgdprof + sf * t2)
# t1 = 1.0;
# t2 = 1.0;
# introduce weighting function and calc chi2...
w = np.ones(len(shftlam)) # equal weighting everywhere
# i1 = np.where(shftlam > 2.15)[0][0]
# j1 = np.where(shftlam > 2.65)[0][0]
# w[i1:j1] = 5 #extra weighting in region of first peaks
# i1 = find(lam>1.68,1,'first');
# j1 = find(lam>2.05,1,'first');
# w(i1:j1)=5; %extra weighting but not too much
resid = (y - ttot) * w
chi2 = np.sum(resid ** 2. / (2 * e ** 2)) / npt
output = 1
if output == 1:
diam1trans = sf * t1 + bgdprof
diam2trans = sf * t2 + bgdprof
out = np.column_stack((shftlam, diam1trans, diam2trans, ttot, y))
np.savetxt(name, out, delimiter=',')
figure_name_attenuation = 'Attenuation ' + run_number
plt.figure(figure_name_attenuation)
plt.plot(shftlam, ttot, 'r', label='Total att.')
plt.plot(shftlam, diam1trans, 'k', label='Diam 1 att.')
plt.plot(shftlam, diam2trans, 'b', label='Diam 2 att.')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
plt.xlabel('Wavelength (A)')
plt.ylabel('Transmission')
plt.grid()
for i in range(len(pkpars1)):
plt.arrow(pkpars1[i, 0] * delam, 1.1, 0.0, 0.025,
fc="k", ec="k", head_width=0, head_length=0)
for i in range(len(pkpars2)):
plt.arrow(pkpars2[i, 0] * delam, 1.15, 0.0, 0.025,
fc="k", ec="k", head_width=0, head_length=0)
plt.xlim(1, 2.7)
plt.ylim(0, 1.2)
plt.show()
return chi2
def SimTrans3(x):
'''
%SimTrans calculates transmission spectrum from two crystals
% lam - array containing wavelengths to calc over
% hkl - contains all Nref hkl's that calculation is performed for
% bgd - array containing coefficients of polynomial for background
% sf - overall scale factor
% pktype - 1 = gauss; 2 = lorentz; ...
% UB1 - UB matrix for first crystal
% setang1 - setting angles for first crystal (deviations from ideal UB
% location).
% pkpars1 - position(lambda), position(d-spacing), position(ttheta), width, intensity for each Nref reflections
% UB2,setang2,pkpars2 - as above, for second crystal
%
% M. Guthrie 21st Jan 2014
%
% calculate background profile
% determine number of coeffs bgd
%
% version 2 constrains intensities for equivalent dips to be the same
% M. Guthrie 3 April 2014
%
% M. Guthrie 7 April 2014, realised I was making an (obvious) mistake by
% adding the transmissions from the two diamonds. Clearly, they should be
% multiplied. I've implemented the change...will see what difference it
% makes.
%
% M. Guthrie 9 April 2014, introduced possibility to refine L2 and also a
% scale factor for calculated dip wavelengths (to account for diamond
% compressibility).
'''
global hkl1, hkl2
global UB1, pkcalcint1
global UB2, pkcalcint2
global pktype
global lam, y, e, TOF
global L1
global ttot
global fxsamediam
global neqv1, eqvlab1, neqv2, eqvlab2
global difa, function_verbose
nref1 = hkl1.shape[0] # % number of reflections to integrate over
nref2 = hkl2.shape[0] # % number of reflections to integrate over
# % returns array with same dim as input labelling equivs
eqvlab1, neqv1 = findeqvs(hkl1)
eqvlab2, neqv2 = findeqvs(hkl2)
setang1 = x[0:3]
pkmult1 = x[3:4 + neqv1 - 1]
setang2 = x[4 + neqv1 - 1:6 + neqv1]
sf = x[neqv1 + neqv2 + 7 - 1]
pkwid1 = x[neqv1 + neqv2 + 7]
bgd = x[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1]
pkwid2 = x[neqv1 + neqv2 + 10]
# % if diamond intensities the same, allow single scale f
relsf = x[neqv1 + neqv2 + 11]
if fxsamediam == 1:
x[6 + neqv1:7 + neqv1 + neqv2 - 1] = x[3:4 + neqv2 - 1] * relsf
pkmult2 = x[6 + neqv1:7 + neqv1 + neqv2 - 1]
else:
pkmult2 = x[6 + neqv1:7 + neqv1 + neqv2 - 1]
delam = x[neqv1 + neqv2 + 12]
L2 = x[neqv1 + neqv2 + 13]
shftlam = 0.0039558 * TOF / (L1 + L2) + difa * (TOF ** 2)
# number of lambda points to calculate over
npt = shftlam.shape[0]
# calculate information for peaks for crystal 1 using hkl,UB1, setang,
# pkpos
a, b, c = pkposcalc(hkl1, UB1, setang1)
pkpars1 = np.column_stack((a, b, c))
# calculate information for peaks for crystal 2 using hkl,UB1, setang,
# pkpos
a, b, c = pkposcalc(hkl2, UB2, setang2)
pkpars2 = np.column_stack((a, b, c))
# generate nptx,nco array containing, x^0,x^1,x^2,...x^nco for
# all nonzero background coefficients
bgdco = np.where(bgd != 0)[0]
nco = bgdco.shape[0]
nonzerobgd = np.zeros(nco)
X = np.ones(shape=(nco, npt))
for i in range(nco):
X[i, :] = shftlam ** (bgd[bgdco[i]] - 1)
nonzerobgd[i] = bgd[bgdco[i]]
# calculate background profile by multiplying this with coefficients
# themselves
bgdprof = nonzerobgd.dot(X)
# bgdprof = np.outer(nonzerobgd, X)
# print bgdprof
# bgdprof = bgdprof[0, :]
# calculate peaks for crystal 1
t1 = np.zeros(npt) # initialise array containing profile
for i in range(nref1):
if pktype == 1:
pkpars1[i][0] = pkpars1[i][0] * delam # linear lambda shift
sig = pkwid1 * pkpars1[i][0] + pkwid2 * (pkpars1[i][0] ** 2.) # const del(lambda)/lambda
extScl = pkpars1[i][0] ** 0 # lambda dependent extinction effect
t1 = t1 - extScl * pkmult1[int(eqvlab1[i])] * pkcalcint1[i] * (
np.exp(-((shftlam - pkpars1[i][0]) ** 2.) / (2 * (sig ** 2))))
# calculate peaks for crystal 2
t2 = np.zeros(npt) # initialise array containing profile
for i in range(nref2):
if pktype == 1:
pkpars2[i][0] = pkpars2[i][0] * delam # linear lambda shift
sig = pkwid1 * pkpars2[i][0] + pkwid2 * (pkpars2[i][0] ** 2.) # const del(lambda)/lambda
extScl = pkpars2[i][0] ** 0 # lambda dependent extinction effect
t2 = t2 - extScl * pkmult2[int(eqvlab2[i])] * pkcalcint2[i] * (
np.exp(-(shftlam - pkpars2[i][0]) ** 2. / (2 * (sig ** 2))))
# calculate final profile
ttot = (bgdprof + sf * t1) * (bgdprof + sf * t2)
# t1 = 1.0;
# t2 = 1.0;
# introduce weighting function and calc chi2...
w = np.ones(len(shftlam)) # equal weighting everywhere
# i1 = np.where(shftlam > 2.15)[0][0]
# j1 = np.where(shftlam > 2.65)[0][0]
# w[i1:j1] = 5 #extra weighting in region of first peaks
# i1 = find(lam>1.68,1,'first');
# j1 = find(lam>2.05,1,'first');
# w(i1:j1)=5; %extra weighting but not too much
resid = (y - ttot) * w
chi2 = np.sum(resid ** 2. / (2 * e ** 2)) / npt
# Print if the user wants verbose minimization
if function_verbose == 'y':
print(('Chi^2 ... ' + str(chi2)))
return chi2
def FitTrans():
'''
Main part of the program
'''
global hkl1, hkl2
global UB1, pkcalcint1
global UB2, pkcalcint2
global pktype
global lam, y, e, TOF
global L1
global ttot
global fxsamediam
global neqv1, eqvlab1, neqv2, eqvlab2
global difa, function_verbose
global run_number
# Customize constraints
cnstang = 1 # if set equal to one, setting angles will be constrained between
# limits defined by anglim1 and anglim2.
anglim1 = 1.0 # if cnstang == 1, setting angles for D1 can only move by +/- this amount
anglim2 = 1.0 # if cnstang == 1, setting angles for D2 can only move by +/- this amount
fxsamediam = 1 # ==1 fix intensities for given hkl to be identical for both diamonds
fixmult = 0 # if ==1 peak multipliers are fixed during refinement
initL2 = 0.340 # m dist from centre of instrument to transmission det
delinitL2 = 0.005 # m variation in det position allowed within refinement
difa = -1e-10 # of order e-10
function_verbose = 'n'
# constraint notifications
if fxsamediam == 1:
print('*diamonds constrained to have same relative dip intensity*\n')
else:
print('*diamonds allowed to have different dip intensities!*')
if cnstang == 1:
print((
'*Diam {0} setting angles constrained to range of +/- {1} about their current values*'.format(1, anglim1)))
print((
'*Diam {0} setting angles constrained to range of +/- {1} about their current values*'.format(2, anglim2)))
else:
print('no constraint on setting angles')
if fixmult == 1:
print('*intensity multipliers fixed*')
# Get Input Files...
run_number_UB = str(input('Name of run from which UB matrices were generated: '))
#peaks_file = str(input('Name of file containing diamond peaks: '))
run_number = str(input('Input run number for transmission data: '))
# Build input filenames
fullfilename_ub1 = str(run_number_UB) + 'UB1.dat'
fullfilename_ub2 = str(run_number_UB) + 'UB2.dat'
fullfilename_trans = str(run_number) + 'trns.csv'
# get both UB's
#UB1, UB2 = UBMG.UBMatrixGen(peaks_file)
# [filename pathname ~] = ...
# uigetfile('*.dat','Choose UB matrix for upstream diamond:');
# fullfilename = [pathname filename];
# fullfilename_ub1 = 'snap13108UB1.dat'
UB1, remainder = getISAWub(fullfilename_ub1)
# [filename pathname ~] = ...
# uigetfile('*.dat','Choose UB matrix for downstream diamond:');
# fullfilename = [pathname filename];
# fullfilename_ub2 = 'snap13108UB2.dat'
UB2, remainder = getISAWub(fullfilename_ub2)
# get transmission data...
# [filename,pathname,~] = ...
# uigetfile('*.csv','Choose transmission datafile:');
# fullfilename = [pathname filename];
#fullfilename_trans = 'transNorm13148.csv'
TOF, yin, ein = getMANTIDdat_keepbinning(fullfilename_trans)
print(('Starting refinement for: ' + fullfilename_trans))
# set-up simulation
L1 = 15.0 # m dist to centre of instrument in m
# global initial conditions
sf = 1
pktype = 1 # 1 = Gaussian, only current working peaktype
pkwid = 0.003 # peak width 'sig' is quadratic in lamda
pkwid2 = 2e-4 # sig = pkwid*lam+pkwid2*lam^2
#####################
# Start work...
#####################
# rebin transmission data
lam = 0.0039558 * TOF / (L1 + initL2)
print(('wavelength limits: ' + str(lam[0]) + ' and ' + str(lam[len(lam) - 1])))
minlam = 0.8
maxlam = 3.5
imin = np.where(lam >= minlam)[0][0]
imax = np.where(lam >= maxlam)[0][0]
lam = lam[imin:imax + 1]
TOF = TOF[imin:imax + 1] # this will be the TOF range used in fit
y = yin[imin:imax + 1]
e = ein[imin:imax + 1]
bgd = np.array([1.0, 0.0])
# generate all allowed diamond hkls:
allhkl = allowedDiamRefs(-7, 7, -7, 7, -7, 7)
# initial conditions for crystal 1
setang1 = np.zeros(3)
# setang1[1:3] = 0.0 # rotation angles applied to refined UB
# use these to calculate resulting peak positions in wavelength
# pkpars1(:,1) is lambda
# pkpars1(:,2) is d-spacing
# pkpars1(:,3) is is 2theta
a, b, c = pkposcalc(allhkl, UB1, setang1)
pkpars1 = np.column_stack((a, b, c))
# initial conditions for crystal 2
setang2 = np.zeros(3)
# setang2[1:3][0] = 0.0
a, b, c = pkposcalc(allhkl, UB2, setang2)
pkpars2 = np.column_stack((a, b, c))
# purge all reflections that don't satisfy the Bragg condition and that are
# out of wavelength calculation range...
laminlim = lam[0]
lamaxlim = lam[len(lam) - 1]
nref = len(allhkl)
k1 = 0
k2 = 0
hkl1 = np.zeros(shape=(0, 3))
hkl2 = np.zeros(shape=(0, 3))
for i in range(nref):
if laminlim <= pkpars1[i][0] <= lamaxlim: # reflection in range
hkl1 = np.vstack([hkl1, allhkl[i]])
k1 += 1
if laminlim <= pkpars2[i][0] <= lamaxlim: # reflection in range
hkl2 = np.vstack([hkl2, allhkl[i]])
k2 += 1
print(('There are: ' + str(k1) + ' expected dips due to Crystal 1'))
print(('There are: ' + str(k2) + ' expected dips due to Crystal 2'))
# determine equivalents
# returns array with same dim as input labelling equivs
eqvlab1, neqv1 = findeqvs(hkl1)
eqvlab2, neqv2 = findeqvs(hkl2)
# pkpars1 = np.zeros(shape=(k, 6)) #empty array
a, b, c = pkposcalc(hkl1, UB1, setang1)
pkpars1 = np.column_stack((a, b, c))
# Calculated ref intensities
pkcalcint1 = pkintread(hkl1, (pkpars1[:, 0:3]))
pkcalcint1 *= 1e-6
pkmult1 = np.ones(neqv1) # intensity multiplier for each group of equivs
# pkpars2 = np.zeros(shape=(l, 6)) #empty array
a, b, c = pkposcalc(hkl2, UB2, setang2)
pkpars2 = np.column_stack((a, b, c))
# Calculated ref intensities
pkcalcint2 = pkintread(hkl2, (pkpars2[:, 0:3]))
pkcalcint2 *= 1e-6
pkmult2 = np.ones(neqv2) # peak intensity multiplier
relsf = 1.0 # default value
delam = 1.0
L2 = initL2
tbgd = bgd
# Either generate, or read variable array from file
# This is one big array with all the parameters to be refined in it.
prevf = str(input('Look for pars from a previous run ([y]/n)? '))
if prevf == 'n':
x0 = np.hstack((setang1, pkmult1, setang2, pkmult2, sf,
pkwid, tbgd, pkwid2, relsf, delam, L2))
else:
# choose which file to use
parfilename = str(input('Choose file with starting pars: '))
parfullfilename = parfilename
x0 = dlmread(parfullfilename)
tog = str(input('Got parameters from: \n' + parfilename + '\nUse these ([y]/n)?'))
if tog == 'n':
x0 = np.hstack((setang1, pkmult1, setang2, pkmult2,
sf, pkwid, tbgd, pkwid2, relsf, delam, L2))
print('discarding pars from previous run')
print((str(len(x0)) + ' parameters will be refined'))
nvar = len(x0)
print(('number of variables: ' + str(nvar)))
# nref1 = hkl1.shape[0] # unused variable
# nref2 = hkl2.shape[0] # unused variable
# need to apply correction in the case that pars from previous run had
# fxsamediam==1 and current run also has fxsamediam==1
# to avoid a double multiplication by relsf
if fxsamediam == 1 and x0[neqv1 + neqv2 + 11] != 1:
x0[6 + neqv1:7 + neqv1 + neqv2 - 1] = x0[3:4 + neqv2 - 1] / x0[neqv1 + neqv2 + 11]
print(('Diam 2 peak multipliers reset: ' + str(x0[neqv1 + neqv2 + 11])))
# check starting point
chi2 = SimTrans3(x0)
fig_name_start = 'Starting point ' + run_number
plt.figure(fig_name_start)
plt.plot(lam, y, label='Observed')
plt.plot(lam, ttot, label='Calculated')
plt.plot(lam, (y - ttot), label='Residual')
plt.xlabel('Wavelength (A)')
plt.ylabel('Transmission')
plt.grid()
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
plt.show()
print(('Initial chi^2 is: ' + str(chi2)))
showx3(x0)
# Prepare minimization of chi^2 for the calculated profile
# Set-up default constraints...
# inequalities
A = np.zeros(len(x0))
A[0:3] = 0 # setang1 *no constraint
A[3:4 + neqv1 - 1] = -1.0 # pkmult1 Contrains intensities to be positive
A[4 + neqv1 - 1:6 + neqv1] = 0.0 # setang2 *no constraint
A[6 + neqv1:7 + neqv1 + neqv2 - 1] = -1.0 # pkmult2
A[6 + neqv1 + neqv2] = -1.0 # sf Scale factor must be +ve
A[7 + neqv1 + neqv2] = -1.0 # pkwid peak width must be +ve
A[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1] = 0.0 # bgd *no constraint
A[(neqv1 + neqv2 + 10)] = 0.0 # *no constraint
A[(neqv1 + neqv2 + 11)] = 0.0 # *no constraint
A[(neqv1 + neqv2 + 12)] = 0.0 # *no constraint
A[(neqv1 + neqv2 + 13)] = 0.0 # *no constraint
# equalities
Aeq = np.zeros(len(x0))
Aeq[0:3] = 0.0 # setang1
Aeq[3:4 + neqv1 - 1] = 0.0 # pkmult1
Aeq[4 + neqv1 - 1:6 + neqv1] = 0.0 # setang2
Aeq[6 + neqv1:7 + neqv1 + neqv2 - 1] = 0.0 # pkmult2
Aeq[6 + neqv1 + neqv2] = 0.0 # sf
Aeq[7 + neqv1 + neqv2] = 0.0 # pkwid
Aeq[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1] = 0 # unfixed bgd
Aeq[neqv1 + neqv2 + 10] = 0
Aeq[neqv1 + neqv2 + 11] = 0
Aeq[neqv1 + neqv2 + 12] = 0
Aeq[neqv1 + neqv2 + 13] = 0
# beq = 0 # unused variable
# lower bounds
lb = np.zeros(len(x0))
lb[0:3] = -10 # setang1
lb[3:4 + neqv1 - 1] = 0.5 # pkmult1
lb[4 + neqv1 - 1:6 + neqv1] = -10 # setang2
lb[6 + neqv1:7 + neqv1 + neqv2 - 1] = 0.5 # pkmult2
lb[6 + neqv1 + neqv2] = 0.0 # sf
lb[7 + neqv1 + neqv2] = 0.0005 # pkwid
lb[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1] = [0.995, -0.0005] # bgd
lb[neqv1 + neqv2 + 10] = 0.5e-4 # 2nd order pkwid
lb[neqv1 + neqv2 + 11] = 0.9 # rel scale factor must be positive
lb[neqv1 + neqv2 + 12] = 0.9 # min lambda shift
# (m) min L2 dist sample to d/stream detector
lb[neqv1 + neqv2 + 13] = initL2 - delinitL2
# upper bounds
ub = np.zeros(len(x0))
ub[0:3] = 10 # setang1
ub[3:4 + neqv1 - 1] = 50 # pkmult1
ub[4 + neqv1 - 1:6 + neqv1] = 10 # setang2
ub[6 + neqv1:7 + neqv1 + neqv2 - 1] = 50 # pkmult2
ub[6 + neqv1 + neqv2] = 50 # sf
ub[7 + neqv1 + neqv2] = 0.01 # pkwid
ub[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1] = [1.005, 0.0005] # bgd
ub[neqv1 + neqv2 + 10] = 1.0e-2 # 2nd order pkwid
# diamond shouldn't be more than 2 times bigger!
ub[neqv1 + neqv2 + 11] = 1.1
ub[neqv1 + neqv2 + 12] = 1.1 # max lambda shift
# (m) max L2 dist sample to d/stream detector
ub[neqv1 + neqv2 + 13] = initL2 + delinitL2
# Customize constraints
if cnstang == 1:
# diamond 1
lb[0] = x0[0] - anglim1
lb[1] = x0[1] - anglim1
lb[2] = x0[2] - anglim1
ub[0] = x0[0] + anglim1
ub[1] = x0[1] + anglim1
ub[2] = x0[2] + anglim1
# diamond 2
lb[3 + neqv1] = x0[3 + neqv1] - anglim2
lb[4 + neqv1] = x0[4 + neqv1] - anglim2
lb[5 + neqv1] = x0[5 + neqv1] - anglim2
ub[3 + neqv1] = x0[3 + neqv1] + anglim2
ub[4 + neqv1] = x0[4 + neqv1] + anglim2
ub[5 + neqv1] = x0[5 + neqv1] + anglim2
if fixmult == 1:
lb[3:4 + neqv1 - 1] = x0[3:4 + neqv1 - 1] - 0.01
lb[6 + neqv1:7 + neqv1 + neqv2 - 1] = x0[6 + neqv1:7 + neqv1 + neqv2 - 1] - 0.01
ub[3:4 + neqv1 - 1] = x0[3:4 + neqv1 - 1] + 0.01
ub[6 + neqv1:7 + neqv1 + neqv2 - 1] = x0[6 + neqv1:7 + neqv1 + neqv2 - 1] + 0.01
prompt = str(input('Enter anything to begin refinement...'))
print('Refining...\nMight take quite a long time...')
max_number_iterations = int(
input('Maximum number of iterations for minimization: '))
function_verbose = str(input('Verbose minimization ([y]/n): '))
# make dictionary holding constraints for minimization
# equalities (all must equal 0) and inequalities
cons = []
for i in range(len(x0)):
# bind i as a default argument so each constraint keeps its own index
# (a plain closure would capture only the final value of i)
cons.append({'type': 'ineq', 'fun': lambda x, i=i: -A[i] * x[i]})
cons = tuple(cons)
# bounds have to be list of tuples with (lower, upper) for each parameter
bds = np.vstack((lb, ub)).T
res = sp.minimize(SimTrans3, x0, method='SLSQP', bounds=bds, constraints=cons,
options={'disp': True, 'maxiter': max_number_iterations})
# tolerance limits to put in minimization if you want so : 'ftol': 0.001
x = np.array(res.x)
#
# minimisation...
#
# figure(2)
# options = optimoptions(@fmincon,'Algorithm','interior-point', 'Display','off', 'MaxFunEvals',10000*nvar,'PlotFcns'
# @optimplotfval, 'MaxIter',4000)
# x, fval, exitflag, output = fmincon(@SimTrans3,x0,A,b,[],[],Aeq beq
# lb,ub,[],options)
# necessary to update these here...
if fxsamediam == 1:
# set peak parameters for second diamond to equal those of first
# but scaled by relsf
# len(x)
# neqv1+neqv2+11
# x[neqv1+neqv2+11]
x[6 + neqv1:7 + neqv1 + neqv2 - 1] = x[3:4 + neqv2 - 1] * x[neqv1 + neqv2 + 11]
print(('Diam 2 peak multipliers reset with factor: ' + str(x[neqv1 + neqv2 + 11])))
else:
# label ensuring I know that run did not use fxsamediam
x[neqv1 + neqv2 + 11] = 1.0
print('AFTER REFINEMENT')
showx3(x)
####
# output final information
####
# calculate chi2 for best fit
chi2 = SimTrans3(x)
print(('Final Chi2 = ' + str(chi2)))
# determine output wavelength range using refined L2 value
# lamscale = x[neqv1 + neqv2 + 12] # unused variable
L2 = x[neqv1 + neqv2 + 13]
outlam = 0.0039558 * TOF / (L1 + L2) + difa * (TOF ** 2)
fig_name_final = 'Final result ' + run_number
plt.figure(fig_name_final)
plt.plot(outlam, y, 'k', label='Observed')
plt.plot(outlam, ttot, 'r', label='Calculated')
plt.plot(outlam, (y - ttot), 'b', label='Final residuals')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
plt.text(2.1, 0.5, 'CHI^2=' + str(chi2))
plt.grid()
for i in range(len(pkpars1)):
plt.arrow(pkpars1[i, 0] * delam, 1.1, 0.0, 0.025,
fc="k", ec="k", head_width=0, head_length=0)
for i in range(len(pkpars2)):
plt.arrow(pkpars2[i, 0] * delam, 1.15, 0.0, 0.025,
fc="k", ec="k", head_width=0, head_length=0)
plt.xlim(1.0, 2.7)
plt.ylim(ymax=1.2)
plt.xlabel('Wavelength (A)')
plt.ylabel('Transmission')
plt.show()
prompt = str(input('output best fit to file ([y]/n): '))
if prompt == 'n':
print('Ending')
else:
fitparname = str(run_number) + '.best_fit_pars3.dat'
np.savetxt(fitparname, x, delimiter=',')
print(('output parameters written to file: \n' + fitparname))
ofilename = str(run_number) + '.fitted3.dat'
SimTransOutput3(ofilename, x) # generate output file with fitted data
if __name__ == "__main__":
FitTrans()
| gpl-3.0 |
kaiyuanl/gem5 | util/stats/barchart.py | 90 | 12472 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Lisa Hsu
import matplotlib, pylab
from matplotlib.font_manager import FontProperties
from matplotlib.numerix import array, arange, reshape, shape, transpose, zeros
from matplotlib.numerix import Float
from matplotlib.ticker import NullLocator
matplotlib.interactive(False)
from chart import ChartOptions
class BarChart(ChartOptions):
def __init__(self, default=None, **kwargs):
super(BarChart, self).__init__(default, **kwargs)
self.inputdata = None
self.chartdata = None
self.inputerr = None
self.charterr = None
def gen_colors(self, count):
cmap = matplotlib.cm.get_cmap(self.colormap)
if count == 1:
return cmap([ 0.5 ])
if count < 5:
return cmap(arange(5) / float(4))[:count]
return cmap(arange(count) / float(count - 1))
# The input data format does not match the data format that the
# graph function takes, because the input format is meant to be intuitive. The conversion
# from input data format to chart data format depends on the
# dimensionality of the input data. Check here for the
# dimensionality and correctness of the input data
def set_data(self, data):
if data is None:
self.inputdata = None
self.chartdata = None
return
data = array(data)
dim = len(shape(data))
if dim not in (1, 2, 3):
raise AttributeError, "Input data must be a 1, 2, or 3d matrix"
self.inputdata = data
# If the input data is a 1d matrix, then it describes a
# standard bar chart.
if dim == 1:
self.chartdata = array([[data]])
# If the input data is a 2d matrix, then it describes a bar
# chart with groups. The matrix being an array of groups of
# bars.
if dim == 2:
self.chartdata = transpose([data], axes=(2,0,1))
# If the input data is a 3d matrix, then it describes an array
# of groups of bars with each bar being an array of stacked
# values.
if dim == 3:
self.chartdata = transpose(data, axes=(1,2,0))
def get_data(self):
return self.inputdata
data = property(get_data, set_data)
def set_err(self, err):
if err is None:
self.inputerr = None
self.charterr = None
return
err = array(err)
dim = len(shape(err))
if dim not in (1, 2, 3):
raise AttributeError, "Input err must be a 1, 2, or 3d matrix"
self.inputerr = err
if dim == 1:
self.charterr = array([[err]])
if dim == 2:
self.charterr = transpose([err], axes=(2,0,1))
if dim == 3:
self.charterr = transpose(err, axes=(1,2,0))
def get_err(self):
return self.inputerr
err = property(get_err, set_err)
# Graph the chart data.
# Input is a 3d matrix that describes a plot that has multiple
# groups, multiple bars in each group, and multiple values stacked
# in each bar. The underlying bar() function expects a sequence of
# bars in the same stack location and same group location, so the
# organization of the matrix is that the inner most sequence
# represents one of these bar groups, then those are grouped
# together to make one full stack of bars in each group, and then
# the outer most layer describes the groups. Here is an example
# data set and how it gets plotted as a result.
#
# e.g. data = [[[10,11,12], [13,14,15], [16,17,18], [19,20,21]],
# [[22,23,24], [25,26,27], [28,29,30], [31,32,33]]]
#
# will plot like this:
#
# 19 31 20 32 21 33
# 16 28 17 29 18 30
# 13 25 14 26 15 27
# 10 22 11 23 12 24
#
# Because this arrangement is rather counterintuitive, the rearrange
# function takes various matrices and arranges them to fit this
# profile.
#
# This code deals with one of the dimensions in the matrix being
# one wide.
#
def graph(self):
if self.chartdata is None:
raise AttributeError, "Data not set for bar chart!"
dim = len(shape(self.inputdata))
cshape = shape(self.chartdata)
if self.charterr is not None and shape(self.charterr) != cshape:
raise AttributeError, 'Dimensions of error and data do not match'
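        # Build a colors array shaped like chartdata (bars, stacked, groups):
        # 1d data is colored per group, 2d data per bar, and 3d data per
        # stacked segment.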
if dim == 1:
colors = self.gen_colors(cshape[2])
colors = [ [ colors ] * cshape[1] ] * cshape[0]
if dim == 2:
colors = self.gen_colors(cshape[0])
colors = [ [ [ c ] * cshape[2] ] * cshape[1] for c in colors ]
if dim == 3:
colors = self.gen_colors(cshape[1])
colors = [ [ [ c ] * cshape[2] for c in colors ] ] * cshape[0]
colors = array(colors)
self.figure = pylab.figure(figsize=self.chart_size)
outer_axes = None
inner_axes = None
if self.xsubticks is not None:
color = self.figure.get_facecolor()
self.metaaxes = self.figure.add_axes(self.figure_size,
axisbg=color, frameon=False)
for tick in self.metaaxes.xaxis.majorTicks:
tick.tick1On = False
tick.tick2On = False
self.metaaxes.set_yticklabels([])
self.metaaxes.set_yticks([])
size = [0] * 4
size[0] = self.figure_size[0]
size[1] = self.figure_size[1] + .12
size[2] = self.figure_size[2]
size[3] = self.figure_size[3] - .12
self.axes = self.figure.add_axes(size)
outer_axes = self.metaaxes
inner_axes = self.axes
else:
self.axes = self.figure.add_axes(self.figure_size)
outer_axes = self.axes
inner_axes = self.axes
bars_in_group = len(self.chartdata)
width = 1.0 / ( bars_in_group + 1)
center = width / 2
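        # Layout sketch (numbers are illustrative): with 3 bars per group,
        # width is 0.25 and the i-th bar of each group is drawn at
        # x = group_index + i * 0.25 + 0.125, leaving a one-bar-wide gap
        # between neighbouring groups.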
bars = []
for i,stackdata in enumerate(self.chartdata):
bottom = array([0.0] * len(stackdata[0]), Float)
stack = []
for j,bardata in enumerate(stackdata):
bardata = array(bardata)
ind = arange(len(bardata)) + i * width + center
yerr = None
if self.charterr is not None:
yerr = self.charterr[i][j]
bar = self.axes.bar(ind, bardata, width, bottom=bottom,
color=colors[i][j], yerr=yerr)
if self.xsubticks is not None:
self.metaaxes.bar(ind, [0] * len(bardata), width)
stack.append(bar)
bottom += bardata
bars.append(stack)
if self.xlabel is not None:
outer_axes.set_xlabel(self.xlabel)
if self.ylabel is not None:
inner_axes.set_ylabel(self.ylabel)
if self.yticks is not None:
ymin, ymax = self.axes.get_ylim()
nticks = float(len(self.yticks))
ticks = arange(nticks) / (nticks - 1) * (ymax - ymin) + ymin
inner_axes.set_yticks(ticks)
inner_axes.set_yticklabels(self.yticks)
elif self.ylim is not None:
inner_axes.set_ylim(self.ylim)
if self.xticks is not None:
outer_axes.set_xticks(arange(cshape[2]) + .5)
outer_axes.set_xticklabels(self.xticks)
if self.xsubticks is not None:
numticks = (cshape[0] + 1) * cshape[2]
inner_axes.set_xticks(arange(numticks) * width + 2 * center)
xsubticks = list(self.xsubticks) + [ '' ]
inner_axes.set_xticklabels(xsubticks * cshape[2], fontsize=7,
rotation=30)
if self.legend is not None:
if dim == 1:
lbars = bars[0][0]
if dim == 2:
lbars = [ bars[i][0][0] for i in xrange(len(bars))]
if dim == 3:
number = len(bars[0])
lbars = [ bars[0][number - j - 1][0] for j in xrange(number)]
if self.fig_legend:
self.figure.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
else:
self.axes.legend(lbars, self.legend, self.legend_loc,
prop=FontProperties(size=self.legend_size))
if self.title is not None:
self.axes.set_title(self.title)
def savefig(self, name):
self.figure.savefig(name)
def savecsv(self, name):
f = file(name, 'w')
data = array(self.inputdata)
dim = len(data.shape)
if dim == 1:
#if self.xlabel:
# f.write(', '.join(list(self.xlabel)) + '\n')
f.write(', '.join([ '%f' % val for val in data]) + '\n')
if dim == 2:
#if self.xlabel:
# f.write(', '.join([''] + list(self.xlabel)) + '\n')
for i,row in enumerate(data):
ylabel = []
#if self.ylabel:
# ylabel = [ self.ylabel[i] ]
f.write(', '.join(ylabel + [ '%f' % v for v in row]) + '\n')
if dim == 3:
f.write("don't do 3D csv files\n")
pass
f.close()
if __name__ == '__main__':
from random import randrange
import random, sys
dim = 3
number = 5
args = sys.argv[1:]
if len(args) > 3:
sys.exit("invalid number of arguments")
elif len(args) > 0:
myshape = [ int(x) for x in args ]
else:
myshape = [ 3, 4, 8 ]
# generate a data matrix of the given shape
size = reduce(lambda x,y: x*y, myshape)
#data = [ random.randrange(size - i) + 10 for i in xrange(size) ]
data = [ float(i)/100.0 for i in xrange(size) ]
data = reshape(data, myshape)
# setup some test bar charts
if True:
chart1 = BarChart()
chart1.data = data
chart1.xlabel = 'Benchmark'
chart1.ylabel = 'Bandwidth (GBps)'
chart1.legend = [ 'x%d' % x for x in xrange(myshape[-1]) ]
chart1.xticks = [ 'xtick%d' % x for x in xrange(myshape[0]) ]
chart1.title = 'this is the title'
if len(myshape) > 2:
chart1.xsubticks = [ '%d' % x for x in xrange(myshape[1]) ]
chart1.graph()
chart1.savefig('/tmp/test1.png')
chart1.savefig('/tmp/test1.ps')
chart1.savefig('/tmp/test1.eps')
chart1.savecsv('/tmp/test1.csv')
if False:
chart2 = BarChart()
chart2.data = data
chart2.colormap = 'gray'
chart2.graph()
chart2.savefig('/tmp/test2.png')
chart2.savefig('/tmp/test2.ps')
# pylab.show()
| bsd-3-clause |
jereze/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
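# Mark the bar with the lowest BIC with a '*': the flat argmin is unravelled
# into a (covariance type, n_components) pair to recover its x position.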
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
IamJeffG/geopandas | geopandas/tests/test_geodataframe.py | 1 | 19216 | from __future__ import absolute_import
import json
import os
import tempfile
import shutil
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from shapely.geometry import Point, Polygon
import fiona
from geopandas import GeoDataFrame, read_file, GeoSeries
from geopandas.tests.util import assert_geoseries_equal, connect, create_db, \
download_nybb, PACKAGE_DIR, PANDAS_NEW_SQL_API, unittest, validate_boro_df
class TestDataFrame(unittest.TestCase):
def setUp(self):
N = 10
nybb_filename, nybb_zip_path = download_nybb()
self.df = read_file(nybb_zip_path, vfs='zip://' + nybb_filename)
with fiona.open(nybb_zip_path, vfs='zip://' + nybb_filename) as f:
self.schema = f.schema
self.tempdir = tempfile.mkdtemp()
self.boros = self.df['BoroName']
self.crs = {'init': 'epsg:4326'}
self.df2 = GeoDataFrame([
{'geometry': Point(x, y), 'value1': x + y, 'value2': x * y}
for x, y in zip(range(N), range(N))], crs=self.crs)
self.df3 = read_file(os.path.join(PACKAGE_DIR, 'examples', 'null_geom.geojson'))
self.line_paths = self.df3['Name']
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_df_init(self):
self.assertTrue(type(self.df2) is GeoDataFrame)
self.assertTrue(self.df2.crs == self.crs)
def test_different_geo_colname(self):
data = {"A": range(5), "B": range(-5, 0),
"location": [Point(x, y) for x, y in zip(range(5), range(5))]}
df = GeoDataFrame(data, crs=self.crs, geometry='location')
locs = GeoSeries(data['location'], crs=self.crs)
assert_geoseries_equal(df.geometry, locs)
self.assert_('geometry' not in df)
self.assertEqual(df.geometry.name, 'location')
# internal implementation detail
self.assertEqual(df._geometry_column_name, 'location')
geom2 = [Point(x, y) for x, y in zip(range(5, 10), range(5))]
df2 = df.set_geometry(geom2, crs='dummy_crs')
self.assert_('geometry' in df2)
self.assert_('location' in df2)
self.assertEqual(df2.crs, 'dummy_crs')
self.assertEqual(df2.geometry.crs, 'dummy_crs')
# reset so it outputs okay
df2.crs = df.crs
assert_geoseries_equal(df2.geometry, GeoSeries(geom2, crs=df2.crs))
# for right now, non-geometry comes back as series
assert_geoseries_equal(df2['location'], df['location'],
check_series_type=False, check_dtype=False)
def test_geo_getitem(self):
data = {"A": range(5), "B": range(-5, 0),
"location": [Point(x, y) for x, y in zip(range(5), range(5))]}
df = GeoDataFrame(data, crs=self.crs, geometry='location')
self.assert_(isinstance(df.geometry, GeoSeries))
df['geometry'] = df["A"]
self.assert_(isinstance(df.geometry, GeoSeries))
self.assertEqual(df.geometry[0], data['location'][0])
# good if this changed in the future
self.assert_(not isinstance(df['geometry'], GeoSeries))
self.assert_(isinstance(df['location'], GeoSeries))
data["geometry"] = [Point(x + 1, y - 1) for x, y in zip(range(5), range(5))]
df = GeoDataFrame(data, crs=self.crs)
self.assert_(isinstance(df.geometry, GeoSeries))
self.assert_(isinstance(df['geometry'], GeoSeries))
# good if this changed in the future
self.assert_(not isinstance(df['location'], GeoSeries))
def test_geometry_property(self):
assert_geoseries_equal(self.df.geometry, self.df['geometry'],
check_dtype=True, check_index_type=True)
df = self.df.copy()
new_geom = [Point(x, y) for x, y in zip(range(len(self.df)),
range(len(self.df)))]
df.geometry = new_geom
new_geom = GeoSeries(new_geom, index=df.index, crs=df.crs)
assert_geoseries_equal(df.geometry, new_geom)
assert_geoseries_equal(df['geometry'], new_geom)
# new crs
gs = GeoSeries(new_geom, crs="epsg:26018")
df.geometry = gs
self.assertEqual(df.crs, "epsg:26018")
def test_geometry_property_errors(self):
with self.assertRaises(AttributeError):
df = self.df.copy()
del df['geometry']
df.geometry
# list-like error
with self.assertRaises(ValueError):
df = self.df2.copy()
df.geometry = 'value1'
# list-like error
with self.assertRaises(ValueError):
df = self.df.copy()
df.geometry = 'apple'
# non-geometry error
with self.assertRaises(TypeError):
df = self.df.copy()
df.geometry = list(range(df.shape[0]))
with self.assertRaises(KeyError):
df = self.df.copy()
del df['geometry']
df['geometry']
# ndim error
with self.assertRaises(ValueError):
df = self.df.copy()
df.geometry = df
def test_set_geometry(self):
geom = GeoSeries([Point(x, y) for x, y in zip(range(5), range(5))])
original_geom = self.df.geometry
df2 = self.df.set_geometry(geom)
self.assert_(self.df is not df2)
assert_geoseries_equal(df2.geometry, geom)
assert_geoseries_equal(self.df.geometry, original_geom)
assert_geoseries_equal(self.df['geometry'], self.df.geometry)
# unknown column
with self.assertRaises(ValueError):
self.df.set_geometry('nonexistent-column')
# ndim error
with self.assertRaises(ValueError):
self.df.set_geometry(self.df)
# new crs - setting should default to GeoSeries' crs
gs = GeoSeries(geom, crs="epsg:26018")
new_df = self.df.set_geometry(gs)
self.assertEqual(new_df.crs, "epsg:26018")
# explicit crs overrides self and dataframe
new_df = self.df.set_geometry(gs, crs="epsg:27159")
self.assertEqual(new_df.crs, "epsg:27159")
self.assertEqual(new_df.geometry.crs, "epsg:27159")
# Series should use dataframe's
new_df = self.df.set_geometry(geom.values)
self.assertEqual(new_df.crs, self.df.crs)
self.assertEqual(new_df.geometry.crs, self.df.crs)
def test_set_geometry_col(self):
g = self.df.geometry
g_simplified = g.simplify(100)
self.df['simplified_geometry'] = g_simplified
df2 = self.df.set_geometry('simplified_geometry')
# Drop is false by default
self.assert_('simplified_geometry' in df2)
assert_geoseries_equal(df2.geometry, g_simplified)
# If True, drops column and renames to geometry
df3 = self.df.set_geometry('simplified_geometry', drop=True)
self.assert_('simplified_geometry' not in df3)
assert_geoseries_equal(df3.geometry, g_simplified)
def test_set_geometry_inplace(self):
geom = [Point(x, y) for x, y in zip(range(5), range(5))]
ret = self.df.set_geometry(geom, inplace=True)
self.assert_(ret is None)
geom = GeoSeries(geom, index=self.df.index, crs=self.df.crs)
assert_geoseries_equal(self.df.geometry, geom)
def test_set_geometry_series(self):
        # Test that alignment occurs when setting geometry
        # with a Series
#
# Reverse the index order
# Set the Series to be Point(i,i) where i is the index
self.df.index = range(len(self.df)-1, -1, -1)
d = {}
for i in range(len(self.df)):
d[i] = Point(i, i)
g = GeoSeries(d)
# At this point, the DataFrame index is [4,3,2,1,0] and the
# GeoSeries index is [0,1,2,3,4]. Make sure set_geometry aligns
# them to match indexes
df = self.df.set_geometry(g)
for i, r in df.iterrows():
self.assertAlmostEqual(i, r['geometry'].x)
self.assertAlmostEqual(i, r['geometry'].y)
def test_to_json(self):
text = self.df.to_json()
data = json.loads(text)
self.assertTrue(data['type'] == 'FeatureCollection')
self.assertTrue(len(data['features']) == 5)
def test_to_json_geom_col(self):
df = self.df.copy()
df['geom'] = df['geometry']
df['geometry'] = np.arange(len(df))
df.set_geometry('geom', inplace=True)
text = df.to_json()
data = json.loads(text)
self.assertTrue(data['type'] == 'FeatureCollection')
self.assertTrue(len(data['features']) == 5)
def test_to_json_na(self):
# Set a value as nan and make sure it's written
self.df.loc[self.df['BoroName']=='Queens', 'Shape_Area'] = np.nan
text = self.df.to_json()
data = json.loads(text)
self.assertTrue(len(data['features']) == 5)
for f in data['features']:
props = f['properties']
self.assertEqual(len(props), 4)
if props['BoroName'] == 'Queens':
self.assertTrue(props['Shape_Area'] is None)
def test_to_json_bad_na(self):
# Check that a bad na argument raises error
with self.assertRaises(ValueError):
text = self.df.to_json(na='garbage')
def test_to_json_dropna(self):
self.df.loc[self.df['BoroName']=='Queens', 'Shape_Area'] = np.nan
self.df.loc[self.df['BoroName']=='Bronx', 'Shape_Leng'] = np.nan
text = self.df.to_json(na='drop')
data = json.loads(text)
self.assertEqual(len(data['features']), 5)
for f in data['features']:
props = f['properties']
if props['BoroName'] == 'Queens':
self.assertEqual(len(props), 3)
self.assertTrue('Shape_Area' not in props)
# Just make sure setting it to nan in a different row
# doesn't affect this one
self.assertTrue('Shape_Leng' in props)
elif props['BoroName'] == 'Bronx':
self.assertEqual(len(props), 3)
self.assertTrue('Shape_Leng' not in props)
self.assertTrue('Shape_Area' in props)
else:
self.assertEqual(len(props), 4)
def test_to_json_keepna(self):
self.df.loc[self.df['BoroName']=='Queens', 'Shape_Area'] = np.nan
self.df.loc[self.df['BoroName']=='Bronx', 'Shape_Leng'] = np.nan
text = self.df.to_json(na='keep')
data = json.loads(text)
self.assertEqual(len(data['features']), 5)
for f in data['features']:
props = f['properties']
self.assertEqual(len(props), 4)
if props['BoroName'] == 'Queens':
self.assertTrue(np.isnan(props['Shape_Area']))
# Just make sure setting it to nan in a different row
# doesn't affect this one
self.assertTrue('Shape_Leng' in props)
elif props['BoroName'] == 'Bronx':
self.assertTrue(np.isnan(props['Shape_Leng']))
self.assertTrue('Shape_Area' in props)
def test_copy(self):
df2 = self.df.copy()
self.assertTrue(type(df2) is GeoDataFrame)
self.assertEqual(self.df.crs, df2.crs)
def test_to_file(self):
""" Test to_file and from_file """
tempfilename = os.path.join(self.tempdir, 'boros.shp')
self.df.to_file(tempfilename)
# Read layer back in
df = GeoDataFrame.from_file(tempfilename)
self.assertTrue('geometry' in df)
self.assertTrue(len(df) == 5)
self.assertTrue(np.alltrue(df['BoroName'].values == self.boros))
# Write layer with null geometry out to file
tempfilename = os.path.join(self.tempdir, 'null_geom.shp')
self.df3.to_file(tempfilename)
# Read layer back in
df3 = GeoDataFrame.from_file(tempfilename)
self.assertTrue('geometry' in df3)
self.assertTrue(len(df3) == 2)
self.assertTrue(np.alltrue(df3['Name'].values == self.line_paths))
def test_to_file_types(self):
""" Test various integer type columns (GH#93) """
tempfilename = os.path.join(self.tempdir, 'int.shp')
int_types = [np.int, np.int8, np.int16, np.int32, np.int64, np.intp,
np.uint8, np.uint16, np.uint32, np.uint64, np.long]
geometry = self.df2.geometry
data = dict((str(i), np.arange(len(geometry), dtype=dtype))
for i, dtype in enumerate(int_types))
df = GeoDataFrame(data, geometry=geometry)
df.to_file(tempfilename)
def test_mixed_types_to_file(self):
""" Test that mixed geometry types raise error when writing to file """
tempfilename = os.path.join(self.tempdir, 'test.shp')
s = GeoDataFrame({'geometry': [Point(0, 0),
Polygon([(0, 0), (1, 0), (1, 1)])]})
with self.assertRaises(ValueError):
s.to_file(tempfilename)
def test_to_file_schema(self):
"""
Ensure that the file is written according to the schema
if it is specified
"""
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
tempfilename = os.path.join(self.tempdir, 'test.shp')
properties = OrderedDict([
('Shape_Leng', 'float:19.11'),
('BoroName', 'str:40'),
('BoroCode', 'int:10'),
('Shape_Area', 'float:19.11'),
])
schema = {'geometry': 'Polygon', 'properties': properties}
# Take the first 2 features to speed things up a bit
self.df.iloc[:2].to_file(tempfilename, schema=schema)
with fiona.open(tempfilename) as f:
result_schema = f.schema
self.assertEqual(result_schema, schema)
def test_bool_index(self):
# Find boros with 'B' in their name
df = self.df[self.df['BoroName'].str.contains('B')]
self.assertTrue(len(df) == 2)
boros = df['BoroName'].values
self.assertTrue('Brooklyn' in boros)
self.assertTrue('Bronx' in boros)
self.assertTrue(type(df) is GeoDataFrame)
def test_transform(self):
df2 = self.df2.copy()
df2.crs = {'init': 'epsg:26918', 'no_defs': True}
lonlat = df2.to_crs(epsg=4326)
utm = lonlat.to_crs(epsg=26918)
self.assertTrue(all(df2['geometry'].geom_almost_equals(utm['geometry'], decimal=2)))
def test_from_features(self):
nybb_filename, nybb_zip_path = download_nybb()
with fiona.open(nybb_zip_path,
vfs='zip://' + nybb_filename) as f:
features = list(f)
crs = f.crs
df = GeoDataFrame.from_features(features, crs=crs)
df.rename(columns=lambda x: x.lower(), inplace=True)
validate_boro_df(self, df)
self.assert_(df.crs == crs)
def test_from_features_unaligned_properties(self):
p1 = Point(1, 1)
f1 = {'type': 'Feature',
'properties': {'a': 0},
'geometry': p1.__geo_interface__}
p2 = Point(2, 2)
f2 = {'type': 'Feature',
'properties': {'b': 1},
'geometry': p2.__geo_interface__}
p3 = Point(3, 3)
f3 = {'type': 'Feature',
'properties': {'a': 2},
'geometry': p3.__geo_interface__}
df = GeoDataFrame.from_features([f1, f2, f3])
result = df[['a', 'b']]
expected = pd.DataFrame.from_dict([{'a': 0, 'b': np.nan},
{'a': np.nan, 'b': 1},
{'a': 2, 'b': np.nan}])
assert_frame_equal(expected, result)
def test_from_postgis_default(self):
con = connect('test_geopandas')
if con is None or not create_db(self.df):
raise unittest.case.SkipTest()
try:
sql = "SELECT * FROM nybb;"
df = GeoDataFrame.from_postgis(sql, con)
finally:
if PANDAS_NEW_SQL_API:
# It's not really a connection, it's an engine
con = con.connect()
con.close()
validate_boro_df(self, df)
def test_from_postgis_custom_geom_col(self):
con = connect('test_geopandas')
if con is None or not create_db(self.df):
raise unittest.case.SkipTest()
try:
sql = """SELECT
borocode, boroname, shape_leng, shape_area,
geom AS __geometry__
FROM nybb;"""
df = GeoDataFrame.from_postgis(sql, con, geom_col='__geometry__')
finally:
if PANDAS_NEW_SQL_API:
# It's not really a connection, it's an engine
con = con.connect()
con.close()
validate_boro_df(self, df)
def test_dataframe_to_geodataframe(self):
df = pd.DataFrame({"A": range(len(self.df)), "location":
list(self.df.geometry)}, index=self.df.index)
gf = df.set_geometry('location', crs=self.df.crs)
self.assertIsInstance(df, pd.DataFrame)
self.assertIsInstance(gf, GeoDataFrame)
assert_geoseries_equal(gf.geometry, self.df.geometry)
self.assertEqual(gf.geometry.name, 'location')
self.assert_('geometry' not in gf)
gf2 = df.set_geometry('location', crs=self.df.crs, drop=True)
self.assertIsInstance(df, pd.DataFrame)
self.assertIsInstance(gf2, GeoDataFrame)
self.assertEqual(gf2.geometry.name, 'geometry')
self.assert_('geometry' in gf2)
self.assert_('location' not in gf2)
self.assert_('location' in df)
# should be a copy
df.ix[0, "A"] = 100
self.assertEqual(gf.ix[0, "A"], 0)
self.assertEqual(gf2.ix[0, "A"], 0)
with self.assertRaises(ValueError):
df.set_geometry('location', inplace=True)
def test_geodataframe_geointerface(self):
self.assertEqual(self.df.__geo_interface__['type'], 'FeatureCollection')
self.assertEqual(len(self.df.__geo_interface__['features']),
self.df.shape[0])
def test_geodataframe_geojson_no_bbox(self):
geo = self.df._to_geo(na="null", show_bbox=False)
self.assertFalse('bbox' in geo.keys())
for feature in geo['features']:
self.assertFalse('bbox' in feature.keys())
def test_geodataframe_geojson_bbox(self):
geo = self.df._to_geo(na="null", show_bbox=True)
self.assertTrue('bbox' in geo.keys())
self.assertEqual(len(geo['bbox']), 4)
self.assertTrue(isinstance(geo['bbox'], tuple))
for feature in geo['features']:
self.assertTrue('bbox' in feature.keys())
def test_pickle(self):
filename = os.path.join(self.tempdir, 'df.pkl')
self.df.to_pickle(filename)
unpickled = pd.read_pickle(filename)
assert_frame_equal(self.df, unpickled)
self.assertEqual(self.df.crs, unpickled.crs)
| bsd-3-clause |
dennisss/sympy | examples/intermediate/mplot3d.py | 14 | 1261 | #!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
"""
Plot a 3d function using matplotlib/Tk.
"""
import warnings
warnings.filterwarnings("ignore", "Could not match \S")
p = import_module('pylab')
# Try newer version first
p3 = import_module('mpl_toolkits.mplot3d',
__import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
if not p or not p3:
sys.exit("Matplotlib is required to use mplot3d.")
x, y, z = sample(f, var1, var2)
fig = p.figure()
ax = p3.Axes3D(fig)
# ax.plot_surface(x,y,z) #seems to be a bug in matplotlib
ax.plot_wireframe(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
p.show()
def main():
x = Symbol('x')
y = Symbol('y')
mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))
if __name__ == "__main__":
main()
| bsd-3-clause |
AudioBonsai/audiobonsai | weekly_sampler.py | 2 | 8309 | from audiobonsai import wsgi, settings
from datetime import datetime
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
import pandas as pd
import requests  # needed for the ConnectionError handler below
from pprint import pprint
from sausage_grinder.models import Artist, ReleaseSet
from spotify_helper.models import SpotifyUser
from spotipy import SpotifyException
from spotify_helper.helpers import get_user_conn
def attr_score(val, factor):
return (val//factor)**2
def stat_score(df, in_col, out_col, multiplier=1):
in_min = df[in_col].min()
in_max = df[in_col].max()
in_col_temp = in_col + "_temp"
df.loc[:, in_col_temp] = df[in_col].apply(lambda x: x - in_min)
factor = (in_max - in_min) // 25
df.loc[:, out_col] = df[in_col_temp].apply(lambda x: attr_score(x, factor) * multiplier)
return df
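# Rough sketch of the scoring above (numbers are illustrative): if the column
# spans 0..100 then factor == (100 - 0) // 25 == 4, so a raw value of 50 maps
# to ((50 - 0) // 4) ** 2 == 144 and the maximum value maps to 25 ** 2 == 625,
# before the optional multiplier is applied.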
def build_artists_dict(week):
artists = Artist.objects.filter(weeks=week)
artists_dict = {}
for artist in artists:
release = artist.week_release(week)
if release is None:
print('No release found for {} in week {}'.format(artist, week))
continue
if release.release_type == 'single':
continue
#print('{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{}'.format(artist.popularity, artist.release_day_pop, artist.pop_change_from_release, artist.pop_change_pct_from_release, artist.followers, artist.release_day_foll, artist.followers_change_from_release, artist.followers_change_pct_from_release, artist))
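        # Guard against misleading percentages: artists that had almost no
        # followers (or very low popularity) on release day would otherwise
        # show huge percent changes, so cap those values.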
if artist.release_day_foll <= 100 and artist.followers_change_pct_from_release >= 100:
#print('{}: foll pct reset'.format(artist))
#print('{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{}'.format(artist.popularity, artist.release_day_pop, artist.pop_change_from_release, artist.pop_change_pct_from_release, artist.followers, artist.release_day_foll, artist.followers_change_from_release, artist.followers_change_pct_from_release, artist))
artist.followers_change_pct_from_release = min(artist.followers_change_from_release, 100)
#print('{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{}'.format(artist.popularity, artist.release_day_pop, artist.pop_change_from_release, artist.pop_change_pct_from_release, artist.followers, artist.release_day_foll, artist.followers_change_from_release, artist.followers_change_pct_from_release, artist))
if artist.release_day_pop <= 10 and artist.pop_change_pct_from_release >= 100:
#print('{}: pop pct reset'.format(artist))
#print('{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{}'.format(artist.popularity, artist.release_day_pop, artist.pop_change_from_release, artist.pop_change_pct_from_release, artist.followers, artist.release_day_foll, artist.followers_change_from_release, artist.followers_change_pct_from_release, artist))
artist.pop_change_pct_from_release = min(artist.pop_change_from_release*10, 100)
#print('{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{}'.format(artist.popularity, artist.release_day_pop, artist.pop_change_from_release, artist.pop_change_pct_from_release, artist.followers, artist.release_day_foll, artist.followers_change_from_release, artist.followers_change_pct_from_release, artist))
artists_dict[artist.spotify_uri] = {
'obj': artist,
'name': artist.name,
'pop': artist.popularity,
'pop_change': artist.pop_change_from_release,
'pop_change_pct': artist.pop_change_pct_from_release,
'foll': artist.followers,
'foll_change': artist.followers_change_from_release,
'foll_change_pct': artist.followers_change_pct_from_release,
'release_day_foll': artist.release_day_foll,
'release_day_pop': artist.release_day_pop,
'release': release
}
return artists_dict
def build_artists_df(week):
artists_dict = build_artists_dict(week)
artists_df = pd.DataFrame.from_dict(artists_dict, orient='index')
artists_df = stat_score(artists_df, 'pop', 'pop_score')
artists_df = stat_score(artists_df, 'pop_change', 'pop_change_score')
artists_df = stat_score(artists_df, 'pop_change_pct', 'pop_change_pct_score')
artists_df = stat_score(artists_df, 'foll', 'foll_score')
artists_df = stat_score(artists_df, 'foll_change', 'foll_change_score')
artists_df = stat_score(artists_df, 'foll_change_pct', 'foll_change_score_pct')
artists_df['final_score'] = artists_df['pop_score'] + \
artists_df['foll_score'] + \
artists_df['pop_change_pct_score'] + \
artists_df['pop_change_score'] + \
artists_df['foll_change_score'] + \
artists_df['foll_change_score_pct']
return artists_df
if __name__ == '__main__':
weeks = ReleaseSet.objects.all().order_by('-week_date')
week = weeks[0]
artists_df = build_artists_df(week)
artists_df = artists_df.sort_values(by='final_score', ascending=False)
artists_df = artists_df.drop_duplicates(subset='release', keep='first')
artists_df['category'] = pd.cut(artists_df['release_day_pop'], 10)
#artists_df['category'] = pd.qcut(artists_df['release_day_foll'], 5, duplicates='drop')
#top100_df = artists_df.sort_values(by='final_score', ascending=False)
#top100_df = top100_df.drop_duplicates(subset='release', keep='first').head(200)
#print(top100_df)
playlist_name = 'Fresh Cuts: {}'.format(week.week_date.strftime('%b %d, %Y'))
user = User.objects.get(username=settings.SPOTIFY_USERNAME)
spotify_user = SpotifyUser.objects.get(user=user)
track_list = []
sp = get_user_conn(spotify_user, '127.0.0.1:8000')
category_num = 1
for category in sorted(artists_df['category'].unique()):
category_df = artists_df[artists_df['category'] == category]
print('\nCategory {:d}'.format(category_num))
print('{}: Min {:10d}, Max {:10d}, Count {:10d}'.format(category, category_df['release_day_pop'].min(), category_df['release_day_pop'].max(), len(category_df)))
category_df = category_df.head(20)
#print(category_df)
#print('{}: Min {:10d}, Max {:10d}, Count {:10d}'.format(category, category_df['release_day_foll'].min(), category_df['release_day_foll'].max(), len(category_df)))
#for release in top100_df['release'].values:
for release in category_df['release'].values:
try:
album_dets = sp.album(release.spotify_uri)
except requests.exceptions.ConnectionError:
continue
print('#{:03d} {:6s}: {}'.format(len(track_list)+1, release.release_type, release))
if album_dets['type'] == 'single':
track_list.append(album_dets['tracks']['items'][0]['uri'])
else:
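                # For multi-track releases, pick one representative track:
                # group the first five tracks by duration and take the one
                # whose duration sits in the middle of the sorted durations.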
track_dict = {}
for track in album_dets['tracks']['items'][:5]:
if track['duration_ms'] not in track_dict.keys():
track_dict[track['duration_ms']] = []
track_dict[track['duration_ms']].append(track['uri'])
track_times = sorted(list(track_dict.keys()))
median_time_key = track_times[int(len(track_times)/2)]
track_list.append(track_dict[median_time_key][0])
category_num += 1
#playlist = sp.user_playlist_create(user, playlist_name)
#pprint(playlist)
sausage_grinder_playlist = 'spotify:user:audiobonsai:playlist:6z8m6hjBXxClAZt3oYONCa'
batch_size = 100
offset = 0
while offset < len(track_list):
if offset == 0:
#playlist_tracks = sp.user_playlist_replace_tracks(user, playlist['id'], track_list[offset:offset + batch_size])
playlist_tracks = sp.user_playlist_replace_tracks(user, sausage_grinder_playlist, track_list[offset:offset + batch_size])
else:
#playlist_tracks = sp.user_playlist_add_tracks(user, playlist['id'], track_list[offset:offset + batch_size])
playlist_tracks = sp.user_playlist_add_tracks(user, sausage_grinder_playlist, track_list[offset:offset + batch_size])
offset += batch_size
pprint(playlist_tracks)
| apache-2.0 |
JaviMerino/trappy | tests/test_dynamic.py | 2 | 4590 | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import matplotlib
from test_sched import BaseTestSched
from trappy.base import Base
import trappy
class DynamicEvent(Base):
"""Test the ability to register
specific classes to trappy"""
unique_word = "dynamic_test_key"
name = "dynamic_event"
class TestDynamicEvents(BaseTestSched):
def __init__(self, *args, **kwargs):
super(TestDynamicEvents, self).__init__(*args, **kwargs)
def test_dynamic_data_frame(self):
"""
Test if the dynamic events are populated
in the data frame
"""
parse_class = trappy.register_dynamic_ftrace("DynamicEvent", "dynamic_test_key")
t = trappy.FTrace(name="first")
self.assertTrue(len(t.dynamic_event.data_frame) == 1)
trappy.unregister_dynamic_ftrace(parse_class)
def test_dynamic_class_attr(self):
"""
        Test the attributes of the dynamically
generated class
"""
cls = trappy.register_dynamic_ftrace("DynamicEvent", "dynamic_test_key",
pivot="test_pivot")
self.assertEquals(cls.__name__, "DynamicEvent")
self.assertEquals(cls.name, "dynamic_event")
self.assertEquals(cls.unique_word, "dynamic_test_key")
self.assertEquals(cls.pivot, "test_pivot")
trappy.unregister_dynamic_ftrace(cls)
def test_dynamic_event_plot(self):
"""Test if plotter can accept a dynamic class
for a template argument"""
cls = trappy.register_dynamic_ftrace("DynamicEvent", "dynamic_test_key")
t = trappy.FTrace(name="first")
l = trappy.LinePlot(t, cls, column="load")
l.view(test=True)
trappy.unregister_dynamic_ftrace(cls)
def test_dynamic_event_scope(self):
"""Test the case when an "all" scope class is
registered. it should appear in both thermal and sched
ftrace class definitions when scoped ftrace objects are created
"""
cls = trappy.register_dynamic_ftrace("DynamicEvent", "dynamic_test_key")
t1 = trappy.FTrace(name="first")
self.assertTrue(t1.class_definitions.has_key(cls.name))
trappy.unregister_dynamic_ftrace(cls)
def test_register_ftrace_parser(self):
trappy.register_ftrace_parser(DynamicEvent)
t = trappy.FTrace(name="first")
self.assertTrue(len(t.dynamic_event.data_frame) == 1)
trappy.unregister_ftrace_parser(DynamicEvent)
def test_no_none_pivot(self):
"""register_dynamic_ftrace() with default value for pivot doesn't create a class with a pivot=None"""
cls = trappy.register_dynamic_ftrace("MyEvent", "my_dyn_test_key")
self.assertFalse(hasattr(cls, "pivot"))
trappy.unregister_dynamic_ftrace(cls)
def test_unregister_dynamic_ftrace(self):
"""Test that dynamic events can be unregistered"""
dyn_event = trappy.register_dynamic_ftrace("DynamicEvent",
"dynamic_test_key")
trace = trappy.FTrace(name="first")
self.assertTrue(len(trace.dynamic_event.data_frame) == 1)
trappy.unregister_dynamic_ftrace(dyn_event)
trace = trappy.FTrace(name="first")
self.assertFalse(hasattr(trace, "dynamic_event"))
dyn_event = trappy.register_dynamic_ftrace("DynamicEvent",
"dynamic_test_key",
scope="sched")
trace = trappy.FTrace(name="first")
self.assertTrue(len(trace.dynamic_event.data_frame) == 1)
trappy.unregister_dynamic_ftrace(dyn_event)
trace = trappy.FTrace(name="first")
self.assertFalse(hasattr(trace, "dynamic_event"))
def test_unregister_ftrace_parser(self):
"""unregister_ftrace_parser() works"""
trappy.register_ftrace_parser(DynamicEvent)
trappy.unregister_ftrace_parser(DynamicEvent)
trace = trappy.FTrace()
self.assertFalse(hasattr(trace, "dynamic_event"))
| apache-2.0 |
rezasafi/spark | examples/src/main/python/sql/arrow.py | 8 | 8426 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Arrow in Spark.
Run with:
./bin/spark-submit examples/src/main/python/sql/arrow.py
"""
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
def dataframe_with_arrow_example(spark):
# $example on:dataframe_with_arrow$
import numpy as np
import pandas as pd
# Enable Arrow-based columnar data transfers
spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
# Generate a Pandas DataFrame
pdf = pd.DataFrame(np.random.rand(100, 3))
# Create a Spark DataFrame from a Pandas DataFrame using Arrow
df = spark.createDataFrame(pdf)
# Convert the Spark DataFrame back to a Pandas DataFrame using Arrow
result_pdf = df.select("*").toPandas()
# $example off:dataframe_with_arrow$
print("Pandas DataFrame result statistics:\n%s\n" % str(result_pdf.describe()))
def scalar_pandas_udf_example(spark):
# $example on:scalar_pandas_udf$
import pandas as pd
from pyspark.sql.functions import col, pandas_udf
from pyspark.sql.types import LongType
# Declare the function and create the UDF
def multiply_func(a, b):
return a * b
multiply = pandas_udf(multiply_func, returnType=LongType())
# The function for a pandas_udf should be able to execute with local Pandas data
x = pd.Series([1, 2, 3])
print(multiply_func(x, x))
# 0 1
# 1 4
# 2 9
# dtype: int64
# Create a Spark DataFrame, 'spark' is an existing SparkSession
df = spark.createDataFrame(pd.DataFrame(x, columns=["x"]))
# Execute function as a Spark vectorized UDF
df.select(multiply(col("x"), col("x"))).show()
# +-------------------+
# |multiply_func(x, x)|
# +-------------------+
# | 1|
# | 4|
# | 9|
# +-------------------+
# $example off:scalar_pandas_udf$
def scalar_iter_pandas_udf_example(spark):
# $example on:scalar_iter_pandas_udf$
import pandas as pd
from pyspark.sql.functions import col, pandas_udf, struct, PandasUDFType
pdf = pd.DataFrame([1, 2, 3], columns=["x"])
df = spark.createDataFrame(pdf)
# When the UDF is called with a single column that is not StructType,
# the input to the underlying function is an iterator of pd.Series.
@pandas_udf("long", PandasUDFType.SCALAR_ITER)
def plus_one(batch_iter):
for x in batch_iter:
yield x + 1
df.select(plus_one(col("x"))).show()
# +-----------+
# |plus_one(x)|
# +-----------+
# | 2|
# | 3|
# | 4|
# +-----------+
# When the UDF is called with more than one columns,
# the input to the underlying function is an iterator of pd.Series tuple.
@pandas_udf("long", PandasUDFType.SCALAR_ITER)
def multiply_two_cols(batch_iter):
for a, b in batch_iter:
yield a * b
df.select(multiply_two_cols(col("x"), col("x"))).show()
# +-----------------------+
# |multiply_two_cols(x, x)|
# +-----------------------+
# | 1|
# | 4|
# | 9|
# +-----------------------+
# When the UDF is called with a single column that is StructType,
# the input to the underlying function is an iterator of pd.DataFrame.
@pandas_udf("long", PandasUDFType.SCALAR_ITER)
def multiply_two_nested_cols(pdf_iter):
for pdf in pdf_iter:
yield pdf["a"] * pdf["b"]
df.select(
multiply_two_nested_cols(
struct(col("x").alias("a"), col("x").alias("b"))
).alias("y")
).show()
# +---+
# | y|
# +---+
# | 1|
# | 4|
# | 9|
# +---+
# In the UDF, you can initialize some states before processing batches.
# Wrap your code with try/finally or use context managers to ensure
# the release of resources at the end.
y_bc = spark.sparkContext.broadcast(1)
@pandas_udf("long", PandasUDFType.SCALAR_ITER)
def plus_y(batch_iter):
y = y_bc.value # initialize states
try:
for x in batch_iter:
yield x + y
finally:
pass # release resources here, if any
df.select(plus_y(col("x"))).show()
# +---------+
# |plus_y(x)|
# +---------+
# | 2|
# | 3|
# | 4|
# +---------+
# $example off:scalar_iter_pandas_udf$
def grouped_map_pandas_udf_example(spark):
# $example on:grouped_map_pandas_udf$
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = spark.createDataFrame(
[(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
@pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)
def subtract_mean(pdf):
# pdf is a pandas.DataFrame
v = pdf.v
return pdf.assign(v=v - v.mean())
df.groupby("id").apply(subtract_mean).show()
# +---+----+
# | id| v|
# +---+----+
# | 1|-0.5|
# | 1| 0.5|
# | 2|-3.0|
# | 2|-1.0|
# | 2| 4.0|
# +---+----+
# $example off:grouped_map_pandas_udf$
def grouped_agg_pandas_udf_example(spark):
# $example on:grouped_agg_pandas_udf$
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql import Window
df = spark.createDataFrame(
[(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
@pandas_udf("double", PandasUDFType.GROUPED_AGG)
def mean_udf(v):
return v.mean()
df.groupby("id").agg(mean_udf(df['v'])).show()
# +---+-----------+
# | id|mean_udf(v)|
# +---+-----------+
# | 1| 1.5|
# | 2| 6.0|
# +---+-----------+
w = Window \
.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
df.withColumn('mean_v', mean_udf(df['v']).over(w)).show()
# +---+----+------+
# | id| v|mean_v|
# +---+----+------+
# | 1| 1.0| 1.5|
# | 1| 2.0| 1.5|
# | 2| 3.0| 6.0|
# | 2| 5.0| 6.0|
# | 2|10.0| 6.0|
# +---+----+------+
# $example off:grouped_agg_pandas_udf$
def map_iter_pandas_udf_example(spark):
# $example on:map_iter_pandas_udf$
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))
@pandas_udf(df.schema, PandasUDFType.MAP_ITER)
def filter_func(batch_iter):
for pdf in batch_iter:
yield pdf[pdf.id == 1]
df.mapInPandas(filter_func).show()
# +---+---+
# | id|age|
# +---+---+
# | 1| 21|
# +---+---+
# $example off:map_iter_pandas_udf$
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("Python Arrow-in-Spark example") \
.getOrCreate()
print("Running Pandas to/from conversion example")
dataframe_with_arrow_example(spark)
print("Running pandas_udf scalar example")
scalar_pandas_udf_example(spark)
print("Running pandas_udf scalar iterator example")
scalar_iter_pandas_udf_example(spark)
print("Running pandas_udf grouped map example")
grouped_map_pandas_udf_example(spark)
print("Running pandas_udf grouped agg example")
grouped_agg_pandas_udf_example(spark)
print("Running pandas_udf map iterator example")
map_iter_pandas_udf_example(spark)
spark.stop()
| apache-2.0 |
diana-hep/carl | tests/distributions/test_base.py | 1 | 2940 | # Carl is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
import numpy as np
import theano
import theano.tensor as T
from numpy.testing import assert_raises
from theano.tensor import TensorVariable
from theano.tensor.sharedvar import SharedVariable
from carl.distributions import DistributionMixin
from carl.distributions import Normal
def test_mixin_base():
# Check raw parameters
p = Normal(mu=0.0, sigma=1.0)
assert isinstance(p, DistributionMixin)
assert len(p.parameters_) == 2
assert p.mu in p.parameters_
assert p.sigma in p.parameters_
assert isinstance(p.mu, SharedVariable)
assert isinstance(p.sigma, SharedVariable)
assert p.mu.get_value() == 0.0
assert p.sigma.get_value() == 1.0
assert len(p.observeds_) == 0
assert isinstance(p.X, TensorVariable)
def test_mixin_external():
# Check external parameters
mu = theano.shared(0.0)
sigma = theano.shared(1.0)
p = Normal(mu=mu, sigma=sigma)
assert mu == p.mu
assert sigma == p.sigma
def test_mixin_constants():
# Check with constants
mu = T.constant(0.0)
sigma = T.constant(1.0)
p = Normal(mu=mu, sigma=sigma)
assert len(p.parameters_) == 0
assert len(p.constants_) == 2
assert mu in p.constants_
assert sigma in p.constants_
def test_mixin_composition():
# Check composed expressions as parameters
a = theano.shared(0.0)
b = theano.shared(-1.0)
mu = a + b - 1.0
sigma = T.abs_(a * b)
p = Normal(mu=mu, sigma=sigma)
assert a in p.parameters_
assert b in p.parameters_
# Compose parameters with observed variables
a = theano.shared(1.0)
b = theano.shared(0.0)
y = T.dmatrix(name="y")
p = Normal(mu=a * y + b)
assert len(p.parameters_) == 3
assert a in p.parameters_
assert b in p.parameters_
assert p.sigma in p.parameters_
assert p.mu not in p.parameters_
assert len(p.observeds_) == 1
assert y in p.observeds_
# Check signatures
data_X = np.random.rand(10, 1)
data_y = np.random.rand(10, 1)
p.pdf(X=data_X, y=data_y)
p.cdf(X=data_X, y=data_y)
p.rvs(10, y=data_y)
# Check error
a = theano.shared(1.0)
b = theano.shared(0.0)
y = T.dmatrix() # y must be named
assert_raises(ValueError, Normal, mu=a * y + b)
def test_mixin_sklearn_params():
# get_params
p = Normal(mu=0.0, sigma=1.0)
params = p.get_params()
assert len(params) == 2
assert "mu" in params
assert "sigma" in params
# for parameters, set_params should change the value contained
old_mu = p.get_params()["mu"]
p.set_params(mu=42.0)
new_mu = p.get_params()["mu"]
assert old_mu is new_mu
assert new_mu.get_value() == 42.0
# check errors
p = Normal(mu=T.constant(0.0), sigma=1.0)
assert_raises(ValueError, p.set_params, mu=1.0)
| bsd-3-clause |
toobaz/pandas | pandas/tests/arrays/sparse/test_array.py | 2 | 45199 | import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype="int")
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype("int64", -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype("int64")
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype="Sparse[int32]")
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype("int32")
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(["A", "A", np.nan, "B"], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(["A", "A", np.nan, "B"], dtype=np.object, fill_value="A")
assert arr.dtype == SparseDtype(np.object, "A")
assert arr.fill_value == "A"
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind="integer")
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(
data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64,
fill_value=0,
)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(
data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=np.int64
)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(
data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None,
fill_value=0,
)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [None, IntIndex(1, [0])])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(
data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=None
)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize(
"data, fill_value",
[
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp("2017-01-01")], pd.NaT),
],
)
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize("format", ["coo", "csc", "csr"])
@pytest.mark.parametrize(
"size",
[pytest.param(0, marks=td.skip_if_np_lt("1.16", reason="NumPy-11383")), 10],
)
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format="csc")
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize(
"scalar,dtype",
[
(False, SparseDtype(bool, False)),
(0.0, SparseDtype("float64", 0)),
(1, SparseDtype("int64", 1)),
("z", SparseDtype("object", "z")),
],
)
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3], kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray(
[0, 0, 0, 1, 1, 2], dtype=np.int64, kind=kind, fill_value=fill
)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray(
[True, False, True, True], dtype=np.bool, kind=kind, fill_value=fill
)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize("fill_value", [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]), fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]), fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'."
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind="block")
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind="block")
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
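# Reading aid (summarises the behaviour asserted in the three take/fill tests
# above; it does not introduce new guarantees): with allow_fill=True, -1 is a
# missing-value sentinel and gets filled with the dtype's na_value (NaN here),
# while indices below -1 raise ValueError; with allow_fill=False (the default)
# negative indices are ordinary Python-style positions, so -1 takes the last
# element. Out-of-bounds indices raise IndexError in either mode.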
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices, np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == SparseDtype(np.bool, True)
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1.0, np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == SparseDtype(np.float32)
tm.assert_numpy_array_equal(arr.sp_values, np.array([1, 3], dtype=np.float32))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(
arr.sp_index.indices, np.array([0, 2], dtype=np.int32)
)
dense = arr.to_dense()
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
# float -> float
arr = SparseArray([None, None, 0, 2])
result = arr.astype("Sparse[float32]")
expected = SparseArray([None, None, 0, 2], dtype=np.dtype("float32"))
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("float64", fill_value=0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(
np.array([0.0, 2.0], dtype=dtype.subtype), IntIndex(4, [2, 3]), dtype
)
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("int64", 0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(
np.array([0, 2], dtype=np.int64), IntIndex(4, [2, 3]), dtype
)
tm.assert_sp_array_equal(result, expected)
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
with pytest.raises(ValueError, match="NA"):
arr.astype("Sparse[i8]")
def test_astype_bool(self):
a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
result = a.astype(bool)
expected = SparseArray([True, 0, 0, True], dtype=SparseDtype(bool, 0))
tm.assert_sp_array_equal(result, expected)
# update fill value
result = a.astype(SparseDtype(bool, False))
expected = SparseArray(
[True, False, False, True], dtype=SparseDtype(bool, False)
)
tm.assert_sp_array_equal(result, expected)
def test_astype_all(self, any_real_dtype):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
typ = np.dtype(any_real_dtype)
res = arr.astype(typ)
assert res.dtype == SparseDtype(typ, 1)
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(np.asarray(res.to_dense()), vals.astype(typ))
@pytest.mark.parametrize(
"array, dtype, expected",
[
(
SparseArray([0, 1]),
"float",
SparseArray([0.0, 1.0], dtype=SparseDtype(float, 0.0)),
),
(SparseArray([0, 1]), bool, SparseArray([False, True])),
(
SparseArray([0, 1], fill_value=1),
bool,
SparseArray([False, True], dtype=SparseDtype(bool, True)),
),
pytest.param(
SparseArray([0, 1]),
"datetime64[ns]",
SparseArray(
np.array([0, 1], dtype="datetime64[ns]"),
dtype=SparseDtype("datetime64[ns]", pd.Timestamp("1970")),
),
marks=[pytest.mark.xfail(reason="NumPy-7619")],
),
(
SparseArray([0, 1, 10]),
str,
SparseArray(["0", "1", "10"], dtype=SparseDtype(str, "0")),
),
(SparseArray(["10", "20"]), float, SparseArray([10.0, 20.0])),
(
SparseArray([0, 1, 0]),
object,
SparseArray([0, 1, 0], dtype=SparseDtype(object, 0)),
),
],
)
def test_astype_more(self, array, dtype, expected):
result = array.astype(dtype)
tm.assert_sp_array_equal(result, expected)
def test_astype_nan_raises(self):
arr = SparseArray([1.0, np.nan])
with pytest.raises(ValueError, match="Cannot convert non-finite"):
arr.astype(int)
def test_set_fill_value(self):
arr = SparseArray([1.0, np.nan, 2.0], fill_value=np.nan)
arr.fill_value = 2
assert arr.fill_value == 2
arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
arr.fill_value = 2
assert arr.fill_value == 2
# XXX: this seems fine? You can construct an integer
# sparsearray with NaN fill value, why not update one?
# coerces to int
# msg = "unable to set fill_value 3\\.1 to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 3.1
assert arr.fill_value == 3.1
# msg = "unable to set fill_value nan to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
assert arr.fill_value
# coerces to bool
# msg = "unable to set fill_value 0 to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 0
assert arr.fill_value == 0
# msg = "unable to set fill_value nan to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
@pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)])
def test_set_fill_invalid_non_scalar(self, val):
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
msg = "fill_value must be a scalar"
with pytest.raises(ValueError, match=msg):
arr.fill_value = val
def test_copy(self):
arr2 = self.arr.copy()
assert arr2.sp_values is not self.arr.sp_values
assert arr2.sp_index is self.arr.sp_index
def test_values_asarray(self):
assert_almost_equal(self.arr.to_dense(), self.arr_data)
@pytest.mark.parametrize(
"data,shape,dtype",
[
([0, 0, 0, 0, 0], (5,), None),
([], (0,), None),
([0], (1,), None),
(["A", "A", np.nan, "B"], (4,), np.object),
],
)
def test_shape(self, data, shape, dtype):
# GH 21126
out = SparseArray(data, dtype=dtype)
assert out.shape == shape
@pytest.mark.parametrize(
"vals",
[
[np.nan, np.nan, np.nan, np.nan, np.nan],
[1, np.nan, np.nan, 3, np.nan],
[1, np.nan, 0, 3, 0],
],
)
@pytest.mark.parametrize("fill_value", [None, 0])
def test_dense_repr(self, vals, fill_value):
vals = np.array(vals)
arr = SparseArray(vals, fill_value=fill_value)
res = arr.to_dense()
tm.assert_numpy_array_equal(res, vals)
with tm.assert_produces_warning(FutureWarning):
res2 = arr.get_values()
tm.assert_numpy_array_equal(res2, vals)
def test_getitem(self):
def _checkit(i):
assert_almost_equal(self.arr[i], self.arr.to_dense()[i])
for i in range(len(self.arr)):
_checkit(i)
_checkit(-i)
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
def test_getslice(self):
result = self.arr[:-3]
exp = SparseArray(self.arr.to_dense()[:-3])
tm.assert_sp_array_equal(result, exp)
result = self.arr[-4:]
exp = SparseArray(self.arr.to_dense()[-4:])
tm.assert_sp_array_equal(result, exp)
# two corner cases from Series
result = self.arr[-12:]
exp = SparseArray(self.arr)
tm.assert_sp_array_equal(result, exp)
result = self.arr[:-12]
exp = SparseArray(self.arr.to_dense()[:0])
tm.assert_sp_array_equal(result, exp)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[4:,] # noqa: E231
exp = SparseArray(dense[4:,]) # noqa: E231
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[4:,] # noqa: E231
exp = SparseArray(dense[4:,], fill_value=0) # noqa: E231
tm.assert_sp_array_equal(res, exp)
with pytest.raises(IndexError):
sparse[4:, :]
with pytest.raises(IndexError):
# check numpy compat
dense[4:, :]
def test_boolean_slice_empty(self):
arr = pd.SparseArray([0, 1, 2])
res = arr[[False, False, False]]
assert res.dtype == arr.dtype
@pytest.mark.parametrize("op", ["add", "sub", "mul", "truediv", "floordiv", "pow"])
def test_binary_operators(self, op):
op = getattr(operator, op)
data1 = np.random.randn(20)
data2 = np.random.randn(20)
data1[::2] = np.nan
data2[::3] = np.nan
arr1 = SparseArray(data1)
arr2 = SparseArray(data2)
data1[::2] = 3
data2[::3] = 3
farr1 = SparseArray(data1, fill_value=3)
farr2 = SparseArray(data2, fill_value=3)
def _check_op(op, first, second):
res = op(first, second)
exp = SparseArray(
op(first.to_dense(), second.to_dense()), fill_value=first.fill_value
)
assert isinstance(res, SparseArray)
assert_almost_equal(res.to_dense(), exp.to_dense())
res2 = op(first, second.to_dense())
assert isinstance(res2, SparseArray)
tm.assert_sp_array_equal(res, res2)
res3 = op(first.to_dense(), second)
assert isinstance(res3, SparseArray)
tm.assert_sp_array_equal(res, res3)
res4 = op(first, 4)
assert isinstance(res4, SparseArray)
# Ignore this if the actual op raises (e.g. pow).
try:
exp = op(first.to_dense(), 4)
exp_fv = op(first.fill_value, 4)
except ValueError:
pass
else:
assert_almost_equal(res4.fill_value, exp_fv)
assert_almost_equal(res4.to_dense(), exp)
with np.errstate(all="ignore"):
for first_arr, second_arr in [(arr1, arr2), (farr1, farr2)]:
_check_op(op, first_arr, second_arr)
def test_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
tm.assert_sp_array_equal(unpickled, obj)
_check_roundtrip(self.arr)
_check_roundtrip(self.zarr)
def test_generator_warnings(self):
sp_arr = SparseArray([1, 2, 3])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings(action="always", category=DeprecationWarning)
warnings.filterwarnings(action="always", category=PendingDeprecationWarning)
for _ in sp_arr:
pass
assert len(w) == 0
def test_fillna(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0])
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan])
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0)
res = s.fillna(-1)
exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
# float dtype's fill_value is np.nan, replaced by -1
s = SparseArray([0.0, 0.0, 0.0, 0.0])
res = s.fillna(-1)
exp = SparseArray([0.0, 0.0, 0.0, 0.0], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
# int dtype shouldn't have missing. No changes.
s = SparseArray([0, 0, 0, 0])
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
res = s.fillna(-1)
tm.assert_sp_array_equal(res, s)
s = SparseArray([0, 0, 0, 0], fill_value=0)
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=0)
tm.assert_sp_array_equal(res, exp)
# fill_value can be nan if there is no missing hole.
# only fill_value will be changed
s = SparseArray([0, 0, 0, 0], fill_value=np.nan)
assert s.dtype == SparseDtype(np.int64, fill_value=np.nan)
assert np.isnan(s.fill_value)
res = s.fillna(-1)
exp = SparseArray([0, 0, 0, 0], fill_value=-1)
tm.assert_sp_array_equal(res, exp)
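# Reading aid for test_fillna, based on the expectations asserted above:
# fillna(v) replaces NaN entries among the stored values; when the array's
# fill_value is itself NaN the result's fill_value becomes v, whereas an
# existing non-NaN fill_value (e.g. 0) is kept; integer-dtype arrays cannot
# hold missing values, so fillna leaves them unchanged apart from swapping a
# NaN fill_value for v.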
def test_fillna_overlap(self):
s = SparseArray([1, np.nan, np.nan, 3, np.nan])
# filling with existing value doesn't replace existing value with
# fill_value, i.e. existing 3 remains in sp_values
res = s.fillna(3)
exp = np.array([1, 3, 3, 3, 3], dtype=np.float64)
tm.assert_numpy_array_equal(res.to_dense(), exp)
s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)
res = s.fillna(3)
exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64)
tm.assert_sp_array_equal(res, exp)
def test_nonzero(self):
# Tests regression #21172.
sa = pd.SparseArray([float("nan"), float("nan"), 1, 0, 0, 2, 0, 0, 0, 3, 0, 0])
expected = np.array([2, 5, 9], dtype=np.int32)
result, = sa.nonzero()
tm.assert_numpy_array_equal(expected, result)
sa = pd.SparseArray([0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 0, 0])
result, = sa.nonzero()
tm.assert_numpy_array_equal(expected, result)
class TestSparseArrayAnalytics:
@pytest.mark.parametrize(
"data,pos,neg",
[
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0),
],
)
def test_all(self, data, pos, neg):
# GH 17570
out = SparseArray(data).all()
assert out
out = SparseArray(data, fill_value=pos).all()
assert out
data[1] = neg
out = SparseArray(data).all()
assert not out
out = SparseArray(data, fill_value=pos).all()
assert not out
@pytest.mark.parametrize(
"data,pos,neg",
[
([True, True, True], True, False),
([1, 2, 1], 1, 0),
([1.0, 2.0, 1.0], 1.0, 0.0),
],
)
@td.skip_if_np_lt("1.15") # prior didn't dispatch
def test_numpy_all(self, data, pos, neg):
# GH 17570
out = np.all(SparseArray(data))
assert out
out = np.all(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.all(SparseArray(data))
assert not out
out = np.all(SparseArray(data, fill_value=pos))
assert not out
# raises with a different message on py2.
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.all(SparseArray(data), out=np.array([]))
@pytest.mark.parametrize(
"data,pos,neg",
[
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0),
],
)
def test_any(self, data, pos, neg):
# GH 17570
out = SparseArray(data).any()
assert out
out = SparseArray(data, fill_value=pos).any()
assert out
data[1] = neg
out = SparseArray(data).any()
assert not out
out = SparseArray(data, fill_value=pos).any()
assert not out
@pytest.mark.parametrize(
"data,pos,neg",
[
([False, True, False], True, False),
([0, 2, 0], 2, 0),
([0.0, 2.0, 0.0], 2.0, 0.0),
],
)
@td.skip_if_np_lt("1.15") # prior didn't dispatch
def test_numpy_any(self, data, pos, neg):
# GH 17570
out = np.any(SparseArray(data))
assert out
out = np.any(SparseArray(data, fill_value=pos))
assert out
data[1] = neg
out = np.any(SparseArray(data))
assert not out
out = np.any(SparseArray(data, fill_value=pos))
assert not out
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.any(SparseArray(data), out=out)
def test_sum(self):
data = np.arange(10).astype(float)
out = SparseArray(data).sum()
assert out == 45.0
data[5] = np.nan
out = SparseArray(data, fill_value=2).sum()
assert out == 40.0
out = SparseArray(data, fill_value=np.nan).sum()
assert out == 40.0
def test_numpy_sum(self):
data = np.arange(10).astype(float)
out = np.sum(SparseArray(data))
assert out == 45.0
data[5] = np.nan
out = np.sum(SparseArray(data, fill_value=2))
assert out == 40.0
out = np.sum(SparseArray(data, fill_value=np.nan))
assert out == 40.0
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.sum(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.sum(SparseArray(data), out=out)
@pytest.mark.parametrize(
"data,expected",
[
(
np.array([1, 2, 3, 4, 5], dtype=float), # non-null data
SparseArray(np.array([1.0, 3.0, 6.0, 10.0, 15.0])),
),
(
np.array([1, 2, np.nan, 4, 5], dtype=float), # null data
SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0])),
),
],
)
@pytest.mark.parametrize("numpy", [True, False])
def test_cumsum(self, data, expected, numpy):
cumsum = np.cumsum if numpy else lambda s: s.cumsum()
out = cumsum(SparseArray(data))
tm.assert_sp_array_equal(out, expected)
out = cumsum(SparseArray(data, fill_value=np.nan))
tm.assert_sp_array_equal(out, expected)
out = cumsum(SparseArray(data, fill_value=2))
tm.assert_sp_array_equal(out, expected)
if numpy: # numpy compatibility checks.
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(SparseArray(data), out=out)
else:
axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid.
msg = "axis\\(={axis}\\) out of bounds".format(axis=axis)
with pytest.raises(ValueError, match=msg):
SparseArray(data).cumsum(axis=axis)
def test_mean(self):
data = np.arange(10).astype(float)
out = SparseArray(data).mean()
assert out == 4.5
data[5] = np.nan
out = SparseArray(data).mean()
assert out == 40.0 / 9
def test_numpy_mean(self):
data = np.arange(10).astype(float)
out = np.mean(SparseArray(data))
assert out == 4.5
data[5] = np.nan
out = np.mean(SparseArray(data))
assert out == 40.0 / 9
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.mean(SparseArray(data), dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.mean(SparseArray(data), out=out)
def test_ufunc(self):
# GH 13853 make sure ufunc is applied to fill_value
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([1, np.nan, 2, np.nan, 2])
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index, fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=-1)
result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index, fill_value=1)
tm.assert_sp_array_equal(abs(sparse), result)
tm.assert_sp_array_equal(np.abs(sparse), result)
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2]))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1))
tm.assert_sp_array_equal(np.sin(sparse), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0))
tm.assert_sp_array_equal(np.sin(sparse), result)
def test_ufunc_args(self):
# GH 13853 make sure ufunc is applied to fill_value, including its arg
sparse = SparseArray([1, np.nan, 2, np.nan, -2])
result = SparseArray([2, np.nan, 3, np.nan, -1])
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 2, -2], fill_value=1)
result = SparseArray([2, 0, 3, -1], fill_value=2)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
sparse = SparseArray([1, -1, 0, -2], fill_value=0)
result = SparseArray([2, 0, 1, -1], fill_value=1)
tm.assert_sp_array_equal(np.add(sparse, 1), result)
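# Reading aid for the two ufunc tests above: ufuncs are expected to be applied
# to the stored sp_values and to the fill_value alike, e.g. np.abs turns
# fill_value=-1 into 1 and np.add(sparse, 1) turns fill_value=1 into 2, which
# is exactly what the expected arrays encode.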
@pytest.mark.parametrize("fill_value", [0.0, np.nan])
def test_modf(self, fill_value):
# https://github.com/pandas-dev/pandas/issues/26946
sparse = pd.SparseArray([fill_value] * 10 + [1.1, 2.2], fill_value=fill_value)
r1, r2 = np.modf(sparse)
e1, e2 = np.modf(np.asarray(sparse))
tm.assert_sp_array_equal(r1, pd.SparseArray(e1, fill_value=fill_value))
tm.assert_sp_array_equal(r2, pd.SparseArray(e2, fill_value=fill_value))
def test_nbytes_integer(self):
arr = SparseArray([1, 0, 0, 0, 2], kind="integer")
result = arr.nbytes
# (2 * 8) + 2 * 4
assert result == 24
def test_nbytes_block(self):
arr = SparseArray([1, 2, 0, 0, 0], kind="block")
result = arr.nbytes
# (2 * 8) + 4 + 4
# sp_values, blocs, blengths
assert result == 24
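# Worked arithmetic for the two nbytes tests above: both arrays store two
# int64 values (2 * 8 = 16 bytes). The integer kind keeps one int32 index per
# stored value (2 * 4 = 8 bytes); the block kind keeps one int32 block
# location plus one int32 block length (4 + 4 = 8 bytes). Either way the
# total is 24 bytes.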
def test_asarray_datetime64(self):
s = pd.SparseArray(pd.to_datetime(["2012", None, None, "2013"]))
np.asarray(s)
def test_density(self):
arr = SparseArray([0, 1])
assert arr.density == 0.5
def test_npoints(self):
arr = SparseArray([0, 1])
assert arr.npoints == 1
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestAccessor:
@pytest.mark.parametrize("attr", ["npoints", "density", "fill_value", "sp_values"])
def test_get_attributes(self, attr):
arr = SparseArray([0, 1])
ser = pd.Series(arr)
result = getattr(ser.sparse, attr)
expected = getattr(arr, attr)
assert result == expected
@td.skip_if_no_scipy
def test_from_coo(self):
import scipy.sparse
row = [0, 3, 1, 0]
col = [0, 3, 1, 2]
data = [4, 5, 7, 9]
sp_array = scipy.sparse.coo_matrix((data, (row, col)))
result = pd.Series.sparse.from_coo(sp_array)
index = pd.MultiIndex.from_arrays([[0, 0, 1, 3], [0, 2, 1, 3]])
expected = pd.Series([4, 9, 7, 5], index=index, dtype="Sparse[int]")
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_to_coo(self):
import scipy.sparse
ser = pd.Series(
[1, 2, 3],
index=pd.MultiIndex.from_product([[0], [1, 2, 3]], names=["a", "b"]),
dtype="Sparse[int]",
)
A, _, _ = ser.sparse.to_coo()
assert isinstance(A, scipy.sparse.coo.coo_matrix)
def test_non_sparse_raises(self):
ser = pd.Series([1, 2, 3])
with pytest.raises(AttributeError, match=".sparse"):
ser.sparse.density
def test_setting_fill_value_fillna_still_works():
# This is why letting users update fill_value / dtype is bad
# astype has the same problem.
arr = SparseArray([1.0, np.nan, 1.0], fill_value=0.0)
arr.fill_value = np.nan
result = arr.isna()
# Can't do direct comparison, since the sp_index will be different
# So let's convert to ndarray and check there.
result = np.asarray(result)
expected = np.array([False, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_setting_fill_value_updates():
arr = SparseArray([0.0, np.nan], fill_value=0)
arr.fill_value = np.nan
# use private constructor to get the index right
# otherwise both nans would be un-stored.
expected = SparseArray._simple_new(
sparse_array=np.array([np.nan]),
sparse_index=IntIndex(2, [1]),
dtype=SparseDtype(float, np.nan),
)
tm.assert_sp_array_equal(arr, expected)
@pytest.mark.parametrize(
"arr, loc",
[
([None, 1, 2], 0),
([0, None, 2], 1),
([0, 1, None], 2),
([0, 1, 1, None, None], 3),
([1, 1, 1, 2], -1),
([], -1),
],
)
def test_first_fill_value_loc(arr, loc):
result = SparseArray(arr)._first_fill_value_loc()
assert result == loc
@pytest.mark.parametrize(
"arr", [[1, 2, np.nan, np.nan], [1, np.nan, 2, np.nan], [1, 2, np.nan]]
)
@pytest.mark.parametrize("fill_value", [np.nan, 0, 1])
def test_unique_na_fill(arr, fill_value):
a = pd.SparseArray(arr, fill_value=fill_value).unique()
b = pd.Series(arr).unique()
assert isinstance(a, SparseArray)
a = np.asarray(a)
tm.assert_numpy_array_equal(a, b)
def test_unique_all_sparse():
# https://github.com/pandas-dev/pandas/issues/23168
arr = SparseArray([0, 0])
result = arr.unique()
expected = SparseArray([0])
tm.assert_sp_array_equal(result, expected)
def test_map():
arr = SparseArray([0, 1, 2])
expected = SparseArray([10, 11, 12], fill_value=10)
# dict
result = arr.map({0: 10, 1: 11, 2: 12})
tm.assert_sp_array_equal(result, expected)
# series
result = arr.map(pd.Series({0: 10, 1: 11, 2: 12}))
tm.assert_sp_array_equal(result, expected)
# function
result = arr.map(lambda x: x + 10)
expected = SparseArray([10, 11, 12], fill_value=10)
tm.assert_sp_array_equal(result, expected)
def test_map_missing():
arr = SparseArray([0, 1, 2])
expected = SparseArray([10, 11, None], fill_value=10)
result = arr.map({0: 10, 1: 11})
tm.assert_sp_array_equal(result, expected)
def test_deprecated_values():
arr = SparseArray([0, 1, 2])
with tm.assert_produces_warning(FutureWarning):
result = arr.values
tm.assert_numpy_array_equal(result, arr.to_dense())
| bsd-3-clause |
NikolaYolov/invenio_backup | modules/webstat/lib/webstat_engine.py | 2 | 87242 | ## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
__lastupdated__ = "$Date$"
import calendar, commands, datetime, time, os, cPickle, random
try:
import xlwt
xlwt_imported = True
except ImportError:
xlwt_imported = False
from invenio.config import CFG_TMPDIR, CFG_SITE_URL, CFG_SITE_NAME, CFG_BINDIR
from invenio.urlutils import redirect_to_url
from invenio.search_engine import perform_request_search, \
get_collection_reclist, \
get_fieldvalues, \
get_most_popular_field_values
from invenio.dbquery import run_sql, \
wash_table_column_name
from invenio.websubmitadmin_dblayer import get_docid_docname_alldoctypes
from invenio.bibcirculation_utils import book_title_from_MARC, \
book_information_from_MARC
from invenio.bibcirculation_dblayer import get_id_bibrec, \
get_borrower_data
WEBSTAT_SESSION_LENGTH = 48 * 60 * 60 # seconds
WEBSTAT_GRAPH_TOKENS = '-=#+@$%&XOSKEHBC'
# KEY EVENT TREND SECTION
def get_keyevent_trend_collection_population(args):
"""
Returns the quantity of documents in Invenio for
the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
if args.get('collection','All') == 'All':
sql_query_g = ("SELECT creation_date FROM bibrec WHERE " + \
"creation_date > '%s' AND creation_date < '%s' " + \
"ORDER BY creation_date DESC") % \
(lower, upper)
sql_query_i = "SELECT COUNT(id) FROM bibrec " + \
"WHERE creation_date < '%s'" % (lower)
else:
ids = perform_request_search(cc=args['collection'])
if len(ids) == 0:
return []
ids_str = str(ids).replace('[', '(').replace(']', ')')
sql_query_g = ("SELECT creation_date FROM bibrec WHERE id IN %s AND " + \
"creation_date > '%s' AND creation_date < '%s' " + \
"ORDER BY creation_date DESC") % \
(ids_str, lower, upper)
sql_query_i = "SELECT COUNT(id) FROM bibrec " + \
"WHERE id IN %s AND creation_date < '%s'" % (ids_str, lower)
action_dates = [x[0] for x in run_sql(sql_query_g)]
initial_quantity = run_sql(sql_query_i)[0][0]
return _get_trend_from_actions(action_dates, initial_quantity,
args['t_start'], args['t_end'],
args['granularity'], args['t_format'])
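# Illustrative usage sketch (not part of the original module; the concrete
# granularity and t_format values below are assumptions rather than documented
# constants). The trend helpers in this section all take an args dict of this
# shape and return the (timestamp, count) pairs produced by
# _get_trend_from_actions():
#
#     args = {'collection': 'All',
#             't_start': '2011-01-01', 't_end': '2011-12-31',
#             'granularity': 'day', 't_format': '%Y-%m-%d'}
#     trend = get_keyevent_trend_collection_population(args)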
def get_keyevent_trend_search_frequency(args):
"""
Returns the number of searches (of any kind) carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql = "SELECT date FROM query INNER JOIN user_query ON id=id_query " + \
"WHERE date > '%s' AND date < '%s' ORDER BY date DESC" % \
(lower, upper)
action_dates = [x[0] for x in run_sql(sql)]
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_comments_frequency(args):
"""
Returns the number of comments (of any kind) carried out
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
if args.get('collection','All') == 'All':
sql = "SELECT date_creation FROM cmtRECORDCOMMENT " + \
"WHERE date_creation > '%s' AND date_creation < '%s'" \
% (lower, upper) + " ORDER BY date_creation DESC"
else:
ids = get_collection_reclist(args['collection']).tolist()
if len(ids) == 0:
return []
ids_str = str(ids).replace('[', '(').replace(']', ')')
sql = "SELECT date_creation FROM cmtRECORDCOMMENT \
WHERE date_creation > '%s' AND date_creation < '%s' \
AND id_bibrec IN %s ORDER BY date_creation DESC" \
% (lower, upper, ids_str)
action_dates = [x[0] for x in run_sql(sql)]
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_search_type_distribution(args):
"""
Returns the number of searches carried out during the given
timestamp range, but also partition them by type Simple and
Advanced.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# SQL to determine all simple searches:
sql = "SELECT date FROM query INNER JOIN user_query ON id=id_query " + \
"WHERE urlargs LIKE '%p=%' " + \
"AND date > '%s' AND date < '%s' ORDER BY date DESC" % (lower, upper)
simple = [x[0] for x in run_sql(sql)]
# SQL to determine all advanced searches:
sql = "SELECT date FROM query INNER JOIN user_query ON id=id_query " + \
"WHERE urlargs LIKE '%as=1%' " + \
"AND date > '%s' AND date < '%s' ORDER BY date DESC" % (lower, upper)
advanced = [x[0] for x in run_sql(sql)]
# Compute the trend for both types
s_trend = _get_trend_from_actions(simple, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
a_trend = _get_trend_from_actions(advanced, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
# Assemble, according to return type
return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
for i in range(len(s_trend))]
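# Sketch of the return shape, inferred from the pairing above: each element
# couples a time bucket (whatever _get_trend_from_actions emits for the chosen
# granularity) with a (simple, advanced) tuple, e.g.
#     [(t0, (120, 7)), (t1, (98, 3)), ...]
# The same pairing pattern is reused by the *_percentage trend helpers below.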
def get_keyevent_trend_download_frequency(args):
"""
Returns the number of full text downloads carried out
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Collect list of timestamps of insertion in the specific collection
if args.get('collection','All') == 'All':
sql = "SELECT download_time FROM rnkDOWNLOADS WHERE download_time > '%s' \
AND download_time < '%s' ORDER BY download_time DESC" % (lower, upper)
else:
ids = get_collection_reclist(args['collection']).tolist()
if len(ids) == 0:
return []
ids_str = str(ids).replace('[', '(').replace(']', ')')
sql = "SELECT download_time FROM rnkDOWNLOADS WHERE download_time > '%s' \
AND download_time < '%s' AND id_bibrec IN %s \
ORDER BY download_time DESC" % (lower, upper, ids_str)
actions = [x[0] for x in run_sql(sql)]
return _get_trend_from_actions(actions, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_number_of_loans(args):
"""
Returns the number of loans carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql = "SELECT loaned_on FROM crcLOAN " + \
"WHERE loaned_on > '%s' AND loaned_on < '%s' ORDER BY loaned_on DESC"\
% (lower, upper)
action_dates = [x[0] for x in run_sql(sql)]
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_web_submissions(args):
"""
Returns the quantity of web submissions in Invenio for
the given timestamp range.
@param args['doctype']: A doctype name
@type args['doctype']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
if args['doctype'] == 'all':
sql_query = "SELECT cd FROM sbmSUBMISSIONS " + \
"WHERE action='SBI' AND cd > '%s' AND cd < '%s'" % (lower, upper) + \
" AND status='finished' ORDER BY cd DESC"
else:
sql_query = "SELECT cd FROM sbmSUBMISSIONS " + \
"WHERE doctype='%s' AND action='SBI' " % args['doctype'] + \
"AND cd > '%s' AND cd < '%s' " % (lower, upper) + \
"AND status='finished' ORDER BY cd DESC"
action_dates = [x[0] for x in run_sql(sql_query)]
return _get_trend_from_actions(action_dates, 0,
args['t_start'], args['t_end'],
args['granularity'], args['t_format'])
def get_keyevent_loan_statistics(args):
"""
Data:
- Number of documents (=records) loaned
- Number of items loaned on the total number of items
- Number of items never loaned on the total number of items
- Average time between the date of the record creation and the date of the first loan
Filter by
- in a specified time span
- by user address (=Department)
- by UDC (see MARC field 080__a - list to be submitted)
- by item status (available, missing)
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l "
sql_where = "WHERE loaned_on > '%s' AND loaned_on < '%s' " % (lower, upper)
if 'user_address' in args and args['user_address'] != '':
sql_from += ", crcBORROWER bor "
sql_where += """AND l.id_crcBORROWER = bor.id AND
bor.address LIKE '%%%s%%' """ % args['user_address']
if 'udc' in args and args['udc'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b \
WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE '%%%s%%')" % args['udc']
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND l.barcode = i.barcode AND i.status = '%s' " % args['item_status']
if 'publication_date' in args and args['publication_date'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b \
WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE '%%%s%%') " % args['publication_date']
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br "
sql_where += """AND br.id=l.id_bibrec AND br.creation_date
LIKE '%%%s%%' """ % args['creation_date']
# Number of loans:
loans = run_sql("SELECT COUNT(DISTINCT l.id_bibrec) " + sql_from + sql_where)[0][0]
# Number of items loaned on the total number of items:
items_loaned = run_sql("SELECT COUNT(DISTINCT l.barcode) " + sql_from + sql_where)[0][0]
total_items = run_sql("SELECT COUNT(*) FROM crcITEM")[0][0]
loaned_on_total = float(items_loaned) / float(total_items)
# Number of items never loaned on the total number of items
never_loaned_on_total = float(total_items - items_loaned) / float(total_items)
# Average time between the date of the record creation and the date of the first loan
avg_sql = "SELECT DATEDIFF(MIN(loaned_on), MIN(br.creation_date)) " + sql_from
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += ", bibrec br "
avg_sql += sql_where
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += "AND br.id=l.id_bibrec "
avg_sql += "GROUP BY l.id_bibrec, br.id"
res_avg = run_sql(avg_sql)
if len(res_avg) > 0:
avg = res_avg[0][0]
else:
avg = 0
return ((loans, ), (loaned_on_total, ), (never_loaned_on_total, ), (avg, ))
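# Return shape of get_keyevent_loan_statistics (reading aid, derived from the
# return statement above): four 1-tuples,
#     ((loaned_records,), (items_loaned / total_items,),
#      (never_loaned_items / total_items,), (days_to_first_loan,))
# The two ratios are fractions of the item count, not percentages, and the
# last figure comes from MySQL's DATEDIFF, i.e. it is measured in days.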
def get_keyevent_loan_lists(args):
"""
Lists:
- List of documents (= records) never loaned
- List of most loaned documents (columns: number of loans,
number of copies and the creation date of the record, in
order to calculate the number of loans by copy), sorted
by decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by loan period (4 week loan, one week loan...)
- by a certain number of loans
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
- by user address (=Department)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['loan_period']: 4 week loan, one week loan...
@type args['loan_period']: str
@param args['min_loan']: minimum number of loans
@type args['min_loan']: int
@param args['max_loan']: maximum number of loans
@type args['max_loan']: int
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l "
sql_where = "WHERE type = 'normal' AND loaned_on > %s AND loaned_on < %s "
param = [lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_from += ", crcBORROWER bor "
sql_where += "AND l.id_crcBORROWER = bor.id AND bor.address LIKE %s "
param.append('%%%s%%' % args['user_address'])
if 'udc' in args and args['udc'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b \
WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE %s)"
param.append('%%%s%%' % args['udc'])
if 'loan_period' in args and args['loan_period'] != '':
sql_from += ", crcITEM i "
sql_where += "AND l.barcode = i.barcode AND i.loan_period = %s "
param.append(args['loan_period'])
if 'publication_date' in args and args['publication_date'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b \
WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE %s) "
param.append('%%%s%%' % args['publication_date'])
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br "
sql_where += "AND br.id=l.id_bibrec AND br.creation_date LIKE %s "
param.append('%%%s%%' % args['creation_date'])
param = tuple(param)
res = [("", "Title", "Author", "Edition", "Number of loans",
"Number of copies", "Date of creation of the record")]
# Documents (= records) never loaned:
for rec, copies in run_sql("""SELECT id_bibrec, COUNT(*) FROM crcITEM WHERE
id_bibrec NOT IN (SELECT l.id_bibrec """ + sql_from + sql_where +
") GROUP BY id_bibrec", param):
loans = run_sql("SELECT COUNT(*) %s %s AND l.id_bibrec=%s" %
(sql_from, sql_where, rec), param)[0][0]
try:
creation = run_sql("SELECT creation_date FROM bibrec WHERE id=%s", (rec, ))[0][0]
except:
creation = datetime.datetime(1970, 01, 01)
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append(('Documents never loaned', book_title_from_MARC(rec), author,
edition, loans, copies, creation))
# Most loaned documents
most_loaned = []
check_num_loans = ""
if 'min_loans' in args and args['min_loans'] != '':
check_num_loans += "COUNT(*) >= %s" % args['min_loans']
if 'max_loans' in args and args['max_loans'] != '' and args['max_loans'] != 0:
if check_num_loans != "":
check_num_loans += " AND "
check_num_loans += "COUNT(*) <= %s" % args['max_loans']
if check_num_loans != "":
check_num_loans = " HAVING " + check_num_loans
mldocs = run_sql("SELECT l.id_bibrec, COUNT(*) " + sql_from + sql_where +
" GROUP BY l.id_bibrec " + check_num_loans, param)
for rec, loans in mldocs:
copies = run_sql("SELECT COUNT(*) FROM crcITEM WHERE id_bibrec=%s", (rec, ))[0][0]
most_loaned.append((rec, loans, copies, loans / copies))
if most_loaned == []:
return (res)
most_loaned.sort(cmp=lambda x, y: cmp(x[3], y[3]))
if len(most_loaned) > 50:
most_loaned = most_loaned[:50]
most_loaned.reverse()
for rec, loans, copies, _ in most_loaned:
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
try:
creation = run_sql("SELECT creation_date FROM bibrec WHERE id=%s", (rec, ))[0][0]
except:
creation = datetime.datetime(1970, 01, 01)
res.append(('Most loaned documents', book_title_from_MARC(rec), author,
edition, loans, copies, creation))
return (res)
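# The list/statistics helpers in this section all follow the pattern used
# above: each optional filter appends an "AND ..." fragment to sql_where and
# pushes the matching value onto param, which is finally handed to run_sql()
# as a tuple. A minimal standalone sketch of that pattern (the function name
# and keys are illustrative, not part of the module):
#
#     def _build_loan_filter(args, lower, upper):
#         sql_where = "WHERE loaned_on > %s AND loaned_on < %s "
#         param = [lower, upper]
#         if args.get('udc'):
#             sql_where += "AND value LIKE %s "
#             param.append('%%%s%%' % args['udc'])
#         return sql_where, tuple(param)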
def get_keyevent_renewals_lists(args):
"""
Lists:
- List of most renewed items stored by decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by collection
- by user address (=Department)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['collection']: collection of the record
@type args['collection']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l, crcITEM i "
sql_where = "WHERE loaned_on > %s AND loaned_on < %s AND i.barcode = l.barcode "
param = [lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_from += ", crcBORROWER bor "
sql_where += "AND l.id_crcBORROWER = bor.id AND bor.address LIKE %s "
param.append('%%%s%%' % args['user_address'])
if 'udc' in args and args['udc'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b \
WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE %s)"
param.append('%%%s%%' % args['udc'])
filter_coll = False
if 'collection' in args and args['collection'] != '':
filter_coll = True
recid_list = get_collection_reclist(args['collection'])
param = tuple(param)
# Results:
res = [("Title", "Author", "Edition", "Number of renewals")]
for rec, renewals in run_sql("SELECT i.id_bibrec, SUM(number_of_renewals) "
+ sql_from + sql_where +
" GROUP BY i.id_bibrec ORDER BY SUM(number_of_renewals) DESC LIMIT 50", param):
if filter_coll and rec not in recid_list:
continue
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, int(renewals)))
return (res)
def get_keyevent_returns_table(args):
"""
Data:
- Number of overdue returns in a year
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Overdue returns:
returns = run_sql("SELECT COUNT(*) FROM crcLOAN l \
WHERE loaned_on > %s AND loaned_on < %s AND \
due_date < NOW() AND (returned_on = '0000-00-00 00:00:00' \
OR returned_on > due_date)", (lower, upper))[0][0]
return ((returns, ), )
def get_keyevent_trend_returns_percentage(args):
"""
Returns the number of overdue returns and the total number of returns
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# SQL to determine overdue returns:
sql = "SELECT due_date FROM crcLOAN " + \
"WHERE loaned_on > %s AND loaned_on < %s AND " + \
"due_date < NOW() AND (returned_on = '0000-00-00 00:00:00' " + \
"OR returned_on > due_date) ORDER BY due_date DESC"
overdue = [x[0] for x in run_sql(sql, (lower, upper))]
# SQL to determine all returns:
sql = "SELECT due_date FROM crcLOAN " + \
"WHERE loaned_on > %s AND loaned_on < %s AND " + \
"due_date < NOW() ORDER BY due_date DESC"
total = [x[0] for x in run_sql(sql, (lower, upper))]
# Compute the trend for both types
s_trend = _get_trend_from_actions(overdue, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
a_trend = _get_trend_from_actions(total, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
# Assemble, according to return type
return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
for i in range(len(s_trend))]
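# Note on the *_percentage trend helpers (this one and the ILL variant below):
# despite the name they return paired counts per time bucket, here
# (overdue_returns, total_returns); computing the actual percentage is
# presumably left to the caller or the plotting layer.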
def get_keyevent_ill_requests_statistics(args):
"""
Data:
- Number of ILL requests
- Number of satisfied ILL requests 3 months after the date of request
creation on a period of one year
- Percentage of satisfied ILL requests 3 months after the date of
request creation on a period of one year
- Average time between the date and the hour of the ill request
date and the date and the hour of the delivery item to the user
on a period of one year (with flexibility in the choice of the dates)
- Average time between the date and the hour the ILL request
was sent to the supplier and the date and hour of the
delivery item on a period of one year (with flexibility in
the choice of the dates)
Filter by
- in a specified time span
- by type of document (book or article)
- by user address
- by status of the request (= new, sent, etc.)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE period_of_interest_from > %s AND period_of_interest_from < %s "
param = [lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_from += ", crcBORROWER bor "
sql_where += "AND ill.id_crcBORROWER = bor.id AND bor.address LIKE %s "
param.append('%%%s%%' % args['user_address'])
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
param = tuple(param)
# Number of requests:
requests = run_sql("SELECT COUNT(*) " + sql_from + sql_where, param)[0][0]
# Number of satisfied ILL requests 3 months after the date of request creation:
satrequests = run_sql("SELECT COUNT(*) " + sql_from + sql_where +
"AND arrival_date != '0000-00-00 00:00:00' AND \
DATEDIFF(arrival_date, period_of_interest_from) < 90 ", param)[0][0]
# Average time between the date and the hour of the ill request date and
# the date and the hour of the delivery item to the user
avgdel = run_sql("SELECT AVG(TIMESTAMPDIFF(HOUR, period_of_interest_from, request_date)) "
+ sql_from + sql_where, param)[0][0]
# AVG() yields NULL (None in Python) when no rows match, so guard before casting
if avgdel is not None:
avgdel = int(avgdel)
else:
avgdel = 0
# Average time between the date and the hour the ILL request was sent to
# the supplier and the date and hour of the delivery item
avgsup = run_sql("SELECT AVG(TIMESTAMPDIFF(HOUR, arrival_date, request_date)) "
+ sql_from + sql_where, param)[0][0]
# Same guard as above: AVG() is None when there are no matching rows
if avgsup is not None:
avgsup = int(avgsup)
else:
avgsup = 0
return ((requests, ), (satrequests, ), (avgdel, ), (avgsup, ))
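# Return shape (reading aid): ((requests,), (satisfied_within_90_days,),
# (avg_hours_request_to_delivery,), (avg_hours_supplier_to_delivery,)).
# Both averages come from TIMESTAMPDIFF(HOUR, ...), so they are expressed in
# hours.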
def get_keyevent_ill_requests_lists(args):
"""
Lists:
- List of ILL requests
Filter by
- in a specified time span
- by type of request (article or book)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of request (article or book)
@type args['doctype']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
# Results:
res = [("Title", "Author", "Edition")]
for item_info in run_sql("SELECT item_info " + sql_from + sql_where + " LIMIT 100", param):
item_info = eval(item_info[0])
try:
res.append((item_info['title'], item_info['authors'], item_info['edition']))
except KeyError:
pass  # skip ILL entries whose item_info lacks title/authors/edition
return (res)
def get_keyevent_trend_satisfied_ill_requests_percentage(args):
"""
Returns the number of satisfied ILL requests 3 months after the date of request
creation and the total number of ILL requests
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_from += ", crcBORROWER bor "
sql_where += "AND ill.id_crcBORROWER = bor.id AND bor.address LIKE %s "
param.append('%%%s%%' % args['user_address'])
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
# SQL to determine satisfied ILL requests:
sql = "SELECT request_date " + sql_from + sql_where + \
"AND ADDDATE(request_date, 90) < NOW() AND (arrival_date != '0000-00-00 00:00:00' " + \
"OR arrival_date < ADDDATE(request_date, 90)) ORDER BY request_date DESC"
satisfied = [x[0] for x in run_sql(sql, param)]
# SQL to determine all ILL requests:
sql = "SELECT request_date " + sql_from + sql_where + \
" AND ADDDATE(request_date, 90) < NOW() ORDER BY request_date DESC"
total = [x[0] for x in run_sql(sql, param)]
# Compute the trend for both types
s_trend = _get_trend_from_actions(satisfied, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
a_trend = _get_trend_from_actions(total, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
# Assemble, according to return type
return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
for i in range(len(s_trend))]
def get_keyevent_items_statistics(args):
"""
Data:
- The total number of items
- Total number of new items added in last year
Filter by
- in a specified time span
- by collection
- by UDC (see MARC field 080__a - list to be submitted)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'udc' in args and args['udc'] != '':
sql_where += "i.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b \
WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE %s)"
param.append('%%%s%%' % args['udc'])
# Number of items:
if sql_where == "WHERE ":
sql_where = ""
items = run_sql("SELECT COUNT(i.id_bibrec) " + sql_from + sql_where, param)[0][0]
# Number of new items:
param += [lower, upper]
if sql_where == "":
sql_where = "WHERE creation_date > %s AND creation_date < %s "
else:
sql_where += " AND creation_date > %s AND creation_date < %s "
new_items = run_sql("SELECT COUNT(i.id_bibrec) " + sql_from + sql_where, param)[0][0]
return ((items, ), (new_items, ))
def get_keyevent_items_lists(args):
"""
Lists:
- The list of items
Filter by
- by library (=physical location of the item)
- by status (=on loan, available, requested, missing...)
@param args['library']: physical location of the item
    @type args['library']: str
@param args['status']: on loan, available, requested, missing...
@type args['status']: str
"""
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'library' in args and args['library'] != '':
sql_from += ", crcLIBRARY li "
sql_where += "li.id=i.id_crcLIBRARY AND li.name=%s "
param.append(args['library'])
if 'status' in args and args['status'] != '':
if sql_where != "WHERE ":
sql_where += "AND "
sql_where += "i.status = %s "
param.append(args['status'])
# Results:
res = [("Title", "Author", "Edition", "Barcode", "Publication date")]
if sql_where == "WHERE ":
sql_where = ""
if len(param) == 0:
sqlres = run_sql("SELECT i.barcode, i.id_bibrec " +
sql_from + sql_where + " LIMIT 100")
else:
sqlres = run_sql("SELECT i.barcode, i.id_bibrec " +
sql_from + sql_where + " LIMIT 100", tuple(param))
for barcode, rec in sqlres:
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec),
author, edition, barcode,
book_information_from_MARC(int(rec))[1]))
return (res)
def get_keyevent_loan_request_statistics(args):
"""
Data:
- Number of hold requests, one week after the date of request creation
- Number of successful hold requests transactions
- Average time between the hold request date and the date of delivery document in a year
Filter by
- in a specified time span
- by item status (available, missing)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND lr.barcode = i.barcode AND i.status = %s "
param.append(args['item_status'])
custom_table = get_customevent_table("loanrequest")
# Number of hold requests, one week after the date of request creation:
holds = run_sql("""SELECT COUNT(*) %s, %s ws %s AND ws.request_id=lr.id AND
DATEDIFF(ws.creation_time, lr.request_date) >= 7""" %
(sql_from, custom_table, sql_where), param)[0][0]
# Number of successful hold requests transactions
    successful_holds = run_sql("SELECT COUNT(*) %s %s AND lr.status='done'" %
(sql_from, sql_where), param)[0][0]
# Average time between the hold request date and the date of delivery document in a year
avg = run_sql("""SELECT AVG(DATEDIFF(ws.creation_time, lr.request_date))
%s, %s ws %s AND ws.request_id=lr.id""" %
(sql_from, custom_table, sql_where), param)[0][0]
    if avg is not None:
        avg = int(avg)
    else:
        avg = 0
    return ((holds, ), (successful_holds, ), (avg, ))
def get_keyevent_loan_request_lists(args):
"""
Lists:
- List of the most requested items
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by user address (=Department)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_from += ", crcBORROWER bor "
sql_where += "AND lr.id_crcBORROWER = bor.id AND bor.address LIKE %s "
param.append('%%%s%%' % args['user_address'])
if 'udc' in args and args['udc'] != '':
sql_where += "AND lr.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b \
WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE %s)"
param.append('%%%s%%' % args['udc'])
res = [("Title", "Author", "Edition", "Barcode")]
# Most requested items:
for barcode in run_sql("SELECT lr.barcode " + sql_from + sql_where +
" GROUP BY barcode ORDER BY COUNT(*) DESC", param):
rec = get_id_bibrec(barcode[0])
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, barcode[0]))
return (res)
def get_keyevent_user_statistics(args):
"""
Data:
- Total number of active users (to be defined = at least one transaction in the past year)
Filter by
- in a specified time span
- by user address
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from_ill = "FROM crcILLREQUEST ill "
sql_from_loan = "FROM crcLOAN l "
sql_where_ill = "WHERE request_date > %s AND request_date < %s "
sql_where_loan = "WHERE loaned_on > %s AND loaned_on < %s "
sql_address = ""
param = [lower, upper, lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_address += ", crcBORROWER bor WHERE id = user AND \
address LIKE %s "
param.append('%%%s%%' % args['user_address'])
# Total number of active users:
users = run_sql("""SELECT COUNT(DISTINCT user)
FROM ((SELECT id_crcBORROWER user %s %s) UNION
(SELECT id_crcBORROWER user %s %s)) res %s""" %
(sql_from_ill, sql_where_ill, sql_from_loan,
sql_where_loan, sql_address), param)[0][0]
return ((users, ), )
def get_keyevent_user_lists(args):
"""
Lists:
- List of most intensive users (ILL requests + Loan)
Filter by
- in a specified time span
- by user address
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['user_address']: borrower address
@type args['user_address']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from_ill = "FROM crcILLREQUEST ill "
sql_from_loan = "FROM crcLOAN l "
sql_where_ill = "WHERE request_date > %s AND request_date < %s "
sql_where_loan = "WHERE loaned_on > %s AND loaned_on < %s "
sql_address = ""
param = [lower, upper, lower, upper]
if 'user_address' in args and args['user_address'] != '':
sql_address += ", crcBORROWER bor WHERE id = user AND \
address LIKE %s "
param.append('%%%s%%' % args['user_address'])
res = [("Name", "Address", "Mailbox", "E-mail", "Number of transactions")]
# List of most intensive users (ILL requests + Loan):
for borrower_id, trans in run_sql("SELECT user, SUM(trans) FROM \
((SELECT id_crcBORROWER user, COUNT(*) trans %s %s GROUP BY id_crcBORROWER) UNION \
(SELECT id_crcBORROWER user, COUNT(*) trans %s %s GROUP BY id_crcBORROWER)) res %s \
GROUP BY user ORDER BY SUM(trans) DESC"
% (sql_from_ill, sql_where_ill, sql_from_loan, sql_where_loan, sql_address), param):
name, address, mailbox, email = get_borrower_data(borrower_id)
res.append((name, address, mailbox, email, int(trans)))
return (res)
# KEY EVENT SNAPSHOT SECTION
def get_keyevent_snapshot_uptime_cmd():
"""
A specific implementation of get_current_event().
@return: The std-out from the UNIX command 'uptime'.
@type: str
"""
    return _run_cmd('uptime').strip().replace('  ', ' ')
def get_keyevent_snapshot_apache_processes():
"""
A specific implementation of get_current_event().
    @return: The number of Apache processes (root + children), as reported by 'ps'.
@type: str
"""
# The number of Apache processes (root+children)
return _run_cmd('ps -e | grep apache2 | grep -v grep | wc -l')
def get_keyevent_snapshot_bibsched_status():
"""
A specific implementation of get_current_event().
@return: Information about the number of tasks in the different status modes.
@type: [(str, int)]
"""
sql = "SELECT status, COUNT(status) FROM schTASK GROUP BY status"
return [(x[0], int(x[1])) for x in run_sql(sql)]
def get_keyevent_snapshot_sessions():
"""
A specific implementation of get_current_event().
@return: The current number of website visitors (guests, logged in)
@type: (int, int)
"""
# SQL to retrieve sessions in the Guests
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email = '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
guests = run_sql(sql)[0][0]
# SQL to retrieve sessions in the Logged in users
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email <> '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
logged_ins = run_sql(sql)[0][0]
# Assemble, according to return type
return (guests, logged_ins)
def get_keyevent_bibcirculation_report(freq='yearly'):
"""
Monthly and yearly report with the total number of circulation
transactions (loans, renewals, returns, ILL requests, hold request).
@param freq: yearly or monthly
@type freq: str
@return: loans, renewals, returns, ILL requests, hold request
@type: (int, int, int, int, int)
"""
if freq == 'monthly':
datefrom = datetime.date.today().strftime("%Y-%m-01 00:00:00")
else: #yearly
datefrom = datetime.date.today().strftime("%Y-01-01 00:00:00")
loans, renewals, returns = run_sql("""SELECT COUNT(*),
        SUM(number_of_renewals), SUM(returned_on<>'0000-00-00')
FROM crcLOAN WHERE loaned_on > %s""", (datefrom, ))[0]
illrequests = run_sql("SELECT COUNT(*) FROM crcILLREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
holdrequest = run_sql("SELECT COUNT(*) FROM crcLOANREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
return (loans, renewals, returns, illrequests, holdrequest)
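# Editor's illustrative sketch (not part of the original module): typical use of the
# report helper above. The concrete numbers are hypothetical.
#
#     loans, renewals, returns, ills, holds = get_keyevent_bibcirculation_report('monthly')
#     # e.g. (42, 7, 30, 3, 5) -- circulation counts since the first day of the current month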
# ERROR LOG STATS
def update_error_log_analyzer():
"""Creates splitted files for today's errors"""
_run_cmd('bash %s/webstat -e -is' % CFG_BINDIR)
def get_invenio_error_log_ranking():
""" Returns the ranking of the errors in the invenio log"""
return _run_cmd('bash %s/webstat -e -ir' % CFG_BINDIR)
def get_invenio_last_n_errors(nerr):
"""Returns the last nerr errors in the invenio log (without details)"""
return _run_cmd('bash %s/webstat -e -il %d' % (CFG_BINDIR, nerr))
def get_invenio_error_details(error):
"""Returns the complete text of the invenio error."""
out = _run_cmd('bash %s/webstat -e -id %s' % (CFG_BINDIR, error))
return out
def get_apache_error_log_ranking():
""" Returns the ranking of the errors in the apache log"""
return _run_cmd('bash %s/webstat -e -ar' % CFG_BINDIR)
# CUSTOM EVENT SECTION
def get_customevent_trend(args):
"""
Returns trend data for a custom event over a given
timestamp range.
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
    @param args['cols']: Columns and their contents to filter on;
                         if absent or empty, all columns are included
    @type args['cols']: [ [ str, str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
tbl_name = get_customevent_table(args['event_id'])
col_names = get_customevent_args(args['event_id'])
sql_query = ["SELECT creation_time FROM %s WHERE creation_time > '%s'"
% (tbl_name, lower)]
sql_query.append("AND creation_time < '%s'" % upper)
sql_param = []
for col_bool, col_title, col_content in args['cols']:
if not col_title in col_names:
continue
if col_content:
if col_bool == "and" or col_bool == "":
sql_query.append("AND %s"
% wash_table_column_name(col_title))
elif col_bool == "or":
sql_query.append("OR %s"
% wash_table_column_name(col_title))
elif col_bool == "and_not":
sql_query.append("AND NOT %s"
% wash_table_column_name(col_title))
else:
continue
sql_query.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql_query.append("ORDER BY creation_time DESC")
sql = ' '.join(sql_query)
dates = [x[0] for x in run_sql(sql, tuple(sql_param))]
return _get_trend_from_actions(dates, 0, args['t_start'], args['t_end'],
args['granularity'], args['t_format'])
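# Editor's illustrative sketch (not part of the original module): a minimal args dict
# for get_customevent_trend. The event id 'alerts' and the cols triple mirror the
# values used by get_alerts() further below; the dates are hypothetical.
#
#     args = {'event_id': 'alerts',
#             't_start': '2011-01-01', 't_end': '2011-02-01',
#             'granularity': 'day', 't_format': '%Y-%m-%d',
#             'cols': [('', '', '')]}
#     trend = get_customevent_trend(args)
#     # trend is a list of ('%Y-%m-%d %H:%M:%S' timestamp, count) tuples, one per day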
def get_customevent_dump(args):
"""
    Similar to a get_event_trend implementation, but NO refining aka frequency
    handling is carried out whatsoever. This is just a dump. A dump!
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
    @param args['cols']: Columns and their contents to filter on;
                         if absent or empty, all columns are included
    @type args['cols']: [ [ str, str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Get customevents
# events_list = [(creation_time, event, [arg1, arg2, ...]), ...]
event_list = []
event_cols = {}
for event_id, i in [(args['ids'][i], str(i))
for i in range(len(args['ids']))]:
# Get all the event arguments and creation times
tbl_name = get_customevent_table(event_id)
col_names = get_customevent_args(event_id)
sql_query = ["SELECT * FROM %s WHERE creation_time > '%s'" % (tbl_name,
lower)] # Note: SELECT * technique is okay here
sql_query.append("AND creation_time < '%s'" % upper)
sql_param = []
for col_bool, col_title, col_content in args['cols' + i]:
if not col_title in col_names:
continue
if col_content:
if col_bool == "and" or col_bool == "":
sql_query.append("AND %s" % \
wash_table_column_name(col_title))
elif col_bool == "or":
sql_query.append("OR %s" % \
wash_table_column_name(col_title))
elif col_bool == "and_not":
sql_query.append("AND NOT %s" % \
wash_table_column_name(col_title))
else:
continue
sql_query.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql_query.append("ORDER BY creation_time DESC")
sql = ' '.join(sql_query)
res = run_sql(sql, tuple(sql_param))
for row in res:
event_list.append((row[1], event_id, row[2:]))
# Get the event col names
try:
event_cols[event_id] = cPickle.loads(run_sql(
"SELECT cols FROM staEVENT WHERE id = %s",
(event_id, ))[0][0])
except TypeError:
event_cols[event_id] = ["Unnamed"]
event_list.sort()
output = []
for row in event_list:
temp = [row[1], row[0].strftime('%Y-%m-%d %H:%M:%S')]
arguments = ["%s: %s" % (event_cols[row[1]][i],
row[2][i]) for i in range(len(row[2]))]
temp.extend(arguments)
output.append(tuple(temp))
return output
def get_customevent_table(event_id):
"""
    Helper function that for a certain event id retrieves the corresponding
event table name.
"""
res = run_sql(
"SELECT CONCAT('staEVENT', number) FROM staEVENT WHERE id = %s", (event_id, ))
try:
return res[0][0]
except IndexError:
# No such event table
return None
def get_customevent_args(event_id):
"""
    Helper function that for a certain event id retrieves the corresponding
event argument (column) names.
"""
res = run_sql("SELECT cols FROM staEVENT WHERE id = %s", (event_id, ))
try:
if res[0][0]:
return cPickle.loads(res[0][0])
else:
return []
except IndexError:
# No such event table
return None
# CUSTOM SUMMARY SECTION
def get_custom_summary_data(query, tag):
"""Returns the annual report data for the specified year
@param year: Year of publication on the journal
@type year: int
@param query: Search query to make customized report
@type query: str
@param tag: MARC tag for the output
@type tag: str
"""
# Check arguments
if tag == '':
tag = "909C4p"
# First get records of the year
recids = perform_request_search(p=query, of="id")
# Then return list by tag
pub = list(get_most_popular_field_values(recids, tag))
sel = 0
for elem in pub:
sel += elem[1]
if len(pub) == 0:
return []
if len(recids) - sel != 0:
pub.append(('Others', len(recids) - sel))
pub.append(('TOTAL', len(recids)))
return pub
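# Editor's illustrative sketch (not part of the original module): shape of the value
# returned by get_custom_summary_data. The query and the journal names are hypothetical;
# '909C4p' is the default tag used above.
#
#     get_custom_summary_data('year:2010', '909C4p')
#     # -> [('Phys. Rev. D', 120), ('JHEP', 80), ('Others', 10), ('TOTAL', 210)]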
def create_custom_summary_graph(data, path, title):
"""
Creates a pie chart with the information from the custom summary and
saves it in the file specified by the path argument
"""
# If no input, we don't bother about anything
if len(data) == 0:
return
os.environ['HOME'] = CFG_TMPDIR
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
return
# make a square figure and axes
matplotlib.rcParams['font.size'] = 8
labels = [x[0] for x in data]
numb_elem = float(len(labels))
width = 6 + numb_elem / 7
gfile = plt.figure(1, figsize=(width, 6))
plt.axes([0.1, 0.1, 4.2 / width, 0.7])
numb = [x[1] for x in data]
total = sum(numb)
fracs = [x * 100 / total for x in numb]
colors = []
random.seed()
    for i in range(int(numb_elem)):
col = 0.5 + float(i) / (numb_elem * 2.0)
rand = random.random() / 2.0
if i % 3 == 0:
red = col
green = col + rand
blue = col - rand
if green > 1.0:
green = 1
elif i % 3 == 1:
red = col - rand
green = col
blue = col + rand
if blue > 1.0:
blue = 1
elif i % 3 == 2:
red = col + rand
green = col - rand
blue = col
if red > 1.0:
red = 1
colors.append((red, green, blue))
patches = plt.pie(fracs, colors=tuple(colors), labels=labels,
autopct='%1i%%', pctdistance=0.8, shadow=True)[0]
ttext = plt.title(title)
plt.setp(ttext, size='xx-large', color='b', family='monospace', weight='extra bold')
legend_keywords = {"prop": {"size": "small"}}
plt.figlegend(patches, labels, 'lower right', **legend_keywords)
plt.savefig(path)
plt.close(gfile)
# GRAPHER
def create_graph_trend(trend, path, settings):
"""
Creates a graph representation out of data produced from get_event_trend.
@param trend: The trend data
@type trend: [(str, str|int|(str|int,...))]
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of graph parameters
@type settings: dict
"""
# If no input, we don't bother about anything
if len(trend) == 0:
return
# If no filename is given, we'll assume STD-out format and ASCII.
if path == '':
settings["format"] = 'asciiart'
if settings["format"] == 'asciiart':
create_graph_trend_ascii_art(trend, path, settings)
elif settings["format"] == 'gnuplot':
create_graph_trend_gnu_plot(trend, path, settings)
elif settings["format"] == "flot":
create_graph_trend_flot(trend, path, settings)
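# Editor's illustrative sketch (not part of the original module): a settings dict
# carrying the main keys read by the three plotting back-ends below. All values
# are hypothetical.
#
#     settings = {'format': 'gnuplot',          # 'asciiart', 'gnuplot' or 'flot'
#                 'title': 'Searches per day',
#                 'xlabel': 'Date', 'ylabel': 'Searches',
#                 'xtic_format': '%Y-%m-%d',
#                 'multiple': None}             # or a list of data-set labels
#     create_graph_trend(trend, '/tmp/searches.png', settings)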
def create_graph_trend_ascii_art(trend, path, settings):
"""Creates the graph trend using ASCII art"""
out = ""
if settings["multiple"] is not None:
# Tokens that will represent the different data sets (maximum 16 sets)
# Set index (=100) to the biggest of the histogram sums
index = max([sum(x[1]) for x in trend])
# Print legend box
out += "Legend: %s\n\n" % ", ".join(["%s (%s)" % x
for x in zip(settings["multiple"], WEBSTAT_GRAPH_TOKENS)])
else:
index = max([x[1] for x in trend])
width = 82
# Figure out the max length of the xtics, in order to left align
xtic_max_len = max([len(_to_datetime(x[0]).strftime(
settings["xtic_format"])) for x in trend])
for row in trend:
# Print the xtic
xtic = _to_datetime(row[0]).strftime(settings["xtic_format"])
out_row = xtic + ': ' + ' ' * (xtic_max_len - len(xtic)) + '|'
try:
col_width = (1.0 * width / index)
except ZeroDivisionError:
col_width = 0
if settings["multiple"] is not None:
# The second value of the row-tuple, represents the n values from
# the n data sets. Each set, will be represented by a different
# ASCII character, chosen from the randomized string
# 'WEBSTAT_GRAPH_TOKENS'.
# NOTE: Only up to 16 (len(WEBSTAT_GRAPH_TOKENS)) data
# sets are supported.
total = sum(row[1])
for i in range(len(row[1])):
col = row[1][i]
try:
out_row += WEBSTAT_GRAPH_TOKENS[i] * int(1.0 * col * col_width)
except ZeroDivisionError:
break
if len([i for i in row[1] if type(i) is int and i > 0]) - 1 > 0:
out_row += out_row[-1]
else:
total = row[1]
try:
out_row += '-' * int(1.0 * total * col_width)
except ZeroDivisionError:
break
# Print sentinel, and the total
out += out_row + '>' + ' ' * (xtic_max_len + 4 +
width - len(out_row)) + str(total) + '\n'
# Write to destination file
if path == '':
print out
else:
open(path, 'w').write(out)
def create_graph_trend_gnu_plot(trend, path, settings):
"""Creates the graph trend using the GNU plot library"""
try:
import Gnuplot
except ImportError:
return
gnup = Gnuplot.Gnuplot()
gnup('set style data linespoints')
if 'size' in settings:
gnup('set terminal png tiny size %s' % settings['size'])
else:
gnup('set terminal png tiny')
gnup('set output "%s"' % path)
if settings["title"] != '':
gnup.title(settings["title"].replace("\"", ""))
if settings["xlabel"] != '':
gnup.xlabel(settings["xlabel"])
if settings["ylabel"] != '':
gnup.ylabel(settings["ylabel"])
if settings["xtic_format"] != '':
xtics = 'set xtics ('
xtics += ', '.join(['"%s" %d' %
(_to_datetime(trend[i][0], '%Y-%m-%d \
%H:%M:%S').strftime(settings["xtic_format"]), i)
for i in range(len(trend))]) + ')'
gnup(xtics)
gnup('set format y "%.0f"')
# If we have multiple data sets, we need to do
# some magic to make Gnuplot eat it,
# This is basically a matrix transposition,
# and the addition of index numbers.
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
plot_items = []
y_max = 0
y_min = 0
for col in range(cols):
data = []
for row in range(rows):
data.append([row, trend[row][1][col]])
plot_items.append(Gnuplot.PlotItems
.Data(data, title=settings["multiple"][col]))
tmp_max = max([x[col] for x in data])
tmp_min = min([x[col] for x in data])
if tmp_max > y_max:
y_max = tmp_max
if tmp_min < y_min:
y_min = tmp_min
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(*plot_items)
else:
data = [x[1] for x in trend]
y_max = max(data)
y_min = min(data)
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(data)
def create_graph_trend_flot(trend, path, settings):
"""Creates the graph trend using the flot library"""
out = """<!--[if IE]><script language="javascript" type="text/javascript"
src="%(site)s/js/excanvas.min.js"></script><![endif]-->
<script language="javascript" type="text/javascript" src="%(site)s/js/jquery.min.js"></script>
<script language="javascript" type="text/javascript" src="%(site)s/js/jquery.flot.min.js"></script>
<script language="javascript" type="text/javascript" src="%(site)s/js/jquery.flot.selection.min.js"></script>
<script id="source" language="javascript" type="text/javascript">
document.write('<div style="float:left"><div id="placeholder" style="width:500px;height:400px"></div></div>'+
'<div id="miniature" style="float:left;margin-left:20px;margin-top:50px">' +
'<div id="overview" style="width:250px;height:200px"></div>' +
'<p id="overviewLegend" style="margin-left:10px"></p>' +
'</div>');
$(function () {
function parseDate(sdate){
var div1 = sdate.split(' ');
var day = div1[0].split('-');
var hour = div1[1].split(':');
return new Date(day[0], day[1]-1, day[2], hour[0], hour[1], hour[2]).getTime()
- (new Date().getTimezoneOffset() * 60 * 1000) ;
}
function getData() {""" % \
{'site': CFG_SITE_URL}
# Create variables with the format dn = [[x1,y1], [x2,y2]]
minx = trend[0][0]
maxx = trend[0][0]
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
first = 0
for col in range(cols):
out += """var d%d = [""" % (col)
for row in range(rows):
if(first == 0):
first = 1
else:
out += ", "
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
out += '[parseDate("%s"),%d]' % \
(_to_datetime(trend[row][0], '%Y-%m-%d \
%H:%M:%S'), trend[row][1][col])
out += "];\n"
out += "return [\n"
first = 0
for col in range(cols):
if first == 0:
first = 1
else:
out += ", "
out += '{data : d%d, label : "%s"}' % \
(col, settings["multiple"][col])
out += "];\n}\n"
else:
out += """var d1 = ["""
rows = len(trend)
first = 0
for row in range(rows):
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
if first == 0:
first = 1
else:
out += ', '
out += '[parseDate("%s"),%d]' % \
(_to_datetime(trend[row][0], '%Y-%m-%d %H:%M:%S'),
trend[row][1])
out += """];
return [d1];
}
"""
# Set options
tics = ""
if settings["xtic_format"] != '':
tics = 'xaxis: { mode:"time",min:parseDate("%s"),max:parseDate("%s")},'\
% (_to_datetime(minx, '%Y-%m-%d %H:%M:%S'),
_to_datetime(maxx, '%Y-%m-%d %H:%M:%S'))
tics += """
yaxis: {
tickDecimals : 0
},
"""
out += """var options ={
series: {
lines: { show: true },
points: { show: false }
},
legend: { show : false},
%s
grid: { hoverable: true, clickable: true },
selection: { mode: "xy" }
};
""" % tics
# Write the plot method in javascript
out += """var startData = getData();
var plot = $.plot($("#placeholder"), startData, options);
var overview = $.plot($("#overview"), startData, {
legend: { show: true, container: $("#overviewLegend") },
series: {
lines: { show: true, lineWidth: 1 },
shadowSize: 0
},
%s
grid: { color: "#999" },
selection: { mode: "xy" }
});
""" % tics
# Tooltip and zoom
out += """ function showTooltip(x, y, contents) {
$('<div id="tooltip">' + contents + '</div>').css( {
position: 'absolute',
display: 'none',
top: y - 5,
left: x + 10,
border: '1px solid #fdd',
padding: '2px',
'background-color': '#fee',
opacity: 0.80
}).appendTo("body").fadeIn(200);
}
var previousPoint = null;
$("#placeholder").bind("plothover", function (event, pos, item) {
if (item) {
if (previousPoint != item.datapoint) {
previousPoint = item.datapoint;
$("#tooltip").remove();
var y = item.datapoint[1];
showTooltip(item.pageX, item.pageY, y);
}
}
else {
$("#tooltip").remove();
previousPoint = null;
}
});
$("#placeholder").bind("plotclick", function (event, pos, item) {
if (item) {
plot.highlight(item.series, item.datapoint);
}
});
$("#placeholder").bind("plotselected", function (event, ranges) {
// clamp the zooming to prevent eternal zoom
if (ranges.xaxis.to - ranges.xaxis.from < 0.00001){
ranges.xaxis.to = ranges.xaxis.from + 0.00001;}
if (ranges.yaxis.to - ranges.yaxis.from < 0.00001){
ranges.yaxis.to = ranges.yaxis.from + 0.00001;}
// do the zooming
plot = $.plot($("#placeholder"), startData,
$.extend(true, {}, options, {
xaxis: { min: ranges.xaxis.from, max: ranges.xaxis.to },
yaxis: { min: ranges.yaxis.from, max: ranges.yaxis.to }
}));
// don't fire event on the overview to prevent eternal loop
overview.setSelection(ranges, true);
});
$("#overview").bind("plotselected", function (event, ranges) {
plot.setSelection(ranges);
});
});
</script>
<noscript>Your browser does not support JavaScript!
Please, select another output format</noscript>"""
open(path, 'w').write(out)
def create_graph_table(data, path, settings):
"""
Creates a html table representation out of data.
@param data: The data
@type data: (str,...)
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of table parameters
@type settings: dict
"""
out = """<table border="1">
"""
if settings['rows'] == []:
for row in data:
out += """<tr>
"""
for value in row:
out += """<td>%s</td>
""" % value
out += "</tr>"
else:
for dta, value in zip(settings['rows'], data):
out += """<tr>
<td>%s</td>
<td>
""" % dta
for vrow in value:
out += """%s<br />
""" % vrow
out = out[:-6] + "</td></tr>"
out += "</table>"
open(path, 'w').write(out)
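# Editor's illustrative sketch (not part of the original module): with an empty
# settings['rows'] every element of `data` becomes one table row; otherwise each
# entry of settings['rows'] is used as a left-hand label for the matching data row.
# The file path is hypothetical.
#
#     create_graph_table([('Title', 'Author'), ('Foo', 'Bar')], '/tmp/table.html',
#                        {'rows': []})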
def create_graph_dump(dump, path):
"""
Creates a graph representation out of data produced from get_event_trend.
@param dump: The dump data
@type dump: [(str|int,...)]
@param path: Where to store the graph
@type path: str
"""
out = ""
if len(dump) == 0:
out += "No actions for this custom event " + \
"are registered in the given time range."
else:
# Make every row in dump equally long, insert None if appropriate.
max_len = max([len(x) for x in dump])
events = [tuple(list(x) + [None] * (max_len - len(x))) for x in dump]
cols = ["Event", "Date and time"] + ["Argument %d" % i
for i in range(max_len - 2)]
column_widths = [max([len(str(x[i])) \
for x in events + [cols]]) + 3 for i in range(len(events[0]))]
for i in range(len(cols)):
out += cols[i] + ' ' * (column_widths[i] - len(cols[i]))
out += "\n"
for i in range(len(cols)):
out += '=' * (len(cols[i])) + ' ' * (column_widths[i] - len(cols[i]))
out += "\n\n"
for action in dump:
for i in range(len(action)):
if action[i] is None:
temp = ''
else:
temp = action[i]
out += str(temp) + ' ' * (column_widths[i] - len(str(temp)))
out += "\n"
# Write to destination file
if path == '':
print out
else:
open(path, 'w').write(out)
# EXPORT DATA TO SLS
def get_search_frequency(day=datetime.datetime.now().date()):
"""Returns the number of searches performed in the chosen day"""
searches = get_keyevent_trend_search_type_distribution(get_args(day))
return sum(searches[0][1])
def get_total_records(day=datetime.datetime.now().date()):
"""Returns the total number of records which existed in the chosen day"""
tomorrow = (datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")
args = {'collection': CFG_SITE_NAME, 't_start': day.strftime("%Y-%m-%d"),
't_end': tomorrow, 'granularity': "day", 't_format': "%Y-%m-%d"}
try:
return get_keyevent_trend_collection_population(args)[0][1]
except IndexError:
return 0
def get_new_records(day=datetime.datetime.now().date()):
"""Returns the number of new records submitted in the chosen day"""
args = {'collection': CFG_SITE_NAME,
't_start': (day - datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
't_end': day.strftime("%Y-%m-%d"), 'granularity': "day",
't_format': "%Y-%m-%d"}
try:
return (get_total_records(day) -
get_keyevent_trend_collection_population(args)[0][1])
except IndexError:
return 0
def get_download_frequency(day=datetime.datetime.now().date()):
"""Returns the number of downloads during the chosen day"""
return get_keyevent_trend_download_frequency(get_args(day))[0][1]
def get_comments_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_comments_frequency(get_args(day))[0][1]
def get_loans_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_number_of_loans(get_args(day))[0][1]
def get_web_submissions(day=datetime.datetime.now().date()):
"""Returns the number of web submissions during the chosen day"""
args = get_args(day)
args['doctype'] = 'all'
return get_keyevent_trend_web_submissions(args)[0][1]
def get_alerts(day=datetime.datetime.now().date()):
"""Returns the number of alerts during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'alerts'
return get_customevent_trend(args)[0][1]
def get_journal_views(day=datetime.datetime.now().date()):
"""Returns the number of journal displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'journals'
return get_customevent_trend(args)[0][1]
def get_basket_views(day=datetime.datetime.now().date()):
"""Returns the number of basket displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'baskets'
return get_customevent_trend(args)[0][1]
def get_args(day):
"""Returns the most common arguments for the exporting to SLS methods"""
return {'t_start': day.strftime("%Y-%m-%d"),
't_end': (day + datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
'granularity': "day", 't_format': "%Y-%m-%d"}
# EXPORTER
def export_to_python(data, req):
"""
Exports the data to Python code.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
_export("text/x-python", str(data), req)
def export_to_csv(data, req):
"""
Exports the data to CSV.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
csv_list = [""""%s",%s""" % (x[0], ",".join([str(y) for y in \
((type(x[1]) is tuple) and x[1] or (x[1], ))])) for x in data]
_export('text/csv', '\n'.join(csv_list), req)
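# Editor's illustrative sketch (not part of the original module): the row formatting
# performed by export_to_csv. Tuple values are flattened onto the same line.
#
#     data = [('2011-01-01', (3, 5)), ('2011-01-02', 7)]
#     # produces the CSV body:
#     #   "2011-01-01",3,5
#     #   "2011-01-02",7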
def export_to_excel(data, req):
"""
Exports the data to excel.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
if not xlwt_imported:
raise Exception("Module xlwt not installed")
book = xlwt.Workbook(encoding="utf-8")
sheet1 = book.add_sheet('Sheet 1')
for row in range(0, len(data)):
for col in range(0, len(data[row])):
sheet1.write(row, col, "%s" % data[row][col])
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '') + '.xls'
book.save(filename)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), 'application/vnd.ms-excel'))
# INTERNAL
def _export(mime, content, req):
"""
Helper function to pass on the export call. Create a
temporary file in which the content is stored, then let
redirect to the export web interface.
"""
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '')
open(filename, 'w').write(content)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), mime))
def _get_trend_from_actions(action_dates, initial_value,
t_start, t_end, granularity, dt_format):
"""
Given a list of dates reflecting some sort of action/event, and some additional parameters,
an internal data format is returned. 'initial_value' set to zero, means that the frequency
will not be accumulative, but rather non-causal.
@param action_dates: A list of dates, indicating some sort of action/event.
@type action_dates: [datetime.datetime]
@param initial_value: The numerical offset the first action's value should make use of.
@type initial_value: int
@param t_start: Start time for the time domain in format %Y-%m-%d %H:%M:%S
@type t_start: str
    @param t_end: End time for the time domain in format %Y-%m-%d %H:%M:%S
    @type t_end: str
@param granularity: The granularity of the time domain, span between values.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
@param dt_format: Format of the 't_start' and 't_stop' parameters
@type dt_format: str
@return: A list of tuples zipping a time-domain and a value-domain
@type: [(str, int)]
"""
    # Insert the maximum date at the front as a sentinel indicating we're done
action_dates.insert(0, datetime.datetime.max)
# Create an iterator running from the first day of activity
dt_iter = _get_datetime_iter(t_start, granularity, dt_format)
# Construct the datetime tuple for the stop time
stop_at = _to_datetime(t_end, dt_format) - datetime.timedelta(seconds=1)
# If our t_start is more recent than the initial action_dates, we need to
# drop those.
t_start_dt = _to_datetime(t_start, dt_format)
while action_dates[-1] < t_start_dt:
action_dates = action_dates[:-1]
vector = [(None, initial_value)]
# pylint: disable=E1101
old = dt_iter.next()
# pylint: enable=E1101
upcoming_action = action_dates.pop()
for current in dt_iter:
# Counter of action_dates in the current span, set the initial value to
        # zero to avoid accumulation.
if initial_value != 0:
actions_here = vector[-1][1]
else:
actions_here = 0
# Check to see if there's an action date in the current span
while old <= upcoming_action < current:
actions_here += 1
try:
upcoming_action = action_dates.pop()
except IndexError:
upcoming_action = datetime.datetime.max
vector.append((old.strftime('%Y-%m-%d %H:%M:%S'), actions_here))
old = current
# Make sure to stop the iteration at the end time
if current > stop_at:
break
# Remove the first bogus tuple, and return
return vector[1:]
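# Editor's illustrative sketch (not part of the original module): _get_trend_from_actions
# buckets the (descending-ordered) action dates into granularity-sized spans. The dates
# below are hypothetical.
#
#     dates = [datetime.datetime(2011, 1, 3), datetime.datetime(2011, 1, 1)]
#     _get_trend_from_actions(dates, 0, '2011-01-01', '2011-01-04', 'day', '%Y-%m-%d')
#     # -> [('2011-01-01 00:00:00', 1), ('2011-01-02 00:00:00', 0),
#     #     ('2011-01-03 00:00:00', 1)]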
def _get_datetime_iter(t_start, granularity='day',
dt_format='%Y-%m-%d %H:%M:%S'):
"""
Returns an iterator over datetime elements starting at an arbitrary time,
with granularity of a [year,month,day,hour,minute,second].
@param t_start: An arbitrary starting time in format %Y-%m-%d %H:%M:%S
@type t_start: str
@param granularity: The span between iterable elements, default is 'days'.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
@param format: Format of the 't_start' parameter
@type format: str
@return: An iterator of points in time
@type: iterator over datetime elements
"""
tim = _to_datetime(t_start, dt_format)
# Make a time increment depending on the granularity and the current time
# (the length of years and months vary over time)
span = ""
while True:
yield tim
if granularity == "year":
span = (calendar.isleap(tim.year) and ["days=366"] or ["days=365"])[0]
elif granularity == "month":
span = "days=" + str(calendar.monthrange(tim.year, tim.month)[1])
elif granularity == "day":
span = "days=1"
elif granularity == "hour":
span = "hours=1"
elif granularity == "minute":
span = "minutes=1"
elif granularity == "second":
span = "seconds=1"
else:
# Default just in case
span = "days=1"
tim += eval("datetime.timedelta(" + span + ")")
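# Editor's illustrative sketch (not part of the original module): stepping through the
# iterator returned by _get_datetime_iter with a monthly granularity.
#
#     it = _get_datetime_iter('2011-01-01 00:00:00', granularity='month')
#     [it.next() for _ in range(3)]
#     # -> [datetime.datetime(2011, 1, 1, 0, 0),
#     #     datetime.datetime(2011, 2, 1, 0, 0),
#     #     datetime.datetime(2011, 3, 1, 0, 0)]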
def _to_datetime(dttime, dt_format='%Y-%m-%d %H:%M:%S'):
"""
Transforms a string into a datetime
"""
return datetime.datetime(*time.strptime(dttime, dt_format)[:6])
def _run_cmd(command):
"""
Runs a certain command and returns the string output. If the command is
not found a string saying so will be returned. Use with caution!
@param command: The UNIX command to execute.
@type command: str
@return: The std-out from the command.
@type: str
"""
return commands.getoutput(command)
def _get_doctypes():
"""Returns all the possible doctypes of a new submission"""
doctypes = [("all", "All")]
for doctype in get_docid_docname_alldoctypes():
doctypes.append(doctype)
return doctypes
def _get_item_statuses():
"""Returns all the possible status of an item"""
return [("available", "Available"), ("requested", "Requested"),
("on loan", "On loan"), ("missing", "Missing")]
def _get_item_doctype():
"""Returns all the possible types of document for an item"""
dts = []
for dat in run_sql("""SELECT DISTINCT(request_type)
FROM crcILLREQUEST ORDER BY request_type ASC"""):
dts.append((dat[0], dat[0]))
return dts
def _get_request_statuses():
"""Returns all the possible statuses for an ILL request"""
dts = []
for dat in run_sql("SELECT DISTINCT(status) FROM crcILLREQUEST ORDER BY status ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_libraries():
"""Returns all the possible libraries"""
dts = []
for dat in run_sql("SELECT name FROM crcLIBRARY ORDER BY name ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_loan_periods():
"""Returns all the possible loan periods for an item"""
dts = []
for dat in run_sql("SELECT DISTINCT(loan_period) FROM crcITEM ORDER BY loan_period ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_tag_name(tag):
"""
For a specific MARC tag, it returns the human-readable name
"""
res = run_sql("SELECT name FROM tag WHERE value LIKE '%%%s%%'" % (tag))
if res:
return res[0][0]
res = run_sql("SELECT name FROM tag WHERE value LIKE '%%%s%%'" % (tag[:-1]))
if res:
return res[0][0]
return ''
| gpl-2.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/axes_grid/demo_colorbar_with_inset_locator.py | 8 | 1111 | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
fig = plt.figure(1, [6, 3])
# first subplot
ax1 = fig.add_subplot(121)
axins1 = inset_axes(ax1,
width="50%", # width = 10% of parent_bbox width
height="5%", # height : 50%
loc=1)
im1=ax1.imshow([[1,2],[2, 3]])
plt.colorbar(im1, cax=axins1, orientation="horizontal", ticks=[1,2,3])
axins1.xaxis.set_ticks_position("bottom")
# second subplot
ax = fig.add_subplot(122)
axins = inset_axes(ax,
width="5%", # width = 10% of parent_bbox width
height="50%", # height : 50%
loc=3,
bbox_to_anchor=(1.05, 0., 1, 1),
bbox_transform=ax.transAxes,
borderpad=0,
)
# Controlling the placement of the inset axes is basically same as that
# of the legend. you may want to play with the borderpad value and
# the bbox_to_anchor coordinate.
im=ax.imshow([[1,2],[2, 3]])
plt.colorbar(im, cax=axins, ticks=[1,2,3])
plt.draw()
plt.show()
| gpl-2.0 |
djrodgerspryor/MCSE-Simulator | analysis.py | 1 | 28401 | import nanosim
import numpy as np
from itertools import repeat, izip, chain
import scipy.ndimage
from scipy.interpolate import griddata
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.filters import gaussian_filter
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import MultipleLocator
from sets import Set
from collections import namedtuple
from utilities import *
import numexpr as ne
smoothing_factor = 0.01 # Smoothing width (gaussian std. dev.) as a percentage of time-domain
m = 1000 # Number of points in the smoothed line
y_paddding_percentage = .3 # Percentage of blank space around the y-range of the data
plt = nanosim.plt
ncolours = 12
colourmap_positions = np.linspace(0, 1.0, int(np.ceil(ncolours * 1.5))) # Generate (1.5 * ncolours) evenly-spaced colourmap positions
colourmap_positions = np.array([p for i, p in enumerate(colourmap_positions) if ((i+1) % 3) == 0]) # Reduce to (ncolours) positions by dropping every third
# Dropping every third colour ensures that there are pairs of similar colours and that those pairs are distinct from other pairs
line_colours = list(plt.get_cmap('jet')(colourmap_positions))
image_colours = ('Blues', 'Reds', 'RdPu')
SweepData = namedtuple('SweepData', 'forward, backward')
ComponentData = namedtuple('ComponentData', 'means, time_means, vars_p, vars_m, label')
def voltage_sweep(inputs, v_ranges, steptime, all_probes, v_probes, i_probes, q_probes, V, I, Q):
'''
Recursivley (for each dimension) set voltage values, then simulate and grab the data.
'''
if len(inputs) == 0:
# Clear logs
nanosim.reset_logs()
# Run sim
nanosim.simulate(steptime, logging = all_probes)
# Extract data from component logs
for k, p in enumerate(v_probes):
V[k] = p.voltages
for k, p in enumerate(i_probes):
I[k] = p.current
for k, p in enumerate(q_probes):
Q[k] = p.charges
else:
input_component = inputs[0]
v_range = v_ranges[0]
inputs = inputs[1:]
v_ranges = v_ranges[1:]
for j, v in enumerate(v_range):
input_component.set_V(v)
voltage_sweep(inputs, v_ranges, steptime, all_probes, v_probes, i_probes, q_probes, V[:, j], I[:, j], Q[:, j])
def vars_and_means(raw_data, steptime, probes, data_array_shape, flip = False):
'''
Calculate vars and means over both time and repetitions from raw sim data.
'''
if raw_data.size == 0: return [], [], [], []
# The mean/variance values over time for each component, at each voltage level combination, and at each repetition
time_means = np.zeros([len(probes)] + data_array_shape)
time_vars_plus = np.zeros([len(probes)] + data_array_shape)
time_vars_minus = np.zeros([len(probes)] + data_array_shape)
# Index order for these arrays: component, v1, (v2), repetition
time_vars = time_vars_plus, time_vars_minus
# Fill array of means by averaging over time
it = np.nditer(raw_data, flags=['multi_index', 'refs_ok'])
while not it.finished:
        events = it[0][()] # NumPy's nditer yields a 0-d array, not the actual object, so this strange getitem call is required to extract it
times, values = zip(*events)
# Weight each value by the period of time until the next measurement
times += (steptime,) # End time
times = np.diff(times)
mean = ne.evaluate('sum(times * values)') / steptime
time_means[it.multi_index] = mean # Assign mean to array
residuals = values - mean # Residual at each event
plus_indicies, minus_indicies = (residuals > 0), (residuals < 0)
residuals = ne.evaluate('(times * (residuals ** 2)) / steptime') # Scaled (Note: square *before* scaling)
residuals = residuals[plus_indicies], residuals[minus_indicies] # +, - residuals
# Assign variance to arrays
time_vars_plus[it.multi_index] = np.sum(residuals[0])
time_vars_minus[it.multi_index] = np.sum(residuals[1])
it.iternext()
# The mean values over repetition for each component, at each voltage level combination
means = np.mean(time_means, axis = -1)
# Index order for these arrays: component, v1, (v2)
# The variance values over repetition for each component, at each voltage level combination
extended_means = means.reshape(means.shape + (1,)) # Extend with a single index in the repetitions-dimension - allows numpy to broadcast properly in the next step
repetition_residuals = time_means - extended_means # Residual at each repetition
    # Split into positive and negative residuals
res_plus, res_minus = np.zeros(repetition_residuals.shape), np.zeros(repetition_residuals.shape)
plus_indicies, minus_indicies = (repetition_residuals > 0), (repetition_residuals < 0)
res_plus[plus_indicies] = repetition_residuals[plus_indicies]
res_minus[minus_indicies] = repetition_residuals[minus_indicies]
repetition_residuals = res_plus**2, res_minus**2
# Add mean variance over time, to mean variance between repetitions, then take the sqrt to get the std. dev.
variances_plus, variances_minus = [np.sqrt(np.mean(t_vars, axis = -1) + np.mean(rep_vars, axis = -1)) for t_vars, rep_vars in zip(time_vars, repetition_residuals)]
# Index order for these arrays: component, v1, (v2)
# Undo backwards-voltage sweep ordering
if flip:
if len(means.shape) == 2: # For dim-1 plots
means = means[:, ::-1]
time_means = time_means[:, ::-1, :]
variances_plus = variances_plus[:, ::-1]
variances_minus = variances_minus[:, ::-1]
elif len(means.shape) == 3: # For dim-2 plots
means = means[:, ::-1, ::-1]
time_means = time_means[:, ::-1, ::-1, :]
variances_plus = variances_plus[:, ::-1, ::-1]
variances_minus = variances_minus[:, ::-1, ::-1]
means = np.transpose(means, (0, 2, 1))
time_means = np.transpose(time_means, (0, 2, 1, 3))
variances_plus = np.transpose(variances_plus, (0, 2, 1))
variances_minus = np.transpose(variances_minus, (0, 2, 1))
return means, time_means, variances_plus, variances_minus
def presmooth_current(I, steptime):
'''
Calculate vars and means for currents over repetitions from raw sim data.
Currents are different to voltages and charges, since they are all instantaneous events; measuring current variance over time has no meaning*.
* Technically you could estimate variance in frequency of events over time, and probably do some Poisson distribution stuff to characterise the
time-variance, but for now, I think it's probably fine the way it is.
'''
if I.size == 0: return [], []
it = np.nditer(I, flags=['multi_index', 'refs_ok'])
while not it.finished:
        events = it[0][()] # NumPy's nditer yields a 0-d array, not the actual object, so this strange getitem call is required to extract it
times, currents = zip(*events)
times, currents = spread_signal_spikes(times, currents, x_range = (0, steptime), m = m)
        smoothing_window = int(m * 0.4) # Smoothing radius for softening variance computation
times = smooth1D(times, k = smoothing_window)
currents = smooth1D(currents, k = smoothing_window)
I[it.multi_index] = zip(times, currents)
it.iternext()
def characterise(runtime, inputs, input_ranges, steps = 200, v_probes = [], i_probes = [], q_probes = [], v_diffs = [], repetitions = 10, raw_data = True, mean_data = True, variance = True, show = True, fig = False, axs = False, colours = None):
'''
Characterise a circuit over two, independant input dimensions.
runtime: time, in seconds that each simulation is to be run for
inputs: list of reservoirs to voltage-sweep with
input_ranges: a 2-tuple for each input that has min and max voltages to sweep over
steps: number of voltage-steps
v_probes: components to be monitored for voltage-levels
i_probes: components to be monitored for current
q_probes: components to be monitored for charge-levels
v_diffs: pairs (tuples) of components whose relative voltage is to be monitored (the plotted value will be: pair[0].V - pair[1].V)
    variance: calculate and display variances in behaviour (for stability analysis)
repetitions: run the sim this many times and average the results
raw_data: plot the raw data-points (for dim-1 plots only)
mean_data: plot the mean data-points (for dim-1 plots only)
    show: call pyplot.show() at the end. Use this if you want to modify the graph externally before showing.
fig, axs: provide fig and axs objects for the plot to work with (if you want external control over the graph). Both must be provided if either is, and axs MUST have one subplot axis for each of voltage, current and charge if they are to be monitored.
colours: change the default list of colours for plotting.
inputs and input_ranges must be at most length two.
At least one of v_probes, i_probes , q_probes or v_diffs should be filled or there will just be an empty plot.
'''
dim = len(inputs) # 2 control voltages, or 1? (only dim 1 and dim 2 are supported)
steptime = runtime/float(steps)
if not colours:
if dim == 1:
colours = line_colours
elif dim == 2:
colours = image_colours
    dont_plot = [] # Components that need to be monitored for v_diff, but whose voltage shouldn't be independently plotted.
for pair in v_diffs:
for c in pair:
if c not in v_probes:
dont_plot.append(c)
v_probes += dont_plot # Add the not-for-plotting nodes to the monitoring-list
dont_plot = Set(dont_plot) # For quick membership tests
v_probe_indicies = dict(zip(v_probes, range(len(v_probes)))) # For quick-lookup of data when plotting v_diff pairs
all_probes = v_probes + i_probes + q_probes
all_probe_lists = (v_probes, i_probes, q_probes)
if (fig is False) or (axs is False): # If fig and axs aren't provided as args
ncolumns = 1
if dim == 1:
nplots = len(nonempty(v_probes, i_probes, q_probes))
elif dim == 2:
nplots = len(all_probes) - len(dont_plot)
if variance: ncolumns = 2
fig, axs = plt.subplots(nplots, ncolumns, figsize=(14, 10)) # Plot size
# The data for each component will be stored in these lists in the same order as the components are listed in the v/i/q_probes arguments
data_array_shape = [steps] * dim + [repetitions] # voltages (* voltages) * repetitions
V = np.empty([len(v_probes)] + data_array_shape, dtype = 'object')
I = np.empty([len(i_probes)] + data_array_shape, dtype = 'object')
Q = np.empty([len(q_probes)] + data_array_shape, dtype = 'object')
# Index order for these arrays: component, v1, (v2,) repetition
# Each element of these arrays will be a list of logged time-value tuples
all_data = (V, I, Q)
print 'Calculating...',
# Main computation for forward sweep
for j in xrange(repetitions):
nanosim.reset_sim() # Set time to 0 and clear logs on all components
v_ranges = [np.linspace(r[0], r[1], steps) for r in input_ranges]
# Step over all voltage combinations, simulate, then log the results.
voltage_sweep(inputs, v_ranges, steptime, all_probes, v_probes, i_probes, q_probes, V[..., j], I[..., j], Q[..., j])
print '%.1f%%' % (50 * float(j+1)/repetitions)
v_components, i_components, q_components = [], [], []
all_component_lists = (v_components, i_components, q_components)
presmooth_current(I, steptime)
# Temporarily sort forward-data
for data, probes, component_list in zip(all_data, all_probe_lists, all_component_lists):
means, time_means, vars_p, vars_m = vars_and_means(data, steptime, probes, data_array_shape)
for i, probe in enumerate(probes): component_list.append(ComponentData(means[i], time_means[i], vars_p[i], vars_m[i], probe.label))
inputs.reverse() # Reverse component order
input_ranges = [tuple(reversed(r)) for r in reversed(input_ranges)] # Reverse both range and component order
# Main computation for backward sweep
for j in xrange(repetitions):
nanosim.reset_sim() # Set time to 0 and clear logs on all components
v_ranges = [np.linspace(r[0], r[1], steps) for r in input_ranges]
# Step over all voltage combinations, simulate, then log the results.
voltage_sweep(inputs, v_ranges, steptime, all_probes, v_probes, i_probes, q_probes, V[..., j], I[..., j], Q[..., j])
print '%.1f%%' % (50 + 50 * float(j+1)/repetitions)
# Undo reversal
inputs.reverse()
input_ranges = [tuple(reversed(r)) for r in reversed(input_ranges)]
presmooth_current(I, steptime)
for data, probes, component_list in zip(all_data, all_probe_lists, all_component_lists):
means, time_means, vars_p, vars_m = vars_and_means(data, steptime, probes, data_array_shape, flip = True)
for i, probe in enumerate(probes):
sweep_mean = SweepData(component_list[i].means, means[i])
sweep_time_mean = SweepData(component_list[i].time_means, time_means[i])
sweep_vars_p = SweepData(component_list[i].vars_p, vars_p[i])
sweep_vars_m = SweepData(component_list[i].vars_m, vars_m[i])
component_list[i] = ComponentData(sweep_mean, sweep_time_mean, sweep_vars_p, sweep_vars_m, probe.label)
# Calculate voltage-difference data
for pair in v_diffs:
i, j = v_probe_indicies[pair[0]], v_probe_indicies[pair[1]]
means = SweepData((v_components[i].means.forward - v_components[j].means.forward), (v_components[i].means.backward - v_components[j].means.backward))
time_means = SweepData((v_components[i].time_means.forward - v_components[j].time_means.forward), (v_components[i].time_means.backward - v_components[j].time_means.backward))
vars_p = SweepData((v_components[i].vars_p.forward - v_components[j].vars_p.forward), (v_components[i].vars_p.backward - v_components[j].vars_p.backward))
vars_m = SweepData((v_components[i].vars_m.forward - v_components[j].vars_m.forward), (v_components[i].vars_m.backward - v_components[j].vars_m.backward))
v_components.append(ComponentData(means, time_means, vars_p, vars_m, 'V[%s - %s]' % (pair[0].label, pair[1].label)))
# Remove v_probes that shouldn't be plotted on their own (ie. that were only recorded for voltage-difference calculations)
v_components = [data for i, data in enumerate(v_components[:len(v_probes)]) if v_probes[i] not in dont_plot] + v_components[len(v_probes):]
# Now for the actual plotting. The logic is totally different for 1d or 2d data, so this big if-statement does the branching
if dim == 1:
v_range = input_ranges[0]
X = np.linspace(v_range[0], v_range[1], steps) # Input voltage data
smoothing_window = int(steps * smoothing_factor) # Smoothing width in array-indicies
for ax, components, axlabel in zip(axs, all_component_lists, ('Voltage (V)', 'Current Avg. (e/s)', 'Charge (e)')): # For axis/plot
# Labels
ax.set_title('%s - Voltage Performance' % axlabel.split()[0])
ax.set_xlabel('Probe Voltage (V)')
ax.set_ylabel(axlabel)
for component, colourpair in zip(components, pairwise(colours)): # For each component
for means, time_means, vars_p, vars_m, colour, direction in zip(component.means, component.time_means, component.vars_p, component.vars_m, colourpair, ('->', '<-')): # For forwards and backwards datasets
# Raw Data
if raw_data:
raw_data_alpha = max(1.0/repetitions, 0.01)
for i in range(repetitions):
ax.scatter(X, time_means[..., i], marker = '.', color = colour, alpha = raw_data_alpha)
# Smoothed Mean Curve
ax.plot(smooth1D(X, k = smoothing_window), smooth1D(means, k = smoothing_window), color = colour, alpha = 0.7, lw = 2, label = '%s (%s sweep)' % (component.label, direction))
# Means
if mean_data:
ax.scatter(X, means, color = colour, alpha = 0.4)
# Variances
if variance:
ax.fill_between(X, means - vars_m, means + vars_p, facecolor = colour, alpha=0.05)
# Legend
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
# Bounds
ax.set_xlim(v_range[0], v_range[1])
all_data = sum(([c.means.forward, c.means.backward] for c in components), [])
# Check that there is any data
if any(len(data) > 0 for data in all_data): ymin, ymax = min(chain(*all_data)), max(chain(*all_data))
else: ymin, ymax = 0, 0
ypadding = y_paddding_percentage * (ymax - ymin)
ax.set_ylim(ymin - ypadding, ymax + ypadding)
# For title
dVdt = '%.1g' % ((v_range[1] - v_range[0])/float(runtime))
if dim == 2:
x_range = input_ranges[0]
y_range = input_ranges[1]
X = np.linspace(x_range[0], x_range[1], steps)
Y = np.linspace(y_range[0], y_range[1], steps)
#X_GRID, Y_GRID = np.meshgrid(X, Y) # Input voltage grid
# Smoothing Params
order = 3
zlevel = 3
smoothing_window = smoothing_factor * steps
#X_GRID = zoom(X_GRID, zlevel, order = order)
#Y_GRID = zoom(Y_GRID, zlevel, order = order)
#Z_GRID = zoom(Z_GRID, zlevel, order = order)
ax_counter = 0
for components, axlabel in zip(all_component_lists, ('Voltage (V)', 'Current Avg. (e/s)', 'Charge (e)')): # For quantity (V, I, Q)
for ax, component in zip(axs[ax_counter:], components): # For each component/plot
ax_counter += 1
if variance:
var_ax = ax[1]
ax = ax[0]
var_ax.set_title(axlabel.split()[0] + ' Variance ' + axlabel.split()[-1])
var_ax.set_xlabel('%s Voltage (V)' % inputs[0].label)
var_ax.set_ylabel('%s Voltage (V)' % inputs[1].label)
# Labels
ax.set_title('%s %s' % (component.label, axlabel))
ax.set_xlabel('%s Voltage (V)' % inputs[0].label)
ax.set_ylabel('%s Voltage (V)' % inputs[1].label)
imgs = []
for means, colourmap, alpha in zip(component.means, colours, (1, 0.5)): # For forwards and backwards datasets
# Smooth
Z_GRID = gaussian_filter(means, smoothing_window)
Z_GRID = zoom(Z_GRID, zlevel, order = order)
# Plot
img = ax.imshow(Z_GRID.T, origin = 'lower', extent = x_range + y_range, aspect = 'auto', alpha = alpha)
img.set_cmap(colourmap)
imgs.append(img)
# Colourbar
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size="5%", pad = 0.1)
cbar = plt.colorbar(imgs[0], cax = cax)
cbar.set_label('Forward sweep')
cbar.set_ticks([])
cax = divider.append_axes('right', size="5%", pad = 0.3)
cbar = plt.colorbar(imgs[1], cax = cax)
cbar.set_label('Backward sweep')
ax.set_xlim(x_range)
ax.set_ylim(y_range)
# Variances
if variance:
variances = (sum(component.vars_p) + sum(component.vars_m)) / 2
# Smooth
variances = gaussian_filter(variances, smoothing_window)
variances = zoom(variances, zlevel, order = order)
# Plot
img = var_ax.imshow(variances.T, origin = 'lower', extent = x_range + y_range, aspect = 'auto', alpha = 1.0)
img.set_cmap(colours[2])
# Colourbar
divider = make_axes_locatable(var_ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(img, cax = cax)
cbar.set_label('%s Variance %s' % (axlabel.split()[0], axlabel.split()[-1]))
var_ax.set_xlim(x_range)
var_ax.set_ylim(y_range)
# For title
dVdt = '(%.1g, %.1g)' % ((x_range[1] - x_range[0])/float(runtime), (y_range[1] - y_range[0])/float(runtime))
# Title
frequency = (1/float(runtime))
frequency = '%.1f %sHz' % prefix(frequency)
fig.text(.5, .93,
'Characterisation Under Stepped (Quasi-DC), %s Bi-Directional\n Voltage Sweeps (mean dV/dt = %s V/s), T=%.1fK, Averaged Over %d repetitions' % (frequency, dVdt, nanosim.T, repetitions),
horizontalalignment='center', fontsize=17)
plt.tight_layout(rect = [0, 0, 1, .93])
print 'done!'
if show: plt.show()
def dynamic_analysis(runtime, v_probes = [], i_probes = [], q_probes = [], v_diffs = [], repetitions = 1, raw_data = True, show = True, fig = False, axs = False, colours = line_colours):
'''
Analyse a circuit over time.
runtime: time, in seconds that each simulation is to be run for
v_probes: components to be monitored for voltage-levels
i_probes: components to be monitored for current
q_probes: components to be monitored for charge-levels
v_diffs: pairs (tuples) of components whose relative voltage is to be monitored (the plotted value will be: pair[0].V - pair[1].V)
repetitions: run the sim this many times and average the results
raw_data: plot the raw data-points?
show: call pyplot.show() at the end. Set this to False if you want to modify the graph externally before showing it yourself.
fig, axs: provide fig and axs objects for the plot to work with (if you want external control over the graph). Both must be provided if either is, and axs MUST have one subplot axis for each of voltage, current and charge if they are to be monitored.
colours: change the default list of colours for plotting.
At least one of v_probes, i_probes, q_probes or v_diffs should be filled, or there will just be an empty plot.
'''
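# A minimal usage sketch (hypothetical component objects; assumes the circuit
# has already been built and registered with nanosim elsewhere):
#   dynamic_analysis(runtime=1e-6,
#                    v_probes=[node_a, node_b],
#                    i_probes=[junction_1],
#                    v_diffs=[(node_a, node_b)],
#                    repetitions=5)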
dont_plot = [] # Components that need to be monitored for v_diff, but whose voltage shouldn't be independently plotted.
for pair in v_diffs:
for c in pair:
if c not in v_probes:
dont_plot.append(c)
v_probes += dont_plot # Add the not-for-plotting nodes to the monitoring-list
dont_plot = Set(dont_plot) # For quick membership tests
v_probe_indicies = dict(zip(v_probes, range(len(v_probes)))) # For quick-lookup of data when plotting v_diff pairs
if (fig is False) or (axs is False): # If fig and axs aren't provided as args
fig, axs = plt.subplots(len(nonempty(v_probes, i_probes, q_probes)), figsize=(14, 10)) # Plot size
# The data for each component will be stored in these lists in the same order as the components are listed in the v/i/q_probes arguments
V = [[] for p in v_probes]
I = [[] for p in i_probes]
Q = [[] for p in q_probes]
all_probes = v_probes + i_probes + q_probes
all_data = (V, I, Q)
print 'Calculating...',
# Main computation
for j in xrange(repetitions):
nanosim.reset_logs() # Clear logs
nanosim.simulate(runtime, logging = all_probes)
# Extract data from component logs
for i, p in enumerate(v_probes):
V[i] += p.voltages
for i, p in enumerate(i_probes):
I[i] += p.current
for i, p in enumerate(q_probes):
Q[i] += p.charges
print '%.1f%%' % (100 * float(j + 1)/repetitions)
# Current is different to voltages because it's made up of delta-functions (which can't be easily interpolated).
# Pre-smooth the delta functions into square-functions so that standard smoothing will work properly
for i in range(len(I)):
t_presmoothed, I_presmoothed = spread_signal_spikes(*zip(*I[i]), x_range = (0, runtime), m = m)
I_presmoothed /= float(repetitions)
I[i] = zip(t_presmoothed, I_presmoothed) # Replace the stored data with this smoothed version
labels = (l for l, x in zip(('Voltage (V)', 'Current (e/s)', 'Charge (e)'), all_data) if bool(x)) # Labels for each graph (used for both title and y-axis)
# Plot all the data (except voltage-diffs)
smoothing_window = int(m * smoothing_factor) # Smoothing width in array-indicies
for ax, data in zip(axs, nonempty(*all_data)): # Loop though V, I and Q
for component_data, colour, component in zip(data, colours[::2], all_probes): # Loop though monitored components
if component in dont_plot: continue # Skip voltage-diff only components
component_data.sort(key = lambda x: x[0]) # Sort data by time
t, y = zip(*component_data)
# Plot raw data
if raw_data: ax.scatter(t, y, color = colour, alpha = 0.4)
# Interpolate data into evenly-spaced samples
x_smooth = np.linspace(0, runtime, m)
y_smooth = np.interp(x_smooth, t, y)
# Gaussian-smooth the interpolated data
x_smooth = smooth1D(x_smooth, k = smoothing_window) # The edges of the data will be auto-trimmed to the region of convolution-validity, so the x data needs to be smoothed and trimmed to match the y-data
y_smooth = smooth1D(y_smooth, k = smoothing_window)
# Plot the smoothed curve
a = ax.plot(x_smooth, y_smooth, color = colour, alpha = 0.7, lw = 2, label = component.label)
# Plot voltage-diffs
ax = axs[0] # V axis
already_plotted = len(v_probes) - len(dont_plot)
for colour, pair in zip(colours[2*already_plotted::2], v_diffs):
i, j = v_probe_indicies[pair[0]], v_probe_indicies[pair[1]]
data1, data2 = V[i], V[j]
# Sort data by time
data1.sort(key = lambda x: x[0])
data2.sort(key = lambda x: x[0])
t1, y1 = zip(*data1)
t2, y2 = zip(*data2)
# Interpolate data into evenly-spaced samples
x_smooth = np.linspace(0, runtime, m)
y1_smooth = np.interp(x_smooth, t1, y1)
y2_smooth = np.interp(x_smooth, t2, y2)
# Calculate the voltage-difference values
y_smooth = y1_smooth - y2_smooth
# Gaussian-smooth the interpolated data
x_smooth = smooth1D(x_smooth, k = smoothing_window) # The edges of the data will be auto-trimmed to the region of convolution-validity, so the x data needs to be smoothed and trimmed to match the y-data
y_smooth = smooth1D(y_smooth, k = smoothing_window)
# Plot the smoothed curve
a = ax.plot(x_smooth, y_smooth, color = colour, alpha = 0.7, lw = 2, label = 'V[%s - %s]' % (pair[0].label, pair[1].label))
# Finalise the plots with labels, legends and bounds
for ax, label in zip(axs, labels):
# Legend
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
# Labels
ax.set_xlabel('Time (s)')
ax.set_ylabel(label)
ax.set_title('%s vs Time' % label.split()[0])
# Bounds
ax.set_xlim(0, runtime) # Ensure that all the plots align properly
print 'done!'
# Title
fig.text(.52, .955, 'Dynamic Circuit Analysis', horizontalalignment='center', fontsize=17)
plt.tight_layout(rect = [0, 0, 1, .94])
if show: plt.show()
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 272 | 6972 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
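# For reference, the multi-class log-loss reported here is
#   L = -(1/N) * sum_i sum_k y_ik * log(p_ik),
# where y_ik is 1 if sample i truly belongs to class k (0 otherwise) and
# p_ik is the predicted probability of class k for sample i.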
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
rbiswas4/Twinkles | twinkles/InstcatGenerationBooKeeping.py | 1 | 3405 | """
Each instance catalog has a list of spectra that are also written to disk,
and these instance catalogs are written in multiple runs.
This module tries to check that all of those files were written correctly.
"""
import pandas as pd
import sys, os
class ValidatePhoSimCatalogs(object):
MegaByte = 1024*1024
def __init__(self,
obsHistIDValues,
prefix='InstanceCatalogs/phosim_input_'):
self.obsHistIDValues = obsHistIDValues
self.prefix=prefix
@classmethod
def fromRunlog(cls, runlog='run.log',
prefix='InstanceCatalogs/phosim_input_',
obsHistIDrange=[0, None]):
runs = pd.read_csv(runlog)
if obsHistIDrange[-1] is None:
obsHistIDrange[-1] = len(runs)
obsHistIDValues = runs.obsHistID.values[obsHistIDrange[0]:obsHistIDrange[1]]
return cls(obsHistIDValues=obsHistIDValues, prefix=prefix)
@staticmethod
def filenames(obsHistID, prefix='InstanceCatalogs/phosim_input_'):
"""
return the filenames for the phosim instance catalog and the
spectra tar ball corresponding to the obsHistID.
"""
basename = prefix + str(obsHistID)
spectra_tar = basename + '.tar.gz'
phosimInstanceCatalog = basename + '.txt.gz'
return phosimInstanceCatalog, spectra_tar
@staticmethod
def validateSizes(phosimInstanceCatalog, spectra_tar, unitSize,
minSizePhosim=15, minSizeSpectra=40):
"""
Check that the files exist and have sizes above a minimum size (i.e. not
empty).
"""
minSizeSpectra = minSizeSpectra * unitSize
minSizePhosim = minSizePhosim * unitSize
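# When called with unitSize=MegaByte (as run() does below), these defaults
# correspond to roughly 15 MB for the instance catalog and 40 MB for the
# spectra tarball.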
success = False
try:
spectra_size = os.path.getsize(spectra_tar)
except:
spectra_size = False
try:
phoSimCat_size = os.path.getsize(phosimInstanceCatalog)
except:
phoSimCat_size = False
if phoSimCat_size and spectra_size:
success = (phoSimCat_size > minSizePhosim) and (spectra_size > minSizeSpectra)
if success:
untarredInstanceCatalog = phosimInstanceCatalog.split('.gz')[0]
if os.path.exists(untarredInstanceCatalog):
#print(untarredInstanceCatalog)
os.remove(untarredInstanceCatalog)
return success, phoSimCat_size, spectra_size
def run(self, filename='validateCompleteness'):
f = open(filename + '_success.dat', 'w')
g = open(filename +'_failures.dat', 'w')
for obsHistID in self.obsHistIDValues:
phosimInstanceCatalog, spectra = self.filenames(obsHistID,
self.prefix)
success, phosimSize, spectraSize = self.validateSizes(phosimInstanceCatalog=phosimInstanceCatalog, spectra_tar=spectra, unitSize=self.MegaByte)
if success:
f.write("{0:d},{1:2.1f},{2:2.1f}\n".format(obsHistID,
phosimSize, spectraSize))
else:
g.write("{0:d},{1:2.1f},{2:2.1f}\n".format(obsHistID,
phosimSize, spectraSize))
f.close()
g.close()
if __name__=='__main__':
v = ValidatePhoSimCatalogs.fromRunlog(runlog='FirstSet_obsHistIDs.csv',
obsHistIDrange=[500, 600])
v.run()
| mit |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/matplotlib/backends/qt_editor/figureoptions.py | 10 | 8551 | # -*- coding: utf-8 -*-
#
# Copyright © 2009 Pierre Raybaut
# Licensed under the terms of the MIT License
# see the mpl licenses directory for a copy of the license
"""Module that provides a GUI-based editor for matplotlib's figure options"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os.path as osp
import re
import matplotlib
from matplotlib import cm, markers, colors as mcolors
import matplotlib.backends.qt_editor.formlayout as formlayout
from matplotlib.backends.qt_compat import QtGui
def get_icon(name):
basedir = osp.join(matplotlib.rcParams['datapath'], 'images')
return QtGui.QIcon(osp.join(basedir, name))
LINESTYLES = {'-': 'Solid',
'--': 'Dashed',
'-.': 'DashDot',
':': 'Dotted',
'None': 'None',
}
DRAWSTYLES = {
'default': 'Default',
'steps-pre': 'Steps (Pre)', 'steps': 'Steps (Pre)',
'steps-mid': 'Steps (Mid)',
'steps-post': 'Steps (Post)'}
MARKERS = markers.MarkerStyle.markers
def figure_edit(axes, parent=None):
"""Edit matplotlib figure options"""
sep = (None, None) # separator
# Get / General
xmin, xmax = axes.get_xlim()
ymin, ymax = axes.get_ylim()
general = [('Title', axes.get_title()),
sep,
(None, "<b>X-Axis</b>"),
('Min', xmin), ('Max', xmax),
('Label', axes.get_xlabel()),
('Scale', [axes.get_xscale(), 'linear', 'log']),
sep,
(None, "<b>Y-Axis</b>"),
('Min', ymin), ('Max', ymax),
('Label', axes.get_ylabel()),
('Scale', [axes.get_yscale(), 'linear', 'log']),
sep,
('(Re-)Generate automatic legend', False),
]
# Save the unit data
xconverter = axes.xaxis.converter
yconverter = axes.yaxis.converter
xunits = axes.xaxis.get_units()
yunits = axes.yaxis.get_units()
# Sorting for default labels (_lineXXX, _imageXXX).
def cmp_key(label):
match = re.match(r"(_line|_image)(\d+)", label)
if match:
return match.group(1), int(match.group(2))
else:
return label, 0
# Get / Curves
linedict = {}
for line in axes.get_lines():
label = line.get_label()
if label == '_nolegend_':
continue
linedict[label] = line
curves = []
def prepare_data(d, init):
"""Prepare entry for FormLayout.
`d` is a mapping of shorthands to style names (a single style may
have multiple shorthands, in particular the shorthands `None`,
`"None"`, `"none"` and `""` are synonyms); `init` is one shorthand
of the initial style.
This function returns a list suitable for initializing a
FormLayout combobox, namely `[initial_name, (shorthand,
style_name), (shorthand, style_name), ...]`.
"""
# Drop duplicate shorthands from dict (by overwriting them during
# the dict comprehension).
name2short = {name: short for short, name in d.items()}
# Convert back to {shorthand: name}.
short2name = {short: name for name, short in name2short.items()}
# Find the kept shorthand for the style specified by init.
canonical_init = name2short[d[init]]
# Sort by representation and prepend the initial value.
return ([canonical_init] +
sorted(short2name.items(),
key=lambda short_and_name: short_and_name[1]))
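# As an illustration (values depend on the dicts defined above),
# prepare_data(LINESTYLES, '-') would return
#   ['-', ('-.', 'DashDot'), ('--', 'Dashed'), (':', 'Dotted'),
#    ('None', 'None'), ('-', 'Solid')],
# i.e. the kept shorthand for the initial style followed by all
# (shorthand, name) pairs sorted by style name.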
curvelabels = sorted(linedict, key=cmp_key)
for label in curvelabels:
line = linedict[label]
color = mcolors.to_hex(
mcolors.to_rgba(line.get_color(), line.get_alpha()),
keep_alpha=True)
ec = mcolors.to_hex(line.get_markeredgecolor(), keep_alpha=True)
fc = mcolors.to_hex(line.get_markerfacecolor(), keep_alpha=True)
curvedata = [
('Label', label),
sep,
(None, '<b>Line</b>'),
('Line style', prepare_data(LINESTYLES, line.get_linestyle())),
('Draw style', prepare_data(DRAWSTYLES, line.get_drawstyle())),
('Width', line.get_linewidth()),
('Color (RGBA)', color),
sep,
(None, '<b>Marker</b>'),
('Style', prepare_data(MARKERS, line.get_marker())),
('Size', line.get_markersize()),
('Face color (RGBA)', fc),
('Edge color (RGBA)', ec)]
curves.append([curvedata, label, ""])
# Is there a curve displayed?
has_curve = bool(curves)
# Get / Images
imagedict = {}
for image in axes.get_images():
label = image.get_label()
if label == '_nolegend_':
continue
imagedict[label] = image
imagelabels = sorted(imagedict, key=cmp_key)
images = []
cmaps = [(cmap, name) for name, cmap in sorted(cm.cmap_d.items())]
for label in imagelabels:
image = imagedict[label]
cmap = image.get_cmap()
if cmap not in cm.cmap_d.values():
cmaps = [(cmap, cmap.name)] + cmaps
low, high = image.get_clim()
imagedata = [
('Label', label),
('Colormap', [cmap.name] + cmaps),
('Min. value', low),
('Max. value', high)]
images.append([imagedata, label, ""])
# Is there an image displayed?
has_image = bool(images)
datalist = [(general, "Axes", "")]
if curves:
datalist.append((curves, "Curves", ""))
if images:
datalist.append((images, "Images", ""))
def apply_callback(data):
"""This function will be called to apply changes"""
general = data.pop(0)
curves = data.pop(0) if has_curve else []
images = data.pop(0) if has_image else []
if data:
raise ValueError("Unexpected field")
# Set / General
(title, xmin, xmax, xlabel, xscale, ymin, ymax, ylabel, yscale,
generate_legend) = general
if axes.get_xscale() != xscale:
axes.set_xscale(xscale)
if axes.get_yscale() != yscale:
axes.set_yscale(yscale)
axes.set_title(title)
axes.set_xlim(xmin, xmax)
axes.set_xlabel(xlabel)
axes.set_ylim(ymin, ymax)
axes.set_ylabel(ylabel)
# Restore the unit data
axes.xaxis.converter = xconverter
axes.yaxis.converter = yconverter
axes.xaxis.set_units(xunits)
axes.yaxis.set_units(yunits)
axes.xaxis._update_axisinfo()
axes.yaxis._update_axisinfo()
# Set / Curves
for index, curve in enumerate(curves):
line = linedict[curvelabels[index]]
(label, linestyle, drawstyle, linewidth, color, marker, markersize,
markerfacecolor, markeredgecolor) = curve
line.set_label(label)
line.set_linestyle(linestyle)
line.set_drawstyle(drawstyle)
line.set_linewidth(linewidth)
rgba = mcolors.to_rgba(color)
line.set_alpha(None)
line.set_color(rgba)
if marker != 'none':
line.set_marker(marker)
line.set_markersize(markersize)
line.set_markerfacecolor(markerfacecolor)
line.set_markeredgecolor(markeredgecolor)
# Set / Images
for index, image_settings in enumerate(images):
image = imagedict[imagelabels[index]]
label, cmap, low, high = image_settings
image.set_label(label)
image.set_cmap(cm.get_cmap(cmap))
image.set_clim(*sorted([low, high]))
# re-generate legend, if checkbox is checked
if generate_legend:
draggable = None
ncol = 1
if axes.legend_ is not None:
old_legend = axes.get_legend()
draggable = old_legend._draggable is not None
ncol = old_legend._ncol
new_legend = axes.legend(ncol=ncol)
if new_legend:
new_legend.draggable(draggable)
# Redraw
figure = axes.get_figure()
figure.canvas.draw()
data = formlayout.fedit(datalist, title="Figure options", parent=parent,
icon=get_icon('qt4_editor_options.svg'),
apply=apply_callback)
if data is not None:
apply_callback(data)
| apache-2.0 |
waynenilsen/statsmodels | statsmodels/datasets/longley/data.py | 25 | 1930 | """Longley dataset"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
The classic 1967 Longley Data
http://www.itl.nist.gov/div898/strd/lls/data/Longley.shtml
::
Longley, J.W. (1967) "An Appraisal of Least Squares Programs for the
Electronic Computer from the Point of View of the User." Journal of
the American Statistical Association. 62.319, 819-41.
"""
DESCRSHORT = """"""
DESCRLONG = """The Longley dataset contains various US macroeconomic
variables that are known to be highly collinear. It has been used to appraise
the accuracy of least squares routines."""
NOTE = """::
Number of Observations - 16
Number of Variables - 6
Variable name definitions::
TOTEMP - Total Employment
GNPDEFL - GNP deflator
GNP - GNP
UNEMP - Number of unemployed
ARMED - Size of armed forces
POP - Population
YEAR - Year (1947 - 1962)
"""
from numpy import recfromtxt, array, column_stack
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the Longley data and return a Dataset class.
Returns
-------
Dataset instance
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Load the Longley data and return a Dataset class.
Returns
-------
Dataset instance
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath+'/longley.csv',"rb"), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6,7))
return data
| bsd-3-clause |
glouppe/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
rishikksh20/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
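# The RBF kernel used below is k(x, y) = exp(-gamma * ||x - y||^2); with the
# fairly large gamma=10 the concentric-circle classes become linearly
# separable in the induced feature space.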
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
hypergravity/bopy | bopy/spec/line_indices.py | 1 | 21682 | # -*- coding: utf-8 -*-
"""
Author
------
Bo Zhang
Email
-----
bozhang@nao.cas.cn
Created on
----------
- Fri Jul 3 13:13:06 2015 read_spectrum
Modifications
-------------
- Wed Jul 29 21:46:00 2015 measure_line_index
- Fri Nov 20 10:16:59 2015 reformatting code
- Sat Jan 16 19:55:57 2016 migrate from spec.py
- Thu Jul 14 23:57:57 2016 plot every line index
- Sat Jul 23 17:35:00 2016 update line_indices.py
Aims
----
- measure line index from spectra
"""
from __future__ import division
import os
import collections
import numpy as np
import matplotlib.pyplot as plt
from lmfit.models import LinearModel, GaussianModel
from .read_spectrum import read_spectrum
# from bopy.spec.read_spectrum import read_spectrum
# should consider whether to maintain filepath arg
# since the plot function could be replaced using recover
def measure_line_index(wave,
flux,
flux_err=None,
mask=None,
z=None,
line_info=None,
num_refit=(100, None),
filepath=None,
return_type='dict',
verbose=False):
""" Measure line index / EW and have it plotted
Parameters
----------
wave: array-like
wavelength vector
flux: array-like
flux vector
flux_err: array-like
flux error vector (optional)
If un-specified, auto-generate an np.ones array
mask: array-like
andmask or ormask (optional)
If un-specified, auto-generate an np.ones array (evenly weighted)
line_info: dict
information about spectral line, eg:
line_info_dib5780 = {'line_center': 5780,
'line_range': (5775, 5785),
'line_shoulder_left': (5755, 5775),
'line_shoulder_right': (5805, 5825)}
num_refit: non-negative integer
number of refitting.
If 0, no refit will be performed
If positive, refits will be performed after adding normal random noise
z: float
redshift (only specify when z is large)
filepath: string
path of the diagnostic figure
if None, do nothing, else print diagnostic figure
return_type: string
'dict' or 'array'
if 'array', np.array(return dict.values())
verbose: bool
if True, print details
Returns
-------
line_indx: dict
A dictionary type result of line index.
If any problem encountered, return the default result (filled with nan).
"""
try:
# 0. do some input check
# 0.1> check line_info
line_info_keys = line_info.keys()
assert 'line_range' in line_info_keys
assert 'line_shoulder_left' in line_info_keys
assert 'line_shoulder_right' in line_info_keys
# 0.2> check line range/shoulder in spectral range
assert np.min(wave) <= line_info['line_shoulder_left'][0]
assert np.max(wave) >= line_info['line_shoulder_right'][0]
# 1. get line information
# line_center = line_info['line_center'] # not used
line_range = line_info['line_range']
line_shoulder_left = line_info['line_shoulder_left']
line_shoulder_right = line_info['line_shoulder_right']
# 2. data preparation
# 2.1> shift spectra to rest-frame
wave = np.array(wave)
flux = np.array(flux)
if z is not None:
wave /= 1. + z
# 2.2> generate flux_err and mask if un-specified
if flux_err is None:
flux_err = np.ones(wave.shape)
if mask is None:
mask = np.zeros(wave.shape)
mask_ = np.zeros(wave.shape)
ind_mask = np.all([mask != 0], axis=0)
mask_[ind_mask] = 1
mask = mask_
# 3. estimate the local continuum
# 3.1> shoulder wavelength range
ind_shoulder = np.any([
np.all([wave > line_shoulder_left[0],
wave < line_shoulder_left[1]], axis=0),
np.all([wave > line_shoulder_right[0],
wave < line_shoulder_right[1]], axis=0)], axis=0)
wave_shoulder = wave[ind_shoulder]
flux_shoulder = flux[ind_shoulder]
# 3.2> integrated/fitted wavelength range
ind_range = np.logical_and(wave > line_range[0], wave < line_range[1])
wave_range = wave[ind_range]
flux_range = flux[ind_range]
# flux_err_range = flux_err[ind_range] # not used
mask_range = mask[ind_range]
flux_err_shoulder = flux_err[ind_shoulder]
# mask_shoulder = mask[ind_shoulder] # not used
# 4. linear model
mod_linear = LinearModel(prefix='mod_linear_')
par_linear = mod_linear.guess(flux_shoulder, x=wave_shoulder)
# ############################################# #
# to see the parameter names: #
# model_linear.param_names #
# {'linear_fun_intercept', 'linear_fun_slope'} #
# ############################################# #
out_linear = mod_linear.fit(flux_shoulder,
par_linear,
x=wave_shoulder,
method='leastsq')
# 5. estimate continuum
cont_shoulder = out_linear.best_fit
noise_std = np.std(flux_shoulder / cont_shoulder)
cont_range = mod_linear.eval(out_linear.params, x=wave_range)
resi_range = 1 - flux_range / cont_range
# 6.1 Integrated EW
# estimate EW_int
wave_diff = np.diff(wave_range)
wave_step = np.mean(np.vstack([np.hstack([wave_diff[0], wave_diff]),
np.hstack([wave_diff, wave_diff[-1]])]),
axis=0)
EW_int = np.dot(resi_range, wave_step)
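# (This is the discrete form of the equivalent width
#  EW = integral of (1 - F(lambda) / F_cont(lambda)) d(lambda),
#  approximated here with per-pixel wavelength steps.)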
# estimate EW_int_err (default to NaN so the result dict below is always defined)
EW_int_err = np.nan
num_refit_ = num_refit[0]
if num_refit_ is not None and num_refit_ > 0:
EW_int_err = np.std(np.dot(
(resi_range.reshape(1, -1).repeat(num_refit_, axis=0) +
np.random.randn(num_refit_, resi_range.size) * noise_std),
wave_step))
# 6.2 Gaussian model
# estimate EW_fit
mod_gauss = GaussianModel(prefix='mod_gauss_')
par_gauss = mod_gauss.guess(resi_range, x=wave_range)
out_gauss = mod_gauss.fit(resi_range, par_gauss, x=wave_range)
line_indx = collections.OrderedDict([
('SN_local_flux_err', np.median(flux_shoulder / flux_err_shoulder)),
('SN_local_flux_std', 1. / noise_std),
('num_bad_pixel', np.sum(mask_range != 0)),
('EW_int', EW_int),
('EW_int_err', EW_int_err),
('mod_linear_slope', out_linear.params[mod_linear.prefix + 'slope'].value),
('mod_linear_slope_err', out_linear.params[mod_linear.prefix + 'slope'].stderr),
('mod_linear_intercept', out_linear.params[mod_linear.prefix + 'intercept'].value),
('mod_linear_intercept_err', out_linear.params[mod_linear.prefix + 'intercept'].stderr),
('mod_gauss_amplitude', out_gauss.params[mod_gauss.prefix + 'amplitude'].value),
('mod_gauss_amplitude_err', out_gauss.params[mod_gauss.prefix + 'amplitude'].stderr),
('mod_gauss_center', out_gauss.params[mod_gauss.prefix + 'center'].value),
('mod_gauss_center_err', out_gauss.params[mod_gauss.prefix + 'center'].stderr),
('mod_gauss_sigma', out_gauss.params[mod_gauss.prefix + 'sigma'].value),
('mod_gauss_sigma_err', out_gauss.params[mod_gauss.prefix + 'sigma'].stderr),
('mod_gauss_amplitude_std', np.nan),
('mod_gauss_center_std', np.nan),
('mod_gauss_sigma_std', np.nan)])
# estimate EW_fit_err
num_refit_ = num_refit[1]
if num_refit_ is not None and num_refit_ > 2:
# {'mod_gauss_amplitude',
# 'mod_gauss_center',
# 'mod_gauss_fwhm',
# 'mod_gauss_sigma'}
out_gauss_refit_amplitude = np.zeros(num_refit_)
out_gauss_refit_center = np.zeros(num_refit_)
out_gauss_refit_sigma = np.zeros(num_refit_)
# noise_fit = np.random.randn(num_refit,resi_range.size)*noise_std
for i in range(int(num_refit_)):
# resi_range_with_noise = resi_range + noise_fit[i,:]
resi_range_with_noise = resi_range + \
np.random.randn(resi_range.size) * noise_std
out_gauss_refit = mod_gauss.fit(resi_range_with_noise,
par_gauss,
x=wave_range)
out_gauss_refit_amplitude[i],\
out_gauss_refit_center[i],\
out_gauss_refit_sigma[i] =\
out_gauss_refit.params[mod_gauss.prefix + 'amplitude'].value,\
out_gauss_refit.params[mod_gauss.prefix + 'center'].value,\
out_gauss_refit.params[mod_gauss.prefix + 'sigma'].value
print(out_gauss_refit_amplitude[i], out_gauss_refit_center[i], out_gauss_refit_sigma[i])
line_indx.update([
('mod_gauss_amplitude_std', np.nanstd(out_gauss_refit_amplitude)),
('mod_gauss_center_std', np.nanstd(out_gauss_refit_center)),
('mod_gauss_sigma_std', np.nanstd(out_gauss_refit_sigma))
])
# 7. plot and save image
if filepath is not None and os.path.exists(os.path.dirname(filepath)):
save_image_line_indice(filepath, wave, flux, ind_range, cont_range,
ind_shoulder, line_info)
# if necessary, convert to array
# NOTE: for a non-ordered dict the order of keys and values may change!
if return_type == 'array':
return np.array(line_indx.values())
return line_indx
except Exception:
return measure_line_index_null_result(return_type)
def measure_line_index_null_result(return_type):
"""generate default value (nan/False) when measurement fails
Returns
-------
default value (nan/False)
"""
line_indx = collections.OrderedDict([
('SN_local_flux_err', np.nan),
('SN_local_flux_std', np.nan),
('num_bad_pixel', np.nan),
('EW_int', np.nan),
('EW_int_err', np.nan),
('mod_linear_slope', np.nan),
('mod_linear_slope_err', np.nan),
('mod_linear_intercept', np.nan),
('mod_linear_intercept_err', np.nan),
('mod_gauss_amplitude', np.nan),
('mod_gauss_amplitude_err', np.nan),
('mod_gauss_center', np.nan),
('mod_gauss_center_err', np.nan),
('mod_gauss_sigma', np.nan),
('mod_gauss_sigma_err', np.nan),
('mod_gauss_amplitude_std', np.nan),
('mod_gauss_center_std', np.nan),
('mod_gauss_sigma_std', np.nan)])
if return_type == 'array':
return np.array(line_indx.values())
return line_indx
# pure fit: 100 loops, best of 3: 8.06 ms per loop (1 int + 1 fit)
# 1000 re-fit: 1 loops, best of 3: 378 ms per loop (1 int + 1 fit + 100 re-fit)
def measure_line_index_loopfun(filepath):
"""loopfun for measuring line index
Parameters
----------
filepath: string
path of the spec document
Returns
-------
several line_indx: tuple
every line_indx is a dictionary type result of line index.
"""
num_refit = 100, None
return_type = 'array'
line_info_dib5780 = {'line_center': 5780,
'line_range': (5775, 5785),
'line_shoulder_left': (5755, 5775),
'line_shoulder_right': (5805, 5825)}
line_info_dib5797 = {'line_center': 5797,
'line_range': (5792, 5802),
'line_shoulder_left': (5755, 5775),
'line_shoulder_right': (5805, 5825)}
line_info_dib6284 = {'line_center': 6285,
'line_range': (6280, 6290),
'line_shoulder_left': (6260, 6280),
'line_shoulder_right': (6310, 6330)}
try:
# read spectrum
# -------------
spec = read_spectrum(filepath, 'auto')
# measure DIBs
# ------------
# DIB5780
line_indx_dib5780 = measure_line_index(wave=spec['wave'],
flux=spec['flux'],
flux_err=spec['flux_err'],
mask=spec['and_mask'],
line_info=line_info_dib5780,
num_refit=num_refit,
return_type=return_type,
z=0)
# DIB5797
line_indx_dib5797 = measure_line_index(wave=spec['wave'],
flux=spec['flux'],
flux_err=spec['flux_err'],
mask=spec['and_mask'],
line_info=line_info_dib5797,
num_refit=num_refit,
return_type=return_type,
z=0)
# DIB6284
line_indx_dib6284 = measure_line_index(wave=spec['wave'],
flux=spec['flux'],
flux_err=spec['flux_err'],
mask=spec['and_mask'],
line_info=line_info_dib6284,
num_refit=num_refit,
return_type=return_type,
z=0)
return line_indx_dib5780, line_indx_dib5797, line_indx_dib6284
except Exception:
return (measure_line_index_null_result(return_type),
measure_line_index_null_result(return_type),
measure_line_index_null_result(return_type))
def measure_line_index_recover_spectrum(wave, params, norm=False):
""" recover the fitted line profile from params
Parameters
----------
wave: array-like
the wavelength to which the recovered flux correspond
params: 5-element tuple
the 1 to 5 elements are:
mod_linear_slope
mod_linear_intercept
mod_gauss_amplitude
mod_gauss_center
mod_gauss_sigma
norm: bool
if True, linear model (continuum) is deprecated
else linear + Gaussian model is used
"""
from lmfit.models import LinearModel, GaussianModel
mod_linear = LinearModel(prefix='mod_linear_')
mod_gauss = GaussianModel(prefix='mod_gauss_')
par_linear = mod_linear.make_params()
par_gauss = mod_gauss.make_params()
par_linear['mod_linear_slope'].value = params[0]
par_linear['mod_linear_intercept'].value = params[1]
par_gauss['mod_gauss_amplitude'].value = params[2]
par_gauss['mod_gauss_center'].value = params[3]
par_gauss['mod_gauss_sigma'].value = params[4]
if not norm:
flux = 1 - mod_gauss.eval(params=par_gauss, x=wave)
else:
flux = \
(1 - mod_gauss.eval(params=par_gauss, x=wave)) * \
mod_linear.eval(params=par_linear, x=wave)
return flux
def save_image_line_indice(filepath, wave, flux, ind_range, cont_range,
ind_shoulder, line_info):
"""Plot a line indice and save it as a .png document.
Parameters
----------
filepath: string
path of the spec document
wave: array
wavelength vector
flux: array
flux vector
ind_range: array
bool indicating the middle range of a particular line
cont_range: array
continuum flux of the middle range derived from linear model
ind_shoulder: array
bool indicating the shoulder range of a particular line
line_info: dict
information about spectral line, eg:
line_info_dib5780 = {'line_center': 5780,
'line_range': (5775, 5785),
'line_shoulder_left': (5755, 5775),
'line_shoulder_right': (5805, 5825)}
"""
filename = os.path.basename(filepath)
fig = plt.figure()
plt.plot(wave[ind_range], flux[ind_range], 'r-')
plt.plot(wave[ind_range], cont_range, 'b-')
plt.plot(wave[ind_shoulder], flux[ind_shoulder], 'm-')
plt.title('line ' + str(line_info['line_center']) + ' of ' + filename)
fig.savefig(filepath)
def test_():
# filepath = walk_dir()
# filesource = 'auto'
filepath = r'/pool/lamost/dr2/spectra/fits/F5902/spec-55859-F5902_sp01-001.fits'
filesource = 'lamost_dr2'
spec = read_spectrum(filepath, filesource)
# 10 loops, best of 3: 35.7 ms per loop
# line_indx_pack = measure_line_index_loopfun(filepath)
z = 0.00205785
line_info_dib6284 = {'line_center': 6285,
'line_range': (6280, 6290),
'line_shoulder_left': (6260, 6280),
'line_shoulder_right': (6310, 6330)}
line_indx = measure_line_index(wave=spec['wave'],
flux=spec['flux'],
flux_err=spec['flux_err'],
mask=spec['and_mask'],
line_info=line_info_dib6284,
num_refit=(100, 0),
return_type='dict',
z=z)
for key in line_indx.keys():
print (key, line_indx[key])
print(np.sum(np.isnan(line_indx.values())))
'''
45 ms for integration and other procedures
380 ms for 100 refits
In the fastest way (45ms), run 40 line indices on 4 million spectra:
0.045*40*4E6/24/86400 ~ 3.5 days
In the slowest way (380ms)
0.420*40*4E6/24/86400 ~ 32.5 days
'''
# I don't think this function should be implemented here,
# it could be useful if under the bopy.core package
def walk_dir(dirpath):
""" enumerate all files under dirpath
Parameters
----------
dirpath: string
the directory to be walked in
Returns
-------
filename: list
filepaths of all the spectra in finder dirpath
"""
filename_list = []
for parent, dirnames, filenames in os.walk(dirpath):
filename_list.extend([os.path.join(parent, filename)
for filename in filenames])
n = len(filename_list)
filename_list = filename_list[1:n+1]
return filename_list
# for functions below, consider whether they need to be here
# #############################################################################
def test_measure_line_index():
filepath = walk_dir('')
n = len(filepath)
line_indx_star = [[]for i in range(3)]
for i in range(n):
line_indx = measure_line_index_loopfun(filepath[i])
line_indx_star[0].append(line_indx[0])
line_indx_star[1].append(line_indx[1])
line_indx_star[2].append(line_indx[2])
return line_indx_star
def get_equivalent_width(line_indx_star):
EW = [[] for i in range(3)]
n = len(line_indx_star[0])
for i in range(3):
for j in range(n):
EW[i].append(line_indx_star[i][j]['EW_int'])
return EW
def plot_equivalent_width_hist(EW_star):
titles = ["5780", "5797", "6285"]
fig, axes = plt.subplots(1, 3, figsize=(8, 8))
for i in range(3):
ax = axes[i]
ax.hist(EW_star[i], facecolor='red', alpha=0.5)
ax.set_xlabel('equivalent width')
ax.set_ylabel('number')
ax.set_title('Histogram of equivalent width, line ' + titles[i])
plt.tight_layout()
plt.show()
def plot_line_indices(EW_star):
titles = ["5780", "5797", "6285"]
fig, axes = plt.subplots(3, 3, figsize=(64, 64))
for i in range(3):
for j in range(i+1):
ax = axes[i, j]
ax.set_title(titles[i]+" - "+titles[j], fontsize = 8)
ax.set_ylabel(titles[i], fontsize=8)
ax.set_xlabel(titles[j], fontsize=8)
ax.plot(EW_star[j], EW_star[i], 'ob',markersize=3, alpha=0.5)
plt.tight_layout()
# #############################################################################
# %% test
if __name__ == '__main__':
# line_indx_star = test_measure_line_index()
# EW_star = get_equivalent_width(line_indx_star)
# plot_equivalent_width_hist(EW_star)
# plot_line_indices(EW_star)
test_()
| bsd-3-clause |
ChinmaiRaman/phys227-final | final.py | 1 | 6752 | #! /usr/bin/env python
"""
File: final.py
Copyright (c) 2016 Chinmai Raman
License: MIT
Course: PHYS227
Assignment: Final
Date: May 21, 2016
Email: raman105@mail.chapman.edu
Name: Chinmai Raman
Description: Final
"""
from __future__ import division
from unittest import TestCase
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class Rossler():
def __init__(self, c, dt = 0.001, T0 = 250, T = 500):
self.dt = float(dt)
self.T = float(T)
self.T0 = T0
self.c = float(c)
self.t = np.linspace(0.0, self.T, self.T / self.dt)
self.x = np.zeros(len(self.t))
self.y = np.zeros(len(self.t))
self.z = np.zeros(len(self.t))
self.x0 = 0
self.y0 = 0
self.z0 = 0
def f1(self, x, y, z, t):
return -1 * y - 1 * z
def f2(self, x, y, z, t):
return x + 0.2 * y
def f3(self, x, y, z, t):
return 0.2 + z * (x - self.c)
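# Together, f1-f3 define the Rossler system
#   dx/dt = -y - z
#   dy/dt = x + a*y
#   dz/dt = b + z*(x - c)
# with a = b = 0.2 hard-coded above and c supplied by the user.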
def run(self):
"""
Implements the fourth order Runge-Kutta method of differentiation.
"""
dt = self.dt
x = self.x
y = self.y
z = self.z
t = self.t
f1 = self.f1
f2 = self.f2
f3 = self.f3
for i in np.arange(0, len(t) - 1):
k1_x = dt * f1(x[i], y[i], z[i], t[i])
k1_y = dt * f2(x[i], y[i], z[i], t[i])
k1_z = dt * f3(x[i], y[i], z[i], t[i])
k2_x = dt * f1(x[i] + 0.5 * k1_x, y[i] + 0.5 * k1_y, z[i] + 0.5 * k1_z, t[i] + 0.5 * dt)
k2_y = dt * f2(x[i] + 0.5 * k1_x, y[i] + 0.5 * k1_y, z[i] + 0.5 * k1_z, t[i] + 0.5 * dt)
k2_z = dt * f3(x[i] + 0.5 * k1_x, y[i] + 0.5 * k1_y, z[i] + 0.5 * k1_z, t[i] + 0.5 * dt)
k3_x = dt * f1(x[i] + 0.5 * k2_x, y[i] + 0.5 * k2_y, z[i] + 0.5 * k2_z, t[i] + 0.5 * dt)
k3_y = dt * f2(x[i] + 0.5 * k2_x, y[i] + 0.5 * k2_y, z[i] + 0.5 * k2_z, t[i] + 0.5 * dt)
k3_z = dt * f3(x[i] + 0.5 * k2_x, y[i] + 0.5 * k2_y, z[i] + 0.5 * k2_z, t[i] + 0.5 * dt)
k4_x = dt * f1(x[i] + k3_x, y[i] + k3_y, z[i] + k3_z, t[i+1])
k4_y = dt * f2(x[i] + k3_x, y[i] + k3_y, z[i] + k3_z, t[i+1])
k4_z = dt * f3(x[i] + k3_x, y[i] + k3_y, z[i] + k3_z, t[i+1])
x[i+1] = x[i] + (k1_x + 2 * k2_x + 2 * k3_x + k4_x) / 6
y[i+1] = y[i] + (k1_y + 2 * k2_y + 2 * k3_y + k4_y) / 6
z[i+1] = z[i] + (k1_z + 2 * k2_z + 2 * k3_z + k4_z) / 6
def plotx(self):
t = self.t
T = self.T
x = self.x
fig, ax = plt.subplots()
ax.grid(True)
plt.plot(t, x, 'b-')
plt.xlabel('t')
plt.ylabel('x(t)')
plt.title('x(t) vs t')
plt.show(fig)
plt.close(fig)
def ploty(self):
t = self.t
T = self.T
y = self.y
fig, ax = plt.subplots()
ax.grid(True)
plt.plot(t, y, 'b-')
plt.xlabel('t')
plt.ylabel('y(t)')
plt.title('y(t) vs t')
plt.show(fig)
plt.close(fig)
def plotz(self):
t = self.t
T = self.T
z = self.z
fig, ax = plt.subplots()
ax.grid(True)
plt.plot(t, z, 'b-')
plt.xlabel('t')
plt.ylabel('z(t)')
plt.title('z(t) vs t')
plt.show(fig)
plt.close(fig)
def plotxy(self):
t = self.t
T0 = self.T0
x = self.x[np.where(t >= T0)]
y = self.y[np.where(t >= T0)]
fig, ax = plt.subplots()
ax.grid(True)
plt.plot(x, y, 'b-')
plt.xlabel('x(t)')
plt.ylabel('y(t)')
plt.title('y(t) vs x(t)')
ax.axis([-12, 12, -12, 12])
plt.show(fig)
plt.close(fig)
def plotyz(self):
t = self.t
T0 = self.T0
y = self.y[np.where(t >= T0)]
z = self.z[np.where(t >= T0)]
fig, ax = plt.subplots()
ax.grid(True)
plt.plot(y, z, 'b-')
plt.xlabel('y(t)')
plt.ylabel('z(t)')
plt.title('z(t) vs y(t)')
ax.axis([-12, 12, 0, 25])
plt.show(fig)
plt.close(fig)
def plotxz(self):
t = self.t
T0 = self.T0
x = self.x[np.where(t >= T0)]
z = self.z[np.where(t >= T0)]
fig, ax = plt.subplots()
ax.grid(True)
plt.plot(x, z, 'b-')
plt.xlabel('x(t)')
plt.ylabel('z(t)')
plt.title('z(t) vs x(t)')
ax.axis([-12, 12, 0, 25])
plt.show(fig)
plt.close(fig)
def plotxyz(self):
t = self.t
T0 = self.T0
x = self.x[np.where(t >= T0)]
y = self.y[np.where(t >= T0)]
z = self.z[np.where(t >= T0)]
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
ax.grid(True)
plt.plot(x, y, z, 'b-')
plt.xlabel('x(t)')
plt.ylabel('y(t)')
ax.set_zlabel("z(t)")
plt.title('z(t) vs y(t) vs x(t)')
ax.axis([-12, 12, -12, 12])
ax.set_zlim((0, 25))
plt.show(fig)
plt.close(fig)
def findmaxima(c, dim):
"""
finds the local maxima of x given a particular c
"""
ros = Rossler(c)
ros.run()
if dim == 'x':
var = ros.x
elif dim == 'y':
var = ros.y
elif dim == 'z':
var = ros.z
values = var[np.where(ros.t >= ros.T0)]
local_max = values[np.where((np.r_[True, values[1:] > values[:-1]] & np.r_[values[:-1] > values[1:], True]) == True)]
return local_max[local_max > 0]
def plotmaxima(dim):
"""
plots local maxima of x,y, or z vs c
"""
c_values = np.linspace(2, 6, 41)
var = [findmaxima(c, dim)[-17:] for c in c_values]
fig = plt.figure(1)
plt.plot(c_values, [elem for elem in var], 'b-')
plt.xlabel('c')
plt.ylabel(dim)
plt.ylim([3,12])
plt.title(dim + ' local maxes vs. c')
plt.show()
class Test_Ros(TestCase):
def test_ros(self):
T = 500
dt = 0.001
x_test = dt * np.arange(0, T / dt)
y_test = dt * np.arange(0, T / dt)
z_test = dt * np.arange(0, T / dt)
def f1(x, y, z, t):
return 1
def f2(x, y, z, t):
return 1
def f3(x, y, z, t):
return 1
test = Rossler(2)
test.f1 = f1
test.f2 = f2
test.f3 = f3
test.run()
print test.x[-10:]
print x_test[-10:]
assert (abs(test.x - x_test) < 1e-3).all() and (abs(test.y - y_test) < 1e-3).all() and (abs(test.z - z_test) < 1e-3).all(), 'Failure' | mit |
opencobra/cobrapy | src/cobra/test/test_flux_analysis/test_deletion.py | 1 | 11849 | # -*- coding: utf-8 -*-
"""Test functionalities of reaction and gene deletions."""
from __future__ import absolute_import
import math
import numpy as np
import pytest
from pandas import Series
from cobra.flux_analysis.deletion import (
double_gene_deletion,
double_reaction_deletion,
single_gene_deletion,
single_reaction_deletion,
)
from cobra.flux_analysis.room import add_room
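# For context: FBA is flux balance analysis; MOMA (minimization of metabolic
# adjustment) minimizes the (quadratic or linear) distance to a reference flux
# distribution after a knockout; ROOM (regulatory on/off minimization)
# minimizes the number of fluxes that change significantly.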
# Single gene deletion FBA
def test_single_gene_deletion_fba_benchmark(model, benchmark, all_solvers):
"""Benchmark single gene deletion using FBA."""
model.solver = all_solvers
benchmark(single_gene_deletion, model)
def test_single_gene_deletion_fba(model, all_solvers):
"""Test single gene deletion using FBA."""
# expected knockouts for textbook model
model.solver = all_solvers
growth_dict = {
"b0008": 0.87,
"b0114": 0.80,
"b0116": 0.78,
"b2276": 0.21,
"b1779": 0.00,
}
result = single_gene_deletion(
model=model, gene_list=list(growth_dict), method="fba", processes=1
)
for gene, value in growth_dict.items():
assert np.isclose(result.knockout[gene].growth, value, atol=1e-02)
# Singe gene deletion MOMA
def test_single_gene_deletion_moma_benchmark(model, benchmark, qp_solvers):
"""Benchmark single gene deletion using MOMA."""
model.solver = qp_solvers
genes = ["b0008", "b0114", "b2276", "b1779"]
benchmark(
single_gene_deletion, model=model, gene_list=genes, method="moma", processes=1
)
def test_single_gene_deletion_moma(model, qp_solvers):
"""Test single gene deletion using MOMA."""
model.solver = qp_solvers
# expected knockouts for textbook model
growth_dict = {
"b0008": 0.87,
"b0114": 0.71,
"b0116": 0.56,
"b2276": 0.11,
"b1779": 0.00,
}
result = single_gene_deletion(
model=model, gene_list=list(growth_dict), method="moma", processes=1
)
for gene, value in growth_dict.items():
assert np.isclose(result.knockout[gene].growth, value, atol=1e-02)
def test_single_gene_deletion_moma_reference(model, qp_solvers):
"""Test single gene deletion using MOMA (reference solution)."""
model.solver = qp_solvers
# expected knockouts for textbook model
growth_dict = {
"b0008": 0.87,
"b0114": 0.71,
"b0116": 0.56,
"b2276": 0.11,
"b1779": 0.00,
}
sol = model.optimize()
result = single_gene_deletion(
model=model,
gene_list=list(growth_dict),
method="moma",
solution=sol,
processes=1,
)
for gene, value in growth_dict.items():
assert np.isclose(result.knockout[gene].growth, value, atol=1e-02)
# Single gene deletion linear MOMA
def test_single_gene_deletion_linear_moma_benchmark(model, benchmark, all_solvers):
"""Benchmark single gene deletion using linear MOMA."""
model.solver = all_solvers
genes = ["b0008", "b0114", "b2276", "b1779"]
benchmark(
single_gene_deletion,
model=model,
gene_list=genes,
method="linear moma",
processes=1,
)
def test_single_gene_deletion_linear_moma(model, all_solvers):
"""Test single gene deletion using linear MOMA (reference solution)."""
model.solver = all_solvers
# expected knockouts for textbook model
growth_dict = {
"b0008": 0.87,
"b0114": 0.76,
"b0116": 0.65,
"b2276": 0.08,
"b1779": 0.00,
}
sol = model.optimize()
result = single_gene_deletion(
model=model,
gene_list=list(growth_dict),
method="linear moma",
solution=sol,
processes=1,
)
for gene, value in growth_dict.items():
assert np.isclose(result.knockout[gene].growth, value, atol=1e-02)
# Single gene deletion ROOM
def test_single_gene_deletion_room_benchmark(model, benchmark, all_solvers):
"""Benchmark single gene deletion using ROOM."""
if all_solvers == "glpk":
pytest.skip("GLPK is too slow to run ROOM.")
model.solver = all_solvers
genes = ["b0008", "b0114", "b2276", "b1779"]
benchmark(
single_gene_deletion, model=model, gene_list=genes, method="room", processes=1
)
# Single gene deletion linear ROOM
def test_single_gene_deletion_linear_room_benchmark(model, benchmark, all_solvers):
"""Benchmark single gene deletion using linear ROOM."""
model.solver = all_solvers
genes = ["b0008", "b0114", "b2276", "b1779"]
benchmark(
single_gene_deletion,
model=model,
gene_list=genes,
method="linear room",
processes=1,
)
# Single reaction deletion
def test_single_reaction_deletion_benchmark(model, benchmark, all_solvers):
"""Benchmark single reaction deletion."""
model.solver = all_solvers
benchmark(single_reaction_deletion, model=model, processes=1)
def test_single_reaction_deletion(model, all_solvers):
"""Test single reaction deletion."""
model.solver = all_solvers
expected_results = {
"FBA": 0.70404,
"FBP": 0.87392,
"CS": 0,
"FUM": 0.81430,
"GAPD": 0,
"GLUDy": 0.85139,
}
result = single_reaction_deletion(
model=model, reaction_list=list(expected_results), processes=1
)
for reaction, value in expected_results.items():
assert np.isclose(result.knockout[reaction].growth, value, atol=1e-05)
# Single reaction deletion ROOM
def test_single_reaction_deletion_room(room_model, room_solution, all_solvers):
"""Test single reaction deletion using ROOM."""
room_model.solver = all_solvers
expected = Series(
{
"v1": 10.0,
"v2": 5.0,
"v3": 0.0,
"v4": 5.0,
"v5": 5.0,
"v6": 0.0,
"b1": 10.0,
"b2": 5.0,
"b3": 5.0,
},
index=["v1", "v2", "v3", "v4", "v5", "v6", "b1", "b2", "b3"],
)
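# With delta=0 and epsilon=0 there is no tolerance band around the reference
# fluxes, so (assuming cobrapy's usual ROOM semantics) any deviation from
# `room_solution` counts as a change and the optimum keeps fluxes as close as
# possible to the reference, which is what `expected` encodes above.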
with room_model:
room_model.reactions.v6.knock_out()
add_room(room_model, solution=room_solution, delta=0.0, epsilon=0.0)
room_sol = room_model.optimize()
assert np.allclose(room_sol.fluxes, expected)
# Single reaction deletion linear ROOM
def test_single_reaction_deletion_linear_room(room_model, room_solution, all_solvers):
"""Test single reaction deletion using linear ROOM."""
room_model.solver = all_solvers
expected = Series(
{
"v1": 10.0,
"v2": 5.0,
"v3": 0.0,
"v4": 5.0,
"v5": 5.0,
"v6": 0.0,
"b1": 10.0,
"b2": 5.0,
"b3": 5.0,
},
index=["v1", "v2", "v3", "v4", "v5", "v6", "b1", "b2", "b3"],
)
with room_model:
room_model.reactions.v6.knock_out()
add_room(
room_model, solution=room_solution, delta=0.0, epsilon=0.0, linear=True
)
linear_room_sol = room_model.optimize()
assert np.allclose(linear_room_sol.fluxes, expected)
# Double gene deletion
def test_double_gene_deletion_benchmark(large_model, benchmark):
"""Benchmark double gene deletion."""
genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935", "b1276", "b1241"]
benchmark(double_gene_deletion, large_model, gene_list1=genes, processes=1)
def test_double_gene_deletion(model):
"""Test double gene deletion."""
genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935", "b1276", "b1241"]
growth_dict = {
"b0720": {
"b0720": 0.0,
"b0724": 0.0,
"b0726": 0.0,
"b1241": 0.0,
"b1276": 0.0,
"b2935": 0.0,
"b4025": 0.0,
},
"b0724": {
"b0720": 0.0,
"b0724": 0.814,
"b0726": 0.814,
"b1241": 0.814,
"b1276": 0.814,
"b2935": 0.814,
"b4025": 0.739,
},
"b0726": {
"b0720": 0.0,
"b0724": 0.814,
"b0726": 0.858,
"b1241": 0.858,
"b1276": 0.858,
"b2935": 0.858,
"b4025": 0.857,
},
"b1241": {
"b0720": 0.0,
"b0724": 0.814,
"b0726": 0.858,
"b1241": 0.874,
"b1276": 0.874,
"b2935": 0.874,
"b4025": 0.863,
},
"b1276": {
"b0720": 0.0,
"b0724": 0.814,
"b0726": 0.858,
"b1241": 0.874,
"b1276": 0.874,
"b2935": 0.874,
"b4025": 0.863,
},
"b2935": {
"b0720": 0.0,
"b0724": 0.814,
"b0726": 0.858,
"b1241": 0.874,
"b1276": 0.874,
"b2935": 0.874,
"b4025": 0.863,
},
"b4025": {
"b0720": 0.0,
"b0724": 0.739,
"b0726": 0.857,
"b1241": 0.863,
"b1276": 0.863,
"b2935": 0.863,
"b4025": 0.863,
},
}
solution = double_gene_deletion(model, gene_list1=genes, processes=3)
solution_one_process = double_gene_deletion(model, gene_list1=genes, processes=1)
for rxn_a, sub in growth_dict.items():
for rxn_b, growth in sub.items():
sol = solution.knockout[{rxn_a, rxn_b}]
sol_one = solution_one_process.knockout[{rxn_a, rxn_b}]
assert np.isclose(sol.growth, growth, atol=1e-3)
assert np.isclose(sol_one.growth, growth, atol=1e-3)
# Double reaction deletion
def test_double_reaction_deletion_benchmark(large_model, benchmark):
"""Benchmark double reaction deletion."""
reactions = large_model.reactions[1::100]
benchmark(double_reaction_deletion, large_model, reaction_list1=reactions)
def test_double_reaction_deletion(model):
"""Test double reaction deletion."""
reactions = ["FBA", "ATPS4r", "ENO", "FRUpts2"]
growth_dict = {
"FBA": {"ATPS4r": 0.135, "ENO": float("nan"), "FRUpts2": 0.704},
"ATPS4r": {"ENO": float("nan"), "FRUpts2": 0.374},
"ENO": {"FRUpts2": 0.0},
}
solution = double_reaction_deletion(model, reaction_list1=reactions, processes=3)
solution_one_process = double_reaction_deletion(
model, reaction_list1=reactions, processes=1
)
for (rxn_a, sub) in growth_dict.items():
for rxn_b, growth in sub.items():
sol = solution.knockout[{rxn_a, rxn_b}]
sol_one = solution_one_process.knockout[{rxn_a, rxn_b}]
if math.isnan(growth):
assert math.isnan(sol.growth)
assert math.isnan(sol_one.growth)
else:
assert np.isclose(sol.growth, growth, atol=1e-3)
assert np.isclose(sol_one.growth, growth, atol=1e-3)
def test_deletion_accessor(small_model):
"""Test the DataFrame accessor."""
single = single_reaction_deletion(small_model, small_model.reactions[0:10])
double = double_reaction_deletion(small_model, small_model.reactions[0:10])
rxn1 = small_model.reactions[0]
rxn2 = small_model.reactions[1]
with pytest.raises(ValueError):
single.knockout[1]
with pytest.raises(ValueError):
single.knockout[{"a": 1}]
assert single.knockout[rxn1].ids.iloc[0] == {rxn1.id}
assert double.knockout[{rxn1, rxn2}].ids.iloc[0] == {rxn1.id, rxn2.id}
assert all(single.knockout[rxn1.id] == single.knockout[rxn1])
assert all(double.knockout[{rxn1.id, rxn2.id}] == double.knockout[{rxn1, rxn2}])
assert single.knockout[rxn1, rxn2].shape == (2, 3)
assert double.knockout[rxn1, rxn2].shape == (2, 3)
assert double.knockout[{rxn1, rxn2}].shape == (1, 3)
assert double.knockout[{rxn1}, {rxn2}].shape == (2, 3)
| gpl-2.0 |
ltiao/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed-dimensional space,
possibly with collisions. The word count vectors are then normalized so that
each has an l2-norm equal to one (projected onto the Euclidean unit ball),
which seems to be important for k-means to work in high-dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: because they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
mannyfin/VolatilityForecasting | src/function_runs.py | 1 | 5938 | from PastAsPresent import *
from linear_regression import *
from garch_pq_model import GarchModel as gm
# from KNN import KNN
import numpy as np
from KNN import KNN
import pandas as pd
from res2df_list import *
from VAR2 import *
class FunctionCalls(object):
input_data = None
tnplus1 = 0
lr = 0
arch = 0
garchpq = 0
def __init__(self):
pass
def function_runs(self,dates=None, filename=None, stringinput=None, warmup=None,input_data=None, tnplus1=None, lr=None, arch=None, garchpq=None, k_nn=None, var_q=None, LASSO=False):
output = list()
"""tnplus1"""
try:
if tnplus1 is None:
print("Not running tnplus1")
elif tnplus1 is not None:
tnplus1_method = PastAsPresent.tn_pred_tn_plus_1(data=input_data, filename=filename, stringinput=stringinput)
# output['PastAsPresent'] = part1
output = result_to_df_list(list_name=output, method_result=tnplus1_method,
index_value=['PastAsPresent'], column_value=['MSE', 'QL'])
# output['PastAsPresent'] = tnplus1_method
print("Above is Past as present for " + str(stringinput))
except ValueError:
print("Error: Make sure you pass in 1 or 0 for arg in tnplus1... ")
"""Linear Regression"""
try:
if lr is None:
print("Not running linear regression")
elif isinstance(lr, list) and len(lr) >= 1:
for count, elem in enumerate(lr):
LRmethod = LinRegression.lin_reg(data=input_data, n=elem, filename=filename,
stringinput=stringinput, warmup_period=warmup)
# output['LinearRegression_' + str(elem)] = LRmethod[0:2]
output = result_to_df_list(list_name=output, method_result=LRmethod[0:2],
index_value=['LinearRegression_' + str(elem)], column_value=['MSE', 'QL'])
# output['LinearRegression_' + str(elem)] = LRmethod[0:2]
print("Above is LR for " +str(elem)+" "+ str(stringinput) +" Volatilities")
else:
pass
except TypeError:
print("Error: Please pass an array of ints...")
"""ARCH """
try:
#
if arch is None:
print("Not running arch")
elif len(arch) == 3:
ARCH = gm.arch_q_mse(data=input_data, Timedt=stringinput, ret=arch[0], q=arch[1], lags=arch[2],
warmup_period=warmup, filename=filename)
# output['ARCH'] = ARCH
output = result_to_df_list(list_name=output, method_result=ARCH,
index_value=['ARCH'], column_value=['MSE', 'QL'])
print("Above is ARCH for " + str(stringinput))
except TypeError:
print("Error: ARCH, make sure all the params are filled")
"""GARCH """
try:
# 4 is the num of args to pass into the fcn
if garchpq is None:
print("Not running garch")
elif len(garchpq) == 4:
GARCH = gm.garch_pq_mse(data=input_data, Timedt=stringinput, ret=garchpq[0], p=garchpq[1], q=garchpq[2],
lags=garchpq[3], warmup_period=warmup, filename=filename)
# output['GARCH'] = GARCH
output = result_to_df_list(list_name=output, method_result=GARCH,
index_value=['GARCH'], column_value=['MSE', 'QL'])
print("Above is GARCH for " + str(stringinput))
except TypeError:
print("Error: GARCH, make sure all the params are filled")
"""KNN """
try:
# k_nn is a list of neighbour counts (k values) to evaluate
if k_nn is None:
print("Not running KNN")
elif isinstance(k_nn, list) and len(k_nn) >= 1:
for count, elem in enumerate(k_nn):
KNNmethod = KNN(vol_data=input_data, k=elem, warmup=warmup, filename=filename, Timedt=stringinput)
# output['KNN_'+str(elem)] = KNNmethod
output = result_to_df_list(list_name=output, method_result=KNNmethod,
index_value=['KNN_'+str(elem)], column_value=['MSE', 'QL'])
print("Above is KNN for " +str(elem)+ " " + str(stringinput))
except TypeError:
print("Error: KNN, make sure all the params are filled")
"""VAR """
try:
# var_q is a list of VAR lag orders (p values) to evaluate
if var_q is None:
print("Not running VAR")
elif isinstance(var_q, list) and len(var_q) >= 1:
for count, elem in enumerate(var_q):
# KNNmethod = KNN(vol_data=input_data, k=elem, warmup=warmup, filename=filename, Timedt=stringinput)
VAR_q = VAR(p=elem, combined_vol=input_data, warmup_period=warmup)\
.VAR_calc(Timedt=stringinput, dates=dates, filename=filename)
import matplotlib.pyplot as plt
plt.show()
# the line below doesn't work at the moment...
# output = result_to_df_list(list_name=output, method_result=VAR_q,
# index_value=['VAR_p='+str(elem)], column_value=['MSE', 'QL'])
print("Above is VAR for p=" +str(elem)+ " " + str(stringinput))
except TypeError:
print("Error: VAR, make sure all the params are filled")
# concatenates the list of df's
output = list_to_df(list_name=output)
return output
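# A minimal usage sketch (hedged: the CSV path, column name and warmup length
# below are hypothetical placeholders, not part of this module):
#
#     vol = pd.read_csv('vol_series.csv', index_col=0, parse_dates=True)['vol']
#     calls = FunctionCalls()
#     results = calls.function_runs(dates=vol.index, filename='vol_series.csv',
#                                   stringinput='Daily', warmup=400,
#                                   input_data=vol, tnplus1=1, lr=[1, 3, 5])
#     print(results)  # one row of MSE/QL per method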
| gpl-3.0 |
josephcslater/scipy | scipy/integrate/_bvp.py | 61 | 39966 | """Boundary value problem solver."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm, pinv
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse.linalg import splu
from scipy.optimize import OptimizeResult
EPS = np.finfo(float).eps
def estimate_fun_jac(fun, x, y, p, f0=None):
"""Estimate derivatives of an ODE system rhs with forward differences.
Returns
-------
df_dy : ndarray, shape (n, n, m)
Derivatives with respect to y. An element (i, j, q) corresponds to
d f_i(x_q, y_q) / d (y_q)_j.
df_dp : ndarray with shape (n, k, m) or None
Derivatives with respect to p. An element (i, j, q) corresponds to
d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned.
"""
n, m = y.shape
if f0 is None:
f0 = fun(x, y, p)
dtype = y.dtype
df_dy = np.empty((n, n, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(y))
for i in range(n):
y_new = y.copy()
y_new[i] += h[i]
hi = y_new[i] - y[i]
f_new = fun(x, y_new, p)
df_dy[:, i, :] = (f_new - f0) / hi
k = p.shape[0]
if k == 0:
df_dp = None
else:
df_dp = np.empty((n, k, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(p))
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
f_new = fun(x, y, p_new)
df_dp[:, i, :] = (f_new - f0) / hi
return df_dy, df_dp
def estimate_bc_jac(bc, ya, yb, p, bc0=None):
"""Estimate derivatives of boundary conditions with forward differences.
Returns
-------
dbc_dya : ndarray, shape (n + k, n)
Derivatives with respect to ya. An element (i, j) corresponds to
d bc_i / d ya_j.
dbc_dyb : ndarray, shape (n + k, n)
Derivatives with respect to yb. An element (i, j) corresponds to
d bc_i / d yb_j.
dbc_dp : ndarray with shape (n + k, k) or None
Derivatives with respect to p. An element (i, j) corresponds to
d bc_i / d p_j. If `p` is empty, None is returned.
"""
n = ya.shape[0]
k = p.shape[0]
if bc0 is None:
bc0 = bc(ya, yb, p)
dtype = ya.dtype
dbc_dya = np.empty((n, n + k), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(ya))
for i in range(n):
ya_new = ya.copy()
ya_new[i] += h[i]
hi = ya_new[i] - ya[i]
bc_new = bc(ya_new, yb, p)
dbc_dya[i] = (bc_new - bc0) / hi
dbc_dya = dbc_dya.T
h = EPS**0.5 * (1 + np.abs(yb))
dbc_dyb = np.empty((n, n + k), dtype=dtype)
for i in range(n):
yb_new = yb.copy()
yb_new[i] += h[i]
hi = yb_new[i] - yb[i]
bc_new = bc(ya, yb_new, p)
dbc_dyb[i] = (bc_new - bc0) / hi
dbc_dyb = dbc_dyb.T
if k == 0:
dbc_dp = None
else:
h = EPS**0.5 * (1 + np.abs(p))
dbc_dp = np.empty((k, n + k), dtype=dtype)
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
bc_new = bc(ya, yb, p_new)
dbc_dp[i] = (bc_new - bc0) / hi
dbc_dp = dbc_dp.T
return dbc_dya, dbc_dyb, dbc_dp
def compute_jac_indices(n, m, k):
"""Compute indices for the collocation system Jacobian construction.
See `construct_global_jac` for the explanation.
"""
i_col = np.repeat(np.arange((m - 1) * n), n)
j_col = (np.tile(np.arange(n), n * (m - 1)) +
np.repeat(np.arange(m - 1) * n, n**2))
i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n)
j_bc = np.tile(np.arange(n), n + k)
i_p_col = np.repeat(np.arange((m - 1) * n), k)
j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n)
i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k)
j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k)
i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc))
j = np.hstack((j_col, j_col + n,
j_bc, j_bc + (m - 1) * n,
j_p_col, j_p_bc))
return i, j
def stacked_matmul(a, b):
"""Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]).
In our case a[i, :, :] and b[i, :, :] are always square.
"""
# Empirical optimization. Use outer Python loop and BLAS for large
# matrices, otherwise use a single einsum call.
if a.shape[1] > 50:
out = np.empty_like(a)
for i in range(a.shape[0]):
out[i] = np.dot(a[i], b[i])
return out
else:
return np.einsum('...ij,...jk->...ik', a, b)
def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp,
df_dp_middle, dbc_dya, dbc_dyb, dbc_dp):
"""Construct the Jacobian of the collocation system.
There are n * m + k functions: m - 1 collocations residuals, each
containing n components, followed by n + k boundary condition residuals.
There are n * m + k variables: m vectors of y, each containing n
components, followed by k values of vector p.
For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
the following sparsity structure:
1 1 2 2 0 0 0 0 5
1 1 2 2 0 0 0 0 5
0 0 1 1 2 2 0 0 5
0 0 1 1 2 2 0 0 5
0 0 0 0 1 1 2 2 5
0 0 0 0 1 1 2 2 5
3 3 0 0 0 0 4 4 6
3 3 0 0 0 0 4 4 6
3 3 0 0 0 0 4 4 6
Zeros denote identically zero values, other values denote different kinds
of blocks in the matrix (see below). The blank row indicates the separation
of collocation residuals from boundary conditions. And the blank column
indicates the separation of y values from p values.
Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives
of collocation residuals with respect to y.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
k : int
Number of the unknown parameters.
i_jac, j_jac : ndarray
Row and column indices returned by `compute_jac_indices`. They
represent different blocks in the Jacobian matrix in the following
order (see the scheme above):
* 1: m - 1 diagonal n x n blocks for the collocation residuals.
* 2: m - 1 off-diagonal n x n blocks for the collocation residuals.
* 3 : (n + k) x n block for the dependency of the boundary
conditions on ya.
* 4: (n + k) x n block for the dependency of the boundary
conditions on yb.
* 5: (m - 1) * n x k block for the dependency of the collocation
residuals on p.
* 6: (n + k) x k block for the dependency of the boundary
conditions on p.
df_dy : ndarray, shape (n, n, m)
Jacobian of f with respect to y computed at the mesh nodes.
df_dy_middle : ndarray, shape (n, n, m - 1)
Jacobian of f with respect to y computed at the middle between the
mesh nodes.
df_dp : ndarray with shape (n, k, m) or None
Jacobian of f with respect to p computed at the mesh nodes.
df_dp_middle: ndarray with shape (n, k, m - 1) or None
Jacobian of f with respect to p computed at the middle between the
mesh nodes.
dbc_dya, dbc_dyb : ndarray, shape (n, n)
Jacobian of bc with respect to ya and yb.
dbc_dp: ndarray with shape (n, k) or None
Jacobian of bc with respect to p.
Returns
-------
J : csc_matrix, shape (n * m + k, n * m + k)
Jacobian of the collocation system in a sparse form.
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
df_dy = np.transpose(df_dy, (2, 0, 1))
df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))
h = h[:, np.newaxis, np.newaxis]
dtype = df_dy.dtype
# Computing diagonal n x n blocks.
dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_0[:] = -np.identity(n)
dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[:-1])
dPhi_dy_0 -= h**2 / 12 * T
# Computing off-diagonal n x n blocks.
dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_1[:] = np.identity(n)
dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[1:])
dPhi_dy_1 += h**2 / 12 * T
values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
dbc_dyb.ravel()))
if k > 0:
df_dp = np.transpose(df_dp, (2, 0, 1))
df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
df_dp_middle += 0.125 * h * T
dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))
J = coo_matrix((values, (i_jac, j_jac)))
return csc_matrix(J)
def collocation_fun(fun, y, p, x, h):
"""Evaluate collocation residuals.
This function lies in the core of the method. The solution is sought
as a cubic C1 continuous spline with derivatives matching the ODE rhs
at given nodes `x`. Collocation conditions are formed from the equality
of the spline derivatives and rhs of the ODE system in the middle points
between nodes.
This method belongs to the Lobatto IIIA family in the ODE literature.
Refer to [1]_ for the formula and some discussion.
Returns
-------
col_res : ndarray, shape (n, m - 1)
Collocation residuals at the middle points of the mesh intervals.
y_middle : ndarray, shape (n, m - 1)
Values of the cubic spline evaluated at the middle points of the mesh
intervals.
f : ndarray, shape (n, m)
RHS of the ODE system evaluated at the mesh nodes.
f_middle : ndarray, shape (n, m - 1)
RHS of the ODE system evaluated at the middle points of the mesh
intervals (and using `y_middle`).
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
f = fun(x, y, p)
y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) -
0.125 * h * (f[:, 1:] - f[:, :-1]))
f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)
col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] +
4 * f_middle)
return col_res, y_middle, f, f_middle
def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
"""Create the function and the Jacobian for the collocation system."""
x_middle = x[:-1] + 0.5 * h
i_jac, j_jac = compute_jac_indices(n, m, k)
def col_fun(y, p):
return collocation_fun(fun, y, p, x, h)
def sys_jac(y, p, y_middle, f, f_middle, bc0):
if fun_jac is None:
df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
df_dy_middle, df_dp_middle = estimate_fun_jac(
fun, x_middle, y_middle, p, f_middle)
else:
df_dy, df_dp = fun_jac(x, y, p)
df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p)
if bc_jac is None:
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1],
p, bc0)
else:
dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy,
df_dy_middle, df_dp, df_dp_middle, dbc_dya,
dbc_dyb, dbc_dp)
return col_fun, sys_jac
def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol):
"""Solve the nonlinear collocation system by a Newton method.
This is a simple Newton method with a backtracking line search. As
advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
is used, where J is the Jacobian matrix at the current iteration and r is
the vector of collocation residuals (values of the system lhs).
The method alternates between full Newton iterations and fixed-Jacobian
iterations: when a full step (alpha == 1) is accepted, the Jacobian is reused
on the next iteration (see the `recompute_jac` logic below).
There are other tricks proposed in [1]_, but they are not used as they
don't seem to improve anything significantly, and even break the
convergence on some test problems I tried.
All important parameters of the algorithm are defined inside the function.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
h : ndarray, shape (m-1,)
Mesh intervals.
col_fun : callable
Function computing collocation residuals.
bc : callable
Function computing boundary condition residuals.
jac : callable
Function computing the Jacobian of the whole system (including
collocation and boundary condition residuals). It is supposed to
return csc_matrix.
y : ndarray, shape (n, m)
Initial guess for the function values at the mesh nodes.
p : ndarray, shape (k,)
Initial guess for the unknown parameters.
B : ndarray with shape (n, n) or None
Matrix to force the S y(a) = 0 condition for a problems with the
singular term. If None, the singular term is assumed to be absent.
bvp_tol : float
Tolerance to which we want to solve a BVP.
Returns
-------
y : ndarray, shape (n, m)
Final iterate for the function values at the mesh nodes.
p : ndarray, shape (k,)
Final iterate for the unknown parameters.
singular : bool
True, if the LU decomposition failed because Jacobian turned out
to be singular.
References
----------
.. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations"
"""
# We know that the solution residuals at the middle points of the mesh
# are connected with collocation residuals r_middle = 1.5 * col_res / h.
# As our BVP solver tries to decrease relative residuals below a certain
# tolerance, it seems reasonable to terminate Newton iterations by
# comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold,
# which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite
# the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r
# should be computed as follows:
tol_r = 2/3 * h * 5e-2 * bvp_tol
# We also need to control residuals of the boundary conditions. But it
# seems that they become very small eventually as the solver progresses,
# i.e. the tolerance for the BC is not very important. We set it 1.5 orders
# lower than the BVP tolerance as well.
tol_bc = 5e-2 * bvp_tol
# Maximum allowed number of Jacobian evaluation and factorization, in
# other words the maximum number of full Newton iterations. A small value
# is recommended in the literature.
max_njev = 4
# Maximum number of iterations, considering that some of them can be
# performed with the fixed Jacobian. In theory such iterations are cheap,
# but it's not that simple in Python.
max_iter = 8
# Minimum relative improvement of the criterion function to accept the
# step (Armijo constant).
sigma = 0.2
# Step size decrease factor for backtracking.
tau = 0.5
# Maximum number of backtracking steps, the minimum step is then
# tau ** n_trial.
n_trial = 4
col_res, y_middle, f, f_middle = col_fun(y, p)
bc_res = bc(y[:, 0], y[:, -1], p)
res = np.hstack((col_res.ravel(order='F'), bc_res))
njev = 0
singular = False
recompute_jac = True
for iteration in range(max_iter):
if recompute_jac:
J = jac(y, p, y_middle, f, f_middle, bc_res)
njev += 1
try:
LU = splu(J)
except RuntimeError:
singular = True
break
step = LU.solve(res)
cost = np.dot(step, step)
y_step = step[:m * n].reshape((n, m), order='F')
p_step = step[m * n:]
alpha = 1
for trial in range(n_trial + 1):
y_new = y - alpha * y_step
if B is not None:
y_new[:, 0] = np.dot(B, y_new[:, 0])
p_new = p - alpha * p_step
col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
res = np.hstack((col_res.ravel(order='F'), bc_res))
step_new = LU.solve(res)
cost_new = np.dot(step_new, step_new)
if cost_new < (1 - 2 * alpha * sigma) * cost:
break
if trial < n_trial:
alpha *= tau
y = y_new
p = p_new
if njev == max_njev:
break
if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
np.all(bc_res < tol_bc)):
break
# If the full step was taken, then we are going to continue with
# the same Jacobian. This is the approach of BVP_SOLVER.
if alpha == 1:
step = step_new
cost = cost_new
recompute_jac = False
else:
recompute_jac = True
return y, p, singular
def print_iteration_header():
print("{:^15}{:^15}{:^15}{:^15}".format(
"Iteration", "Max residual", "Total nodes", "Nodes added"))
def print_iteration_progress(iteration, residual, total_nodes, nodes_added):
print("{:^15}{:^15.2e}{:^15}{:^15}".format(
iteration, residual, total_nodes, nodes_added))
class BVPResult(OptimizeResult):
pass
TERMINATION_MESSAGES = {
0: "The algorithm converged to the desired accuracy.",
1: "The maximum number of mesh nodes is exceeded.",
2: "A singular Jacobian encountered when solving the collocation system."
}
def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
"""Estimate rms values of collocation residuals using Lobatto quadrature.
The residuals are defined as the difference between the derivatives of
our solution and rhs of the ODE system. We use relative residuals, i.e.
normalized by 1 + np.abs(f). RMS values are computed as sqrt from the
normalized integrals of the squared relative residuals over each interval.
Integrals are estimated using 5-point Lobatto quadrature [1]_, we use the
fact that residuals at the mesh nodes are identically zero.
In [2] they don't normalize integrals by interval lengths, which gives
a higher rate of convergence of the residuals by the factor of h**0.5.
I chose to do such normalization for an ease of interpretation of return
values as RMS estimates.
Returns
-------
rms_res : ndarray, shape (m - 1,)
Estimated rms values of the relative residuals over each interval.
References
----------
.. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
.. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
x_middle = x[:-1] + 0.5 * h
s = 0.5 * h * (3/7)**0.5
x1 = x_middle + s
x2 = x_middle - s
y1 = sol(x1)
y2 = sol(x2)
y1_prime = sol(x1, 1)
y2_prime = sol(x2, 1)
f1 = fun(x1, y1, p)
f2 = fun(x2, y2, p)
r1 = y1_prime - f1
r2 = y2_prime - f2
r_middle /= 1 + np.abs(f_middle)
r1 /= 1 + np.abs(f1)
r2 /= 1 + np.abs(f2)
r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0)
r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0)
r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)
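# 32/45 and 49/90 are the 5-point Lobatto weights for the centre node and the
# nodes at +-sqrt(3/7); the endpoint weights (1/10) drop out because the
# residuals vanish at the mesh nodes. The leading 0.5 rescales the quadrature
# from the reference interval [-1, 1] to an average over the mesh interval.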
return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5
def create_spline(y, yp, x, h):
"""Create a cubic spline given values and derivatives.
Formulas for the coefficients are taken from interpolate.CubicSpline.
Returns
-------
sol : PPoly
Constructed spline as a PPoly instance.
"""
from scipy.interpolate import PPoly
n, m = y.shape
c = np.empty((4, n, m - 1), dtype=y.dtype)
slope = (y[:, 1:] - y[:, :-1]) / h
t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h
c[0] = t / h
c[1] = (slope - yp[:, :-1]) / h - t
c[2] = yp[:, :-1]
c[3] = y[:, :-1]
c = np.rollaxis(c, 1)
return PPoly(c, x, extrapolate=True, axis=1)
def modify_mesh(x, insert_1, insert_2):
"""Insert nodes into a mesh.
Nodes removal logic is not established, its impact on the solver is
presumably negligible. So only insertion is done in this function.
Parameters
----------
x : ndarray, shape (m,)
Mesh nodes.
insert_1 : ndarray
Indices of intervals into which one new node is inserted in the middle.
insert_2 : ndarray
Indices of intervals into which two new nodes are inserted, dividing the
interval into 3 equal parts.
Returns
-------
x_new : ndarray
New mesh nodes.
Notes
-----
`insert_1` and `insert_2` should not have common values.
"""
# Because np.insert implementation apparently varies with a version of
# numpy, we use a simple and reliable approach with sorting.
return np.sort(np.hstack((
x,
0.5 * (x[insert_1] + x[insert_1 + 1]),
(2 * x[insert_2] + x[insert_2 + 1]) / 3,
(x[insert_2] + 2 * x[insert_2 + 1]) / 3
)))
def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
"""Wrap functions for unified usage in the solver."""
if fun_jac is None:
fun_jac_wrapped = None
if bc_jac is None:
bc_jac_wrapped = None
if k == 0:
def fun_p(x, y, _):
return np.asarray(fun(x, y), dtype)
def bc_wrapped(ya, yb, _):
return np.asarray(bc(ya, yb), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, _):
return np.asarray(fun_jac(x, y), dtype), None
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, _):
dbc_dya, dbc_dyb = bc_jac(ya, yb)
return (np.asarray(dbc_dya, dtype),
np.asarray(dbc_dyb, dtype), None)
else:
def fun_p(x, y, p):
return np.asarray(fun(x, y, p), dtype)
def bc_wrapped(ya, yb, p):
return np.asarray(bc(ya, yb, p), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, p):
df_dy, df_dp = fun_jac(x, y, p)
return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, p):
dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
np.asarray(dbc_dp, dtype))
if S is None:
fun_wrapped = fun_p
else:
def fun_wrapped(x, y, p):
f = fun_p(x, y, p)
if x[0] == a:
f[:, 0] = np.dot(D, f[:, 0])
f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
else:
f += np.dot(S, y) / (x - a)
return f
if fun_jac is not None:
if S is None:
fun_jac_wrapped = fun_jac_p
else:
Sr = S[:, :, np.newaxis]
def fun_jac_wrapped(x, y, p):
df_dy, df_dp = fun_jac_p(x, y, p)
if x[0] == a:
df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
df_dy[:, :, 1:] += Sr / (x[1:] - a)
else:
df_dy += Sr / (x - a)
return df_dy, df_dp
return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
tol=1e-3, max_nodes=1000, verbose=0):
"""Solve a boundary-value problem for a system of ODEs.
This function numerically solves a first order system of ODEs subject to
two-point boundary conditions::
dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
bc(y(a), y(b), p) = 0
Here x is a 1-dimensional independent variable, y(x) is a n-dimensional
vector-valued function and p is a k-dimensional vector of unknown
parameters which is to be found along with y(x). For the problem to be
determined there must be n + k boundary conditions, i.e. bc must be
(n + k)-dimensional function.
The last singular term in the right-hand side of the system is optional.
It is defined by an n-by-n matrix S, such that the solution must satisfy
S y(a) = 0. This condition will be forced during iterations, so it must not
contradict boundary conditions. See [2]_ for the explanation how this term
is handled when solving BVPs numerically.
Problems in a complex domain can be solved as well. In this case y and p
are considered to be complex, and f and bc are assumed to be complex-valued
functions, but x stays real. Note that f and bc must be complex
differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you
should rewrite your problem for real and imaginary parts separately. To
solve a problem in a complex domain, pass an initial guess for y with a
complex data type (see below).
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(x, y)``,
or ``fun(x, y, p)`` if parameters are present. All arguments are
ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
return value must be an array with shape (n, m) and with the same
layout as ``y``.
bc : callable
Function evaluating residuals of the boundary conditions. The calling
signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
and ``p`` with shape (k,). The return value must be an array with
shape (n + k,).
x : array_like, shape (m,)
Initial mesh. Must be a strictly increasing sequence of real numbers
with ``x[0]=a`` and ``x[-1]=b``.
y : array_like, shape (n, m)
Initial guess for the function values at the mesh nodes, i-th column
corresponds to ``x[i]``. For problems in a complex domain pass `y`
with a complex data type (even if the initial guess is purely real).
p : array_like with shape (k,) or None, optional
Initial guess for the unknown parameters. If None (default), it is
assumed that the problem doesn't depend on any parameters.
S : array_like with shape (n, n) or None
Matrix defining the singular term. If None (default), the problem is
solved without the singular term.
fun_jac : callable or None, optional
Function computing derivatives of f with respect to y and p. The
calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
parameters are present. The return must contain 1 or 2 elements in the
following order:
* df_dy : array_like with shape (n, n, m) where an element
(i, j, q) equals to d f_i(x_q, y_q, p) / d (y_q)_j.
* df_dp : array_like with shape (n, k, m) where an element
(i, j, q) equals to d f_i(x_q, y_q, p) / d p_j.
Here q numbers nodes at which x and y are defined, whereas i and j
number vector components. If the problem is solved without unknown
parameters df_dp should not be returned.
If `fun_jac` is None (default), the derivatives will be estimated
by the forward finite differences.
bc_jac : callable or None, optional
Function computing derivatives of bc with respect to ya, yb and p.
The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
if parameters are present. The return must contain 2 or 3 elements in
the following order:
* dbc_dya : array_like with shape (n, n) where an element (i, j)
equals to d bc_i(ya, yb, p) / d ya_j.
* dbc_dyb : array_like with shape (n, n) where an element (i, j)
equals to d bc_i(ya, yb, p) / d yb_j.
* dbc_dp : array_like with shape (n, k) where an element (i, j)
equals to d bc_i(ya, yb, p) / d p_j.
If the problem is solved without unknown parameters dbc_dp should not
be returned.
If `bc_jac` is None (default), the derivatives will be estimated by
the forward finite differences.
tol : float, optional
Desired tolerance of the solution. If we define ``r = y' - f(x, y)``
where y is the found solution, then the solver tries to achieve on each
mesh interval ``norm(r / (1 + abs(f))) < tol``, where ``norm`` is
estimated in a root mean squared sense (using a numerical quadrature
formula). Default is 1e-3.
max_nodes : int, optional
Maximum allowed number of the mesh nodes. If exceeded, the algorithm
terminates. Default is 1000.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations.
Returns
-------
Bunch object with the following fields defined:
sol : PPoly
Found solution for y as `scipy.interpolate.PPoly` instance, a C1
continuous cubic spline.
p : ndarray or None, shape (k,)
Found parameters. None, if the parameters were not present in the
problem.
x : ndarray, shape (m,)
Nodes of the final mesh.
y : ndarray, shape (n, m)
Solution values at the mesh nodes.
yp : ndarray, shape (n, m)
Solution derivatives at the mesh nodes.
rms_residuals : ndarray, shape (m - 1,)
RMS values of the relative residuals over each mesh interval (see the
description of `tol` parameter).
niter : int
Number of completed iterations.
status : int
Reason for algorithm termination:
* 0: The algorithm converged to the desired accuracy.
* 1: The maximum number of mesh nodes is exceeded.
* 2: A singular Jacobian encountered when solving the collocation
system.
message : string
Verbal description of the termination reason.
success : bool
True if the algorithm converged to the desired accuracy (``status=0``).
Notes
-----
This function implements a 4-th order collocation algorithm with the
control of residuals similar to [1]_. A collocation system is solved
by a damped Newton method with an affine-invariant criterion function as
described in [3]_.
Note that in [1]_ integral residuals are defined without normalization
by interval lengths. So their definition is different by a multiplier of
h**0.5 (h is an interval length) from the definition used here.
.. versionadded:: 0.18.0
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
.. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
Solver".
.. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations".
.. [4] `Cauchy-Riemann equations
<https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
Wikipedia.
Examples
--------
In the first example we solve Bratu's problem::
y'' + k * exp(y) = 0
y(0) = y(1) = 0
for k = 1.
We rewrite the equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -exp(y1)
>>> def fun(x, y):
... return np.vstack((y[1], -np.exp(y[0])))
Implement evaluation of the boundary condition residuals:
>>> def bc(ya, yb):
... return np.array([ya[0], yb[0]])
Define the initial mesh with 5 nodes:
>>> x = np.linspace(0, 1, 5)
This problem is known to have two solutions. To obtain both of them we
use two different initial guesses for y. We denote them by subscripts
a and b.
>>> y_a = np.zeros((2, x.size))
>>> y_b = np.zeros((2, x.size))
>>> y_b[0] = 3
Now we are ready to run the solver.
>>> from scipy.integrate import solve_bvp
>>> res_a = solve_bvp(fun, bc, x, y_a)
>>> res_b = solve_bvp(fun, bc, x, y_b)
Let's plot the two found solutions. We take advantage of having the
solution in a spline form to produce a smooth plot.
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot_a = res_a.sol(x_plot)[0]
>>> y_plot_b = res_b.sol(x_plot)[0]
>>> import matplotlib.pyplot as plt
>>> plt.plot(x_plot, y_plot_a, label='y_a')
>>> plt.plot(x_plot, y_plot_b, label='y_b')
>>> plt.legend()
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
We see that the two solutions have similar shape, but differ in scale
significantly.
In the second example we solve a simple Sturm-Liouville problem::
y'' + k**2 * y = 0
y(0) = y(1) = 0
It is known that a non-trivial solution y = A * sin(k * x) is possible for
k = pi * n, where n is an integer. To establish the normalization constant
A = 1 we add a boundary condition::
y'(0) = k
Again we rewrite our equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -k**2 * y1
>>> def fun(x, y, p):
... k = p[0]
... return np.vstack((y[1], -k**2 * y[0]))
Note that parameters p are passed as a vector (with one element in our
case).
Implement the boundary conditions:
>>> def bc(ya, yb, p):
... k = p[0]
... return np.array([ya[0], yb[0], ya[1] - k])
Set up the initial mesh and guess for y. We aim to find the solution for
k = 2 * pi, to achieve that we set values of y to approximately follow
sin(2 * pi * x):
>>> x = np.linspace(0, 1, 5)
>>> y = np.zeros((2, x.size))
>>> y[0, 1] = 1
>>> y[0, 3] = -1
Run the solver with 6 as an initial guess for k.
>>> sol = solve_bvp(fun, bc, x, y, p=[6])
We see that the found k is approximately correct:
>>> sol.p[0]
6.28329460046
And finally plot the solution to see the anticipated sinusoid:
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot = sol.sol(x_plot)[0]
>>> plt.plot(x_plot, y_plot)
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
"""
x = np.asarray(x, dtype=float)
if x.ndim != 1:
raise ValueError("`x` must be 1 dimensional.")
h = np.diff(x)
if np.any(h <= 0):
raise ValueError("`x` must be strictly increasing.")
a = x[0]
y = np.asarray(y)
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
if y.ndim != 2:
raise ValueError("`y` must be 2 dimensional.")
if y.shape[1] != x.shape[0]:
raise ValueError("`y` is expected to have {} columns, but actually "
"has {}.".format(x.shape[0], y.shape[1]))
if p is None:
p = np.array([])
else:
p = np.asarray(p, dtype=dtype)
if p.ndim != 1:
raise ValueError("`p` must be 1 dimensional.")
if tol < 100 * EPS:
warn("`tol` is too low, setting to {:.2e}".format(100 * EPS))
tol = 100 * EPS
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
n = y.shape[0]
k = p.shape[0]
if S is not None:
S = np.asarray(S, dtype=dtype)
if S.shape != (n, n):
raise ValueError("`S` is expected to have shape {}, "
"but actually has {}".format((n, n), S.shape))
# Compute I - S^+ S to impose necessary boundary conditions.
B = np.identity(n) - np.dot(pinv(S), S)
y[:, 0] = np.dot(B, y[:, 0])
# Compute (I - S)^+ to correct derivatives at x=a.
D = pinv(np.identity(n) - S)
else:
B = None
D = None
fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions(
fun, bc, fun_jac, bc_jac, k, a, S, D, dtype)
f = fun_wrapped(x, y, p)
if f.shape != y.shape:
raise ValueError("`fun` return is expected to have shape {}, "
"but actually has {}.".format(y.shape, f.shape))
bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
if bc_res.shape != (n + k,):
raise ValueError("`bc` return is expected to have shape {}, "
"but actually has {}.".format((n + k,), bc_res.shape))
status = 0
iteration = 0
if verbose == 2:
print_iteration_header()
while True:
m = x.shape[0]
col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped,
fun_jac_wrapped, bc_jac_wrapped, x, h)
y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys,
y, p, B, tol)
iteration += 1
col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y,
p, x, h)
# This relation is not trivial, but can be verified.
r_middle = 1.5 * col_res / h
sol = create_spline(y, f, x, h)
rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p,
r_middle, f_middle)
max_rms_res = np.max(rms_res)
if singular:
status = 2
break
insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol))
insert_2, = np.nonzero(rms_res >= 100 * tol)
nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0]
if m + nodes_added > max_nodes:
status = 1
if verbose == 2:
nodes_added = "({})".format(nodes_added)
print_iteration_progress(iteration, max_rms_res, m,
nodes_added)
break
if verbose == 2:
print_iteration_progress(iteration, max_rms_res, m, nodes_added)
if nodes_added > 0:
x = modify_mesh(x, insert_1, insert_2)
h = np.diff(x)
y = sol(x)
else:
status = 0
break
if verbose > 0:
if status == 0:
print("Solved in {} iterations, number of nodes {}, "
"maximum relative residual {:.2e}."
.format(iteration, x.shape[0], max_rms_res))
elif status == 1:
print("Number of nodes is exceeded after iteration {}, "
"maximum relative residual {:.2e}."
.format(iteration, max_rms_res))
elif status == 2:
print("Singular Jacobian encountered when solving the collocation "
"system on iteration {}, maximum relative residual {:.2e}."
.format(iteration, max_rms_res))
if p.size == 0:
p = None
return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res,
niter=iteration, status=status,
message=TERMINATION_MESSAGES[status], success=status == 0)
| bsd-3-clause |
brian-team/brian2cuda | examples/compartmental/hodgkin_huxley_1952_cpp.py | 1 | 2435 | '''
Hodgkin-Huxley equations (1952).
'''
import os
import matplotlib
matplotlib.use('Agg')
from brian2 import *
name = os.path.basename(__file__).replace('.py', '')
codefolder = os.path.join('code', name)
print('running example {}'.format(name))
print('compiling model in {}'.format(codefolder))
set_device('cpp_standalone', build_on_run=False) # multiple runs require this change (see below)
morpho = Cylinder(length=10*cm, diameter=2*238*um, n=1000, type='axon')
El = 10.613*mV
ENa = 115*mV
EK = -12*mV
gl = 0.3*msiemens/cm**2
gNa0 = 120*msiemens/cm**2
gK = 36*msiemens/cm**2
# Typical equations
eqs = '''
# The same equations for the whole neuron, but possibly different parameter values
# distributed transmembrane current
Im = gl * (El-v) + gNa * m**3 * h * (ENa-v) + gK * n**4 * (EK-v) : amp/meter**2
I : amp (point current) # applied current
dm/dt = alpham * (1-m) - betam * m : 1
dn/dt = alphan * (1-n) - betan * n : 1
dh/dt = alphah * (1-h) - betah * h : 1
alpham = (0.1/mV) * (-v+25*mV) / (exp((-v+25*mV) / (10*mV)) - 1)/ms : Hz
betam = 4 * exp(-v/(18*mV))/ms : Hz
alphah = 0.07 * exp(-v/(20*mV))/ms : Hz
betah = 1/(exp((-v+30*mV) / (10*mV)) + 1)/ms : Hz
alphan = (0.01/mV) * (-v+10*mV) / (exp((-v+10*mV) / (10*mV)) - 1)/ms : Hz
betan = 0.125*exp(-v/(80*mV))/ms : Hz
gNa : siemens/meter**2
'''
neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=1*uF/cm**2,
Ri=35.4*ohm*cm, method="exponential_euler")
neuron.v = 0*mV
neuron.h = 1
neuron.m = 0
neuron.n = .5
neuron.I = 0
neuron.gNa = gNa0
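# Remove sodium channels from the second half (5-10 cm) of the axon, making it
# passive: the action potential cannot actively propagate past the midpoint.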
neuron[5*cm:10*cm].gNa = 0*siemens/cm**2
M = StateMonitor(neuron, 'v', record=True)
run(50*ms, report='text')
neuron.I[0] = 1*uA # current injection at one end
run(3*ms)
neuron.I = 0*amp
run(100*ms, report='text', profile=True)
print(profiling_summary())
# cf. https://brian2.readthedocs.io/en/stable/user/computation.html#multiple-run-calls
device.build( directory=codefolder, compile = True, run = True, debug=False)
for i in range(75, 125, 1):
plot(cumsum(neuron.length)/cm, i+(1./60)*M.v[:, i*5]/mV, 'k')
yticks([])
ylabel('Time [major] v (mV) [minor]')
xlabel('Position (cm)')
axis('tight')
#show()
plotfolder = 'plots'
if not os.path.exists(plotfolder):
os.mkdir(plotfolder)
plotpath = os.path.join(plotfolder, '{}.png'.format(name))
savefig(plotpath)
print('plot saved in {}'.format(plotpath))
print('the generated model in {} needs to be removed manually if wanted'.format(codefolder))
| gpl-2.0 |